Dataset schema (column name, dtype, value range):

  repo_name        stringclasses   6 distinct values
  pr_number        int64           512 to 78.9k
  pr_title         stringlengths   3 to 144 chars
  pr_description   stringlengths   0 to 30.3k chars
  author           stringlengths   2 to 21 chars
  date_created     timestamp[ns, tz=UTC]
  date_merged      timestamp[ns, tz=UTC]
  previous_commit  stringlengths   40 chars (commit SHA)
  pr_commit        stringlengths   40 chars (commit SHA)
  query            stringlengths   17 to 30.4k chars
  filepath         stringlengths   9 to 210 chars
  before_content   stringlengths   0 to 112M chars
  after_content    stringlengths   0 to 112M chars
  label            int64           -1 to 1
dotnet/runtime
66,280
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator
Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
stephentoub
2022-03-07T03:21:23Z
2022-03-07T21:52:38Z
30d66a2c350b4e4a6b5cd5b8aa961b3b23610fc1
457b1ffab6d99b4b9db0e4579c2be4624ba1b3aa
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator. Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
./src/tests/Loader/classloader/regressions/dev10_813331/Case1.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

//Non-generic classes A and B

public class HelloWorld
{
    public static int Main()
    {
        try
        {
            B b = new B(GetName());
        }
        catch (System.Exception)
        {
            System.Console.WriteLine("PASS");
            return 100;
        }

        System.Console.WriteLine("FAIL");
        return -1;
    }

    public static string GetName()
    {
        throw new System.Exception();
    }
}

public class B : A
{
    public B(string name)
    {
        System.Console.WriteLine("Creating object B({0})", name);
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

//Non-generic classes A and B

public class HelloWorld
{
    public static int Main()
    {
        try
        {
            B b = new B(GetName());
        }
        catch (System.Exception)
        {
            System.Console.WriteLine("PASS");
            return 100;
        }

        System.Console.WriteLine("FAIL");
        return -1;
    }

    public static string GetName()
    {
        throw new System.Exception();
    }
}

public class B : A
{
    public B(string name)
    {
        System.Console.WriteLine("Creating object B({0})", name);
    }
}
-1
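The PR description repeated in these records states that lookbehinds are implemented via RightToLeft matching. A minimal C# sketch of the observable behavior of both features, using only public Regex APIs (the pattern and inputs are illustrative, not taken from the PR):

using System;
using System.Text.RegularExpressions;

class RightToLeftDemo
{
    static void Main()
    {
        // RightToLeft scans the input from end to start, so the last
        // occurrence (in left-to-right terms) is found first.
        var rtl = new Regex(@"\d+", RegexOptions.RightToLeft);
        Console.WriteLine(rtl.Match("ab12cd34").Value); // prints "34"

        // A lookbehind (?<=...) matches text to the left of the current
        // position, which is why it shares the right-to-left machinery.
        var price = new Regex(@"(?<=\$)\d+");
        Console.WriteLine(price.Match("cost: $42").Value); // prints "42"
    }
}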
dotnet/runtime
66,280
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator
Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
stephentoub
2022-03-07T03:21:23Z
2022-03-07T21:52:38Z
30d66a2c350b4e4a6b5cd5b8aa961b3b23610fc1
457b1ffab6d99b4b9db0e4579c2be4624ba1b3aa
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator. Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest1405/Generated1405.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="Generated1405.il" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\TestFramework\TestFramework.csproj" />
  </ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="Generated1405.il" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\TestFramework\TestFramework.csproj" />
  </ItemGroup>
</Project>
-1
dotnet/runtime
66,280
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator
Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
stephentoub
2022-03-07T03:21:23Z
2022-03-07T21:52:38Z
30d66a2c350b4e4a6b5cd5b8aa961b3b23610fc1
457b1ffab6d99b4b9db0e4579c2be4624ba1b3aa
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator. Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
./src/libraries/Microsoft.Extensions.DependencyInjection.Specification.Tests/src/Fakes/TransientFactoryService.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

namespace Microsoft.Extensions.DependencyInjection.Specification.Fakes
{
    public class TransientFactoryService : IFactoryService
    {
        public IFakeService FakeService { get; set; }

        public int Value { get; set; }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

namespace Microsoft.Extensions.DependencyInjection.Specification.Fakes
{
    public class TransientFactoryService : IFactoryService
    {
        public IFakeService FakeService { get; set; }

        public int Value { get; set; }
    }
}
-1
dotnet/runtime
66,280
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator
Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
stephentoub
2022-03-07T03:21:23Z
2022-03-07T21:52:38Z
30d66a2c350b4e4a6b5cd5b8aa961b3b23610fc1
457b1ffab6d99b4b9db0e4579c2be4624ba1b3aa
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator. Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
./src/libraries/System.IO.FileSystem.DriveInfo/ref/System.IO.FileSystem.DriveInfo.csproj
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
    <Nullable>enable</Nullable>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="System.IO.FileSystem.DriveInfo.cs" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\..\System.Runtime\ref\System.Runtime.csproj" />
  </ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
    <Nullable>enable</Nullable>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="System.IO.FileSystem.DriveInfo.cs" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\..\System.Runtime\ref\System.Runtime.csproj" />
  </ItemGroup>
</Project>
-1
dotnet/runtime
66,280
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator
Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
stephentoub
2022-03-07T03:21:23Z
2022-03-07T21:52:38Z
30d66a2c350b4e4a6b5cd5b8aa961b3b23610fc1
457b1ffab6d99b4b9db0e4579c2be4624ba1b3aa
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator. Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest826/Generated826.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="Generated826.il" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\TestFramework\TestFramework.csproj" />
  </ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="Generated826.il" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\TestFramework\TestFramework.csproj" />
  </ItemGroup>
</Project>
-1
dotnet/runtime
66,280
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator
Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
stephentoub
2022-03-07T03:21:23Z
2022-03-07T21:52:38Z
30d66a2c350b4e4a6b5cd5b8aa961b3b23610fc1
457b1ffab6d99b4b9db0e4579c2be4624ba1b3aa
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator. Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
./src/mono/mono/mini/ee.h
/*
 * Licensed to the .NET Foundation under one or more agreements.
 * The .NET Foundation licenses this file to you under the MIT license.
 */

#include <config.h>
#include <mono/metadata/metadata.h>
#include <mono/metadata/object.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-error.h>
#include <mono/utils/mono-publib.h>
#include <mono/eglib/glib.h>

#ifndef __MONO_EE_H__
#define __MONO_EE_H__

#define MONO_EE_API_VERSION 0x16

typedef struct _MonoInterpStackIter MonoInterpStackIter;

/* Needed for stack allocation */
struct _MonoInterpStackIter {
    gpointer dummy [8];
};

typedef gpointer MonoInterpFrameHandle;

#define MONO_EE_CALLBACKS \
    MONO_EE_CALLBACK (void, entry_from_trampoline, (gpointer ccontext, gpointer imethod)) \
    MONO_EE_CALLBACK (void, to_native_trampoline, (gpointer addr, gpointer ccontext)) \
    MONO_EE_CALLBACK (gpointer, create_method_pointer, (MonoMethod *method, gboolean compile, MonoError *error)) \
    MONO_EE_CALLBACK (MonoFtnDesc*, create_method_pointer_llvmonly, (MonoMethod *method, gboolean unbox, MonoError *error)) \
    MONO_EE_CALLBACK (void, free_method, (MonoMethod *method)) \
    MONO_EE_CALLBACK (MonoObject*, runtime_invoke, (MonoMethod *method, void *obj, void **params, MonoObject **exc, MonoError *error)) \
    MONO_EE_CALLBACK (void, init_delegate, (MonoDelegate *del, MonoDelegateTrampInfo **out_info, MonoError *error)) \
    MONO_EE_CALLBACK (void, delegate_ctor, (MonoObjectHandle this_obj, MonoObjectHandle target, gpointer addr, MonoError *error)) \
    MONO_EE_CALLBACK (void, set_resume_state, (MonoJitTlsData *jit_tls, MonoObject *ex, MonoJitExceptionInfo *ei, MonoInterpFrameHandle interp_frame, gpointer handler_ip)) \
    MONO_EE_CALLBACK (void, get_resume_state, (const MonoJitTlsData *jit_tls, gboolean *has_resume_state, MonoInterpFrameHandle *interp_frame, gpointer *handler_ip)) \
    MONO_EE_CALLBACK (gboolean, run_finally, (StackFrameInfo *frame, int clause_index, gpointer handler_ip, gpointer handler_ip_end)) \
    MONO_EE_CALLBACK (gboolean, run_filter, (StackFrameInfo *frame, MonoException *ex, int clause_index, gpointer handler_ip, gpointer handler_ip_end)) \
    MONO_EE_CALLBACK (gboolean, run_clause_with_il_state, (gpointer il_state, int clause_index, gpointer handler_ip, gpointer handler_ip_end, MonoObject *ex, gboolean *filtered, MonoExceptionEnum clause_type)) \
    MONO_EE_CALLBACK (void, frame_iter_init, (MonoInterpStackIter *iter, gpointer interp_exit_data)) \
    MONO_EE_CALLBACK (gboolean, frame_iter_next, (MonoInterpStackIter *iter, StackFrameInfo *frame)) \
    MONO_EE_CALLBACK (MonoJitInfo*, find_jit_info, (MonoMethod *method)) \
    MONO_EE_CALLBACK (void, set_breakpoint, (MonoJitInfo *jinfo, gpointer ip)) \
    MONO_EE_CALLBACK (void, clear_breakpoint, (MonoJitInfo *jinfo, gpointer ip)) \
    MONO_EE_CALLBACK (MonoJitInfo*, frame_get_jit_info, (MonoInterpFrameHandle frame)) \
    MONO_EE_CALLBACK (gpointer, frame_get_ip, (MonoInterpFrameHandle frame)) \
    MONO_EE_CALLBACK (gpointer, frame_get_arg, (MonoInterpFrameHandle frame, int pos)) \
    MONO_EE_CALLBACK (gpointer, frame_get_local, (MonoInterpFrameHandle frame, int pos)) \
    MONO_EE_CALLBACK (gpointer, frame_get_this, (MonoInterpFrameHandle frame)) \
    MONO_EE_CALLBACK (void, frame_arg_to_data, (MonoInterpFrameHandle frame, MonoMethodSignature *sig, int index, gpointer data)) \
    MONO_EE_CALLBACK (void, data_to_frame_arg, (MonoInterpFrameHandle frame, MonoMethodSignature *sig, int index, gconstpointer data)) \
    MONO_EE_CALLBACK (gpointer, frame_arg_to_storage, (MonoInterpFrameHandle frame, MonoMethodSignature *sig, int index)) \
    MONO_EE_CALLBACK (MonoInterpFrameHandle, frame_get_parent, (MonoInterpFrameHandle frame)) \
    MONO_EE_CALLBACK (void, start_single_stepping, (void)) \
    MONO_EE_CALLBACK (void, stop_single_stepping, (void)) \
    MONO_EE_CALLBACK (void, free_context, (gpointer)) \
    MONO_EE_CALLBACK (void, set_optimizations, (guint32)) \
    MONO_EE_CALLBACK (void, invalidate_transformed, (void)) \
    MONO_EE_CALLBACK (void, cleanup, (void)) \
    MONO_EE_CALLBACK (void, mark_stack, (gpointer thread_info, GcScanFunc func, gpointer gc_data, gboolean precise)) \
    MONO_EE_CALLBACK (void, jit_info_foreach, (InterpJitInfoFunc func, gpointer user_data)) \
    MONO_EE_CALLBACK (gboolean, sufficient_stack, (gsize size)) \
    MONO_EE_CALLBACK (void, entry_llvmonly, (gpointer res, gpointer *args, gpointer imethod)) \
    MONO_EE_CALLBACK (gpointer, get_interp_method, (MonoMethod *method, MonoError *error)) \
    MONO_EE_CALLBACK (MonoJitInfo*, compile_interp_method, (MonoMethod *method, MonoError *error)) \

typedef struct _MonoEECallbacks {
#undef MONO_EE_CALLBACK
#define MONO_EE_CALLBACK(ret, name, sig) ret (*name) sig;
    MONO_EE_CALLBACKS
} MonoEECallbacks;

#endif /* __MONO_EE_H__ */
/*
 * Licensed to the .NET Foundation under one or more agreements.
 * The .NET Foundation licenses this file to you under the MIT license.
 */

#include <config.h>
#include <mono/metadata/metadata.h>
#include <mono/metadata/object.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-error.h>
#include <mono/utils/mono-publib.h>
#include <mono/eglib/glib.h>

#ifndef __MONO_EE_H__
#define __MONO_EE_H__

#define MONO_EE_API_VERSION 0x16

typedef struct _MonoInterpStackIter MonoInterpStackIter;

/* Needed for stack allocation */
struct _MonoInterpStackIter {
    gpointer dummy [8];
};

typedef gpointer MonoInterpFrameHandle;

#define MONO_EE_CALLBACKS \
    MONO_EE_CALLBACK (void, entry_from_trampoline, (gpointer ccontext, gpointer imethod)) \
    MONO_EE_CALLBACK (void, to_native_trampoline, (gpointer addr, gpointer ccontext)) \
    MONO_EE_CALLBACK (gpointer, create_method_pointer, (MonoMethod *method, gboolean compile, MonoError *error)) \
    MONO_EE_CALLBACK (MonoFtnDesc*, create_method_pointer_llvmonly, (MonoMethod *method, gboolean unbox, MonoError *error)) \
    MONO_EE_CALLBACK (void, free_method, (MonoMethod *method)) \
    MONO_EE_CALLBACK (MonoObject*, runtime_invoke, (MonoMethod *method, void *obj, void **params, MonoObject **exc, MonoError *error)) \
    MONO_EE_CALLBACK (void, init_delegate, (MonoDelegate *del, MonoDelegateTrampInfo **out_info, MonoError *error)) \
    MONO_EE_CALLBACK (void, delegate_ctor, (MonoObjectHandle this_obj, MonoObjectHandle target, gpointer addr, MonoError *error)) \
    MONO_EE_CALLBACK (void, set_resume_state, (MonoJitTlsData *jit_tls, MonoObject *ex, MonoJitExceptionInfo *ei, MonoInterpFrameHandle interp_frame, gpointer handler_ip)) \
    MONO_EE_CALLBACK (void, get_resume_state, (const MonoJitTlsData *jit_tls, gboolean *has_resume_state, MonoInterpFrameHandle *interp_frame, gpointer *handler_ip)) \
    MONO_EE_CALLBACK (gboolean, run_finally, (StackFrameInfo *frame, int clause_index, gpointer handler_ip, gpointer handler_ip_end)) \
    MONO_EE_CALLBACK (gboolean, run_filter, (StackFrameInfo *frame, MonoException *ex, int clause_index, gpointer handler_ip, gpointer handler_ip_end)) \
    MONO_EE_CALLBACK (gboolean, run_clause_with_il_state, (gpointer il_state, int clause_index, gpointer handler_ip, gpointer handler_ip_end, MonoObject *ex, gboolean *filtered, MonoExceptionEnum clause_type)) \
    MONO_EE_CALLBACK (void, frame_iter_init, (MonoInterpStackIter *iter, gpointer interp_exit_data)) \
    MONO_EE_CALLBACK (gboolean, frame_iter_next, (MonoInterpStackIter *iter, StackFrameInfo *frame)) \
    MONO_EE_CALLBACK (MonoJitInfo*, find_jit_info, (MonoMethod *method)) \
    MONO_EE_CALLBACK (void, set_breakpoint, (MonoJitInfo *jinfo, gpointer ip)) \
    MONO_EE_CALLBACK (void, clear_breakpoint, (MonoJitInfo *jinfo, gpointer ip)) \
    MONO_EE_CALLBACK (MonoJitInfo*, frame_get_jit_info, (MonoInterpFrameHandle frame)) \
    MONO_EE_CALLBACK (gpointer, frame_get_ip, (MonoInterpFrameHandle frame)) \
    MONO_EE_CALLBACK (gpointer, frame_get_arg, (MonoInterpFrameHandle frame, int pos)) \
    MONO_EE_CALLBACK (gpointer, frame_get_local, (MonoInterpFrameHandle frame, int pos)) \
    MONO_EE_CALLBACK (gpointer, frame_get_this, (MonoInterpFrameHandle frame)) \
    MONO_EE_CALLBACK (void, frame_arg_to_data, (MonoInterpFrameHandle frame, MonoMethodSignature *sig, int index, gpointer data)) \
    MONO_EE_CALLBACK (void, data_to_frame_arg, (MonoInterpFrameHandle frame, MonoMethodSignature *sig, int index, gconstpointer data)) \
    MONO_EE_CALLBACK (gpointer, frame_arg_to_storage, (MonoInterpFrameHandle frame, MonoMethodSignature *sig, int index)) \
    MONO_EE_CALLBACK (MonoInterpFrameHandle, frame_get_parent, (MonoInterpFrameHandle frame)) \
    MONO_EE_CALLBACK (void, start_single_stepping, (void)) \
    MONO_EE_CALLBACK (void, stop_single_stepping, (void)) \
    MONO_EE_CALLBACK (void, free_context, (gpointer)) \
    MONO_EE_CALLBACK (void, set_optimizations, (guint32)) \
    MONO_EE_CALLBACK (void, invalidate_transformed, (void)) \
    MONO_EE_CALLBACK (void, cleanup, (void)) \
    MONO_EE_CALLBACK (void, mark_stack, (gpointer thread_info, GcScanFunc func, gpointer gc_data, gboolean precise)) \
    MONO_EE_CALLBACK (void, jit_info_foreach, (InterpJitInfoFunc func, gpointer user_data)) \
    MONO_EE_CALLBACK (gboolean, sufficient_stack, (gsize size)) \
    MONO_EE_CALLBACK (void, entry_llvmonly, (gpointer res, gpointer *args, gpointer imethod)) \
    MONO_EE_CALLBACK (gpointer, get_interp_method, (MonoMethod *method, MonoError *error)) \
    MONO_EE_CALLBACK (MonoJitInfo*, compile_interp_method, (MonoMethod *method, MonoError *error)) \

typedef struct _MonoEECallbacks {
#undef MONO_EE_CALLBACK
#define MONO_EE_CALLBACK(ret, name, sig) ret (*name) sig;
    MONO_EE_CALLBACKS
} MonoEECallbacks;

#endif /* __MONO_EE_H__ */
-1
dotnet/runtime
66,280
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator
Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
stephentoub
2022-03-07T03:21:23Z
2022-03-07T21:52:38Z
30d66a2c350b4e4a6b5cd5b8aa961b3b23610fc1
457b1ffab6d99b4b9db0e4579c2be4624ba1b3aa
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator. Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
./src/libraries/System.ComponentModel.TypeConverter/src/System/ComponentModel/WeakHashtable.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Collections;
using System.Collections.Generic;

namespace System.ComponentModel
{
    /// <summary>
    /// This is a hashtable that stores object keys as weak references.
    /// It monitors memory usage and will periodically scavenge the
    /// hash table to clean out dead references.
    /// </summary>
    internal sealed class WeakHashtable : Hashtable
    {
        private static readonly IEqualityComparer s_comparer = new WeakKeyComparer();

        private long _lastGlobalMem;
        private int _lastHashCount;

        internal WeakHashtable() : base(s_comparer)
        {
        }

        /// <summary>
        /// Override of Item that wraps a weak reference around the
        /// key and performs a scavenge.
        /// </summary>
        public void SetWeak(object key, object value)
        {
            ScavengeKeys();
            this[new EqualityWeakReference(key)] = value;
        }

        /// <summary>
        /// This method checks to see if it is necessary to
        /// scavenge keys, and if it is it performs a scan
        /// of all keys to see which ones are no longer valid.
        /// To determine if we need to scavenge keys we need to
        /// try to track the current GC memory. Our rule of
        /// thumb is that if GC memory is decreasing and our
        /// key count is constant we need to scavenge. We
        /// will need to see if this is too often for extreme
        /// use cases like the CompactFramework (they add
        /// custom type data for every object at design time).
        /// </summary>
        private void ScavengeKeys()
        {
            int hashCount = Count;
            if (hashCount == 0)
            {
                return;
            }

            if (_lastHashCount == 0)
            {
                _lastHashCount = hashCount;
                return;
            }

            long globalMem = GC.GetTotalMemory(false);
            if (_lastGlobalMem == 0)
            {
                _lastGlobalMem = globalMem;
                return;
            }

            float memDelta = (globalMem - _lastGlobalMem) / (float)_lastGlobalMem;
            float hashDelta = (hashCount - _lastHashCount) / (float)_lastHashCount;

            if (memDelta < 0 && hashDelta >= 0)
            {
                // Perform a scavenge through our keys, looking
                // for dead references.
                List<object>? cleanupList = null;
                foreach (object o in Keys)
                {
                    if (o is WeakReference wr && !wr.IsAlive)
                    {
                        if (cleanupList == null)
                        {
                            cleanupList = new List<object>();
                        }
                        cleanupList.Add(wr);
                    }
                }

                if (cleanupList != null)
                {
                    foreach (object o in cleanupList)
                    {
                        Remove(o);
                    }
                }
            }

            _lastGlobalMem = globalMem;
            _lastHashCount = hashCount;
        }

        private sealed class WeakKeyComparer : IEqualityComparer
        {
            bool IEqualityComparer.Equals(object? x, object? y)
            {
                if (x == null)
                {
                    return y == null;
                }

                if (y != null && x.GetHashCode() == y.GetHashCode())
                {
                    if (x is WeakReference wX)
                    {
                        if (!wX.IsAlive)
                        {
                            return false;
                        }
                        x = wX.Target;
                    }

                    if (y is WeakReference wY)
                    {
                        if (!wY.IsAlive)
                        {
                            return false;
                        }
                        y = wY.Target;
                    }

                    return object.ReferenceEquals(x, y);
                }

                return false;
            }

            int IEqualityComparer.GetHashCode(object obj) => obj.GetHashCode();
        }

        /// <summary>
        /// A subclass of WeakReference that overrides GetHashCode and
        /// Equals so that the weak reference returns the same equality
        /// semantics as the object it wraps. This will always return
        /// the object's hash code and will return True for a Equals
        /// comparison of the object it is wrapping. If the object
        /// it is wrapping has finalized, Equals always returns false.
        /// </summary>
        private sealed class EqualityWeakReference : WeakReference
        {
            private readonly int _hashCode;

            internal EqualityWeakReference(object o) : base(o)
            {
                _hashCode = o.GetHashCode();
            }

            public override bool Equals(object? o)
            {
                if (o?.GetHashCode() != _hashCode)
                {
                    return false;
                }

                if (o == this || (IsAlive && ReferenceEquals(o, Target)))
                {
                    return true;
                }

                return false;
            }

            public override int GetHashCode() => _hashCode;
        }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Collections;
using System.Collections.Generic;

namespace System.ComponentModel
{
    /// <summary>
    /// This is a hashtable that stores object keys as weak references.
    /// It monitors memory usage and will periodically scavenge the
    /// hash table to clean out dead references.
    /// </summary>
    internal sealed class WeakHashtable : Hashtable
    {
        private static readonly IEqualityComparer s_comparer = new WeakKeyComparer();

        private long _lastGlobalMem;
        private int _lastHashCount;

        internal WeakHashtable() : base(s_comparer)
        {
        }

        /// <summary>
        /// Override of Item that wraps a weak reference around the
        /// key and performs a scavenge.
        /// </summary>
        public void SetWeak(object key, object value)
        {
            ScavengeKeys();
            this[new EqualityWeakReference(key)] = value;
        }

        /// <summary>
        /// This method checks to see if it is necessary to
        /// scavenge keys, and if it is it performs a scan
        /// of all keys to see which ones are no longer valid.
        /// To determine if we need to scavenge keys we need to
        /// try to track the current GC memory. Our rule of
        /// thumb is that if GC memory is decreasing and our
        /// key count is constant we need to scavenge. We
        /// will need to see if this is too often for extreme
        /// use cases like the CompactFramework (they add
        /// custom type data for every object at design time).
        /// </summary>
        private void ScavengeKeys()
        {
            int hashCount = Count;
            if (hashCount == 0)
            {
                return;
            }

            if (_lastHashCount == 0)
            {
                _lastHashCount = hashCount;
                return;
            }

            long globalMem = GC.GetTotalMemory(false);
            if (_lastGlobalMem == 0)
            {
                _lastGlobalMem = globalMem;
                return;
            }

            float memDelta = (globalMem - _lastGlobalMem) / (float)_lastGlobalMem;
            float hashDelta = (hashCount - _lastHashCount) / (float)_lastHashCount;

            if (memDelta < 0 && hashDelta >= 0)
            {
                // Perform a scavenge through our keys, looking
                // for dead references.
                List<object>? cleanupList = null;
                foreach (object o in Keys)
                {
                    if (o is WeakReference wr && !wr.IsAlive)
                    {
                        if (cleanupList == null)
                        {
                            cleanupList = new List<object>();
                        }
                        cleanupList.Add(wr);
                    }
                }

                if (cleanupList != null)
                {
                    foreach (object o in cleanupList)
                    {
                        Remove(o);
                    }
                }
            }

            _lastGlobalMem = globalMem;
            _lastHashCount = hashCount;
        }

        private sealed class WeakKeyComparer : IEqualityComparer
        {
            bool IEqualityComparer.Equals(object? x, object? y)
            {
                if (x == null)
                {
                    return y == null;
                }

                if (y != null && x.GetHashCode() == y.GetHashCode())
                {
                    if (x is WeakReference wX)
                    {
                        if (!wX.IsAlive)
                        {
                            return false;
                        }
                        x = wX.Target;
                    }

                    if (y is WeakReference wY)
                    {
                        if (!wY.IsAlive)
                        {
                            return false;
                        }
                        y = wY.Target;
                    }

                    return object.ReferenceEquals(x, y);
                }

                return false;
            }

            int IEqualityComparer.GetHashCode(object obj) => obj.GetHashCode();
        }

        /// <summary>
        /// A subclass of WeakReference that overrides GetHashCode and
        /// Equals so that the weak reference returns the same equality
        /// semantics as the object it wraps. This will always return
        /// the object's hash code and will return True for a Equals
        /// comparison of the object it is wrapping. If the object
        /// it is wrapping has finalized, Equals always returns false.
        /// </summary>
        private sealed class EqualityWeakReference : WeakReference
        {
            private readonly int _hashCode;

            internal EqualityWeakReference(object o) : base(o)
            {
                _hashCode = o.GetHashCode();
            }

            public override bool Equals(object? o)
            {
                if (o?.GetHashCode() != _hashCode)
                {
                    return false;
                }

                if (o == this || (IsAlive && ReferenceEquals(o, Target)))
                {
                    return true;
                }

                return false;
            }

            public override int GetHashCode() => _hashCode;
        }
    }
}
-1
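WeakHashtable above hand-rolls weak-keyed storage plus a memory-pressure scavenge heuristic. As an editorial aside (not code from the PR or from that file), the same goal of letting entries die with their keys is usually met today with ConditionalWeakTable, which needs no explicit scavenge pass; a minimal sketch:

using System;
using System.Runtime.CompilerServices;

class WeakCacheDemo
{
    // Values are kept alive only as long as their keys are reachable.
    static readonly ConditionalWeakTable<object, string> s_cache = new();

    static void Main()
    {
        var key = new object();
        s_cache.Add(key, "metadata for key");
        // The GC removes entries whose keys have become unreachable,
        // so there is no scavenge heuristic to tune.
        Console.WriteLine(s_cache.TryGetValue(key, out string? v) ? v : "gone");
    }
}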
dotnet/runtime
66,280
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator
Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
stephentoub
2022-03-07T03:21:23Z
2022-03-07T21:52:38Z
30d66a2c350b4e4a6b5cd5b8aa961b3b23610fc1
457b1ffab6d99b4b9db0e4579c2be4624ba1b3aa
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator. Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
./src/mono/mono/tests/verifier/valid_delegate_return_variant_iface.cs
using System;
using System.Collections.Generic;

interface IFoo {}
class Foo : IFoo {}

class Driver {
    static IEnumerable <Foo> Dele (bool b) {
        return null;
    }

    static void Main () {
        Func<bool, IEnumerable<IFoo>> dele = Dele;
        dele (true);
    }
}
using System;
using System.Collections.Generic;

interface IFoo {}
class Foo : IFoo {}

class Driver {
    static IEnumerable <Foo> Dele (bool b) {
        return null;
    }

    static void Main () {
        Func<bool, IEnumerable<IFoo>> dele = Dele;
        dele (true);
    }
}
-1
dotnet/runtime
66,280
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator
Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
stephentoub
2022-03-07T03:21:23Z
2022-03-07T21:52:38Z
30d66a2c350b4e4a6b5cd5b8aa961b3b23610fc1
457b1ffab6d99b4b9db0e4579c2be4624ba1b3aa
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator. Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
./src/tests/GC/Scenarios/BinTree/thdtreegrowingobj.csproj
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <GCStressIncompatible>true</GCStressIncompatible>
    <!-- The test leaves threads running at exit -->
    <UnloadabilityIncompatible>true</UnloadabilityIncompatible>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="thdtreegrowingobj.cs" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="bintree.csproj" />
  </ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <GCStressIncompatible>true</GCStressIncompatible>
    <!-- The test leaves threads running at exit -->
    <UnloadabilityIncompatible>true</UnloadabilityIncompatible>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="thdtreegrowingobj.cs" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="bintree.csproj" />
  </ItemGroup>
</Project>
-1
dotnet/runtime
66,280
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator
Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
stephentoub
2022-03-07T03:21:23Z
2022-03-07T21:52:38Z
30d66a2c350b4e4a6b5cd5b8aa961b3b23610fc1
457b1ffab6d99b4b9db0e4579c2be4624ba1b3aa
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator. Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
./src/libraries/System.Linq.Expressions/src/System/Dynamic/ExpandoClass.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Collections.Generic;

namespace System.Dynamic
{
    /// <summary>
    /// Represents a dynamically assigned class. Expando objects which share the same
    /// members will share the same class. Classes are dynamically assigned as the
    /// expando object gains members.
    /// </summary>
    internal sealed class ExpandoClass
    {
        private readonly string[] _keys;                            // list of names associated with each element in the data array, sorted
        private readonly int _hashCode;                             // pre-calculated hash code of all the keys the class contains
        private Dictionary<int, List<WeakReference>>? _transitions; // cached transitions

        private const int EmptyHashCode = 6551;                     // hash code of the empty ExpandoClass.

        internal static readonly ExpandoClass Empty = new ExpandoClass(); // The empty Expando class - all Expando objects start off w/ this class.

        /// <summary>
        /// Constructs the empty ExpandoClass. This is the class used when an
        /// empty Expando object is initially constructed.
        /// </summary>
        internal ExpandoClass()
        {
            _hashCode = EmptyHashCode;
            _keys = Array.Empty<string>();
        }

        /// <summary>
        /// Constructs a new ExpandoClass that can hold onto the specified keys. The
        /// keys must be sorted ordinally. The hash code must be precalculated for
        /// the keys.
        /// </summary>
        internal ExpandoClass(string[] keys, int hashCode)
        {
            _hashCode = hashCode;
            _keys = keys;
        }

        /// <summary>
        /// Finds or creates a new ExpandoClass given the existing set of keys
        /// in this ExpandoClass plus the new key to be added. Members in an
        /// ExpandoClass are always stored case sensitively.
        /// </summary>
        internal ExpandoClass FindNewClass(string newKey)
        {
            // just XOR the newKey hash code
            int hashCode = _hashCode ^ newKey.GetHashCode();

            lock (this)
            {
                List<WeakReference> infos = GetTransitionList(hashCode);

                for (int i = 0; i < infos.Count; i++)
                {
                    ExpandoClass? klass = infos[i].Target as ExpandoClass;
                    if (klass == null)
                    {
                        infos.RemoveAt(i);
                        i--;
                        continue;
                    }

                    if (string.Equals(klass._keys[klass._keys.Length - 1], newKey, StringComparison.Ordinal))
                    {
                        // the new key is the key we added in this transition
                        return klass;
                    }
                }

                // no applicable transition, create a new one
                string[] keys = new string[_keys.Length + 1];
                Array.Copy(_keys, keys, _keys.Length);
                keys[_keys.Length] = newKey;
                ExpandoClass ec = new ExpandoClass(keys, hashCode);

                infos.Add(new WeakReference(ec));
                return ec;
            }
        }

        /// <summary>
        /// Gets the lists of transitions that are valid from this ExpandoClass
        /// to an ExpandoClass whose keys hash to the appropriate hash code.
        /// </summary>
        private List<WeakReference> GetTransitionList(int hashCode)
        {
            if (_transitions == null)
            {
                _transitions = new Dictionary<int, List<WeakReference>>();
            }

            if (!_transitions.TryGetValue(hashCode, out List<WeakReference>? infos))
            {
                _transitions[hashCode] = infos = new List<WeakReference>();
            }

            return infos;
        }

        /// <summary>
        /// Gets the index at which the value should be stored for the specified name.
        /// </summary>
        internal int GetValueIndex(string name, bool caseInsensitive, ExpandoObject obj)
        {
            if (caseInsensitive)
            {
                return GetValueIndexCaseInsensitive(name, obj);
            }
            else
            {
                return GetValueIndexCaseSensitive(name);
            }
        }

        /// <summary>
        /// Gets the index at which the value should be stored for the specified name
        /// case sensitively. Returns the index even if the member is marked as deleted.
        /// </summary>
        internal int GetValueIndexCaseSensitive(string name)
        {
            for (int i = 0; i < _keys.Length; i++)
            {
                if (string.Equals(_keys[i], name, StringComparison.Ordinal))
                {
                    return i;
                }
            }
            return ExpandoObject.NoMatch;
        }

        /// <summary>
        /// Gets the index at which the value should be stored for the specified name,
        /// the method is only used in the case-insensitive case.
        /// </summary>
        /// <param name="name">the name of the member</param>
        /// <param name="obj">The ExpandoObject associated with the class
        /// that is used to check if a member has been deleted.</param>
        /// <returns>
        /// the exact match if there is one
        /// if there is exactly one member with case insensitive match, return it
        /// otherwise we throw AmbiguousMatchException.
        /// </returns>
        private int GetValueIndexCaseInsensitive(string name, ExpandoObject obj)
        {
            int caseInsensitiveMatch = ExpandoObject.NoMatch; //the location of the case-insensitive matching member
            lock (obj.LockObject)
            {
                for (int i = _keys.Length - 1; i >= 0; i--)
                {
                    if (string.Equals(_keys[i], name, StringComparison.OrdinalIgnoreCase))
                    {
                        //if the matching member is deleted, continue searching
                        if (!obj.IsDeletedMember(i))
                        {
                            if (caseInsensitiveMatch == ExpandoObject.NoMatch)
                            {
                                caseInsensitiveMatch = i;
                            }
                            else
                            {
                                //Ambiguous match, stop searching
                                return ExpandoObject.AmbiguousMatchFound;
                            }
                        }
                    }
                }
            }
            //There is exactly one member with case insensitive match.
            return caseInsensitiveMatch;
        }

        /// <summary>
        /// Gets the names of the keys that can be stored in the Expando class. The
        /// list is sorted ordinally.
        /// </summary>
        internal string[] Keys => _keys;
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Collections.Generic;

namespace System.Dynamic
{
    /// <summary>
    /// Represents a dynamically assigned class. Expando objects which share the same
    /// members will share the same class. Classes are dynamically assigned as the
    /// expando object gains members.
    /// </summary>
    internal sealed class ExpandoClass
    {
        private readonly string[] _keys;                            // list of names associated with each element in the data array, sorted
        private readonly int _hashCode;                             // pre-calculated hash code of all the keys the class contains
        private Dictionary<int, List<WeakReference>>? _transitions; // cached transitions

        private const int EmptyHashCode = 6551;                     // hash code of the empty ExpandoClass.

        internal static readonly ExpandoClass Empty = new ExpandoClass(); // The empty Expando class - all Expando objects start off w/ this class.

        /// <summary>
        /// Constructs the empty ExpandoClass. This is the class used when an
        /// empty Expando object is initially constructed.
        /// </summary>
        internal ExpandoClass()
        {
            _hashCode = EmptyHashCode;
            _keys = Array.Empty<string>();
        }

        /// <summary>
        /// Constructs a new ExpandoClass that can hold onto the specified keys. The
        /// keys must be sorted ordinally. The hash code must be precalculated for
        /// the keys.
        /// </summary>
        internal ExpandoClass(string[] keys, int hashCode)
        {
            _hashCode = hashCode;
            _keys = keys;
        }

        /// <summary>
        /// Finds or creates a new ExpandoClass given the existing set of keys
        /// in this ExpandoClass plus the new key to be added. Members in an
        /// ExpandoClass are always stored case sensitively.
        /// </summary>
        internal ExpandoClass FindNewClass(string newKey)
        {
            // just XOR the newKey hash code
            int hashCode = _hashCode ^ newKey.GetHashCode();

            lock (this)
            {
                List<WeakReference> infos = GetTransitionList(hashCode);

                for (int i = 0; i < infos.Count; i++)
                {
                    ExpandoClass? klass = infos[i].Target as ExpandoClass;
                    if (klass == null)
                    {
                        infos.RemoveAt(i);
                        i--;
                        continue;
                    }

                    if (string.Equals(klass._keys[klass._keys.Length - 1], newKey, StringComparison.Ordinal))
                    {
                        // the new key is the key we added in this transition
                        return klass;
                    }
                }

                // no applicable transition, create a new one
                string[] keys = new string[_keys.Length + 1];
                Array.Copy(_keys, keys, _keys.Length);
                keys[_keys.Length] = newKey;
                ExpandoClass ec = new ExpandoClass(keys, hashCode);

                infos.Add(new WeakReference(ec));
                return ec;
            }
        }

        /// <summary>
        /// Gets the lists of transitions that are valid from this ExpandoClass
        /// to an ExpandoClass whose keys hash to the appropriate hash code.
        /// </summary>
        private List<WeakReference> GetTransitionList(int hashCode)
        {
            if (_transitions == null)
            {
                _transitions = new Dictionary<int, List<WeakReference>>();
            }

            if (!_transitions.TryGetValue(hashCode, out List<WeakReference>? infos))
            {
                _transitions[hashCode] = infos = new List<WeakReference>();
            }

            return infos;
        }

        /// <summary>
        /// Gets the index at which the value should be stored for the specified name.
        /// </summary>
        internal int GetValueIndex(string name, bool caseInsensitive, ExpandoObject obj)
        {
            if (caseInsensitive)
            {
                return GetValueIndexCaseInsensitive(name, obj);
            }
            else
            {
                return GetValueIndexCaseSensitive(name);
            }
        }

        /// <summary>
        /// Gets the index at which the value should be stored for the specified name
        /// case sensitively. Returns the index even if the member is marked as deleted.
        /// </summary>
        internal int GetValueIndexCaseSensitive(string name)
        {
            for (int i = 0; i < _keys.Length; i++)
            {
                if (string.Equals(_keys[i], name, StringComparison.Ordinal))
                {
                    return i;
                }
            }
            return ExpandoObject.NoMatch;
        }

        /// <summary>
        /// Gets the index at which the value should be stored for the specified name,
        /// the method is only used in the case-insensitive case.
        /// </summary>
        /// <param name="name">the name of the member</param>
        /// <param name="obj">The ExpandoObject associated with the class
        /// that is used to check if a member has been deleted.</param>
        /// <returns>
        /// the exact match if there is one
        /// if there is exactly one member with case insensitive match, return it
        /// otherwise we throw AmbiguousMatchException.
        /// </returns>
        private int GetValueIndexCaseInsensitive(string name, ExpandoObject obj)
        {
            int caseInsensitiveMatch = ExpandoObject.NoMatch; //the location of the case-insensitive matching member
            lock (obj.LockObject)
            {
                for (int i = _keys.Length - 1; i >= 0; i--)
                {
                    if (string.Equals(_keys[i], name, StringComparison.OrdinalIgnoreCase))
                    {
                        //if the matching member is deleted, continue searching
                        if (!obj.IsDeletedMember(i))
                        {
                            if (caseInsensitiveMatch == ExpandoObject.NoMatch)
                            {
                                caseInsensitiveMatch = i;
                            }
                            else
                            {
                                //Ambiguous match, stop searching
                                return ExpandoObject.AmbiguousMatchFound;
                            }
                        }
                    }
                }
            }
            //There is exactly one member with case insensitive match.
            return caseInsensitiveMatch;
        }

        /// <summary>
        /// Gets the names of the keys that can be stored in the Expando class. The
        /// list is sorted ordinally.
        /// </summary>
        internal string[] Keys => _keys;
    }
}
-1
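ExpandoClass above is the internal "shape" shared by expando objects with the same member set; each member addition moves an object to a new class via FindNewClass. A small sketch of the public surface that drives those transitions (an editorial illustration, not code from the PR):

using System;
using System.Dynamic;

class ExpandoDemo
{
    static void Main()
    {
        dynamic bag = new ExpandoObject();
        // Each member added here transitions the object to a new internal
        // ExpandoClass, shared with any expando having the same member set.
        bag.Name = "example";
        bag.Count = 3;
        Console.WriteLine($"{bag.Name}: {bag.Count}");
    }
}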
dotnet/runtime
66,280
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator
Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
stephentoub
2022-03-07T03:21:23Z
2022-03-07T21:52:38Z
30d66a2c350b4e4a6b5cd5b8aa961b3b23610fc1
457b1ffab6d99b4b9db0e4579c2be4624ba1b3aa
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator. Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
./src/libraries/System.Net.NetworkInformation/src/System/Net/NetworkInformation/IPv6InterfaceProperties.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

namespace System.Net.NetworkInformation
{
    /// <summary>
    /// Provides information about network interfaces that support Internet Protocol (IP) version 6.0.
    /// </summary>
    public abstract class IPv6InterfaceProperties
    {
        /// <summary>
        /// Gets the interface index for the Internet Protocol (IP) address.
        /// </summary>
        public abstract int Index { get; }

        /// <summary>
        /// Gets the maximum transmission unit (MTU) for this network interface.
        /// </summary>
        public abstract int Mtu { get; }

        /// <summary>
        /// Returns IPv6 scope identifiers.
        /// </summary>
        /// <param name="scopeLevel">The scope level.</param>
        /// <returns>The IPv6 scope identifier.</returns>
        public virtual long GetScopeId(ScopeLevel scopeLevel)
        {
            throw NotImplemented.ByDesignWithMessage(SR.net_MethodNotImplementedException);
        }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

namespace System.Net.NetworkInformation
{
    /// <summary>
    /// Provides information about network interfaces that support Internet Protocol (IP) version 6.0.
    /// </summary>
    public abstract class IPv6InterfaceProperties
    {
        /// <summary>
        /// Gets the interface index for the Internet Protocol (IP) address.
        /// </summary>
        public abstract int Index { get; }

        /// <summary>
        /// Gets the maximum transmission unit (MTU) for this network interface.
        /// </summary>
        public abstract int Mtu { get; }

        /// <summary>
        /// Returns IPv6 scope identifiers.
        /// </summary>
        /// <param name="scopeLevel">The scope level.</param>
        /// <returns>The IPv6 scope identifier.</returns>
        public virtual long GetScopeId(ScopeLevel scopeLevel)
        {
            throw NotImplemented.ByDesignWithMessage(SR.net_MethodNotImplementedException);
        }
    }
}
-1
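IPv6InterfaceProperties above is abstract; instances come from NetworkInterface. A small sketch of how Index and Mtu are typically read (output depends on the machine's adapters; an editorial illustration, not from the PR):

using System;
using System.Net.NetworkInformation;

class Ipv6PropsDemo
{
    static void Main()
    {
        foreach (NetworkInterface nic in NetworkInterface.GetAllNetworkInterfaces())
        {
            if (nic.Supports(NetworkInterfaceComponent.IPv6))
            {
                // GetIPv6Properties returns the concrete IPv6InterfaceProperties.
                IPv6InterfaceProperties props = nic.GetIPProperties().GetIPv6Properties();
                Console.WriteLine($"{nic.Name}: index={props.Index}, MTU={props.Mtu}");
            }
        }
    }
}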
dotnet/runtime
66,280
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator
Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
stephentoub
2022-03-07T03:21:23Z
2022-03-07T21:52:38Z
30d66a2c350b4e4a6b5cd5b8aa961b3b23610fc1
457b1ffab6d99b4b9db0e4579c2be4624ba1b3aa
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator. Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
./src/tests/baseservices/threading/regressions/2164/foreground-shutdown.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;
using System.Runtime.InteropServices;
using System.Reflection;
using System.Threading;

/*
 * Issue description:
 *   Running foreground threads do not prevent runtime shutdown on return from main
 * Change description:
 *   For CoreCLR: introduce BOOL waitForOtherThreads parameter to
 *   Assembly::ExecuteMainMethod and exit conditionally;
 *   For CoreRT aka NativeAOT: implement missing logic
 */

public class Test_foreground_shutdown
{
    public static int Main()
    {
        new Thread(() =>
        {
            Thread.Sleep(TimeSpan.FromSeconds(1));
            Environment.Exit(100);
        }).Start();

        // foreground thread created above prevents
        // runtime shutdown and non-100 exit code propagation
        return 101;
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;
using System.Runtime.InteropServices;
using System.Reflection;
using System.Threading;

/*
 * Issue description:
 *   Running foreground threads do not prevent runtime shutdown on return from main
 * Change description:
 *   For CoreCLR: introduce BOOL waitForOtherThreads parameter to
 *   Assembly::ExecuteMainMethod and exit conditionally;
 *   For CoreRT aka NativeAOT: implement missing logic
 */

public class Test_foreground_shutdown
{
    public static int Main()
    {
        new Thread(() =>
        {
            Thread.Sleep(TimeSpan.FromSeconds(1));
            Environment.Exit(100);
        }).Start();

        // foreground thread created above prevents
        // runtime shutdown and non-100 exit code propagation
        return 101;
    }
}
-1
dotnet/runtime
66,280
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator
Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
stephentoub
2022-03-07T03:21:23Z
2022-03-07T21:52:38Z
30d66a2c350b4e4a6b5cd5b8aa961b3b23610fc1
457b1ffab6d99b4b9db0e4579c2be4624ba1b3aa
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator. Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
./src/coreclr/vm/comutilnative.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // /*============================================================ ** ** File: COMUtilNative ** ** ** ** Purpose: A dumping ground for classes which aren't large ** enough to get their own file in the EE. ** ** ** ===========================================================*/ #include "common.h" #include "object.h" #include "excep.h" #include "vars.hpp" #include "comutilnative.h" #include "utilcode.h" #include "frames.h" #include "field.h" #include "winwrap.h" #include "gcheaputilities.h" #include "fcall.h" #include "invokeutil.h" #include "eeconfig.h" #include "typestring.h" #include "finalizerthread.h" #include "threadsuspend.h" #ifdef FEATURE_COMINTEROP #include "comcallablewrapper.h" #include "comcache.h" #endif // FEATURE_COMINTEROP #include "arraynative.inl" /*===================================IsDigit==================================== **Returns a bool indicating whether the character passed in represents a ** **digit. ==============================================================================*/ bool IsDigit(WCHAR c, int radix, int *result) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(result)); } CONTRACTL_END; if (IS_DIGIT(c)) { *result = DIGIT_TO_INT(c); } else if (c>='A' && c<='Z') { //+10 is necessary because A is actually 10, etc. *result = c-'A'+10; } else if (c>='a' && c<='z') { //+10 is necessary because a is actually 10, etc. *result = c-'a'+10; } else { *result = -1; } if ((*result >=0) && (*result < radix)) return true; return false; } INT32 wtoi(_In_reads_(length) WCHAR* wstr, DWORD length) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(wstr)); PRECONDITION(length >= 0); } CONTRACTL_END; DWORD i = 0; int value; INT32 result = 0; while ( (i < length) && (IsDigit(wstr[i], 10 ,&value)) ) { //Read all of the digits and convert to a number result = result*10 + value; i++; } return result; } // // // EXCEPTION NATIVE // // FCIMPL1(FC_BOOL_RET, ExceptionNative::IsImmutableAgileException, Object* pExceptionUNSAFE) { FCALL_CONTRACT; ASSERT(pExceptionUNSAFE != NULL); OBJECTREF pException = (OBJECTREF) pExceptionUNSAFE; // The preallocated exception objects may be used from multiple AppDomains // and therefore must remain immutable from the application's perspective. FC_RETURN_BOOL(CLRException::IsPreallocatedExceptionObject(pException)); } FCIMPLEND // This FCall sets a flag against the thread exception state to indicate to // IL_Throw and the StackTraceInfo implementation to account for the fact // that we have restored a foreign exception dispatch details. // // Refer to the respective methods for details on how they use this flag. FCIMPL0(VOID, ExceptionNative::PrepareForForeignExceptionRaise) { FCALL_CONTRACT; PTR_ThreadExceptionState pCurTES = GetThread()->GetExceptionState(); // Set a flag against the TES to indicate this is a foreign exception raise. pCurTES->SetRaisingForeignException(); } FCIMPLEND // Given an exception object, this method will extract the stacktrace and dynamic method array and set them up for return to the caller. 
FCIMPL3(VOID, ExceptionNative::GetStackTracesDeepCopy, Object* pExceptionObjectUnsafe, Object **pStackTraceUnsafe, Object **pDynamicMethodsUnsafe);
{
    CONTRACTL
    {
        FCALL_CHECK;
    }
    CONTRACTL_END;

    ASSERT(pExceptionObjectUnsafe != NULL);
    ASSERT(pStackTraceUnsafe != NULL);
    ASSERT(pDynamicMethodsUnsafe != NULL);

    struct _gc
    {
        StackTraceArray stackTrace;
        StackTraceArray stackTraceCopy;
        EXCEPTIONREF    refException;
        PTRARRAYREF     dynamicMethodsArray;     // Object array of Managed Resolvers
        PTRARRAYREF     dynamicMethodsArrayCopy; // Copy of the object array of Managed Resolvers
    };
    _gc gc;
    ZeroMemory(&gc, sizeof(gc));

    // GC protect the array reference
    HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);

    // Get the exception object reference
    gc.refException = (EXCEPTIONREF)(ObjectToOBJECTREF(pExceptionObjectUnsafe));

    // Fetch the stacktrace details from the exception under a lock
    gc.refException->GetStackTrace(gc.stackTrace, &gc.dynamicMethodsArray);

    bool fHaveStackTrace = false;
    bool fHaveDynamicMethodArray = false;

    if ((unsigned)gc.stackTrace.Size() > 0)
    {
        // Deepcopy the array
        gc.stackTraceCopy.CopyFrom(gc.stackTrace);
        fHaveStackTrace = true;
    }

    if (gc.dynamicMethodsArray != NULL)
    {
        // Get the number of elements in the dynamic methods array
        unsigned cOrigDynamic = gc.dynamicMethodsArray->GetNumComponents();

        // ..and allocate a new array. This can trigger GC or throw under OOM.
        gc.dynamicMethodsArrayCopy = (PTRARRAYREF)AllocateObjectArray(cOrigDynamic, g_pObjectClass);

        // Deepcopy references to the new array we just allocated
        memmoveGCRefs(gc.dynamicMethodsArrayCopy->GetDataPtr(), gc.dynamicMethodsArray->GetDataPtr(),
                      cOrigDynamic * sizeof(Object *));

        fHaveDynamicMethodArray = true;
    }

    // Prep to return
    *pStackTraceUnsafe = fHaveStackTrace ? OBJECTREFToObject(gc.stackTraceCopy.Get()) : NULL;
    *pDynamicMethodsUnsafe = fHaveDynamicMethodArray ? OBJECTREFToObject(gc.dynamicMethodsArrayCopy) : NULL;

    HELPER_METHOD_FRAME_END();
}
FCIMPLEND

// Given an exception object and deep copied instances of a stacktrace and/or dynamic method array, this method will set the latter in the exception object instance.
FCIMPL3(VOID, ExceptionNative::SaveStackTracesFromDeepCopy, Object* pExceptionObjectUnsafe, Object *pStackTraceUnsafe, Object *pDynamicMethodsUnsafe);
{
    CONTRACTL
    {
        FCALL_CHECK;
    }
    CONTRACTL_END;

    ASSERT(pExceptionObjectUnsafe != NULL);

    struct _gc
    {
        StackTraceArray stackTrace;
        EXCEPTIONREF    refException;
        PTRARRAYREF     dynamicMethodsArray; // Object array of Managed Resolvers
    };
    _gc gc;
    ZeroMemory(&gc, sizeof(gc));

    // GC protect the array reference
    HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);

    // Get the exception object reference
    gc.refException = (EXCEPTIONREF)(ObjectToOBJECTREF(pExceptionObjectUnsafe));

    if (pStackTraceUnsafe != NULL)
    {
        // Copy the stacktrace
        StackTraceArray stackTraceArray((I1ARRAYREF)ObjectToOBJECTREF(pStackTraceUnsafe));
        gc.stackTrace.Swap(stackTraceArray);
    }

    gc.dynamicMethodsArray = NULL;
    if (pDynamicMethodsUnsafe != NULL)
    {
        gc.dynamicMethodsArray = (PTRARRAYREF)ObjectToOBJECTREF(pDynamicMethodsUnsafe);
    }

    // If there is no stacktrace, then there cannot be any dynamic method array. Thus,
    // save stacktrace only when we have it.
    if (gc.stackTrace.Size() > 0)
    {
        // Save the stacktrace details in the exception under a lock
        gc.refException->SetStackTrace(gc.stackTrace.Get(), gc.dynamicMethodsArray);
    }
    else
    {
        gc.refException->SetStackTrace(NULL, NULL);
    }

    HELPER_METHOD_FRAME_END();
}
FCIMPLEND

BSTR BStrFromString(STRINGREF s)
{
    CONTRACTL
    {
        THROWS;
    }
    CONTRACTL_END;

    WCHAR *wz;
    int cch;
    BSTR bstr;

    if (s == NULL)
        return NULL;

    s->RefInterpretGetStringValuesDangerousForGC(&wz, &cch);

    bstr = SysAllocString(wz);
    if (bstr == NULL)
        COMPlusThrowOM();

    return bstr;
}

static BSTR GetExceptionDescription(OBJECTREF objException)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(IsException(objException->GetMethodTable()));
    }
    CONTRACTL_END;

    BSTR bstrDescription;

    STRINGREF MessageString = NULL;
    GCPROTECT_BEGIN(MessageString)
    GCPROTECT_BEGIN(objException)
    {
        // read Exception.Message property
        MethodDescCallSite getMessage(METHOD__EXCEPTION__GET_MESSAGE, &objException);

        ARG_SLOT GetMessageArgs[] = { ObjToArgSlot(objException) };
        MessageString = getMessage.Call_RetSTRINGREF(GetMessageArgs);

        // if the message string is empty then use the exception classname.
        if (MessageString == NULL || MessageString->GetStringLength() == 0)
        {
            // call GetClassName
            MethodDescCallSite getClassName(METHOD__EXCEPTION__GET_CLASS_NAME, &objException);
            ARG_SLOT GetClassNameArgs[] = { ObjToArgSlot(objException) };
            MessageString = getClassName.Call_RetSTRINGREF(GetClassNameArgs);
            _ASSERTE(MessageString != NULL && MessageString->GetStringLength() != 0);
        }

        // Allocate the description BSTR.
        int DescriptionLen = MessageString->GetStringLength();
        bstrDescription = SysAllocStringLen(MessageString->GetBuffer(), DescriptionLen);
    }
    GCPROTECT_END();
    GCPROTECT_END();

    return bstrDescription;
}

static BSTR GetExceptionSource(OBJECTREF objException)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(IsException(objException->GetMethodTable()));
    }
    CONTRACTL_END;

    STRINGREF refRetVal;
    GCPROTECT_BEGIN(objException)

    // read Exception.Source property
    MethodDescCallSite getSource(METHOD__EXCEPTION__GET_SOURCE, &objException);

    ARG_SLOT GetSourceArgs[] = { ObjToArgSlot(objException) };
    refRetVal = getSource.Call_RetSTRINGREF(GetSourceArgs);

    GCPROTECT_END();
    return BStrFromString(refRetVal);
}

static void GetExceptionHelp(OBJECTREF objException, BSTR *pbstrHelpFile, DWORD *pdwHelpContext)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        INJECT_FAULT(COMPlusThrowOM());
        PRECONDITION(IsException(objException->GetMethodTable()));
        PRECONDITION(CheckPointer(pbstrHelpFile));
        PRECONDITION(CheckPointer(pdwHelpContext));
    }
    CONTRACTL_END;

    *pdwHelpContext = 0;

    GCPROTECT_BEGIN(objException);

    // read Exception.HelpLink property
    MethodDescCallSite getHelpLink(METHOD__EXCEPTION__GET_HELP_LINK, &objException);

    ARG_SLOT GetHelpLinkArgs[] = { ObjToArgSlot(objException) };
    *pbstrHelpFile = BStrFromString(getHelpLink.Call_RetSTRINGREF(GetHelpLinkArgs));

    GCPROTECT_END();

    // parse the help file to check for the presence of helpcontext
    int len = SysStringLen(*pbstrHelpFile);
    int pos = len;
    WCHAR *pwstr = *pbstrHelpFile;
    if (pwstr)
    {
        BOOL fFoundPound = FALSE;

        for (pos = len - 1; pos >= 0; pos--)
        {
            if (pwstr[pos] == W('#'))
            {
                fFoundPound = TRUE;
                break;
            }
        }

        if (fFoundPound)
        {
            int PoundPos = pos;
            int NumberStartPos = -1;
            BOOL bNumberStarted = FALSE;
            BOOL bNumberFinished = FALSE;
            BOOL bInvalidDigitsFound = FALSE;

            _ASSERTE(pwstr[pos] == W('#'));

            // Check to see if the string to the right of the pound a valid number.
            for (pos++; pos < len; pos++)
            {
                if (bNumberFinished)
                {
                    if (!COMCharacter::nativeIsWhiteSpace(pwstr[pos]))
                    {
                        bInvalidDigitsFound = TRUE;
                        break;
                    }
                }
                else if (bNumberStarted)
                {
                    if (COMCharacter::nativeIsWhiteSpace(pwstr[pos]))
                    {
                        bNumberFinished = TRUE;
                    }
                    else if (!COMCharacter::nativeIsDigit(pwstr[pos]))
                    {
                        bInvalidDigitsFound = TRUE;
                        break;
                    }
                }
                else
                {
                    if (COMCharacter::nativeIsDigit(pwstr[pos]))
                    {
                        NumberStartPos = pos;
                        bNumberStarted = TRUE;
                    }
                    else if (!COMCharacter::nativeIsWhiteSpace(pwstr[pos]))
                    {
                        bInvalidDigitsFound = TRUE;
                        break;
                    }
                }
            }

            if (bNumberStarted && !bInvalidDigitsFound)
            {
                // Grab the help context and remove it from the help file.
                *pdwHelpContext = (DWORD)wtoi(&pwstr[NumberStartPos], len - NumberStartPos);

                // Allocate a new help file string of the right length.
                BSTR strOld = *pbstrHelpFile;
                *pbstrHelpFile = SysAllocStringLen(strOld, PoundPos);
                SysFreeString(strOld);
                if (!*pbstrHelpFile)
                    COMPlusThrowOM();
            }
        }
    }
}

// NOTE: caller cleans up any partially initialized BSTRs in pED
void ExceptionNative::GetExceptionData(OBJECTREF objException, ExceptionData *pED)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(IsException(objException->GetMethodTable()));
        PRECONDITION(CheckPointer(pED));
    }
    CONTRACTL_END;

    ZeroMemory(pED, sizeof(ExceptionData));

    GCPROTECT_BEGIN(objException);
    pED->hr = GetExceptionHResult(objException);
    pED->bstrDescription = GetExceptionDescription(objException);
    pED->bstrSource = GetExceptionSource(objException);
    GetExceptionHelp(objException, &pED->bstrHelpFile, &pED->dwHelpContext);
    GCPROTECT_END();
    return;
}

#ifdef FEATURE_COMINTEROP

HRESULT SimpleComCallWrapper::IErrorInfo_hr()
{
    WRAPPER_NO_CONTRACT;
    return GetExceptionHResult(this->GetObjectRef());
}

BSTR SimpleComCallWrapper::IErrorInfo_bstrDescription()
{
    WRAPPER_NO_CONTRACT;
    return GetExceptionDescription(this->GetObjectRef());
}

BSTR SimpleComCallWrapper::IErrorInfo_bstrSource()
{
    WRAPPER_NO_CONTRACT;
    return GetExceptionSource(this->GetObjectRef());
}

BSTR SimpleComCallWrapper::IErrorInfo_bstrHelpFile()
{
    WRAPPER_NO_CONTRACT;
    BSTR bstrHelpFile;
    DWORD dwHelpContext;
    GetExceptionHelp(this->GetObjectRef(), &bstrHelpFile, &dwHelpContext);
    return bstrHelpFile;
}

DWORD SimpleComCallWrapper::IErrorInfo_dwHelpContext()
{
    WRAPPER_NO_CONTRACT;
    BSTR bstrHelpFile;
    DWORD dwHelpContext;
    GetExceptionHelp(this->GetObjectRef(), &bstrHelpFile, &dwHelpContext);
    SysFreeString(bstrHelpFile);
    return dwHelpContext;
}

GUID SimpleComCallWrapper::IErrorInfo_guid()
{
    LIMITED_METHOD_CONTRACT;
    return GUID_NULL;
}

#endif // FEATURE_COMINTEROP

FCIMPL0(EXCEPTION_POINTERS*, ExceptionNative::GetExceptionPointers)
{
    FCALL_CONTRACT;

    EXCEPTION_POINTERS* retVal = NULL;

    Thread *pThread = GetThread();

    if (pThread->IsExceptionInProgress())
    {
        retVal = pThread->GetExceptionState()->GetExceptionPointers();
    }

    return retVal;
}
FCIMPLEND

FCIMPL0(INT32, ExceptionNative::GetExceptionCode)
{
    FCALL_CONTRACT;

    INT32 retVal = 0;

    Thread *pThread = GetThread();

    if (pThread->IsExceptionInProgress())
    {
        retVal = pThread->GetExceptionState()->GetExceptionCode();
    }

    return retVal;
}
FCIMPLEND

extern uint32_t g_exceptionCount;
FCIMPL0(UINT32, ExceptionNative::GetExceptionCount)
{
    FCALL_CONTRACT;
    return g_exceptionCount;
}
FCIMPLEND

//
// This must be implemented as an FCALL because managed code cannot
// swallow a thread abort exception without resetting the abort,
// which we don't want to do.
// Additionally, we can run into deadlocks
// if we use the ResourceManager to do resource lookups - it requires
// taking managed locks when initializing Globalization & Security,
// but a thread abort on a separate thread initializing those same
// systems would also do a resource lookup via the ResourceManager.
// We've deadlocked in CompareInfo.GetCompareInfo &
// Environment.GetResourceString.  It's not practical to take all of
// our locks within CER's to avoid this problem - just use the CLR's
// unmanaged resources.
//
extern "C" void QCALLTYPE ExceptionNative_GetMessageFromNativeResources(ExceptionMessageKind kind, QCall::StringHandleOnStack retMesg)
{
    QCALL_CONTRACT;

    BEGIN_QCALL;

    SString buffer;
    HRESULT hr = S_OK;
    const WCHAR * wszFallbackString = NULL;

    switch (kind)
    {
    case ExceptionMessageKind::ThreadAbort:
        hr = buffer.LoadResourceAndReturnHR(CCompRC::Error, IDS_EE_THREAD_ABORT);
        if (FAILED(hr))
        {
            wszFallbackString = W("Thread was being aborted.");
        }
        break;

    case ExceptionMessageKind::ThreadInterrupted:
        hr = buffer.LoadResourceAndReturnHR(CCompRC::Error, IDS_EE_THREAD_INTERRUPTED);
        if (FAILED(hr))
        {
            wszFallbackString = W("Thread was interrupted from a waiting state.");
        }
        break;

    case ExceptionMessageKind::OutOfMemory:
        hr = buffer.LoadResourceAndReturnHR(CCompRC::Error, IDS_EE_OUT_OF_MEMORY);
        if (FAILED(hr))
        {
            wszFallbackString = W("Insufficient memory to continue the execution of the program.");
        }
        break;

    default:
        _ASSERTE(!"Unknown ExceptionMessageKind value!");
    }

    if (FAILED(hr))
    {
        STRESS_LOG1(LF_BCL, LL_ALWAYS, "LoadResource error: %x", hr);
        _ASSERTE(wszFallbackString != NULL);
        retMesg.Set(wszFallbackString);
    }
    else
    {
        retMesg.Set(buffer);
    }

    END_QCALL;
}

extern "C" void QCALLTYPE Buffer_Clear(void *dst, size_t length)
{
    QCALL_CONTRACT;

#if defined(HOST_X86) || defined(HOST_AMD64)
    if (length > 0x100)
    {
        // memset ends up calling rep stosb if the hardware claims to support it efficiently. rep stosb is up to 2x slower
        // on misaligned blocks. Workaround this issue by aligning the blocks passed to memset upfront.
        *(uint64_t*)dst = 0;
        *((uint64_t*)dst + 1) = 0;
        *((uint64_t*)dst + 2) = 0;
        *((uint64_t*)dst + 3) = 0;

        void* end = (uint8_t*)dst + length;
        *((uint64_t*)end - 1) = 0;
        *((uint64_t*)end - 2) = 0;
        *((uint64_t*)end - 3) = 0;
        *((uint64_t*)end - 4) = 0;

        dst = ALIGN_UP((uint8_t*)dst + 1, 32);
        length = ALIGN_DOWN((uint8_t*)end - 1, 32) - (uint8_t*)dst;
    }
#endif

    memset(dst, 0, length);
}

FCIMPL3(VOID, Buffer::BulkMoveWithWriteBarrier, void *dst, void *src, size_t byteCount)
{
    FCALL_CONTRACT;

    if (dst != src && byteCount != 0)
        InlinedMemmoveGCRefsHelper(dst, src, byteCount);

    FC_GC_POLL();
}
FCIMPLEND

extern "C" void QCALLTYPE Buffer_MemMove(void *dst, void *src, size_t length)
{
    QCALL_CONTRACT;

    memmove(dst, src, length);
}

//
// GCInterface
//
INT32 GCInterface::m_gc_counts[3] = {0,0,0};

UINT64 GCInterface::m_addPressure[MEM_PRESSURE_COUNT] = {0, 0, 0, 0};   // history of memory pressure additions
UINT64 GCInterface::m_remPressure[MEM_PRESSURE_COUNT] = {0, 0, 0, 0};   // history of memory pressure removals

// incremented after a gen2 GC has been detected,
// (m_iteration % MEM_PRESSURE_COUNT) is used as an index into m_addPressure and m_remPressure
UINT GCInterface::m_iteration = 0;

FCIMPL2(void, GCInterface::GetMemoryInfo, Object* objUNSAFE, int kind)
{
    FCALL_CONTRACT;

    FC_GC_POLL_NOT_NEEDED();

    GCMEMORYINFODATAREF objGCMemoryInfo = (GCMEMORYINFODATAREF)(ObjectToOBJECTREF(objUNSAFE));

    UINT64* genInfoRaw = (UINT64*)&(objGCMemoryInfo->generationInfo0);
    UINT64* pauseInfoRaw = (UINT64*)&(objGCMemoryInfo->pauseDuration0);

    return GCHeapUtilities::GetGCHeap()->GetMemoryInfo(
        &(objGCMemoryInfo->highMemLoadThresholdBytes),
        &(objGCMemoryInfo->totalAvailableMemoryBytes),
        &(objGCMemoryInfo->lastRecordedMemLoadBytes),
        &(objGCMemoryInfo->lastRecordedHeapSizeBytes),
        &(objGCMemoryInfo->lastRecordedFragmentationBytes),
        &(objGCMemoryInfo->totalCommittedBytes),
        &(objGCMemoryInfo->promotedBytes),
        &(objGCMemoryInfo->pinnedObjectCount),
        &(objGCMemoryInfo->finalizationPendingCount),
        &(objGCMemoryInfo->index),
        &(objGCMemoryInfo->generation),
        &(objGCMemoryInfo->pauseTimePercent),
        (bool*)&(objGCMemoryInfo->isCompaction),
        (bool*)&(objGCMemoryInfo->isConcurrent),
        genInfoRaw,
        pauseInfoRaw,
        kind);
}
FCIMPLEND

FCIMPL0(UINT32, GCInterface::GetMemoryLoad)
{
    FCALL_CONTRACT;

    FC_GC_POLL_NOT_NEEDED();

    int result = (INT32)GCHeapUtilities::GetGCHeap()->GetMemoryLoad();

    return result;
}
FCIMPLEND

FCIMPL0(int, GCInterface::GetGcLatencyMode)
{
    FCALL_CONTRACT;

    FC_GC_POLL_NOT_NEEDED();

    int result = (INT32)GCHeapUtilities::GetGCHeap()->GetGcLatencyMode();
    return result;
}
FCIMPLEND

FCIMPL1(int, GCInterface::SetGcLatencyMode, int newLatencyMode)
{
    FCALL_CONTRACT;

    FC_GC_POLL_NOT_NEEDED();

    return GCHeapUtilities::GetGCHeap()->SetGcLatencyMode(newLatencyMode);
}
FCIMPLEND

FCIMPL0(int, GCInterface::GetLOHCompactionMode)
{
    FCALL_CONTRACT;

    FC_GC_POLL_NOT_NEEDED();

    int result = (INT32)GCHeapUtilities::GetGCHeap()->GetLOHCompactionMode();
    return result;
}
FCIMPLEND

FCIMPL1(void, GCInterface::SetLOHCompactionMode, int newLOHCompactionyMode)
{
    FCALL_CONTRACT;

    FC_GC_POLL_NOT_NEEDED();

    GCHeapUtilities::GetGCHeap()->SetLOHCompactionMode(newLOHCompactionyMode);
}
FCIMPLEND

FCIMPL2(FC_BOOL_RET, GCInterface::RegisterForFullGCNotification, UINT32 gen2Percentage, UINT32 lohPercentage)
{
    FCALL_CONTRACT;

    FC_GC_POLL_NOT_NEEDED();

    FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->RegisterForFullGCNotification(gen2Percentage, lohPercentage));
}
FCIMPLEND

FCIMPL0(FC_BOOL_RET, GCInterface::CancelFullGCNotification)
{
    FCALL_CONTRACT;

    FC_GC_POLL_NOT_NEEDED();
    FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->CancelFullGCNotification());
}
FCIMPLEND

FCIMPL1(int, GCInterface::WaitForFullGCApproach, int millisecondsTimeout)
{
    CONTRACTL
    {
        THROWS;
        MODE_COOPERATIVE;
        DISABLED(GC_TRIGGERS);  // can't use this in an FCALL because we're in forbid gc mode until we setup a H_M_F.
    }
    CONTRACTL_END;

    int result = 0;

    //We don't need to check the top end because the GC will take care of that.
    HELPER_METHOD_FRAME_BEGIN_RET_0();

    DWORD dwMilliseconds = ((millisecondsTimeout == -1) ? INFINITE : millisecondsTimeout);
    result = GCHeapUtilities::GetGCHeap()->WaitForFullGCApproach(dwMilliseconds);

    HELPER_METHOD_FRAME_END();

    return result;
}
FCIMPLEND

FCIMPL1(int, GCInterface::WaitForFullGCComplete, int millisecondsTimeout)
{
    CONTRACTL
    {
        THROWS;
        MODE_COOPERATIVE;
        DISABLED(GC_TRIGGERS);  // can't use this in an FCALL because we're in forbid gc mode until we setup a H_M_F.
    }
    CONTRACTL_END;

    int result = 0;

    //We don't need to check the top end because the GC will take care of that.
    HELPER_METHOD_FRAME_BEGIN_RET_0();

    DWORD dwMilliseconds = ((millisecondsTimeout == -1) ? INFINITE : millisecondsTimeout);
    result = GCHeapUtilities::GetGCHeap()->WaitForFullGCComplete(dwMilliseconds);

    HELPER_METHOD_FRAME_END();

    return result;
}
FCIMPLEND

/*================================GetGeneration=================================
**Action: Returns the generation in which args->obj is found.
**Returns: The generation in which args->obj is found.
**Arguments: args->obj -- The object to locate.
**Exceptions: ArgumentException if args->obj is null.
==============================================================================*/
FCIMPL1(int, GCInterface::GetGeneration, Object* objUNSAFE)
{
    FCALL_CONTRACT;

    if (objUNSAFE == NULL)
        FCThrowArgumentNull(W("obj"));

    int result = (INT32)GCHeapUtilities::GetGCHeap()->WhichGeneration(objUNSAFE);
    FC_GC_POLL_RET();
    return result;
}
FCIMPLEND

/*================================GetSegmentSize========-=======================
**Action: Returns the maximum GC heap segment size
**Returns: The maximum segment size of either the normal heap or the large object heap, whichever is bigger
==============================================================================*/
FCIMPL0(UINT64, GCInterface::GetSegmentSize)
{
    FCALL_CONTRACT;

    IGCHeap * pGC = GCHeapUtilities::GetGCHeap();
    size_t segment_size = pGC->GetValidSegmentSize(false);
    size_t large_segment_size = pGC->GetValidSegmentSize(true);
    _ASSERTE(segment_size < SIZE_T_MAX && large_segment_size < SIZE_T_MAX);
    if (segment_size < large_segment_size)
        segment_size = large_segment_size;

    FC_GC_POLL_RET();
    return (UINT64) segment_size;
}
FCIMPLEND

/*================================CollectionCount=================================
**Action: Returns the number of collections for this generation since the beginning of the life of the process
**Returns: The collection count.
**Arguments: args->generation -- The generation
**Exceptions: Argument exception if args->generation is < 0 or > GetMaxGeneration();
==============================================================================*/
FCIMPL2(int, GCInterface::CollectionCount, INT32 generation, INT32 getSpecialGCCount)
{
    FCALL_CONTRACT;

    //We've already checked this in GC.cs, so we'll just assert it here.
    _ASSERTE(generation >= 0);

    //We don't need to check the top end because the GC will take care of that.
    int result = (INT32)GCHeapUtilities::GetGCHeap()->CollectionCount(generation, getSpecialGCCount);
    FC_GC_POLL_RET();
    return result;
}
FCIMPLEND

extern "C" int QCALLTYPE GCInterface_StartNoGCRegion(INT64 totalSize, BOOL lohSizeKnown, INT64 lohSize, BOOL disallowFullBlockingGC)
{
    QCALL_CONTRACT;

    int retVal = 0;

    BEGIN_QCALL;

    GCX_COOP();

    retVal = GCHeapUtilities::GetGCHeap()->StartNoGCRegion((ULONGLONG)totalSize,
                                                           !!lohSizeKnown,
                                                           (ULONGLONG)lohSize,
                                                           !!disallowFullBlockingGC);

    END_QCALL;

    return retVal;
}

extern "C" int QCALLTYPE GCInterface_EndNoGCRegion()
{
    QCALL_CONTRACT;

    int retVal = FALSE;

    BEGIN_QCALL;

    retVal = GCHeapUtilities::GetGCHeap()->EndNoGCRegion();

    END_QCALL;

    return retVal;
}

/*===============================GetGenerationWR================================
**Action: Returns the generation in which the object pointed to by a WeakReference is found.
**Returns:
**Arguments: args->handle -- the OBJECTHANDLE to the object which we're locating.
**Exceptions: ArgumentException if handle points to an object which is not accessible.
==============================================================================*/
FCIMPL1(int, GCInterface::GetGenerationWR, LPVOID handle)
{
    FCALL_CONTRACT;

    int iRetVal = 0;

    HELPER_METHOD_FRAME_BEGIN_RET_0();

    OBJECTREF temp;
    temp = ObjectFromHandle((OBJECTHANDLE) handle);
    if (temp == NULL)
        COMPlusThrowArgumentNull(W("wo"));

    iRetVal = (INT32)GCHeapUtilities::GetGCHeap()->WhichGeneration(OBJECTREFToObject(temp));

    HELPER_METHOD_FRAME_END();

    return iRetVal;
}
FCIMPLEND

FCIMPL0(int, GCInterface::GetLastGCPercentTimeInGC)
{
    FCALL_CONTRACT;

    return GCHeapUtilities::GetGCHeap()->GetLastGCPercentTimeInGC();
}
FCIMPLEND

FCIMPL1(UINT64, GCInterface::GetGenerationSize, int gen)
{
    FCALL_CONTRACT;

    return (UINT64)(GCHeapUtilities::GetGCHeap()->GetLastGCGenerationSize(gen));
}
FCIMPLEND

/*================================GetTotalMemory================================
**Action: Returns the total number of bytes in use
**Returns: The total number of bytes in use
**Arguments: None
**Exceptions: None
==============================================================================*/
extern "C" INT64 QCALLTYPE GCInterface_GetTotalMemory()
{
    QCALL_CONTRACT;

    INT64 iRetVal = 0;

    BEGIN_QCALL;

    GCX_COOP();

    iRetVal = (INT64) GCHeapUtilities::GetGCHeap()->GetTotalBytesInUse();

    END_QCALL;

    return iRetVal;
}

/*==============================Collect=========================================
**Action: Collects all generations <= args->generation
**Returns: void
**Arguments: args->generation:  The maximum generation to collect
**Exceptions: Argument exception if args->generation is < 0 or > GetMaxGeneration();
==============================================================================*/
extern "C" void QCALLTYPE GCInterface_Collect(INT32 generation, INT32 mode)
{
    QCALL_CONTRACT;

    BEGIN_QCALL;

    //We've already checked this in GC.cs, so we'll just assert it here.
    _ASSERTE(generation >= -1);

    //We don't need to check the top end because the GC will take care of that.

    GCX_COOP();
    GCHeapUtilities::GetGCHeap()->GarbageCollect(generation, false, mode);

    END_QCALL;
}

/*==========================WaitForPendingFinalizers============================
**Action: Run all Finalizers that haven't been run.
**Arguments: None
**Exceptions: None
==============================================================================*/
extern "C" void QCALLTYPE GCInterface_WaitForPendingFinalizers()
{
    QCALL_CONTRACT;

    BEGIN_QCALL;

    FinalizerThread::FinalizerThreadWait();

    END_QCALL;
}

/*===============================GetMaxGeneration===============================
**Action: Returns the largest GC generation
**Returns: The largest GC Generation
**Arguments: None
**Exceptions: None
==============================================================================*/
FCIMPL0(int, GCInterface::GetMaxGeneration)
{
    FCALL_CONTRACT;

    return (INT32)GCHeapUtilities::GetGCHeap()->GetMaxGeneration();
}
FCIMPLEND

/*===============================GetAllocatedBytesForCurrentThread===============================
**Action: Computes the allocated bytes so far on the current thread
**Returns: The allocated bytes so far on the current thread
**Arguments: None
**Exceptions: None
==============================================================================*/
FCIMPL0(INT64, GCInterface::GetAllocatedBytesForCurrentThread)
{
    FCALL_CONTRACT;

    INT64 currentAllocated = 0;
    Thread *pThread = GetThread();
    gc_alloc_context* ac = pThread->GetAllocContext();
    currentAllocated = ac->alloc_bytes + ac->alloc_bytes_uoh - (ac->alloc_limit - ac->alloc_ptr);

    return currentAllocated;
}
FCIMPLEND

/*===============================AllocateNewArray===============================
**Action: Allocates a new array object. Allows passing extra flags
**Returns: The allocated array.
**Arguments: elementTypeHandle -> type of the element,
**           length -> number of elements,
**           zeroingOptional -> whether caller prefers to skip clearing the content of the array, if possible.
**Exceptions: IDS_EE_ARRAY_DIMENSIONS_EXCEEDED when size is too large. OOM if can't allocate.
==============================================================================*/
FCIMPL3(Object*, GCInterface::AllocateNewArray, void* arrayTypeHandle, INT32 length, INT32 flags)
{
    CONTRACTL
    {
        FCALL_CHECK;
    }
    CONTRACTL_END;

    OBJECTREF pRet = NULL;
    TypeHandle arrayType = TypeHandle::FromPtr(arrayTypeHandle);

    HELPER_METHOD_FRAME_BEGIN_RET_0();

    //Only the following flags are used by GC.cs, so we'll just assert it here.
    _ASSERTE((flags & ~(GC_ALLOC_ZEROING_OPTIONAL | GC_ALLOC_PINNED_OBJECT_HEAP)) == 0);

    pRet = AllocateSzArray(arrayType, length, (GC_ALLOC_FLAGS)flags);

    HELPER_METHOD_FRAME_END();

    return OBJECTREFToObject(pRet);
}
FCIMPLEND

FCIMPL1(INT64, GCInterface::GetTotalAllocatedBytes, CLR_BOOL precise)
{
    FCALL_CONTRACT;

    if (!precise)
    {
#ifdef TARGET_64BIT
        uint64_t unused_bytes = Thread::dead_threads_non_alloc_bytes;
#else
        // As it could be noticed we read 64bit values that may be concurrently updated.
        // Such reads are not guaranteed to be atomic on 32bit so extra care should be taken.
        uint64_t unused_bytes = FastInterlockCompareExchangeLong((LONG64*)& Thread::dead_threads_non_alloc_bytes, 0, 0);
#endif

        uint64_t allocated_bytes = GCHeapUtilities::GetGCHeap()->GetTotalAllocatedBytes() - unused_bytes;

        // highest reported allocated_bytes. We do not want to report a value less than that even if unused_bytes has increased.
        static uint64_t high_watermark;

        uint64_t current_high = high_watermark;
        while (allocated_bytes > current_high)
        {
            uint64_t orig = FastInterlockCompareExchangeLong((LONG64*)& high_watermark, allocated_bytes, current_high);
            if (orig == current_high)
                return allocated_bytes;

            current_high = orig;
        }

        return current_high;
    }

    INT64 allocated;

    HELPER_METHOD_FRAME_BEGIN_RET_0();

    // We need to suspend/restart the EE to get each thread's
    // non-allocated memory from their allocation contexts

    ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_OTHER);

    allocated = GCHeapUtilities::GetGCHeap()->GetTotalAllocatedBytes() - Thread::dead_threads_non_alloc_bytes;

    for (Thread *pThread = ThreadStore::GetThreadList(NULL); pThread; pThread = ThreadStore::GetThreadList(pThread))
    {
        gc_alloc_context* ac = pThread->GetAllocContext();
        allocated -= ac->alloc_limit - ac->alloc_ptr;
    }

    ThreadSuspend::RestartEE(FALSE, TRUE);

    HELPER_METHOD_FRAME_END();

    return allocated;
}
FCIMPLEND;

#ifdef FEATURE_BASICFREEZE

/*===============================RegisterFrozenSegment===============================
**Action: Registers the frozen segment
**Returns: segment_handle
**Arguments: args-> pointer to section, size of section
**Exceptions: None
==============================================================================*/
extern "C" void* QCALLTYPE GCInterface_RegisterFrozenSegment(void* pSection, SIZE_T sizeSection)
{
    QCALL_CONTRACT;

    void* retVal = nullptr;

    BEGIN_QCALL;

    _ASSERTE(pSection != nullptr);
    _ASSERTE(sizeSection > 0);

    GCX_COOP();

    segment_info seginfo;
    seginfo.pvMem         = pSection;
    seginfo.ibFirstObject = sizeof(ObjHeader);
    seginfo.ibAllocated   = sizeSection;
    seginfo.ibCommit      = seginfo.ibAllocated;
    seginfo.ibReserved    = seginfo.ibAllocated;

    retVal = (void*)GCHeapUtilities::GetGCHeap()->RegisterFrozenSegment(&seginfo);

    END_QCALL;

    return retVal;
}

/*===============================UnregisterFrozenSegment===============================
**Action: Unregisters the frozen segment
**Returns: void
**Arguments: args-> segment handle
**Exceptions: None
==============================================================================*/
extern "C" void QCALLTYPE GCInterface_UnregisterFrozenSegment(void* segment)
{
    QCALL_CONTRACT;

    BEGIN_QCALL;

    _ASSERTE(segment != nullptr);

    GCX_COOP();

    GCHeapUtilities::GetGCHeap()->UnregisterFrozenSegment((segment_handle)segment);

    END_QCALL;
}

#endif // FEATURE_BASICFREEZE

/*==============================SuppressFinalize================================
**Action: Indicate that an object's finalizer should not be run by the system
**Arguments: Object of interest
**Exceptions: None
==============================================================================*/
FCIMPL1(void, GCInterface::SuppressFinalize, Object *obj)
{
    FCALL_CONTRACT;

    // Checked by the caller
    _ASSERTE(obj != NULL);

    if (!obj->GetMethodTable ()->HasFinalizer())
        return;

    GCHeapUtilities::GetGCHeap()->SetFinalizationRun(obj);
    FC_GC_POLL();
}
FCIMPLEND

/*============================ReRegisterForFinalize==============================
**Action: Indicate that an object's finalizer should be run by the system.
**Arguments: Object of interest
**Exceptions: None
==============================================================================*/
FCIMPL1(void, GCInterface::ReRegisterForFinalize, Object *obj)
{
    FCALL_CONTRACT;

    // Checked by the caller
    _ASSERTE(obj != NULL);

    if (obj->GetMethodTable()->HasFinalizer())
    {
        HELPER_METHOD_FRAME_BEGIN_1(obj);
        if (!GCHeapUtilities::GetGCHeap()->RegisterForFinalization(-1, obj))
        {
            ThrowOutOfMemory();
        }
        HELPER_METHOD_FRAME_END();
    }
}
FCIMPLEND

FORCEINLINE UINT64 GCInterface::InterlockedAdd(UINT64 *pAugend, UINT64 addend)
{
    WRAPPER_NO_CONTRACT;

    UINT64 oldMemValue;
    UINT64 newMemValue;

    do
    {
        oldMemValue = *pAugend;
        newMemValue = oldMemValue + addend;

        // check for overflow
        if (newMemValue < oldMemValue)
        {
            newMemValue = UINT64_MAX;
        }
    }
    while (InterlockedCompareExchange64((LONGLONG*) pAugend, (LONGLONG) newMemValue, (LONGLONG) oldMemValue) != (LONGLONG) oldMemValue);

    return newMemValue;
}

FORCEINLINE UINT64 GCInterface::InterlockedSub(UINT64 *pMinuend, UINT64 subtrahend)
{
    WRAPPER_NO_CONTRACT;

    UINT64 oldMemValue;
    UINT64 newMemValue;

    do
    {
        oldMemValue = *pMinuend;
        newMemValue = oldMemValue - subtrahend;

        // check for underflow
        if (newMemValue > oldMemValue)
            newMemValue = 0;
    }
    while (InterlockedCompareExchange64((LONGLONG*) pMinuend, (LONGLONG) newMemValue, (LONGLONG) oldMemValue) != (LONGLONG) oldMemValue);

    return newMemValue;
}

extern "C" void QCALLTYPE GCInterface_AddMemoryPressure(UINT64 bytesAllocated)
{
    QCALL_CONTRACT;

    BEGIN_QCALL;
    GCInterface::AddMemoryPressure(bytesAllocated);
    END_QCALL;
}

#ifdef HOST_64BIT
const unsigned MIN_MEMORYPRESSURE_BUDGET = 4 * 1024 * 1024;     // 4 MB
#else // HOST_64BIT
const unsigned MIN_MEMORYPRESSURE_BUDGET = 3 * 1024 * 1024;     // 3 MB
#endif // HOST_64BIT

const unsigned MAX_MEMORYPRESSURE_RATIO = 10;                   // 40 MB or 30 MB

// Resets pressure accounting after a gen2 GC has occurred.
void GCInterface::CheckCollectionCount()
{
    LIMITED_METHOD_CONTRACT;

    IGCHeap * pHeap = GCHeapUtilities::GetGCHeap();

    if (m_gc_counts[2] != pHeap->CollectionCount(2))
    {
        for (int i = 0; i < 3; i++)
        {
            m_gc_counts[i] = pHeap->CollectionCount(i);
        }

        m_iteration++;

        UINT p = m_iteration % MEM_PRESSURE_COUNT;

        m_addPressure[p] = 0;   // new pressure will be accumulated here
        m_remPressure[p] = 0;
    }
}

// AddMemoryPressure implementation
//
//   1. Start budget - MIN_MEMORYPRESSURE_BUDGET
//   2. Focuses more on newly added memory pressure
//   3. Budget adjusted by effectiveness of last 3 triggered GC (add / remove ratio, max 10x)
//   4. Budget maxed with 30% of current managed GC size
//   5.
//      If Gen2 GC is happening naturally, ignore past pressure
//
// Here's a brief description of the ideal algorithm for Add/Remove memory pressure:
// Do a GC when (HeapStart < X * MemPressureGrowth) where
// - HeapStart is GC Heap size after doing the last GC
// - MemPressureGrowth is the net of Add and Remove since the last GC
// - X is proportional to our guess of the ummanaged memory death rate per GC interval,
//   and would be calculated based on historic data using standard exponential approximation:
//   Xnew = UMDeath/UMTotal * 0.5 + Xprev
//
void GCInterface::AddMemoryPressure(UINT64 bytesAllocated)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    CheckCollectionCount();

    UINT p = m_iteration % MEM_PRESSURE_COUNT;

    UINT64 newMemValue = InterlockedAdd(&m_addPressure[p], bytesAllocated);

    static_assert(MEM_PRESSURE_COUNT == 4, "AddMemoryPressure contains unrolled loops which depend on MEM_PRESSURE_COUNT");

    UINT64 add = m_addPressure[0] + m_addPressure[1] + m_addPressure[2] + m_addPressure[3] - m_addPressure[p];
    UINT64 rem = m_remPressure[0] + m_remPressure[1] + m_remPressure[2] + m_remPressure[3] - m_remPressure[p];

    STRESS_LOG4(LF_GCINFO, LL_INFO10000, "AMP Add: %I64u => added=%I64u total_added=%I64u total_removed=%I64u",
        bytesAllocated, newMemValue, add, rem);

    SendEtwAddMemoryPressureEvent(bytesAllocated);

    if (newMemValue >= MIN_MEMORYPRESSURE_BUDGET)
    {
        UINT64 budget = MIN_MEMORYPRESSURE_BUDGET;

        if (m_iteration >= MEM_PRESSURE_COUNT) // wait until we have enough data points
        {
            // Adjust according to effectiveness of GC
            // Scale budget according to past m_addPressure / m_remPressure ratio
            if (add >= rem * MAX_MEMORYPRESSURE_RATIO)
            {
                budget = MIN_MEMORYPRESSURE_BUDGET * MAX_MEMORYPRESSURE_RATIO;
            }
            else if (add > rem)
            {
                CONSISTENCY_CHECK(rem != 0);

                // Avoid overflow by calculating addPressure / remPressure as fixed point (1 = 1024)
                budget = (add * 1024 / rem) * budget / 1024;
            }
        }

        // If still over budget, check current managed heap size
        if (newMemValue >= budget)
        {
            IGCHeap *pGCHeap = GCHeapUtilities::GetGCHeap();
            UINT64 heapOver3 = pGCHeap->GetCurrentObjSize() / 3;

            if (budget < heapOver3) // Max
            {
                budget = heapOver3;
            }

            if (newMemValue >= budget)
            {
                // last check - if we would exceed 20% of GC "duty cycle", do not trigger GC at this time
                if ((size_t)(pGCHeap->GetNow() - pGCHeap->GetLastGCStartTime(2)) > (pGCHeap->GetLastGCDuration(2) * 5))
                {
                    STRESS_LOG6(LF_GCINFO, LL_INFO10000, "AMP Budget: pressure=%I64u ? budget=%I64u (total_added=%I64u, total_removed=%I64u, mng_heap=%I64u) pos=%d",
                        newMemValue, budget, add, rem, heapOver3 * 3, m_iteration);

                    GarbageCollectModeAny(2);

                    CheckCollectionCount();
                }
            }
        }
    }
}

extern "C" void QCALLTYPE GCInterface_RemoveMemoryPressure(UINT64 bytesAllocated)
{
    QCALL_CONTRACT;

    BEGIN_QCALL;
    GCInterface::RemoveMemoryPressure(bytesAllocated);
    END_QCALL;
}

void GCInterface::RemoveMemoryPressure(UINT64 bytesAllocated)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    CheckCollectionCount();

    UINT p = m_iteration % MEM_PRESSURE_COUNT;

    SendEtwRemoveMemoryPressureEvent(bytesAllocated);

    InterlockedAdd(&m_remPressure[p], bytesAllocated);

    STRESS_LOG2(LF_GCINFO, LL_INFO10000, "AMP Remove: %I64u => removed=%I64u",
        bytesAllocated, m_remPressure[p]);
}

inline void GCInterface::SendEtwAddMemoryPressureEvent(UINT64 bytesAllocated)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    FireEtwIncreaseMemoryPressure(bytesAllocated, GetClrInstanceId());
}

// Out-of-line helper to avoid EH prolog/epilog in functions that otherwise don't throw.
NOINLINE void GCInterface::SendEtwRemoveMemoryPressureEvent(UINT64 bytesAllocated)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    EX_TRY
    {
        FireEtwDecreaseMemoryPressure(bytesAllocated, GetClrInstanceId());
    }
    EX_CATCH
    {
        // Ignore failures
    }
    EX_END_CATCH(SwallowAllExceptions)
}

// Out-of-line helper to avoid EH prolog/epilog in functions that otherwise don't throw.
NOINLINE void GCInterface::GarbageCollectModeAny(int generation)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    GCX_COOP();
    GCHeapUtilities::GetGCHeap()->GarbageCollect(generation, false, collection_non_blocking);
}

//
// COMInterlocked
//

#include <optsmallperfcritical.h>

FCIMPL2(INT32, COMInterlocked::Exchange, INT32 *location, INT32 value)
{
    FCALL_CONTRACT;

    if (NULL == location)
    {
        FCThrow(kNullReferenceException);
    }

    return FastInterlockExchange((LONG *) location, value);
}
FCIMPLEND

FCIMPL2_IV(INT64, COMInterlocked::Exchange64, INT64 *location, INT64 value)
{
    FCALL_CONTRACT;

    if (NULL == location)
    {
        FCThrow(kNullReferenceException);
    }

    return FastInterlockExchangeLong((INT64 *) location, value);
}
FCIMPLEND

FCIMPL3(INT32, COMInterlocked::CompareExchange, INT32* location, INT32 value, INT32 comparand)
{
    FCALL_CONTRACT;

    if (NULL == location)
    {
        FCThrow(kNullReferenceException);
    }

    return FastInterlockCompareExchange((LONG*)location, value, comparand);
}
FCIMPLEND

FCIMPL3_IVV(INT64, COMInterlocked::CompareExchange64, INT64* location, INT64 value, INT64 comparand)
{
    FCALL_CONTRACT;

    if (NULL == location)
    {
        FCThrow(kNullReferenceException);
    }

    return FastInterlockCompareExchangeLong((INT64*)location, value, comparand);
}
FCIMPLEND

FCIMPL2_IV(float, COMInterlocked::ExchangeFloat, float *location, float value)
{
    FCALL_CONTRACT;

    if (NULL == location)
    {
        FCThrow(kNullReferenceException);
    }

    LONG ret = FastInterlockExchange((LONG *) location, *(LONG*)&value);
    return *(float*)&ret;
}
FCIMPLEND

FCIMPL2_IV(double, COMInterlocked::ExchangeDouble, double *location, double value)
{
    FCALL_CONTRACT;

    if (NULL == location)
    {
        FCThrow(kNullReferenceException);
    }

    INT64 ret = FastInterlockExchangeLong((INT64 *) location, *(INT64*)&value);
    return *(double*)&ret;
}
FCIMPLEND

FCIMPL3_IVV(float, COMInterlocked::CompareExchangeFloat, float *location, float value, float comparand)
{
    FCALL_CONTRACT;

    if (NULL == location)
    {
        FCThrow(kNullReferenceException);
    }

    LONG ret = (LONG)FastInterlockCompareExchange((LONG*) location, *(LONG*)&value, *(LONG*)&comparand);
    return *(float*)&ret;
}
FCIMPLEND

FCIMPL3_IVV(double, COMInterlocked::CompareExchangeDouble, double *location, double value, double comparand)
{
    FCALL_CONTRACT;

    if (NULL == location)
    {
        FCThrow(kNullReferenceException);
    }

    INT64 ret = (INT64)FastInterlockCompareExchangeLong((INT64*) location, *(INT64*)&value, *(INT64*)&comparand);
    return *(double*)&ret;
}
FCIMPLEND

FCIMPL2(LPVOID, COMInterlocked::ExchangeObject, LPVOID* location, LPVOID value)
{
    FCALL_CONTRACT;

    if (NULL == location)
    {
        FCThrow(kNullReferenceException);
    }

    LPVOID ret = FastInterlockExchangePointer(location, value);

#ifdef _DEBUG
    Thread::ObjectRefAssign((OBJECTREF *)location);
#endif
    ErectWriteBarrier((OBJECTREF*) location, ObjectToOBJECTREF((Object*) value));
    return ret;
}
FCIMPLEND

FCIMPL3(LPVOID, COMInterlocked::CompareExchangeObject, LPVOID *location, LPVOID value, LPVOID comparand)
{
    FCALL_CONTRACT;

    if (NULL == location)
    {
        FCThrow(kNullReferenceException);
    }

    // <TODO>@todo: only set ref if is updated</TODO>
    LPVOID ret = FastInterlockCompareExchangePointer(location, value, comparand);
    if (ret == comparand)
    {
#ifdef _DEBUG
        Thread::ObjectRefAssign((OBJECTREF *)location);
#endif
        ErectWriteBarrier((OBJECTREF*) location, ObjectToOBJECTREF((Object*) value));
    }
    return ret;
}
FCIMPLEND

FCIMPL2(INT32, COMInterlocked::ExchangeAdd32, INT32 *location, INT32 value)
{
    FCALL_CONTRACT;

    if (NULL == location)
    {
        FCThrow(kNullReferenceException);
    }

    return FastInterlockExchangeAdd((LONG *) location, value);
}
FCIMPLEND

FCIMPL2_IV(INT64, COMInterlocked::ExchangeAdd64, INT64 *location, INT64 value)
{
    FCALL_CONTRACT;

    if (NULL == location)
    {
        FCThrow(kNullReferenceException);
    }

    return FastInterlockExchangeAddLong((INT64 *) location, value);
}
FCIMPLEND

FCIMPL0(void, COMInterlocked::FCMemoryBarrier)
{
    FCALL_CONTRACT;

    MemoryBarrier();
    FC_GC_POLL();
}
FCIMPLEND

FCIMPL0(void, COMInterlocked::FCMemoryBarrierLoad)
{
    FCALL_CONTRACT;

    VolatileLoadBarrier();
    FC_GC_POLL();
}
FCIMPLEND

#include <optdefault.h>

extern "C" void QCALLTYPE Interlocked_MemoryBarrierProcessWide()
{
    QCALL_CONTRACT;

    FlushProcessWriteBuffers();
}

static BOOL HasOverriddenMethod(MethodTable* mt, MethodTable* classMT, WORD methodSlot)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    _ASSERTE(mt != NULL);
    _ASSERTE(classMT != NULL);
    _ASSERTE(methodSlot != 0);

    PCODE actual = mt->GetRestoredSlot(methodSlot);
    PCODE base = classMT->GetRestoredSlot(methodSlot);

    if (actual == base)
    {
        return FALSE;
    }

    // If CoreLib is JITed, the slots can be patched and thus we need to compare the actual MethodDescs
    // to detect match reliably
    if (MethodTable::GetMethodDescForSlotAddress(actual) == MethodTable::GetMethodDescForSlotAddress(base))
    {
        return FALSE;
    }

    return TRUE;
}

static BOOL CanCompareBitsOrUseFastGetHashCode(MethodTable* mt)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    _ASSERTE(mt != NULL);

    if (mt->HasCheckedCanCompareBitsOrUseFastGetHashCode())
    {
        return mt->CanCompareBitsOrUseFastGetHashCode();
    }

    if (mt->ContainsPointers() || mt->IsNotTightlyPacked())
    {
        mt->SetHasCheckedCanCompareBitsOrUseFastGetHashCode();
        return FALSE;
    }

    MethodTable* valueTypeMT = CoreLibBinder::GetClass(CLASS__VALUE_TYPE);
    WORD slotEquals = CoreLibBinder::GetMethod(METHOD__VALUE_TYPE__EQUALS)->GetSlot();
    WORD slotGetHashCode = CoreLibBinder::GetMethod(METHOD__VALUE_TYPE__GET_HASH_CODE)->GetSlot();

    // Check the input type.
    if (HasOverriddenMethod(mt, valueTypeMT, slotEquals) ||
        HasOverriddenMethod(mt, valueTypeMT, slotGetHashCode))
    {
        mt->SetHasCheckedCanCompareBitsOrUseFastGetHashCode();

        // If overridden Equals or GetHashCode found, stop searching further.
        return FALSE;
    }

    BOOL canCompareBitsOrUseFastGetHashCode = TRUE;

    // The type itself did not override Equals or GetHashCode, go for its fields.
    ApproxFieldDescIterator iter = ApproxFieldDescIterator(mt, ApproxFieldDescIterator::INSTANCE_FIELDS);
    for (FieldDesc* pField = iter.Next(); pField != NULL; pField = iter.Next())
    {
        if (pField->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
        {
            // Check current field type.
            MethodTable* fieldMethodTable = pField->GetApproxFieldTypeHandleThrowing().GetMethodTable();
            if (!CanCompareBitsOrUseFastGetHashCode(fieldMethodTable))
            {
                canCompareBitsOrUseFastGetHashCode = FALSE;
                break;
            }
        }
        else if (pField->GetFieldType() == ELEMENT_TYPE_R8
                 || pField->GetFieldType() == ELEMENT_TYPE_R4)
        {
            // We have double/single field, cannot compare in fast path.
            canCompareBitsOrUseFastGetHashCode = FALSE;
            break;
        }
    }

    // We've gone through all instance fields. It's time to cache the result.
    // Note SetCanCompareBitsOrUseFastGetHashCode(BOOL) ensures the checked flag
    // and canCompare flag being set atomically to avoid race.
    mt->SetCanCompareBitsOrUseFastGetHashCode(canCompareBitsOrUseFastGetHashCode);

    return canCompareBitsOrUseFastGetHashCode;
}

NOINLINE static FC_BOOL_RET CanCompareBitsHelper(MethodTable* mt, OBJECTREF objRef)
{
    FC_INNER_PROLOG(ValueTypeHelper::CanCompareBits);

    _ASSERTE(mt != NULL);
    _ASSERTE(objRef != NULL);

    BOOL ret = FALSE;

    HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, objRef);

    ret = CanCompareBitsOrUseFastGetHashCode(mt);

    HELPER_METHOD_FRAME_END();
    FC_INNER_EPILOG();

    FC_RETURN_BOOL(ret);
}

// Return true if the valuetype does not contain pointer, is tightly packed,
// does not have floating point number field and does not override Equals method.
FCIMPL1(FC_BOOL_RET, ValueTypeHelper::CanCompareBits, Object* obj)
{
    FCALL_CONTRACT;

    _ASSERTE(obj != NULL);
    MethodTable* mt = obj->GetMethodTable();

    if (mt->HasCheckedCanCompareBitsOrUseFastGetHashCode())
    {
        FC_RETURN_BOOL(mt->CanCompareBitsOrUseFastGetHashCode());
    }

    OBJECTREF objRef(obj);

    FC_INNER_RETURN(FC_BOOL_RET, CanCompareBitsHelper(mt, objRef));
}
FCIMPLEND

FCIMPL2(FC_BOOL_RET, ValueTypeHelper::FastEqualsCheck, Object* obj1, Object* obj2)
{
    FCALL_CONTRACT;

    _ASSERTE(obj1 != NULL);
    _ASSERTE(obj2 != NULL);
    _ASSERTE(!obj1->GetMethodTable()->ContainsPointers());
    _ASSERTE(obj1->GetSize() == obj2->GetSize());

    TypeHandle pTh = obj1->GetTypeHandle();

    FC_RETURN_BOOL(memcmp(obj1->GetData(), obj2->GetData(), pTh.GetSize()) == 0);
}
FCIMPLEND

static INT32 FastGetValueTypeHashCodeHelper(MethodTable *mt, void *pObjRef)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    INT32 hashCode = 0;
    INT32 *pObj = (INT32*)pObjRef;

    // this is a struct with no refs and no "strange" offsets, just go through the obj and xor the bits
    INT32 size = mt->GetNumInstanceFieldBytes();
    for (INT32 i = 0; i < (INT32)(size / sizeof(INT32)); i++)
        hashCode ^= *pObj++;

    return hashCode;
}

static INT32 RegularGetValueTypeHashCode(MethodTable *mt, void *pObjRef)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    INT32 hashCode = 0;

    GCPROTECT_BEGININTERIOR(pObjRef);

    BOOL canUseFastGetHashCodeHelper = FALSE;
    if (mt->HasCheckedCanCompareBitsOrUseFastGetHashCode())
    {
        canUseFastGetHashCodeHelper = mt->CanCompareBitsOrUseFastGetHashCode();
    }
    else
    {
        canUseFastGetHashCodeHelper = CanCompareBitsOrUseFastGetHashCode(mt);
    }

    // While we shouln't get here directly from ValueTypeHelper::GetHashCode, if we recurse we need to
    // be able to handle getting the hashcode for an embedded structure whose hashcode is computed by the fast path.
    if (canUseFastGetHashCodeHelper)
    {
        hashCode = FastGetValueTypeHashCodeHelper(mt, pObjRef);
    }
    else
    {
        // it's looking ugly so we'll use the old behavior in managed code. Grab the first non-null
        // field and return its hash code or 'it' as hash code
        // <TODO> Note that the old behavior has already been broken for value types
        // that is qualified for CanUseFastGetHashCodeHelper. So maybe we should
        // change the implementation here to use all fields instead of just the 1st one.
        // </TODO>
        //
        // <TODO> check this approximation - we may be losing exact type information </TODO>
        ApproxFieldDescIterator fdIterator(mt, ApproxFieldDescIterator::INSTANCE_FIELDS);
        FieldDesc *field;
        while ((field = fdIterator.Next()) != NULL)
        {
            _ASSERTE(!field->IsRVA());
            if (field->IsObjRef())
            {
                // if we get an object reference we get the hash code out of that
                if (*(Object**)((BYTE *)pObjRef + field->GetOffsetUnsafe()) != NULL)
                {
                    PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__OBJECT__GET_HASH_CODE,
                                                    (*(Object**)((BYTE *)pObjRef + field->GetOffsetUnsafe())));
                    DECLARE_ARGHOLDER_ARRAY(args, 1);
                    args[ARGNUM_0] = PTR_TO_ARGHOLDER(*(Object**)((BYTE *)pObjRef + field->GetOffsetUnsafe()));
                    CALL_MANAGED_METHOD(hashCode, INT32, args);
                }
                else
                {
                    // null object reference, try next
                    continue;
                }
            }
            else
            {
                CorElementType fieldType = field->GetFieldType();
                if (fieldType == ELEMENT_TYPE_R8)
                {
                    PREPARE_NONVIRTUAL_CALLSITE(METHOD__DOUBLE__GET_HASH_CODE);
                    DECLARE_ARGHOLDER_ARRAY(args, 1);
                    args[ARGNUM_0] = PTR_TO_ARGHOLDER(((BYTE *)pObjRef + field->GetOffsetUnsafe()));
                    CALL_MANAGED_METHOD(hashCode, INT32, args);
                }
                else if (fieldType == ELEMENT_TYPE_R4)
                {
                    PREPARE_NONVIRTUAL_CALLSITE(METHOD__SINGLE__GET_HASH_CODE);
                    DECLARE_ARGHOLDER_ARRAY(args, 1);
                    args[ARGNUM_0] = PTR_TO_ARGHOLDER(((BYTE *)pObjRef + field->GetOffsetUnsafe()));
                    CALL_MANAGED_METHOD(hashCode, INT32, args);
                }
                else if (fieldType != ELEMENT_TYPE_VALUETYPE)
                {
                    UINT fieldSize = field->LoadSize();
                    INT32 *pValue = (INT32*)((BYTE *)pObjRef + field->GetOffsetUnsafe());
                    for (INT32 j = 0; j < (INT32)(fieldSize / sizeof(INT32)); j++)
                        hashCode ^= *pValue++;
                }
                else
                {
                    // got another value type. Get the type
                    TypeHandle fieldTH = field->GetFieldTypeHandleThrowing();
                    _ASSERTE(!fieldTH.IsNull());
                    hashCode = RegularGetValueTypeHashCode(fieldTH.GetMethodTable(),
                                                           (BYTE *)pObjRef + field->GetOffsetUnsafe());
                }
            }
            break;
        }
    }

    GCPROTECT_END();

    return hashCode;
}

// The default implementation of GetHashCode() for all value types.
// Note that this implementation reveals the value of the fields.
// So if the value type contains any sensitive information it should
// implement its own GetHashCode().
FCIMPL1(INT32, ValueTypeHelper::GetHashCode, Object* objUNSAFE)
{
    FCALL_CONTRACT;

    if (objUNSAFE == NULL)
        FCThrow(kNullReferenceException);

    OBJECTREF obj = ObjectToOBJECTREF(objUNSAFE);
    VALIDATEOBJECTREF(obj);

    INT32 hashCode = 0;
    MethodTable *pMT = objUNSAFE->GetMethodTable();

    // We don't want to expose the method table pointer in the hash code
    // Let's use the typeID instead.
    UINT32 typeID = pMT->LookupTypeID();
    if (typeID == TypeIDProvider::INVALID_TYPE_ID)
    {
        // If the typeID has yet to be generated, fall back to GetTypeID
        // This only needs to be done once per MethodTable
        HELPER_METHOD_FRAME_BEGIN_RET_1(obj);
        typeID = pMT->GetTypeID();
        HELPER_METHOD_FRAME_END();
    }

    // To get less colliding and more evenly distributed hash codes,
    // we munge the class index with two big prime numbers
    hashCode = typeID * 711650207 + 2506965631U;

    BOOL canUseFastGetHashCodeHelper = FALSE;
    if (pMT->HasCheckedCanCompareBitsOrUseFastGetHashCode())
    {
        canUseFastGetHashCodeHelper = pMT->CanCompareBitsOrUseFastGetHashCode();
    }
    else
    {
        HELPER_METHOD_FRAME_BEGIN_RET_1(obj);
        canUseFastGetHashCodeHelper = CanCompareBitsOrUseFastGetHashCode(pMT);
        HELPER_METHOD_FRAME_END();
    }

    if (canUseFastGetHashCodeHelper)
    {
        hashCode ^= FastGetValueTypeHashCodeHelper(pMT, obj->UnBox());
    }
    else
    {
        HELPER_METHOD_FRAME_BEGIN_RET_1(obj);
        hashCode ^= RegularGetValueTypeHashCode(pMT, obj->UnBox());
        HELPER_METHOD_FRAME_END();
    }

    return hashCode;
}
FCIMPLEND

static LONG s_dwSeed;

FCIMPL1(INT32, ValueTypeHelper::GetHashCodeOfPtr, LPVOID ptr)
{
    FCALL_CONTRACT;

    INT32 hashCode = (INT32)((INT64)(ptr));

    if (hashCode == 0)
    {
        return 0;
    }

    DWORD dwSeed = s_dwSeed;

    // Initialize s_dwSeed lazily
    if (dwSeed == 0)
    {
        // We use the first non-0 pointer as the seed, all hashcodes will be based off that.
        // This is to make sure that we only reveal relative memory addresses and never absolute ones.
        dwSeed = hashCode;
        InterlockedCompareExchange(&s_dwSeed, dwSeed, 0);
        dwSeed = s_dwSeed;
    }
    _ASSERTE(dwSeed != 0);

    return hashCode - dwSeed;
}
FCIMPLEND

static MethodTable * g_pStreamMT;
static WORD g_slotBeginRead, g_slotEndRead;
static WORD g_slotBeginWrite, g_slotEndWrite;

static bool HasOverriddenStreamMethod(MethodTable * pMT, WORD slot)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    PCODE actual = pMT->GetRestoredSlot(slot);
    PCODE base = g_pStreamMT->GetRestoredSlot(slot);
    if (actual == base)
        return false;

    // If CoreLib is JITed, the slots can be patched and thus we need to compare the actual MethodDescs
    // to detect match reliably
    if (MethodTable::GetMethodDescForSlotAddress(actual) == MethodTable::GetMethodDescForSlotAddress(base))
        return false;

    return true;
}

FCIMPL1(FC_BOOL_RET, StreamNative::HasOverriddenBeginEndRead, Object *stream)
{
    FCALL_CONTRACT;

    if (stream == NULL)
        FC_RETURN_BOOL(TRUE);

    if (g_pStreamMT == NULL || g_slotBeginRead == 0 || g_slotEndRead == 0)
    {
        HELPER_METHOD_FRAME_BEGIN_RET_1(stream);
        g_pStreamMT = CoreLibBinder::GetClass(CLASS__STREAM);
        g_slotBeginRead = CoreLibBinder::GetMethod(METHOD__STREAM__BEGIN_READ)->GetSlot();
        g_slotEndRead = CoreLibBinder::GetMethod(METHOD__STREAM__END_READ)->GetSlot();
        HELPER_METHOD_FRAME_END();
    }

    MethodTable * pMT = stream->GetMethodTable();

    FC_RETURN_BOOL(HasOverriddenStreamMethod(pMT, g_slotBeginRead) ||
                   HasOverriddenStreamMethod(pMT, g_slotEndRead));
}
FCIMPLEND

FCIMPL1(FC_BOOL_RET, StreamNative::HasOverriddenBeginEndWrite, Object *stream)
{
    FCALL_CONTRACT;

    if (stream == NULL)
        FC_RETURN_BOOL(TRUE);

    if (g_pStreamMT == NULL || g_slotBeginWrite == 0 || g_slotEndWrite == 0)
    {
        HELPER_METHOD_FRAME_BEGIN_RET_1(stream);
        g_pStreamMT = CoreLibBinder::GetClass(CLASS__STREAM);
        g_slotBeginWrite = CoreLibBinder::GetMethod(METHOD__STREAM__BEGIN_WRITE)->GetSlot();
        g_slotEndWrite = CoreLibBinder::GetMethod(METHOD__STREAM__END_WRITE)->GetSlot();
        HELPER_METHOD_FRAME_END();
    }

    MethodTable * pMT = stream->GetMethodTable();

    FC_RETURN_BOOL(HasOverriddenStreamMethod(pMT, g_slotBeginWrite) ||
                   HasOverriddenStreamMethod(pMT, g_slotEndWrite));
}
FCIMPLEND
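The GCInterface memory-pressure entry points above back the public System.GC API. A minimal managed usage sketch follows; it uses only the standard GC.AddMemoryPressure / GC.RemoveMemoryPressure / GC.SuppressFinalize APIs, and the UnmanagedBuffer type is a hypothetical example, not code from this record.

using System;
using System.Runtime.InteropServices;

sealed class UnmanagedBuffer : IDisposable
{
    private readonly IntPtr _ptr;
    private readonly long _size;

    public UnmanagedBuffer(long size)
    {
        _ptr = Marshal.AllocHGlobal((IntPtr)size);
        _size = size;

        // Informs the GC about unmanaged memory kept alive by this object;
        // this feeds the budget accounting in GCInterface::AddMemoryPressure above.
        GC.AddMemoryPressure(size);
    }

    public void Dispose()
    {
        Marshal.FreeHGlobal(_ptr);

        // Balances the earlier call (handled by GCInterface::RemoveMemoryPressure).
        GC.RemoveMemoryPressure(_size);

        // Routes to GCInterface::SuppressFinalize so the finalizer is skipped.
        GC.SuppressFinalize(this);
    }

    ~UnmanagedBuffer() => Dispose();
}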
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // /*============================================================ ** ** File: COMUtilNative ** ** ** ** Purpose: A dumping ground for classes which aren't large ** enough to get their own file in the EE. ** ** ** ===========================================================*/ #include "common.h" #include "object.h" #include "excep.h" #include "vars.hpp" #include "comutilnative.h" #include "utilcode.h" #include "frames.h" #include "field.h" #include "winwrap.h" #include "gcheaputilities.h" #include "fcall.h" #include "invokeutil.h" #include "eeconfig.h" #include "typestring.h" #include "finalizerthread.h" #include "threadsuspend.h" #ifdef FEATURE_COMINTEROP #include "comcallablewrapper.h" #include "comcache.h" #endif // FEATURE_COMINTEROP #include "arraynative.inl" /*===================================IsDigit==================================== **Returns a bool indicating whether the character passed in represents a ** **digit. ==============================================================================*/ bool IsDigit(WCHAR c, int radix, int *result) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(result)); } CONTRACTL_END; if (IS_DIGIT(c)) { *result = DIGIT_TO_INT(c); } else if (c>='A' && c<='Z') { //+10 is necessary because A is actually 10, etc. *result = c-'A'+10; } else if (c>='a' && c<='z') { //+10 is necessary because a is actually 10, etc. *result = c-'a'+10; } else { *result = -1; } if ((*result >=0) && (*result < radix)) return true; return false; } INT32 wtoi(_In_reads_(length) WCHAR* wstr, DWORD length) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(CheckPointer(wstr)); PRECONDITION(length >= 0); } CONTRACTL_END; DWORD i = 0; int value; INT32 result = 0; while ( (i < length) && (IsDigit(wstr[i], 10 ,&value)) ) { //Read all of the digits and convert to a number result = result*10 + value; i++; } return result; } // // // EXCEPTION NATIVE // // FCIMPL1(FC_BOOL_RET, ExceptionNative::IsImmutableAgileException, Object* pExceptionUNSAFE) { FCALL_CONTRACT; ASSERT(pExceptionUNSAFE != NULL); OBJECTREF pException = (OBJECTREF) pExceptionUNSAFE; // The preallocated exception objects may be used from multiple AppDomains // and therefore must remain immutable from the application's perspective. FC_RETURN_BOOL(CLRException::IsPreallocatedExceptionObject(pException)); } FCIMPLEND // This FCall sets a flag against the thread exception state to indicate to // IL_Throw and the StackTraceInfo implementation to account for the fact // that we have restored a foreign exception dispatch details. // // Refer to the respective methods for details on how they use this flag. FCIMPL0(VOID, ExceptionNative::PrepareForForeignExceptionRaise) { FCALL_CONTRACT; PTR_ThreadExceptionState pCurTES = GetThread()->GetExceptionState(); // Set a flag against the TES to indicate this is a foreign exception raise. pCurTES->SetRaisingForeignException(); } FCIMPLEND // Given an exception object, this method will extract the stacktrace and dynamic method array and set them up for return to the caller. 
FCIMPL3(VOID, ExceptionNative::GetStackTracesDeepCopy, Object* pExceptionObjectUnsafe, Object **pStackTraceUnsafe, Object **pDynamicMethodsUnsafe); { CONTRACTL { FCALL_CHECK; } CONTRACTL_END; ASSERT(pExceptionObjectUnsafe != NULL); ASSERT(pStackTraceUnsafe != NULL); ASSERT(pDynamicMethodsUnsafe != NULL); struct _gc { StackTraceArray stackTrace; StackTraceArray stackTraceCopy; EXCEPTIONREF refException; PTRARRAYREF dynamicMethodsArray; // Object array of Managed Resolvers PTRARRAYREF dynamicMethodsArrayCopy; // Copy of the object array of Managed Resolvers }; _gc gc; ZeroMemory(&gc, sizeof(gc)); // GC protect the array reference HELPER_METHOD_FRAME_BEGIN_PROTECT(gc); // Get the exception object reference gc.refException = (EXCEPTIONREF)(ObjectToOBJECTREF(pExceptionObjectUnsafe)); // Fetch the stacktrace details from the exception under a lock gc.refException->GetStackTrace(gc.stackTrace, &gc.dynamicMethodsArray); bool fHaveStackTrace = false; bool fHaveDynamicMethodArray = false; if ((unsigned)gc.stackTrace.Size() > 0) { // Deepcopy the array gc.stackTraceCopy.CopyFrom(gc.stackTrace); fHaveStackTrace = true; } if (gc.dynamicMethodsArray != NULL) { // Get the number of elements in the dynamic methods array unsigned cOrigDynamic = gc.dynamicMethodsArray->GetNumComponents(); // ..and allocate a new array. This can trigger GC or throw under OOM. gc.dynamicMethodsArrayCopy = (PTRARRAYREF)AllocateObjectArray(cOrigDynamic, g_pObjectClass); // Deepcopy references to the new array we just allocated memmoveGCRefs(gc.dynamicMethodsArrayCopy->GetDataPtr(), gc.dynamicMethodsArray->GetDataPtr(), cOrigDynamic * sizeof(Object *)); fHaveDynamicMethodArray = true; } // Prep to return *pStackTraceUnsafe = fHaveStackTrace?OBJECTREFToObject(gc.stackTraceCopy.Get()):NULL; *pDynamicMethodsUnsafe = fHaveDynamicMethodArray?OBJECTREFToObject(gc.dynamicMethodsArrayCopy):NULL; HELPER_METHOD_FRAME_END(); } FCIMPLEND // Given an exception object and deep copied instances of a stacktrace and/or dynamic method array, this method will set the latter in the exception object instance. FCIMPL3(VOID, ExceptionNative::SaveStackTracesFromDeepCopy, Object* pExceptionObjectUnsafe, Object *pStackTraceUnsafe, Object *pDynamicMethodsUnsafe); { CONTRACTL { FCALL_CHECK; } CONTRACTL_END; ASSERT(pExceptionObjectUnsafe != NULL); struct _gc { StackTraceArray stackTrace; EXCEPTIONREF refException; PTRARRAYREF dynamicMethodsArray; // Object array of Managed Resolvers }; _gc gc; ZeroMemory(&gc, sizeof(gc)); // GC protect the array reference HELPER_METHOD_FRAME_BEGIN_PROTECT(gc); // Get the exception object reference gc.refException = (EXCEPTIONREF)(ObjectToOBJECTREF(pExceptionObjectUnsafe)); if (pStackTraceUnsafe != NULL) { // Copy the stacktrace StackTraceArray stackTraceArray((I1ARRAYREF)ObjectToOBJECTREF(pStackTraceUnsafe)); gc.stackTrace.Swap(stackTraceArray); } gc.dynamicMethodsArray = NULL; if (pDynamicMethodsUnsafe != NULL) { gc.dynamicMethodsArray = (PTRARRAYREF)ObjectToOBJECTREF(pDynamicMethodsUnsafe); } // If there is no stacktrace, then there cannot be any dynamic method array. Thus, // save stacktrace only when we have it. 
if (gc.stackTrace.Size() > 0) { // Save the stacktrace details in the exception under a lock gc.refException->SetStackTrace(gc.stackTrace.Get(), gc.dynamicMethodsArray); } else { gc.refException->SetStackTrace(NULL, NULL); } HELPER_METHOD_FRAME_END(); } FCIMPLEND BSTR BStrFromString(STRINGREF s) { CONTRACTL { THROWS; } CONTRACTL_END; WCHAR *wz; int cch; BSTR bstr; if (s == NULL) return NULL; s->RefInterpretGetStringValuesDangerousForGC(&wz, &cch); bstr = SysAllocString(wz); if (bstr == NULL) COMPlusThrowOM(); return bstr; } static BSTR GetExceptionDescription(OBJECTREF objException) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION( IsException(objException->GetMethodTable()) ); } CONTRACTL_END; BSTR bstrDescription; STRINGREF MessageString = NULL; GCPROTECT_BEGIN(MessageString) GCPROTECT_BEGIN(objException) { // read Exception.Message property MethodDescCallSite getMessage(METHOD__EXCEPTION__GET_MESSAGE, &objException); ARG_SLOT GetMessageArgs[] = { ObjToArgSlot(objException)}; MessageString = getMessage.Call_RetSTRINGREF(GetMessageArgs); // if the message string is empty then use the exception classname. if (MessageString == NULL || MessageString->GetStringLength() == 0) { // call GetClassName MethodDescCallSite getClassName(METHOD__EXCEPTION__GET_CLASS_NAME, &objException); ARG_SLOT GetClassNameArgs[] = { ObjToArgSlot(objException)}; MessageString = getClassName.Call_RetSTRINGREF(GetClassNameArgs); _ASSERTE(MessageString != NULL && MessageString->GetStringLength() != 0); } // Allocate the description BSTR. int DescriptionLen = MessageString->GetStringLength(); bstrDescription = SysAllocStringLen(MessageString->GetBuffer(), DescriptionLen); } GCPROTECT_END(); GCPROTECT_END(); return bstrDescription; } static BSTR GetExceptionSource(OBJECTREF objException) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION( IsException(objException->GetMethodTable()) ); } CONTRACTL_END; STRINGREF refRetVal; GCPROTECT_BEGIN(objException) // read Exception.Source property MethodDescCallSite getSource(METHOD__EXCEPTION__GET_SOURCE, &objException); ARG_SLOT GetSourceArgs[] = { ObjToArgSlot(objException)}; refRetVal = getSource.Call_RetSTRINGREF(GetSourceArgs); GCPROTECT_END(); return BStrFromString(refRetVal); } static void GetExceptionHelp(OBJECTREF objException, BSTR *pbstrHelpFile, DWORD *pdwHelpContext) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; INJECT_FAULT(COMPlusThrowOM()); PRECONDITION(IsException(objException->GetMethodTable())); PRECONDITION(CheckPointer(pbstrHelpFile)); PRECONDITION(CheckPointer(pdwHelpContext)); } CONTRACTL_END; *pdwHelpContext = 0; GCPROTECT_BEGIN(objException); // read Exception.HelpLink property MethodDescCallSite getHelpLink(METHOD__EXCEPTION__GET_HELP_LINK, &objException); ARG_SLOT GetHelpLinkArgs[] = { ObjToArgSlot(objException)}; *pbstrHelpFile = BStrFromString(getHelpLink.Call_RetSTRINGREF(GetHelpLinkArgs)); GCPROTECT_END(); // parse the help file to check for the presence of helpcontext int len = SysStringLen(*pbstrHelpFile); int pos = len; WCHAR *pwstr = *pbstrHelpFile; if (pwstr) { BOOL fFoundPound = FALSE; for (pos = len - 1; pos >= 0; pos--) { if (pwstr[pos] == W('#')) { fFoundPound = TRUE; break; } } if (fFoundPound) { int PoundPos = pos; int NumberStartPos = -1; BOOL bNumberStarted = FALSE; BOOL bNumberFinished = FALSE; BOOL bInvalidDigitsFound = FALSE; _ASSERTE(pwstr[pos] == W('#')); // Check to see if the string to the right of the pound is a valid number. 
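// Illustrative example (not part of the original source): for a help link such as "coreclr.hlp# 1234 " the scan below skips whitespace around the digits, yields dwHelpContext == 1234, and truncates the help file string at the pound sign to "coreclr.hlp"; for "coreclr.hlp#12a4" the 'a' is neither a digit nor whitespace, so the context stays 0 and the string is left untouched. 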
for (pos++; pos < len; pos++) { if (bNumberFinished) { if (!COMCharacter::nativeIsWhiteSpace(pwstr[pos])) { bInvalidDigitsFound = TRUE; break; } } else if (bNumberStarted) { if (COMCharacter::nativeIsWhiteSpace(pwstr[pos])) { bNumberFinished = TRUE; } else if (!COMCharacter::nativeIsDigit(pwstr[pos])) { bInvalidDigitsFound = TRUE; break; } } else { if (COMCharacter::nativeIsDigit(pwstr[pos])) { NumberStartPos = pos; bNumberStarted = TRUE; } else if (!COMCharacter::nativeIsWhiteSpace(pwstr[pos])) { bInvalidDigitsFound = TRUE; break; } } } if (bNumberStarted && !bInvalidDigitsFound) { // Grab the help context and remove it from the help file. *pdwHelpContext = (DWORD)wtoi(&pwstr[NumberStartPos], len - NumberStartPos); // Allocate a new help file string of the right length. BSTR strOld = *pbstrHelpFile; *pbstrHelpFile = SysAllocStringLen(strOld, PoundPos); SysFreeString(strOld); if (!*pbstrHelpFile) COMPlusThrowOM(); } } } } // NOTE: caller cleans up any partially initialized BSTRs in pED void ExceptionNative::GetExceptionData(OBJECTREF objException, ExceptionData *pED) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; PRECONDITION(IsException(objException->GetMethodTable())); PRECONDITION(CheckPointer(pED)); } CONTRACTL_END; ZeroMemory(pED, sizeof(ExceptionData)); GCPROTECT_BEGIN(objException); pED->hr = GetExceptionHResult(objException); pED->bstrDescription = GetExceptionDescription(objException); pED->bstrSource = GetExceptionSource(objException); GetExceptionHelp(objException, &pED->bstrHelpFile, &pED->dwHelpContext); GCPROTECT_END(); return; } #ifdef FEATURE_COMINTEROP HRESULT SimpleComCallWrapper::IErrorInfo_hr() { WRAPPER_NO_CONTRACT; return GetExceptionHResult(this->GetObjectRef()); } BSTR SimpleComCallWrapper::IErrorInfo_bstrDescription() { WRAPPER_NO_CONTRACT; return GetExceptionDescription(this->GetObjectRef()); } BSTR SimpleComCallWrapper::IErrorInfo_bstrSource() { WRAPPER_NO_CONTRACT; return GetExceptionSource(this->GetObjectRef()); } BSTR SimpleComCallWrapper::IErrorInfo_bstrHelpFile() { WRAPPER_NO_CONTRACT; BSTR bstrHelpFile; DWORD dwHelpContext; GetExceptionHelp(this->GetObjectRef(), &bstrHelpFile, &dwHelpContext); return bstrHelpFile; } DWORD SimpleComCallWrapper::IErrorInfo_dwHelpContext() { WRAPPER_NO_CONTRACT; BSTR bstrHelpFile; DWORD dwHelpContext; GetExceptionHelp(this->GetObjectRef(), &bstrHelpFile, &dwHelpContext); SysFreeString(bstrHelpFile); return dwHelpContext; } GUID SimpleComCallWrapper::IErrorInfo_guid() { LIMITED_METHOD_CONTRACT; return GUID_NULL; } #endif // FEATURE_COMINTEROP FCIMPL0(EXCEPTION_POINTERS*, ExceptionNative::GetExceptionPointers) { FCALL_CONTRACT; EXCEPTION_POINTERS* retVal = NULL; Thread *pThread = GetThread(); if (pThread->IsExceptionInProgress()) { retVal = pThread->GetExceptionState()->GetExceptionPointers(); } return retVal; } FCIMPLEND FCIMPL0(INT32, ExceptionNative::GetExceptionCode) { FCALL_CONTRACT; INT32 retVal = 0; Thread *pThread = GetThread(); if (pThread->IsExceptionInProgress()) { retVal = pThread->GetExceptionState()->GetExceptionCode(); } return retVal; } FCIMPLEND extern uint32_t g_exceptionCount; FCIMPL0(UINT32, ExceptionNative::GetExceptionCount) { FCALL_CONTRACT; return g_exceptionCount; } FCIMPLEND // // This must be implemented as an FCALL because managed code cannot // swallow a thread abort exception without resetting the abort, // which we don't want to do. 
Additionally, we can run into deadlocks // if we use the ResourceManager to do resource lookups - it requires // taking managed locks when initializing Globalization & Security, // but a thread abort on a separate thread initializing those same // systems would also do a resource lookup via the ResourceManager. // We've deadlocked in CompareInfo.GetCompareInfo & // Environment.GetResourceString. It's not practical to take all of // our locks within CER's to avoid this problem - just use the CLR's // unmanaged resources. // extern "C" void QCALLTYPE ExceptionNative_GetMessageFromNativeResources(ExceptionMessageKind kind, QCall::StringHandleOnStack retMesg) { QCALL_CONTRACT; BEGIN_QCALL; SString buffer; HRESULT hr = S_OK; const WCHAR * wszFallbackString = NULL; switch(kind) { case ExceptionMessageKind::ThreadAbort: hr = buffer.LoadResourceAndReturnHR(CCompRC::Error, IDS_EE_THREAD_ABORT); if (FAILED(hr)) { wszFallbackString = W("Thread was being aborted."); } break; case ExceptionMessageKind::ThreadInterrupted: hr = buffer.LoadResourceAndReturnHR(CCompRC::Error, IDS_EE_THREAD_INTERRUPTED); if (FAILED(hr)) { wszFallbackString = W("Thread was interrupted from a waiting state."); } break; case ExceptionMessageKind::OutOfMemory: hr = buffer.LoadResourceAndReturnHR(CCompRC::Error, IDS_EE_OUT_OF_MEMORY); if (FAILED(hr)) { wszFallbackString = W("Insufficient memory to continue the execution of the program."); } break; default: _ASSERTE(!"Unknown ExceptionMessageKind value!"); } if (FAILED(hr)) { STRESS_LOG1(LF_BCL, LL_ALWAYS, "LoadResource error: %x", hr); _ASSERTE(wszFallbackString != NULL); retMesg.Set(wszFallbackString); } else { retMesg.Set(buffer); } END_QCALL; } extern "C" void QCALLTYPE Buffer_Clear(void *dst, size_t length) { QCALL_CONTRACT; #if defined(HOST_X86) || defined(HOST_AMD64) if (length > 0x100) { // memset ends up calling rep stosb if the hardware claims to support it efficiently. rep stosb is up to 2x slower // on misaligned blocks. Workaround this issue by aligning the blocks passed to memset upfront. 
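// Worked example (illustrative numbers, not from the original source): with dst == 0x1007 and length == 0x200, the eight 8-byte stores below zero the first and last 32 bytes of the range unconditionally, so it is safe to round dst up to the next 32-byte boundary (0x1020) and round the end down (0x1200); the interior range handed to memset is then always 32-byte aligned. 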
*(uint64_t*)dst = 0; *((uint64_t*)dst + 1) = 0; *((uint64_t*)dst + 2) = 0; *((uint64_t*)dst + 3) = 0; void* end = (uint8_t*)dst + length; *((uint64_t*)end - 1) = 0; *((uint64_t*)end - 2) = 0; *((uint64_t*)end - 3) = 0; *((uint64_t*)end - 4) = 0; dst = ALIGN_UP((uint8_t*)dst + 1, 32); length = ALIGN_DOWN((uint8_t*)end - 1, 32) - (uint8_t*)dst; } #endif memset(dst, 0, length); } FCIMPL3(VOID, Buffer::BulkMoveWithWriteBarrier, void *dst, void *src, size_t byteCount) { FCALL_CONTRACT; if (dst != src && byteCount != 0) InlinedMemmoveGCRefsHelper(dst, src, byteCount); FC_GC_POLL(); } FCIMPLEND extern "C" void QCALLTYPE Buffer_MemMove(void *dst, void *src, size_t length) { QCALL_CONTRACT; memmove(dst, src, length); } // // GCInterface // INT32 GCInterface::m_gc_counts[3] = {0,0,0}; UINT64 GCInterface::m_addPressure[MEM_PRESSURE_COUNT] = {0, 0, 0, 0}; // history of memory pressure additions UINT64 GCInterface::m_remPressure[MEM_PRESSURE_COUNT] = {0, 0, 0, 0}; // history of memory pressure removals // incremented after a gen2 GC has been detected, // (m_iteration % MEM_PRESSURE_COUNT) is used as an index into m_addPressure and m_remPressure UINT GCInterface::m_iteration = 0; FCIMPL2(void, GCInterface::GetMemoryInfo, Object* objUNSAFE, int kind) { FCALL_CONTRACT; FC_GC_POLL_NOT_NEEDED(); GCMEMORYINFODATAREF objGCMemoryInfo = (GCMEMORYINFODATAREF)(ObjectToOBJECTREF (objUNSAFE)); UINT64* genInfoRaw = (UINT64*)&(objGCMemoryInfo->generationInfo0); UINT64* pauseInfoRaw = (UINT64*)&(objGCMemoryInfo->pauseDuration0); return GCHeapUtilities::GetGCHeap()->GetMemoryInfo( &(objGCMemoryInfo->highMemLoadThresholdBytes), &(objGCMemoryInfo->totalAvailableMemoryBytes), &(objGCMemoryInfo->lastRecordedMemLoadBytes), &(objGCMemoryInfo->lastRecordedHeapSizeBytes), &(objGCMemoryInfo->lastRecordedFragmentationBytes), &(objGCMemoryInfo->totalCommittedBytes), &(objGCMemoryInfo->promotedBytes), &(objGCMemoryInfo->pinnedObjectCount), &(objGCMemoryInfo->finalizationPendingCount), &(objGCMemoryInfo->index), &(objGCMemoryInfo->generation), &(objGCMemoryInfo->pauseTimePercent), (bool*)&(objGCMemoryInfo->isCompaction), (bool*)&(objGCMemoryInfo->isConcurrent), genInfoRaw, pauseInfoRaw, kind); } FCIMPLEND FCIMPL0(UINT32, GCInterface::GetMemoryLoad) { FCALL_CONTRACT; FC_GC_POLL_NOT_NEEDED(); int result = (INT32)GCHeapUtilities::GetGCHeap()->GetMemoryLoad(); return result; } FCIMPLEND FCIMPL0(int, GCInterface::GetGcLatencyMode) { FCALL_CONTRACT; FC_GC_POLL_NOT_NEEDED(); int result = (INT32)GCHeapUtilities::GetGCHeap()->GetGcLatencyMode(); return result; } FCIMPLEND FCIMPL1(int, GCInterface::SetGcLatencyMode, int newLatencyMode) { FCALL_CONTRACT; FC_GC_POLL_NOT_NEEDED(); return GCHeapUtilities::GetGCHeap()->SetGcLatencyMode(newLatencyMode); } FCIMPLEND FCIMPL0(int, GCInterface::GetLOHCompactionMode) { FCALL_CONTRACT; FC_GC_POLL_NOT_NEEDED(); int result = (INT32)GCHeapUtilities::GetGCHeap()->GetLOHCompactionMode(); return result; } FCIMPLEND FCIMPL1(void, GCInterface::SetLOHCompactionMode, int newLOHCompactionyMode) { FCALL_CONTRACT; FC_GC_POLL_NOT_NEEDED(); GCHeapUtilities::GetGCHeap()->SetLOHCompactionMode(newLOHCompactionyMode); } FCIMPLEND FCIMPL2(FC_BOOL_RET, GCInterface::RegisterForFullGCNotification, UINT32 gen2Percentage, UINT32 lohPercentage) { FCALL_CONTRACT; FC_GC_POLL_NOT_NEEDED(); FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->RegisterForFullGCNotification(gen2Percentage, lohPercentage)); } FCIMPLEND FCIMPL0(FC_BOOL_RET, GCInterface::CancelFullGCNotification) { FCALL_CONTRACT; FC_GC_POLL_NOT_NEEDED(); 
FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->CancelFullGCNotification()); } FCIMPLEND FCIMPL1(int, GCInterface::WaitForFullGCApproach, int millisecondsTimeout) { CONTRACTL { THROWS; MODE_COOPERATIVE; DISABLED(GC_TRIGGERS); // can't use this in an FCALL because we're in forbid gc mode until we setup a H_M_F. } CONTRACTL_END; int result = 0; //We don't need to check the top end because the GC will take care of that. HELPER_METHOD_FRAME_BEGIN_RET_0(); DWORD dwMilliseconds = ((millisecondsTimeout == -1) ? INFINITE : millisecondsTimeout); result = GCHeapUtilities::GetGCHeap()->WaitForFullGCApproach(dwMilliseconds); HELPER_METHOD_FRAME_END(); return result; } FCIMPLEND FCIMPL1(int, GCInterface::WaitForFullGCComplete, int millisecondsTimeout) { CONTRACTL { THROWS; MODE_COOPERATIVE; DISABLED(GC_TRIGGERS); // can't use this in an FCALL because we're in forbid gc mode until we setup a H_M_F. } CONTRACTL_END; int result = 0; //We don't need to check the top end because the GC will take care of that. HELPER_METHOD_FRAME_BEGIN_RET_0(); DWORD dwMilliseconds = ((millisecondsTimeout == -1) ? INFINITE : millisecondsTimeout); result = GCHeapUtilities::GetGCHeap()->WaitForFullGCComplete(dwMilliseconds); HELPER_METHOD_FRAME_END(); return result; } FCIMPLEND /*================================GetGeneration================================= **Action: Returns the generation in which args->obj is found. **Returns: The generation in which args->obj is found. **Arguments: args->obj -- The object to locate. **Exceptions: ArgumentException if args->obj is null. ==============================================================================*/ FCIMPL1(int, GCInterface::GetGeneration, Object* objUNSAFE) { FCALL_CONTRACT; if (objUNSAFE == NULL) FCThrowArgumentNull(W("obj")); int result = (INT32)GCHeapUtilities::GetGCHeap()->WhichGeneration(objUNSAFE); FC_GC_POLL_RET(); return result; } FCIMPLEND /*================================GetSegmentSize========-======================= **Action: Returns the maximum GC heap segment size **Returns: The maximum segment size of either the normal heap or the large object heap, whichever is bigger ==============================================================================*/ FCIMPL0(UINT64, GCInterface::GetSegmentSize) { FCALL_CONTRACT; IGCHeap * pGC = GCHeapUtilities::GetGCHeap(); size_t segment_size = pGC->GetValidSegmentSize(false); size_t large_segment_size = pGC->GetValidSegmentSize(true); _ASSERTE(segment_size < SIZE_T_MAX && large_segment_size < SIZE_T_MAX); if (segment_size < large_segment_size) segment_size = large_segment_size; FC_GC_POLL_RET(); return (UINT64) segment_size; } FCIMPLEND /*================================CollectionCount================================= **Action: Returns the number of collections for this generation since the beginning of the life of the process **Returns: The collection count. **Arguments: args->generation -- The generation **Exceptions: Argument exception if args->generation is < 0 or > GetMaxGeneration(); ==============================================================================*/ FCIMPL2(int, GCInterface::CollectionCount, INT32 generation, INT32 getSpecialGCCount) { FCALL_CONTRACT; //We've already checked this in GC.cs, so we'll just assert it here. _ASSERTE(generation >= 0); //We don't need to check the top end because the GC will take care of that. 
int result = (INT32)GCHeapUtilities::GetGCHeap()->CollectionCount(generation, getSpecialGCCount); FC_GC_POLL_RET(); return result; } FCIMPLEND extern "C" int QCALLTYPE GCInterface_StartNoGCRegion(INT64 totalSize, BOOL lohSizeKnown, INT64 lohSize, BOOL disallowFullBlockingGC) { QCALL_CONTRACT; int retVal = 0; BEGIN_QCALL; GCX_COOP(); retVal = GCHeapUtilities::GetGCHeap()->StartNoGCRegion((ULONGLONG)totalSize, !!lohSizeKnown, (ULONGLONG)lohSize, !!disallowFullBlockingGC); END_QCALL; return retVal; } extern "C" int QCALLTYPE GCInterface_EndNoGCRegion() { QCALL_CONTRACT; int retVal = FALSE; BEGIN_QCALL; retVal = GCHeapUtilities::GetGCHeap()->EndNoGCRegion(); END_QCALL; return retVal; } /*===============================GetGenerationWR================================ **Action: Returns the generation in which the object pointed to by a WeakReference is found. **Returns: **Arguments: args->handle -- the OBJECTHANDLE to the object which we're locating. **Exceptions: ArgumentException if handle points to an object which is not accessible. ==============================================================================*/ FCIMPL1(int, GCInterface::GetGenerationWR, LPVOID handle) { FCALL_CONTRACT; int iRetVal = 0; HELPER_METHOD_FRAME_BEGIN_RET_0(); OBJECTREF temp; temp = ObjectFromHandle((OBJECTHANDLE) handle); if (temp == NULL) COMPlusThrowArgumentNull(W("wo")); iRetVal = (INT32)GCHeapUtilities::GetGCHeap()->WhichGeneration(OBJECTREFToObject(temp)); HELPER_METHOD_FRAME_END(); return iRetVal; } FCIMPLEND FCIMPL0(int, GCInterface::GetLastGCPercentTimeInGC) { FCALL_CONTRACT; return GCHeapUtilities::GetGCHeap()->GetLastGCPercentTimeInGC(); } FCIMPLEND FCIMPL1(UINT64, GCInterface::GetGenerationSize, int gen) { FCALL_CONTRACT; return (UINT64)(GCHeapUtilities::GetGCHeap()->GetLastGCGenerationSize(gen)); } FCIMPLEND /*================================GetTotalMemory================================ **Action: Returns the total number of bytes in use **Returns: The total number of bytes in use **Arguments: None **Exceptions: None ==============================================================================*/ extern "C" INT64 QCALLTYPE GCInterface_GetTotalMemory() { QCALL_CONTRACT; INT64 iRetVal = 0; BEGIN_QCALL; GCX_COOP(); iRetVal = (INT64) GCHeapUtilities::GetGCHeap()->GetTotalBytesInUse(); END_QCALL; return iRetVal; } /*==============================Collect========================================= **Action: Collects all generations <= args->generation **Returns: void **Arguments: args->generation: The maximum generation to collect **Exceptions: Argument exception if args->generation is < 0 or > GetMaxGeneration(); ==============================================================================*/ extern "C" void QCALLTYPE GCInterface_Collect(INT32 generation, INT32 mode) { QCALL_CONTRACT; BEGIN_QCALL; //We've already checked this in GC.cs, so we'll just assert it here. _ASSERTE(generation >= -1); //We don't need to check the top end because the GC will take care of that. GCX_COOP(); GCHeapUtilities::GetGCHeap()->GarbageCollect(generation, false, mode); END_QCALL; } /*==========================WaitForPendingFinalizers============================ **Action: Run all Finalizers that haven't been run. 
**Arguments: None **Exceptions: None ==============================================================================*/ extern "C" void QCALLTYPE GCInterface_WaitForPendingFinalizers() { QCALL_CONTRACT; BEGIN_QCALL; FinalizerThread::FinalizerThreadWait(); END_QCALL; } /*===============================GetMaxGeneration=============================== **Action: Returns the largest GC generation **Returns: The largest GC Generation **Arguments: None **Exceptions: None ==============================================================================*/ FCIMPL0(int, GCInterface::GetMaxGeneration) { FCALL_CONTRACT; return(INT32)GCHeapUtilities::GetGCHeap()->GetMaxGeneration(); } FCIMPLEND /*===============================GetAllocatedBytesForCurrentThread=============================== **Action: Computes the allocated bytes so far on the current thread **Returns: The allocated bytes so far on the current thread **Arguments: None **Exceptions: None ==============================================================================*/ FCIMPL0(INT64, GCInterface::GetAllocatedBytesForCurrentThread) { FCALL_CONTRACT; INT64 currentAllocated = 0; Thread *pThread = GetThread(); gc_alloc_context* ac = pThread->GetAllocContext(); currentAllocated = ac->alloc_bytes + ac->alloc_bytes_uoh - (ac->alloc_limit - ac->alloc_ptr); return currentAllocated; } FCIMPLEND /*===============================AllocateNewArray=============================== **Action: Allocates a new array object. Allows passing extra flags **Returns: The allocated array. **Arguments: elementTypeHandle -> type of the element, ** length -> number of elements, ** zeroingOptional -> whether caller prefers to skip clearing the content of the array, if possible. **Exceptions: IDS_EE_ARRAY_DIMENSIONS_EXCEEDED when size is too large. OOM if can't allocate. ==============================================================================*/ FCIMPL3(Object*, GCInterface::AllocateNewArray, void* arrayTypeHandle, INT32 length, INT32 flags) { CONTRACTL { FCALL_CHECK; } CONTRACTL_END; OBJECTREF pRet = NULL; TypeHandle arrayType = TypeHandle::FromPtr(arrayTypeHandle); HELPER_METHOD_FRAME_BEGIN_RET_0(); //Only the following flags are used by GC.cs, so we'll just assert it here. _ASSERTE((flags & ~(GC_ALLOC_ZEROING_OPTIONAL | GC_ALLOC_PINNED_OBJECT_HEAP)) == 0); pRet = AllocateSzArray(arrayType, length, (GC_ALLOC_FLAGS)flags); HELPER_METHOD_FRAME_END(); return OBJECTREFToObject(pRet); } FCIMPLEND FCIMPL1(INT64, GCInterface::GetTotalAllocatedBytes, CLR_BOOL precise) { FCALL_CONTRACT; if (!precise) { #ifdef TARGET_64BIT uint64_t unused_bytes = Thread::dead_threads_non_alloc_bytes; #else // As it could be noticed we read 64bit values that may be concurrently updated. // Such reads are not guaranteed to be atomic on 32bit so extra care should be taken. uint64_t unused_bytes = FastInterlockCompareExchangeLong((LONG64*)& Thread::dead_threads_non_alloc_bytes, 0, 0); #endif uint64_t allocated_bytes = GCHeapUtilities::GetGCHeap()->GetTotalAllocatedBytes() - unused_bytes; // highest reported allocated_bytes. We do not want to report a value less than that even if unused_bytes has increased. 
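// Explanatory note (not from the original source): the loop below implements a lock-free "monotonic max" over high_watermark. A caller proposes its allocated_bytes with a compare-exchange; on contention the freshly observed watermark is re-read and the exchange retried, so the stored value only ever increases and callers never see the reported total move backwards. 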
static uint64_t high_watermark; uint64_t current_high = high_watermark; while (allocated_bytes > current_high) { uint64_t orig = FastInterlockCompareExchangeLong((LONG64*)& high_watermark, allocated_bytes, current_high); if (orig == current_high) return allocated_bytes; current_high = orig; } return current_high; } INT64 allocated; HELPER_METHOD_FRAME_BEGIN_RET_0(); // We need to suspend/restart the EE to get each thread's // non-allocated memory from their allocation contexts ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_OTHER); allocated = GCHeapUtilities::GetGCHeap()->GetTotalAllocatedBytes() - Thread::dead_threads_non_alloc_bytes; for (Thread *pThread = ThreadStore::GetThreadList(NULL); pThread; pThread = ThreadStore::GetThreadList(pThread)) { gc_alloc_context* ac = pThread->GetAllocContext(); allocated -= ac->alloc_limit - ac->alloc_ptr; } ThreadSuspend::RestartEE(FALSE, TRUE); HELPER_METHOD_FRAME_END(); return allocated; } FCIMPLEND; #ifdef FEATURE_BASICFREEZE /*===============================RegisterFrozenSegment=============================== **Action: Registers the frozen segment **Returns: segment_handle **Arguments: args-> pointer to section, size of section **Exceptions: None ==============================================================================*/ extern "C" void* QCALLTYPE GCInterface_RegisterFrozenSegment(void* pSection, SIZE_T sizeSection) { QCALL_CONTRACT; void* retVal = nullptr; BEGIN_QCALL; _ASSERTE(pSection != nullptr); _ASSERTE(sizeSection > 0); GCX_COOP(); segment_info seginfo; seginfo.pvMem = pSection; seginfo.ibFirstObject = sizeof(ObjHeader); seginfo.ibAllocated = sizeSection; seginfo.ibCommit = seginfo.ibAllocated; seginfo.ibReserved = seginfo.ibAllocated; retVal = (void*)GCHeapUtilities::GetGCHeap()->RegisterFrozenSegment(&seginfo); END_QCALL; return retVal; } /*===============================UnregisterFrozenSegment=============================== **Action: Unregisters the frozen segment **Returns: void **Arguments: args-> segment handle **Exceptions: None ==============================================================================*/ extern "C" void QCALLTYPE GCInterface_UnregisterFrozenSegment(void* segment) { QCALL_CONTRACT; BEGIN_QCALL; _ASSERTE(segment != nullptr); GCX_COOP(); GCHeapUtilities::GetGCHeap()->UnregisterFrozenSegment((segment_handle)segment); END_QCALL; } #endif // FEATURE_BASICFREEZE /*==============================SuppressFinalize================================ **Action: Indicate that an object's finalizer should not be run by the system **Arguments: Object of interest **Exceptions: None ==============================================================================*/ FCIMPL1(void, GCInterface::SuppressFinalize, Object *obj) { FCALL_CONTRACT; // Checked by the caller _ASSERTE(obj != NULL); if (!obj->GetMethodTable ()->HasFinalizer()) return; GCHeapUtilities::GetGCHeap()->SetFinalizationRun(obj); FC_GC_POLL(); } FCIMPLEND /*============================ReRegisterForFinalize============================== **Action: Indicate that an object's finalizer should be run by the system. 
**Arguments: Object of interest **Exceptions: None ==============================================================================*/ FCIMPL1(void, GCInterface::ReRegisterForFinalize, Object *obj) { FCALL_CONTRACT; // Checked by the caller _ASSERTE(obj != NULL); if (obj->GetMethodTable()->HasFinalizer()) { HELPER_METHOD_FRAME_BEGIN_1(obj); if (!GCHeapUtilities::GetGCHeap()->RegisterForFinalization(-1, obj)) { ThrowOutOfMemory(); } HELPER_METHOD_FRAME_END(); } } FCIMPLEND FORCEINLINE UINT64 GCInterface::InterlockedAdd (UINT64 *pAugend, UINT64 addend) { WRAPPER_NO_CONTRACT; UINT64 oldMemValue; UINT64 newMemValue; do { oldMemValue = *pAugend; newMemValue = oldMemValue + addend; // check for overflow if (newMemValue < oldMemValue) { newMemValue = UINT64_MAX; } } while (InterlockedCompareExchange64((LONGLONG*) pAugend, (LONGLONG) newMemValue, (LONGLONG) oldMemValue) != (LONGLONG) oldMemValue); return newMemValue; } FORCEINLINE UINT64 GCInterface::InterlockedSub(UINT64 *pMinuend, UINT64 subtrahend) { WRAPPER_NO_CONTRACT; UINT64 oldMemValue; UINT64 newMemValue; do { oldMemValue = *pMinuend; newMemValue = oldMemValue - subtrahend; // check for underflow if (newMemValue > oldMemValue) newMemValue = 0; } while (InterlockedCompareExchange64((LONGLONG*) pMinuend, (LONGLONG) newMemValue, (LONGLONG) oldMemValue) != (LONGLONG) oldMemValue); return newMemValue; } extern "C" void QCALLTYPE GCInterface_AddMemoryPressure(UINT64 bytesAllocated) { QCALL_CONTRACT; BEGIN_QCALL; GCInterface::AddMemoryPressure(bytesAllocated); END_QCALL; } #ifdef HOST_64BIT const unsigned MIN_MEMORYPRESSURE_BUDGET = 4 * 1024 * 1024; // 4 MB #else // HOST_64BIT const unsigned MIN_MEMORYPRESSURE_BUDGET = 3 * 1024 * 1024; // 3 MB #endif // HOST_64BIT const unsigned MAX_MEMORYPRESSURE_RATIO = 10; // 40 MB or 30 MB // Resets pressure accounting after a gen2 GC has occurred. void GCInterface::CheckCollectionCount() { LIMITED_METHOD_CONTRACT; IGCHeap * pHeap = GCHeapUtilities::GetGCHeap(); if (m_gc_counts[2] != pHeap->CollectionCount(2)) { for (int i = 0; i < 3; i++) { m_gc_counts[i] = pHeap->CollectionCount(i); } m_iteration++; UINT p = m_iteration % MEM_PRESSURE_COUNT; m_addPressure[p] = 0; // new pressure will be accumulated here m_remPressure[p] = 0; } } // AddMemoryPressure implementation // // 1. Start budget - MIN_MEMORYPRESSURE_BUDGET // 2. Focuses more on newly added memory pressure // 3. Budget adjusted by effectiveness of last 3 triggered GC (add / remove ratio, max 10x) // 4. Budget maxed with 30% of current managed GC size // 5. 
If Gen2 GC is happening naturally, ignore past pressure // // Here's a brief description of the ideal algorithm for Add/Remove memory pressure: // Do a GC when (HeapStart < X * MemPressureGrowth) where // - HeapStart is GC Heap size after doing the last GC // - MemPressureGrowth is the net of Add and Remove since the last GC // - X is proportional to our guess of the unmanaged memory death rate per GC interval, // and would be calculated based on historic data using standard exponential approximation: // Xnew = UMDeath/UMTotal * 0.5 + Xprev // void GCInterface::AddMemoryPressure(UINT64 bytesAllocated) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; CheckCollectionCount(); UINT p = m_iteration % MEM_PRESSURE_COUNT; UINT64 newMemValue = InterlockedAdd(&m_addPressure[p], bytesAllocated); static_assert(MEM_PRESSURE_COUNT == 4, "AddMemoryPressure contains unrolled loops which depend on MEM_PRESSURE_COUNT"); UINT64 add = m_addPressure[0] + m_addPressure[1] + m_addPressure[2] + m_addPressure[3] - m_addPressure[p]; UINT64 rem = m_remPressure[0] + m_remPressure[1] + m_remPressure[2] + m_remPressure[3] - m_remPressure[p]; STRESS_LOG4(LF_GCINFO, LL_INFO10000, "AMP Add: %I64u => added=%I64u total_added=%I64u total_removed=%I64u", bytesAllocated, newMemValue, add, rem); SendEtwAddMemoryPressureEvent(bytesAllocated); if (newMemValue >= MIN_MEMORYPRESSURE_BUDGET) { UINT64 budget = MIN_MEMORYPRESSURE_BUDGET; if (m_iteration >= MEM_PRESSURE_COUNT) // wait until we have enough data points { // Adjust according to effectiveness of GC // Scale budget according to past m_addPressure / m_remPressure ratio if (add >= rem * MAX_MEMORYPRESSURE_RATIO) { budget = MIN_MEMORYPRESSURE_BUDGET * MAX_MEMORYPRESSURE_RATIO; } else if (add > rem) { CONSISTENCY_CHECK(rem != 0); // Avoid overflow by calculating addPressure / remPressure as fixed point (1 = 1024) budget = (add * 1024 / rem) * budget / 1024; } } // If still over budget, check current managed heap size if (newMemValue >= budget) { IGCHeap *pGCHeap = GCHeapUtilities::GetGCHeap(); UINT64 heapOver3 = pGCHeap->GetCurrentObjSize() / 3; if (budget < heapOver3) // Max { budget = heapOver3; } if (newMemValue >= budget) { // last check - if we would exceed 20% of GC "duty cycle", do not trigger GC at this time if ((size_t)(pGCHeap->GetNow() - pGCHeap->GetLastGCStartTime(2)) > (pGCHeap->GetLastGCDuration(2) * 5)) { STRESS_LOG6(LF_GCINFO, LL_INFO10000, "AMP Budget: pressure=%I64u ? budget=%I64u (total_added=%I64u, total_removed=%I64u, mng_heap=%I64u) pos=%d", newMemValue, budget, add, rem, heapOver3 * 3, m_iteration); GarbageCollectModeAny(2); CheckCollectionCount(); } } } } } extern "C" void QCALLTYPE GCInterface_RemoveMemoryPressure(UINT64 bytesAllocated) { QCALL_CONTRACT; BEGIN_QCALL; GCInterface::RemoveMemoryPressure(bytesAllocated); END_QCALL; } void GCInterface::RemoveMemoryPressure(UINT64 bytesAllocated) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; CheckCollectionCount(); UINT p = m_iteration % MEM_PRESSURE_COUNT; SendEtwRemoveMemoryPressureEvent(bytesAllocated); InterlockedAdd(&m_remPressure[p], bytesAllocated); STRESS_LOG2(LF_GCINFO, LL_INFO10000, "AMP Remove: %I64u => removed=%I64u", bytesAllocated, m_remPressure[p]); } inline void GCInterface::SendEtwAddMemoryPressureEvent(UINT64 bytesAllocated) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; FireEtwIncreaseMemoryPressure(bytesAllocated, GetClrInstanceId()); } // Out-of-line helper to avoid EH prolog/epilog in functions that otherwise don't throw. 
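// (Explanatory note, not from the original source: the EX_TRY/EX_CATCH in the helper below would otherwise pull an exception-handling prolog/epilog into its caller; keeping it out-of-line confines that cost to the rare ETW-failure path and keeps the NOTHROW RemoveMemoryPressure path lean.) 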
NOINLINE void GCInterface::SendEtwRemoveMemoryPressureEvent(UINT64 bytesAllocated) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; EX_TRY { FireEtwDecreaseMemoryPressure(bytesAllocated, GetClrInstanceId()); } EX_CATCH { // Ignore failures } EX_END_CATCH(SwallowAllExceptions) } // Out-of-line helper to avoid EH prolog/epilog in functions that otherwise don't throw. NOINLINE void GCInterface::GarbageCollectModeAny(int generation) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; GCX_COOP(); GCHeapUtilities::GetGCHeap()->GarbageCollect(generation, false, collection_non_blocking); } // // COMInterlocked // #include <optsmallperfcritical.h> FCIMPL2(INT32,COMInterlocked::Exchange, INT32 *location, INT32 value) { FCALL_CONTRACT; if( NULL == location) { FCThrow(kNullReferenceException); } return FastInterlockExchange((LONG *) location, value); } FCIMPLEND FCIMPL2_IV(INT64,COMInterlocked::Exchange64, INT64 *location, INT64 value) { FCALL_CONTRACT; if( NULL == location) { FCThrow(kNullReferenceException); } return FastInterlockExchangeLong((INT64 *) location, value); } FCIMPLEND FCIMPL3(INT32, COMInterlocked::CompareExchange, INT32* location, INT32 value, INT32 comparand) { FCALL_CONTRACT; if( NULL == location) { FCThrow(kNullReferenceException); } return FastInterlockCompareExchange((LONG*)location, value, comparand); } FCIMPLEND FCIMPL3_IVV(INT64, COMInterlocked::CompareExchange64, INT64* location, INT64 value, INT64 comparand) { FCALL_CONTRACT; if( NULL == location) { FCThrow(kNullReferenceException); } return FastInterlockCompareExchangeLong((INT64*)location, value, comparand); } FCIMPLEND FCIMPL2_IV(float,COMInterlocked::ExchangeFloat, float *location, float value) { FCALL_CONTRACT; if( NULL == location) { FCThrow(kNullReferenceException); } LONG ret = FastInterlockExchange((LONG *) location, *(LONG*)&value); return *(float*)&ret; } FCIMPLEND FCIMPL2_IV(double,COMInterlocked::ExchangeDouble, double *location, double value) { FCALL_CONTRACT; if( NULL == location) { FCThrow(kNullReferenceException); } INT64 ret = FastInterlockExchangeLong((INT64 *) location, *(INT64*)&value); return *(double*)&ret; } FCIMPLEND FCIMPL3_IVV(float,COMInterlocked::CompareExchangeFloat, float *location, float value, float comparand) { FCALL_CONTRACT; if( NULL == location) { FCThrow(kNullReferenceException); } LONG ret = (LONG)FastInterlockCompareExchange((LONG*) location, *(LONG*)&value, *(LONG*)&comparand); return *(float*)&ret; } FCIMPLEND FCIMPL3_IVV(double,COMInterlocked::CompareExchangeDouble, double *location, double value, double comparand) { FCALL_CONTRACT; if( NULL == location) { FCThrow(kNullReferenceException); } INT64 ret = (INT64)FastInterlockCompareExchangeLong((INT64*) location, *(INT64*)&value, *(INT64*)&comparand); return *(double*)&ret; } FCIMPLEND FCIMPL2(LPVOID,COMInterlocked::ExchangeObject, LPVOID*location, LPVOID value) { FCALL_CONTRACT; if( NULL == location) { FCThrow(kNullReferenceException); } LPVOID ret = FastInterlockExchangePointer(location, value); #ifdef _DEBUG Thread::ObjectRefAssign((OBJECTREF *)location); #endif ErectWriteBarrier((OBJECTREF*) location, ObjectToOBJECTREF((Object*) value)); return ret; } FCIMPLEND FCIMPL3(LPVOID,COMInterlocked::CompareExchangeObject, LPVOID *location, LPVOID value, LPVOID comparand) { FCALL_CONTRACT; if( NULL == location) { FCThrow(kNullReferenceException); } // <TODO>@todo: only set ref if is updated</TODO> LPVOID ret = FastInterlockCompareExchangePointer(location, value, comparand); if (ret == comparand) { #ifdef _DEBUG 
Thread::ObjectRefAssign((OBJECTREF *)location); #endif ErectWriteBarrier((OBJECTREF*) location, ObjectToOBJECTREF((Object*) value)); } return ret; } FCIMPLEND FCIMPL2(INT32,COMInterlocked::ExchangeAdd32, INT32 *location, INT32 value) { FCALL_CONTRACT; if( NULL == location) { FCThrow(kNullReferenceException); } return FastInterlockExchangeAdd((LONG *) location, value); } FCIMPLEND FCIMPL2_IV(INT64,COMInterlocked::ExchangeAdd64, INT64 *location, INT64 value) { FCALL_CONTRACT; if( NULL == location) { FCThrow(kNullReferenceException); } return FastInterlockExchangeAddLong((INT64 *) location, value); } FCIMPLEND FCIMPL0(void, COMInterlocked::FCMemoryBarrier) { FCALL_CONTRACT; MemoryBarrier(); FC_GC_POLL(); } FCIMPLEND FCIMPL0(void, COMInterlocked::FCMemoryBarrierLoad) { FCALL_CONTRACT; VolatileLoadBarrier(); FC_GC_POLL(); } FCIMPLEND #include <optdefault.h> extern "C" void QCALLTYPE Interlocked_MemoryBarrierProcessWide() { QCALL_CONTRACT; FlushProcessWriteBuffers(); } static BOOL HasOverriddenMethod(MethodTable* mt, MethodTable* classMT, WORD methodSlot) { CONTRACTL{ NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; _ASSERTE(mt != NULL); _ASSERTE(classMT != NULL); _ASSERTE(methodSlot != 0); PCODE actual = mt->GetRestoredSlot(methodSlot); PCODE base = classMT->GetRestoredSlot(methodSlot); if (actual == base) { return FALSE; } // If CoreLib is JITed, the slots can be patched and thus we need to compare the actual MethodDescs // to detect match reliably if (MethodTable::GetMethodDescForSlotAddress(actual) == MethodTable::GetMethodDescForSlotAddress(base)) { return FALSE; } return TRUE; } static BOOL CanCompareBitsOrUseFastGetHashCode(MethodTable* mt) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; _ASSERTE(mt != NULL); if (mt->HasCheckedCanCompareBitsOrUseFastGetHashCode()) { return mt->CanCompareBitsOrUseFastGetHashCode(); } if (mt->ContainsPointers() || mt->IsNotTightlyPacked()) { mt->SetHasCheckedCanCompareBitsOrUseFastGetHashCode(); return FALSE; } MethodTable* valueTypeMT = CoreLibBinder::GetClass(CLASS__VALUE_TYPE); WORD slotEquals = CoreLibBinder::GetMethod(METHOD__VALUE_TYPE__EQUALS)->GetSlot(); WORD slotGetHashCode = CoreLibBinder::GetMethod(METHOD__VALUE_TYPE__GET_HASH_CODE)->GetSlot(); // Check the input type. if (HasOverriddenMethod(mt, valueTypeMT, slotEquals) || HasOverriddenMethod(mt, valueTypeMT, slotGetHashCode)) { mt->SetHasCheckedCanCompareBitsOrUseFastGetHashCode(); // If overridden Equals or GetHashCode found, stop searching further. return FALSE; } BOOL canCompareBitsOrUseFastGetHashCode = TRUE; // The type itself did not override Equals or GetHashCode, go for its fields. ApproxFieldDescIterator iter = ApproxFieldDescIterator(mt, ApproxFieldDescIterator::INSTANCE_FIELDS); for (FieldDesc* pField = iter.Next(); pField != NULL; pField = iter.Next()) { if (pField->GetFieldType() == ELEMENT_TYPE_VALUETYPE) { // Check current field type. MethodTable* fieldMethodTable = pField->GetApproxFieldTypeHandleThrowing().GetMethodTable(); if (!CanCompareBitsOrUseFastGetHashCode(fieldMethodTable)) { canCompareBitsOrUseFastGetHashCode = FALSE; break; } } else if (pField->GetFieldType() == ELEMENT_TYPE_R8 || pField->GetFieldType() == ELEMENT_TYPE_R4) { // We have double/single field, cannot compare in fast path. canCompareBitsOrUseFastGetHashCode = FALSE; break; } } // We've gone through all instance fields. It's time to cache the result. // Note SetCanCompareBitsOrUseFastGetHashCode(BOOL) ensures the checked flag // and canCompare flag being set atomically to avoid race. 
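// Illustrative examples (not from the original source): a struct with only two int fields is tightly packed, holds no GC references, and inherits Equals/GetHashCode from ValueType, so memcmp-based equality is valid for it; a struct containing a float or double field is excluded above because +0.0 and -0.0 compare equal yet differ bitwise (and NaN != NaN), so a raw byte comparison would disagree with Equals. 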
mt->SetCanCompareBitsOrUseFastGetHashCode(canCompareBitsOrUseFastGetHashCode); return canCompareBitsOrUseFastGetHashCode; } NOINLINE static FC_BOOL_RET CanCompareBitsHelper(MethodTable* mt, OBJECTREF objRef) { FC_INNER_PROLOG(ValueTypeHelper::CanCompareBits); _ASSERTE(mt != NULL); _ASSERTE(objRef != NULL); BOOL ret = FALSE; HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, objRef); ret = CanCompareBitsOrUseFastGetHashCode(mt); HELPER_METHOD_FRAME_END(); FC_INNER_EPILOG(); FC_RETURN_BOOL(ret); } // Return true if the valuetype does not contain pointers, is tightly packed, // does not have floating point number fields and does not override the Equals method. FCIMPL1(FC_BOOL_RET, ValueTypeHelper::CanCompareBits, Object* obj) { FCALL_CONTRACT; _ASSERTE(obj != NULL); MethodTable* mt = obj->GetMethodTable(); if (mt->HasCheckedCanCompareBitsOrUseFastGetHashCode()) { FC_RETURN_BOOL(mt->CanCompareBitsOrUseFastGetHashCode()); } OBJECTREF objRef(obj); FC_INNER_RETURN(FC_BOOL_RET, CanCompareBitsHelper(mt, objRef)); } FCIMPLEND FCIMPL2(FC_BOOL_RET, ValueTypeHelper::FastEqualsCheck, Object* obj1, Object* obj2) { FCALL_CONTRACT; _ASSERTE(obj1 != NULL); _ASSERTE(obj2 != NULL); _ASSERTE(!obj1->GetMethodTable()->ContainsPointers()); _ASSERTE(obj1->GetSize() == obj2->GetSize()); TypeHandle pTh = obj1->GetTypeHandle(); FC_RETURN_BOOL(memcmp(obj1->GetData(),obj2->GetData(),pTh.GetSize()) == 0); } FCIMPLEND static INT32 FastGetValueTypeHashCodeHelper(MethodTable *mt, void *pObjRef) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_COOPERATIVE; } CONTRACTL_END; INT32 hashCode = 0; INT32 *pObj = (INT32*)pObjRef; // this is a struct with no refs and no "strange" offsets, just go through the obj and xor the bits INT32 size = mt->GetNumInstanceFieldBytes(); for (INT32 i = 0; i < (INT32)(size / sizeof(INT32)); i++) hashCode ^= *pObj++; return hashCode; } static INT32 RegularGetValueTypeHashCode(MethodTable *mt, void *pObjRef) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; INT32 hashCode = 0; GCPROTECT_BEGININTERIOR(pObjRef); BOOL canUseFastGetHashCodeHelper = FALSE; if (mt->HasCheckedCanCompareBitsOrUseFastGetHashCode()) { canUseFastGetHashCodeHelper = mt->CanCompareBitsOrUseFastGetHashCode(); } else { canUseFastGetHashCodeHelper = CanCompareBitsOrUseFastGetHashCode(mt); } // While we shouldn't get here directly from ValueTypeHelper::GetHashCode, if we recurse we need to // be able to handle getting the hashcode for an embedded structure whose hashcode is computed by the fast path. if (canUseFastGetHashCodeHelper) { hashCode = FastGetValueTypeHashCodeHelper(mt, pObjRef); } else { // it's looking ugly so we'll use the old behavior in managed code. Grab the first non-null // field and return its hash code or 'it' as hash code // <TODO> Note that the old behavior has already been broken for value types // that are qualified for CanUseFastGetHashCodeHelper. So maybe we should // change the implementation here to use all fields instead of just the 1st one. 
// </TODO> // // <TODO> check this approximation - we may be losing exact type information </TODO> ApproxFieldDescIterator fdIterator(mt, ApproxFieldDescIterator::INSTANCE_FIELDS); FieldDesc *field; while ((field = fdIterator.Next()) != NULL) { _ASSERTE(!field->IsRVA()); if (field->IsObjRef()) { // if we get an object reference we get the hash code out of that if (*(Object**)((BYTE *)pObjRef + field->GetOffsetUnsafe()) != NULL) { PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__OBJECT__GET_HASH_CODE, (*(Object**)((BYTE *)pObjRef + field->GetOffsetUnsafe()))); DECLARE_ARGHOLDER_ARRAY(args, 1); args[ARGNUM_0] = PTR_TO_ARGHOLDER(*(Object**)((BYTE *)pObjRef + field->GetOffsetUnsafe())); CALL_MANAGED_METHOD(hashCode, INT32, args); } else { // null object reference, try next continue; } } else { CorElementType fieldType = field->GetFieldType(); if (fieldType == ELEMENT_TYPE_R8) { PREPARE_NONVIRTUAL_CALLSITE(METHOD__DOUBLE__GET_HASH_CODE); DECLARE_ARGHOLDER_ARRAY(args, 1); args[ARGNUM_0] = PTR_TO_ARGHOLDER(((BYTE *)pObjRef + field->GetOffsetUnsafe())); CALL_MANAGED_METHOD(hashCode, INT32, args); } else if (fieldType == ELEMENT_TYPE_R4) { PREPARE_NONVIRTUAL_CALLSITE(METHOD__SINGLE__GET_HASH_CODE); DECLARE_ARGHOLDER_ARRAY(args, 1); args[ARGNUM_0] = PTR_TO_ARGHOLDER(((BYTE *)pObjRef + field->GetOffsetUnsafe())); CALL_MANAGED_METHOD(hashCode, INT32, args); } else if (fieldType != ELEMENT_TYPE_VALUETYPE) { UINT fieldSize = field->LoadSize(); INT32 *pValue = (INT32*)((BYTE *)pObjRef + field->GetOffsetUnsafe()); for (INT32 j = 0; j < (INT32)(fieldSize / sizeof(INT32)); j++) hashCode ^= *pValue++; } else { // got another value type. Get the type TypeHandle fieldTH = field->GetFieldTypeHandleThrowing(); _ASSERTE(!fieldTH.IsNull()); hashCode = RegularGetValueTypeHashCode(fieldTH.GetMethodTable(), (BYTE *)pObjRef + field->GetOffsetUnsafe()); } } break; } } GCPROTECT_END(); return hashCode; } // The default implementation of GetHashCode() for all value types. // Note that this implementation reveals the value of the fields. // So if the value type contains any sensitive information it should // implement its own GetHashCode(). FCIMPL1(INT32, ValueTypeHelper::GetHashCode, Object* objUNSAFE) { FCALL_CONTRACT; if (objUNSAFE == NULL) FCThrow(kNullReferenceException); OBJECTREF obj = ObjectToOBJECTREF(objUNSAFE); VALIDATEOBJECTREF(obj); INT32 hashCode = 0; MethodTable *pMT = objUNSAFE->GetMethodTable(); // We don't want to expose the method table pointer in the hash code // Let's use the typeID instead. 
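// (See GetHashCodeOfPtr below for the same concern: it offsets raw pointers by a lazily captured seed so that hash codes reveal only relative, never absolute, addresses.) 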
UINT32 typeID = pMT->LookupTypeID(); if (typeID == TypeIDProvider::INVALID_TYPE_ID) { // If the typeID has yet to be generated, fall back to GetTypeID // This only needs to be done once per MethodTable HELPER_METHOD_FRAME_BEGIN_RET_1(obj); typeID = pMT->GetTypeID(); HELPER_METHOD_FRAME_END(); } // To get less colliding and more evenly distributed hash codes, // we munge the class index with two big prime numbers hashCode = typeID * 711650207 + 2506965631U; BOOL canUseFastGetHashCodeHelper = FALSE; if (pMT->HasCheckedCanCompareBitsOrUseFastGetHashCode()) { canUseFastGetHashCodeHelper = pMT->CanCompareBitsOrUseFastGetHashCode(); } else { HELPER_METHOD_FRAME_BEGIN_RET_1(obj); canUseFastGetHashCodeHelper = CanCompareBitsOrUseFastGetHashCode(pMT); HELPER_METHOD_FRAME_END(); } if (canUseFastGetHashCodeHelper) { hashCode ^= FastGetValueTypeHashCodeHelper(pMT, obj->UnBox()); } else { HELPER_METHOD_FRAME_BEGIN_RET_1(obj); hashCode ^= RegularGetValueTypeHashCode(pMT, obj->UnBox()); HELPER_METHOD_FRAME_END(); } return hashCode; } FCIMPLEND static LONG s_dwSeed; FCIMPL1(INT32, ValueTypeHelper::GetHashCodeOfPtr, LPVOID ptr) { FCALL_CONTRACT; INT32 hashCode = (INT32)((INT64)(ptr)); if (hashCode == 0) { return 0; } DWORD dwSeed = s_dwSeed; // Initialize s_dwSeed lazily if (dwSeed == 0) { // We use the first non-0 pointer as the seed, all hashcodes will be based off that. // This is to make sure that we only reveal relative memory addresses and never absolute ones. dwSeed = hashCode; InterlockedCompareExchange(&s_dwSeed, dwSeed, 0); dwSeed = s_dwSeed; } _ASSERTE(dwSeed != 0); return hashCode - dwSeed; } FCIMPLEND static MethodTable * g_pStreamMT; static WORD g_slotBeginRead, g_slotEndRead; static WORD g_slotBeginWrite, g_slotEndWrite; static bool HasOverriddenStreamMethod(MethodTable * pMT, WORD slot) { CONTRACTL{ NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; PCODE actual = pMT->GetRestoredSlot(slot); PCODE base = g_pStreamMT->GetRestoredSlot(slot); if (actual == base) return false; // If CoreLib is JITed, the slots can be patched and thus we need to compare the actual MethodDescs // to detect match reliably if (MethodTable::GetMethodDescForSlotAddress(actual) == MethodTable::GetMethodDescForSlotAddress(base)) return false; return true; } FCIMPL1(FC_BOOL_RET, StreamNative::HasOverriddenBeginEndRead, Object *stream) { FCALL_CONTRACT; if (stream == NULL) FC_RETURN_BOOL(TRUE); if (g_pStreamMT == NULL || g_slotBeginRead == 0 || g_slotEndRead == 0) { HELPER_METHOD_FRAME_BEGIN_RET_1(stream); g_pStreamMT = CoreLibBinder::GetClass(CLASS__STREAM); g_slotBeginRead = CoreLibBinder::GetMethod(METHOD__STREAM__BEGIN_READ)->GetSlot(); g_slotEndRead = CoreLibBinder::GetMethod(METHOD__STREAM__END_READ)->GetSlot(); HELPER_METHOD_FRAME_END(); } MethodTable * pMT = stream->GetMethodTable(); FC_RETURN_BOOL(HasOverriddenStreamMethod(pMT, g_slotBeginRead) || HasOverriddenStreamMethod(pMT, g_slotEndRead)); } FCIMPLEND FCIMPL1(FC_BOOL_RET, StreamNative::HasOverriddenBeginEndWrite, Object *stream) { FCALL_CONTRACT; if (stream == NULL) FC_RETURN_BOOL(TRUE); if (g_pStreamMT == NULL || g_slotBeginWrite == 0 || g_slotEndWrite == 0) { HELPER_METHOD_FRAME_BEGIN_RET_1(stream); g_pStreamMT = CoreLibBinder::GetClass(CLASS__STREAM); g_slotBeginWrite = CoreLibBinder::GetMethod(METHOD__STREAM__BEGIN_WRITE)->GetSlot(); g_slotEndWrite = CoreLibBinder::GetMethod(METHOD__STREAM__END_WRITE)->GetSlot(); HELPER_METHOD_FRAME_END(); } MethodTable * pMT = stream->GetMethodTable(); FC_RETURN_BOOL(HasOverriddenStreamMethod(pMT, g_slotBeginWrite) || 
HasOverriddenStreamMethod(pMT, g_slotEndWrite)); } FCIMPLEND
./src/mono/mono/tests/verifier/valid_call_to_virtual_method_on_sealed_class.il
.assembly extern mscorlib { .ver 2:0:0:0 .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) // .z\V.4.. } .assembly 'cast' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .module cast.exe // GUID = {9CC094E7-72A4-43FA-B1DD-E936A7B60966} .class private auto ansi beforefieldinit X extends [mscorlib]System.Object { .method private static hidebysig default void Main () cil managed { .entrypoint ldstr "test" call instance int32 [mscorlib]System.String::GetHashCode() pop ret } }
./src/tests/JIT/Methodical/fp/exgen/3w1d-01_cs_d.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <!-- Set to 'Full' if the Debug? column is marked in the spreadsheet. Leave blank otherwise. --> <DebugType>Full</DebugType> <Optimize>False</Optimize> <NoStandardLib>True</NoStandardLib> <Noconfig>True</Noconfig> <AllowUnsafeBlocks>True</AllowUnsafeBlocks> </PropertyGroup> <ItemGroup> <Compile Include="3w1d-01.cs" /> </ItemGroup> </Project>
./src/native/external/brotli/dec/state.c
/* Copyright 2015 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ #include "./state.h" #include <stdlib.h> /* free, malloc */ #include <brotli/types.h> #include "./huffman.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif BROTLI_BOOL BrotliDecoderStateInit(BrotliDecoderState* s, brotli_alloc_func alloc_func, brotli_free_func free_func, void* opaque) { if (!alloc_func) { s->alloc_func = BrotliDefaultAllocFunc; s->free_func = BrotliDefaultFreeFunc; s->memory_manager_opaque = 0; } else { s->alloc_func = alloc_func; s->free_func = free_func; s->memory_manager_opaque = opaque; } s->error_code = 0; /* BROTLI_DECODER_NO_ERROR */ BrotliInitBitReader(&s->br); s->state = BROTLI_STATE_UNINITED; s->large_window = 0; s->substate_metablock_header = BROTLI_STATE_METABLOCK_HEADER_NONE; s->substate_uncompressed = BROTLI_STATE_UNCOMPRESSED_NONE; s->substate_decode_uint8 = BROTLI_STATE_DECODE_UINT8_NONE; s->substate_read_block_length = BROTLI_STATE_READ_BLOCK_LENGTH_NONE; s->buffer_length = 0; s->loop_counter = 0; s->pos = 0; s->rb_roundtrips = 0; s->partial_pos_out = 0; s->block_type_trees = NULL; s->block_len_trees = NULL; s->ringbuffer = NULL; s->ringbuffer_size = 0; s->new_ringbuffer_size = 0; s->ringbuffer_mask = 0; s->context_map = NULL; s->context_modes = NULL; s->dist_context_map = NULL; s->context_map_slice = NULL; s->dist_context_map_slice = NULL; s->literal_hgroup.codes = NULL; s->literal_hgroup.htrees = NULL; s->insert_copy_hgroup.codes = NULL; s->insert_copy_hgroup.htrees = NULL; s->distance_hgroup.codes = NULL; s->distance_hgroup.htrees = NULL; s->is_last_metablock = 0; s->is_uncompressed = 0; s->is_metadata = 0; s->should_wrap_ringbuffer = 0; s->canny_ringbuffer_allocation = 1; s->window_bits = 0; s->max_distance = 0; s->dist_rb[0] = 16; s->dist_rb[1] = 15; s->dist_rb[2] = 11; s->dist_rb[3] = 4; s->dist_rb_idx = 0; s->block_type_trees = NULL; s->block_len_trees = NULL; s->mtf_upper_bound = 63; s->dictionary = BrotliGetDictionary(); s->transforms = BrotliGetTransforms(); return BROTLI_TRUE; } void BrotliDecoderStateMetablockBegin(BrotliDecoderState* s) { s->meta_block_remaining_len = 0; s->block_length[0] = 1U << 24; s->block_length[1] = 1U << 24; s->block_length[2] = 1U << 24; s->num_block_types[0] = 1; s->num_block_types[1] = 1; s->num_block_types[2] = 1; s->block_type_rb[0] = 1; s->block_type_rb[1] = 0; s->block_type_rb[2] = 1; s->block_type_rb[3] = 0; s->block_type_rb[4] = 1; s->block_type_rb[5] = 0; s->context_map = NULL; s->context_modes = NULL; s->dist_context_map = NULL; s->context_map_slice = NULL; s->literal_htree = NULL; s->dist_context_map_slice = NULL; s->dist_htree_index = 0; s->context_lookup = NULL; s->literal_hgroup.codes = NULL; s->literal_hgroup.htrees = NULL; s->insert_copy_hgroup.codes = NULL; s->insert_copy_hgroup.htrees = NULL; s->distance_hgroup.codes = NULL; s->distance_hgroup.htrees = NULL; } void BrotliDecoderStateCleanupAfterMetablock(BrotliDecoderState* s) { BROTLI_DECODER_FREE(s, s->context_modes); BROTLI_DECODER_FREE(s, s->context_map); BROTLI_DECODER_FREE(s, s->dist_context_map); BROTLI_DECODER_FREE(s, s->literal_hgroup.htrees); BROTLI_DECODER_FREE(s, s->insert_copy_hgroup.htrees); BROTLI_DECODER_FREE(s, s->distance_hgroup.htrees); } void BrotliDecoderStateCleanup(BrotliDecoderState* s) { BrotliDecoderStateCleanupAfterMetablock(s); BROTLI_DECODER_FREE(s, s->ringbuffer); BROTLI_DECODER_FREE(s, s->block_type_trees); } BROTLI_BOOL 
BrotliDecoderHuffmanTreeGroupInit(BrotliDecoderState* s, HuffmanTreeGroup* group, uint32_t alphabet_size_max, uint32_t alphabet_size_limit, uint32_t ntrees) { /* 376 = 256 (1-st level table) + 4 + 7 + 15 + 31 + 63 (2-nd level mix-tables) This number was discovered by an "unlimited" "enough" calculator; it is actually a wee bit bigger than required in several cases (especially for alphabets with less than 16 symbols). */ const size_t max_table_size = alphabet_size_limit + 376; const size_t code_size = sizeof(HuffmanCode) * ntrees * max_table_size; const size_t htree_size = sizeof(HuffmanCode*) * ntrees; /* Pointer alignment is, hopefully, wider than sizeof(HuffmanCode). */ HuffmanCode** p = (HuffmanCode**)BROTLI_DECODER_ALLOC(s, code_size + htree_size); group->alphabet_size_max = (uint16_t)alphabet_size_max; group->alphabet_size_limit = (uint16_t)alphabet_size_limit; group->num_htrees = (uint16_t)ntrees; group->htrees = p; group->codes = (HuffmanCode*)(&p[ntrees]); return !!p; } #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif
/* Copyright 2015 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ #include "./state.h" #include <stdlib.h> /* free, malloc */ #include <brotli/types.h> #include "./huffman.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif BROTLI_BOOL BrotliDecoderStateInit(BrotliDecoderState* s, brotli_alloc_func alloc_func, brotli_free_func free_func, void* opaque) { if (!alloc_func) { s->alloc_func = BrotliDefaultAllocFunc; s->free_func = BrotliDefaultFreeFunc; s->memory_manager_opaque = 0; } else { s->alloc_func = alloc_func; s->free_func = free_func; s->memory_manager_opaque = opaque; } s->error_code = 0; /* BROTLI_DECODER_NO_ERROR */ BrotliInitBitReader(&s->br); s->state = BROTLI_STATE_UNINITED; s->large_window = 0; s->substate_metablock_header = BROTLI_STATE_METABLOCK_HEADER_NONE; s->substate_uncompressed = BROTLI_STATE_UNCOMPRESSED_NONE; s->substate_decode_uint8 = BROTLI_STATE_DECODE_UINT8_NONE; s->substate_read_block_length = BROTLI_STATE_READ_BLOCK_LENGTH_NONE; s->buffer_length = 0; s->loop_counter = 0; s->pos = 0; s->rb_roundtrips = 0; s->partial_pos_out = 0; s->block_type_trees = NULL; s->block_len_trees = NULL; s->ringbuffer = NULL; s->ringbuffer_size = 0; s->new_ringbuffer_size = 0; s->ringbuffer_mask = 0; s->context_map = NULL; s->context_modes = NULL; s->dist_context_map = NULL; s->context_map_slice = NULL; s->dist_context_map_slice = NULL; s->literal_hgroup.codes = NULL; s->literal_hgroup.htrees = NULL; s->insert_copy_hgroup.codes = NULL; s->insert_copy_hgroup.htrees = NULL; s->distance_hgroup.codes = NULL; s->distance_hgroup.htrees = NULL; s->is_last_metablock = 0; s->is_uncompressed = 0; s->is_metadata = 0; s->should_wrap_ringbuffer = 0; s->canny_ringbuffer_allocation = 1; s->window_bits = 0; s->max_distance = 0; s->dist_rb[0] = 16; s->dist_rb[1] = 15; s->dist_rb[2] = 11; s->dist_rb[3] = 4; s->dist_rb_idx = 0; s->block_type_trees = NULL; s->block_len_trees = NULL; s->mtf_upper_bound = 63; s->dictionary = BrotliGetDictionary(); s->transforms = BrotliGetTransforms(); return BROTLI_TRUE; } void BrotliDecoderStateMetablockBegin(BrotliDecoderState* s) { s->meta_block_remaining_len = 0; s->block_length[0] = 1U << 24; s->block_length[1] = 1U << 24; s->block_length[2] = 1U << 24; s->num_block_types[0] = 1; s->num_block_types[1] = 1; s->num_block_types[2] = 1; s->block_type_rb[0] = 1; s->block_type_rb[1] = 0; s->block_type_rb[2] = 1; s->block_type_rb[3] = 0; s->block_type_rb[4] = 1; s->block_type_rb[5] = 0; s->context_map = NULL; s->context_modes = NULL; s->dist_context_map = NULL; s->context_map_slice = NULL; s->literal_htree = NULL; s->dist_context_map_slice = NULL; s->dist_htree_index = 0; s->context_lookup = NULL; s->literal_hgroup.codes = NULL; s->literal_hgroup.htrees = NULL; s->insert_copy_hgroup.codes = NULL; s->insert_copy_hgroup.htrees = NULL; s->distance_hgroup.codes = NULL; s->distance_hgroup.htrees = NULL; } void BrotliDecoderStateCleanupAfterMetablock(BrotliDecoderState* s) { BROTLI_DECODER_FREE(s, s->context_modes); BROTLI_DECODER_FREE(s, s->context_map); BROTLI_DECODER_FREE(s, s->dist_context_map); BROTLI_DECODER_FREE(s, s->literal_hgroup.htrees); BROTLI_DECODER_FREE(s, s->insert_copy_hgroup.htrees); BROTLI_DECODER_FREE(s, s->distance_hgroup.htrees); } void BrotliDecoderStateCleanup(BrotliDecoderState* s) { BrotliDecoderStateCleanupAfterMetablock(s); BROTLI_DECODER_FREE(s, s->ringbuffer); BROTLI_DECODER_FREE(s, s->block_type_trees); } BROTLI_BOOL 
BrotliDecoderHuffmanTreeGroupInit(BrotliDecoderState* s, HuffmanTreeGroup* group, uint32_t alphabet_size_max, uint32_t alphabet_size_limit, uint32_t ntrees) { /* 376 = 256 (1-st level table) + 4 + 7 + 15 + 31 + 63 (2-nd level mix-tables) This number was discovered by an "unlimited" "enough" calculator; it is actually a wee bit bigger than required in several cases (especially for alphabets with less than 16 symbols). */ const size_t max_table_size = alphabet_size_limit + 376; const size_t code_size = sizeof(HuffmanCode) * ntrees * max_table_size; const size_t htree_size = sizeof(HuffmanCode*) * ntrees; /* Pointer alignment is, hopefully, wider than sizeof(HuffmanCode). */ HuffmanCode** p = (HuffmanCode**)BROTLI_DECODER_ALLOC(s, code_size + htree_size); group->alphabet_size_max = (uint16_t)alphabet_size_max; group->alphabet_size_limit = (uint16_t)alphabet_size_limit; group->num_htrees = (uint16_t)ntrees; group->htrees = p; group->codes = (HuffmanCode*)(&p[ntrees]); return !!p; } #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif
-1
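One detail worth calling out in the state.c row above: BrotliDecoderHuffmanTreeGroupInit makes a single allocation of code_size + htree_size bytes and places the HuffmanCode** table at the front, which is why BrotliDecoderStateCleanupAfterMetablock only has to free group->htrees to release the whole group. A minimal standalone C sketch of that layout follows; the names Code and alloc_group are hypothetical stand-ins, not brotli API, and unlike the real decoder (which fills the per-tree pointers later, during decoding) this sketch carves fixed-size slices up front for illustration.

  #include <stdlib.h>
  #include <stdint.h>

  typedef uint32_t Code; /* hypothetical stand-in for HuffmanCode */

  static Code** alloc_group(size_t ntrees, size_t max_table_size) {
    const size_t code_size  = sizeof(Code) * ntrees * max_table_size;
    const size_t htree_size = sizeof(Code*) * ntrees;
    /* Assumes pointer alignment is at least as strict as Code's,
       the same assumption the brotli comment above makes. */
    Code** p = (Code**)malloc(htree_size + code_size);
    if (p != NULL) {
      Code* codes = (Code*)(&p[ntrees]);   /* payload starts right after the table */
      for (size_t i = 0; i < ntrees; ++i)
        p[i] = codes + i * max_table_size; /* one fixed slice per tree */
    }
    return p; /* caller releases table and payload with a single free(p) */
  }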
dotnet/runtime
66,280
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator
Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
stephentoub
2022-03-07T03:21:23Z
2022-03-07T21:52:38Z
30d66a2c350b4e4a6b5cd5b8aa961b3b23610fc1
457b1ffab6d99b4b9db0e4579c2be4624ba1b3aa
Enable RegexOptions.RightToLeft and lookbehinds in compiler / source generator. Replaces https://github.com/dotnet/runtime/pull/66127 Fixes https://github.com/dotnet/runtime/issues/62345 For .NET 7 we rewrote RegexCompiler as we were writing the source generator, and in doing so we left out support for RegexOptions.RightToLeft as well as lookbehinds (which are implemented via RightToLeft). This adds support for both. I initially started incrementally adding in support for various constructs in lookbehinds, but from a testing perspective it made more sense to just add it all, as then all of the RightToLeft tests are used to validate the constructs that are also in lookbehinds.
./src/native/public/mono/metadata/assembly.h
/** * \file */ #ifndef _MONONET_METADATA_ASSEMBLY_H_ #define _MONONET_METADATA_ASSEMBLY_H_ #include <mono/metadata/details/assembly-types.h> MONO_BEGIN_DECLS #define MONO_API_FUNCTION(ret,name,args) MONO_API ret name args; #include <mono/metadata/details/assembly-functions.h> #undef MONO_API_FUNCTION MONO_END_DECLS #endif
/** * \file */ #ifndef _MONONET_METADATA_ASSEMBLY_H_ #define _MONONET_METADATA_ASSEMBLY_H_ #include <mono/metadata/details/assembly-types.h> MONO_BEGIN_DECLS #define MONO_API_FUNCTION(ret,name,args) MONO_API ret name args; #include <mono/metadata/details/assembly-functions.h> #undef MONO_API_FUNCTION MONO_END_DECLS #endif
-1
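The assembly.h row above is an X-macro header: the exported function list lives once in details/assembly-functions.h as MONO_API_FUNCTION(ret,name,args) entries, and assembly.h temporarily defines that macro so each entry expands into a prototype, then undefines it. A minimal sketch of the same pattern with hypothetical names (funcs.h and its entries are illustrative, not the actual mono list):

  /* funcs.h -- a separate file holding the single list of functions
     (hypothetical entries; X-macro headers deliberately omit include guards) */
  API_FUNCTION(int,  widget_open,  (const char *name))
  API_FUNCTION(void, widget_close, (int handle))

  /* consumer 1: expand the list into prototypes, as assembly.h does */
  #define API_FUNCTION(ret, name, args) ret name args;
  #include "funcs.h"
  #undef API_FUNCTION

  /* consumer 2: expand the same list into a table of names */
  #define API_FUNCTION(ret, name, args) #name,
  static const char *api_names[] = {
  #include "funcs.h"
  };
  #undef API_FUNCTION

The payoff of the pattern is that adding one line to funcs.h updates every expansion site consistently.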
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/cpu-amd64.md
# -*- mode:text; -*- # x86-class cpu description file # this file is read by genmdesc to produce a table with all the relevant information # about the cpu instructions that may be used by the register allocator, the scheduler # and other parts of the arch-dependent part of mini. # # An opcode name is followed by a colon and optional specifiers. # A specifier has a name, a colon and a value. Specifiers are separated by white space. # Here is a description of the specifiers valid for this file and their possible values. # # dest:register describes the destination register of an instruction # src1:register describes the first source register of an instruction # src2:register describes the second source register of an instruction # # i integer register # b base register (used in address references) # f floating point register # a EAX register # d EDX register # l long reg (forced eax:edx) # s ECX register # c register which can be used as a byte register (RAX..RDX) # A - first arg reg (rdi/rcx) # # len:number describe the maximum length in bytes of the instruction # number is a positive integer. If the length is not specified # it defaults to zero. But lengths are only checked if the given opcode # is encountered during compilation. Some opcodes, like CONV_U4 are # transformed into other opcodes in the brg files, so they do not show up # during code generation. # # cost:number describe how many cycles are needed to complete the instruction (unused) # # clob:spec describe if the instruction clobbers registers or has special needs # # c clobbers caller-save registers # 1 clobbers the first source register # a EAX is clobbered # d EDX is clobbered # x both the source operands are clobbered (xchg) # m sets an XMM reg # # flags:spec describe if the instruction uses or sets the flags (unused) # # s sets the flags # u uses the flags # m uses and modifies the flags # # res:spec describe what units are used in the processor (unused) # # delay: describe delay slots (unused) # # the required specifiers are: len, clob (if registers are clobbered), the registers # specifiers if the registers are actually used, flags (when scheduling is implemented). # # See the code in mini-x86.c for more details on how the specifiers are used. # break: len:2 tailcall: len:255 clob:c tailcall_reg: src1:b len:255 clob:c tailcall_membase: src1:b len:255 clob:c # tailcall_parameter models the size of moving one parameter, # so that the required size of a branch around a tailcall can # be accurately estimated; something like: # void f1(volatile long *a) # { # a[large] = a[another large] # } # # If the offsets fit in 32bits, then len:14: # 48 8b 87 e0 04 00 00 movq 1248(%rdi), %rax # 48 89 87 00 08 00 00 movq %rax, 2048(%rdi) # # else 64bits: # 48 b8 e0 fc b3 c4 04 00 00 00 movabsq $20479999200, %rax # 48 8b 04 07 movq (%rdi,%rax), %rax # 48 b9 00 00 b4 c4 04 00 00 00 movabsq $20480000000, %rcx # 48 89 04 0f movq %rax, (%rdi,%rcx) # # Frame size is artificially limited to 1GB in mono_arch_tailcall_supported. # This is presently redundant with tailcall len:255, as the limit of # near branches is [-128, +127], after which the limit is # [-2GB, +2GB-1] # FIXME A fixed size sequence to move parameters would moot this. 
tailcall_parameter: len:14 br: len:6 label: len:0 seq_point: len:46 clob:c il_seq_point: len:0 long_add: dest:i src1:i src2:i len:3 clob:1 long_sub: dest:i src1:i src2:i len:3 clob:1 long_mul: dest:i src1:i src2:i len:4 clob:1 long_div: dest:a src1:a src2:i len:16 clob:d long_div_un: dest:a src1:a src2:i len:16 clob:d long_rem: dest:d src1:a src2:i len:16 clob:a long_rem_un: dest:d src1:a src2:i len:16 clob:a long_and: dest:i src1:i src2:i len:3 clob:1 long_or: dest:i src1:i src2:i len:3 clob:1 long_xor: dest:i src1:i src2:i len:3 clob:1 long_shl: dest:i src1:i src2:s clob:1 len:3 long_shr: dest:i src1:i src2:s clob:1 len:3 long_shr_un: dest:i src1:i src2:s clob:1 len:3 long_neg: dest:i src1:i len:3 clob:1 long_not: dest:i src1:i len:3 clob:1 long_conv_to_i1: dest:i src1:i len:4 long_conv_to_i2: dest:i src1:i len:4 long_conv_to_i4: dest:i src1:i len:3 long_conv_to_i8: dest:i src1:i len:3 long_conv_to_r4: dest:f src1:i len:15 long_conv_to_r8: dest:f src1:i len:9 long_conv_to_u4: dest:i src1:i len:3 long_conv_to_u8: dest:i src1:i len:3 long_conv_to_r_un: dest:f src1:i len:64 long_conv_to_ovf_i4_un: dest:i src1:i len:16 long_conv_to_ovf_u4: dest:i src1:i len:15 long_conv_to_u2: dest:i src1:i len:4 long_conv_to_u1: dest:i src1:i len:4 zext_i4: dest:i src1:i len:4 long_mul_imm: dest:i src1:i clob:1 len:16 long_min: dest:i src1:i src2:i len:16 clob:1 long_min_un: dest:i src1:i src2:i len:16 clob:1 long_max: dest:i src1:i src2:i len:16 clob:1 long_max_un: dest:i src1:i src2:i len:16 clob:1 throw: src1:i len:24 rethrow: src1:i len:24 start_handler: len:16 endfinally: len:9 endfilter: src1:a len:9 get_ex_obj: dest:a len:16 ckfinite: dest:f src1:f len:43 ceq: dest:c len:8 cgt: dest:c len:8 cgt_un: dest:c len:8 clt: dest:c len:8 clt_un: dest:c len:8 localloc: dest:i src1:i len:120 compare: src1:i src2:i len:3 lcompare: src1:i src2:i len:3 icompare: src1:i src2:i len:3 compare_imm: src1:i len:13 icompare_imm: src1:i len:8 fcompare: src1:f src2:f clob:a len:13 rcompare: src1:f src2:f clob:a len:13 arglist: src1:b len:11 check_this: src1:b len:5 call: dest:a clob:c len:32 voidcall: clob:c len:32 voidcall_reg: src1:i clob:c len:32 voidcall_membase: src1:b clob:c len:32 fcall: dest:f len:64 clob:c fcall_reg: dest:f src1:i len:64 clob:c fcall_membase: dest:f src1:b len:64 clob:c rcall: dest:f len:64 clob:c rcall_reg: dest:f src1:i len:64 clob:c rcall_membase: dest:f src1:b len:64 clob:c lcall: dest:a len:64 clob:c lcall_reg: dest:a src1:i len:64 clob:c lcall_membase: dest:a src1:b len:64 clob:c vcall: len:64 clob:c vcall_reg: src1:i len:64 clob:c vcall_membase: src1:b len:64 clob:c call_reg: dest:a src1:i len:32 clob:c call_membase: dest:a src1:b len:32 clob:c iconst: dest:i len:10 i8const: dest:i len:10 r4const: dest:f len:17 r8const: dest:f len:12 store_membase_imm: dest:b len:15 store_membase_reg: dest:b src1:i len:9 storei8_membase_reg: dest:b src1:i len:9 storei1_membase_imm: dest:b len:11 storei1_membase_reg: dest:b src1:c len:9 storei2_membase_imm: dest:b len:13 storei2_membase_reg: dest:b src1:i len:9 storei4_membase_imm: dest:b len:13 storei4_membase_reg: dest:b src1:i len:9 storei8_membase_imm: dest:b len:18 storer4_membase_reg: dest:b src1:f len:15 storer8_membase_reg: dest:b src1:f len:10 load_membase: dest:i src1:b len:8 loadi1_membase: dest:c src1:b len:9 loadu1_membase: dest:c src1:b len:9 loadi2_membase: dest:i src1:b len:9 loadu2_membase: dest:i src1:b len:9 loadi4_membase: dest:i src1:b len:9 loadu4_membase: dest:i src1:b len:9 loadi8_membase: dest:i src1:b len:18 loadr4_membase: dest:f 
src1:b len:16 loadr8_membase: dest:f src1:b len:16 loadu4_mem: dest:i len:10 amd64_loadi8_memindex: dest:i src1:i src2:i len:10 move: dest:i src1:i len:3 add_imm: dest:i src1:i len:8 clob:1 sub_imm: dest:i src1:i len:8 clob:1 mul_imm: dest:i src1:i len:12 and_imm: dest:i src1:i len:8 clob:1 or_imm: dest:i src1:i len:8 clob:1 xor_imm: dest:i src1:i len:8 clob:1 shl_imm: dest:i src1:i len:8 clob:1 shr_imm: dest:i src1:i len:8 clob:1 shr_un_imm: dest:i src1:i len:8 clob:1 cond_exc_eq: len:8 cond_exc_ne_un: len:8 cond_exc_lt: len:8 cond_exc_lt_un: len:8 cond_exc_gt: len:8 cond_exc_gt_un: len:8 cond_exc_ge: len:8 cond_exc_ge_un: len:8 cond_exc_le: len:8 cond_exc_le_un: len:8 cond_exc_ov: len:8 cond_exc_no: len:8 cond_exc_c: len:8 cond_exc_nc: len:8 cond_exc_iov: len:8 cond_exc_ic: len:8 long_mul_ovf: dest:i src1:i src2:i clob:1 len:16 long_mul_ovf_un: dest:i src1:i src2:i len:22 long_shr_imm: dest:i src1:i clob:1 len:11 long_shr_un_imm: dest:i src1:i clob:1 len:11 long_shl_imm: dest:i src1:i clob:1 len:11 long_beq: len:8 long_bge: len:8 long_bgt: len:8 long_ble: len:8 long_blt: len:8 long_bne_un: len:8 long_bge_un: len:8 long_bgt_un: len:8 long_ble_un: len:8 long_blt_un: len:8 float_beq: len:13 float_bne_un: len:18 float_blt: len:13 float_blt_un: len:30 float_bgt: len:13 float_bgt_un: len:30 float_bge: len:32 float_bge_un: len:13 float_ble: len:32 float_ble_un: len:13 float_add: dest:f src1:f src2:f clob:1 len:5 float_sub: dest:f src1:f src2:f clob:1 len:5 float_mul: dest:f src1:f src2:f clob:1 len:5 float_div: dest:f src1:f src2:f clob:1 len:5 float_div_un: dest:f src1:f src2:f clob:1 len:5 float_rem: dest:f src1:f src2:f clob:1 len:19 float_rem_un: dest:f src1:f src2:f clob:1 len:19 float_neg: dest:f src1:f clob:1 len:23 float_not: dest:f src1:f clob:1 len:3 float_conv_to_i1: dest:i src1:f len:49 float_conv_to_i2: dest:i src1:f len:49 float_conv_to_i4: dest:i src1:f len:49 float_conv_to_i8: dest:i src1:f len:49 float_conv_to_u4: dest:i src1:f len:49 float_conv_to_u8: dest:i src1:f len:49 float_conv_to_u2: dest:i src1:f len:49 float_conv_to_u1: dest:i src1:f len:49 float_conv_to_i: dest:i src1:f len:49 float_conv_to_ovf_i: dest:a src1:f len:40 float_conv_to_ovd_u: dest:a src1:f len:40 float_mul_ovf: float_ceq: dest:i src1:f src2:f len:35 float_cgt: dest:i src1:f src2:f len:35 float_cgt_un: dest:i src1:f src2:f len:48 float_clt: dest:i src1:f src2:f len:35 float_clt_un: dest:i src1:f src2:f len:42 float_cneq: dest:i src1:f src2:f len:42 float_cge: dest:i src1:f src2:f len:35 float_cle: dest:i src1:f src2:f len:35 float_ceq_membase: dest:i src1:f src2:b len:35 float_cgt_membase: dest:i src1:f src2:b len:35 float_cgt_un_membase: dest:i src1:f src2:b len:48 float_clt_membase: dest:i src1:f src2:b len:35 float_clt_un_membase: dest:i src1:f src2:b len:42 # R4 opcodes r4_conv_to_i1: dest:i src1:f len:32 r4_conv_to_u1: dest:i src1:f len:32 r4_conv_to_i2: dest:i src1:f len:32 r4_conv_to_u2: dest:i src1:f len:32 r4_conv_to_i4: dest:i src1:f len:16 r4_conv_to_u4: dest:i src1:f len:32 r4_conv_to_i8: dest:i src1:f len:32 r4_conv_to_i: dest:i src1:f len:32 r4_conv_to_r8: dest:f src1:f len:17 r4_conv_to_r4: dest:f src1:f len:17 r4_add: dest:f src1:f src2:f clob:1 len:5 r4_sub: dest:f src1:f src2:f clob:1 len:5 r4_mul: dest:f src1:f src2:f clob:1 len:5 r4_div: dest:f src1:f src2:f clob:1 len:5 r4_neg: dest:f src1:f clob:1 len:23 r4_ceq: dest:i src1:f src2:f len:35 r4_cgt: dest:i src1:f src2:f len:35 r4_cgt_un: dest:i src1:f src2:f len:48 r4_clt: dest:i src1:f src2:f len:35 r4_clt_un: dest:i src1:f src2:f 
len:42 r4_cneq: dest:i src1:f src2:f len:42 r4_cge: dest:i src1:f src2:f len:35 r4_cle: dest:i src1:f src2:f len:35 fmove: dest:f src1:f len:8 rmove: dest:f src1:f len:8 move_f_to_i4: dest:i src1:f len:16 move_i4_to_f: dest:f src1:i len:16 move_f_to_i8: dest:i src1:f len:5 move_i8_to_f: dest:f src1:i len:5 call_handler: len:14 clob:c aotconst: dest:i len:10 gc_safe_point: clob:c src1:i len:40 x86_test_null: src1:i len:5 x86_compare_membase_reg: src1:b src2:i len:9 x86_compare_membase_imm: src1:b len:13 x86_compare_reg_membase: src1:i src2:b len:8 x86_inc_reg: dest:i src1:i clob:1 len:3 x86_inc_membase: src1:b len:8 x86_dec_reg: dest:i src1:i clob:1 len:3 x86_dec_membase: src1:b len:8 x86_add_membase_imm: src1:b len:13 x86_sub_membase_imm: src1:b len:13 x86_push: src1:i len:3 x86_push_imm: len:6 x86_push_membase: src1:b len:8 x86_push_obj: src1:b len:40 x86_lea: dest:i src1:i src2:i len:8 x86_lea_membase: dest:i src1:i len:11 amd64_lea_membase: dest:i src1:i len:11 x86_xchg: src1:i src2:i clob:x len:2 x86_fpop: src1:f len:3 x86_seteq_membase: src1:b len:9 x86_add_reg_membase: dest:i src1:i src2:b clob:1 len:13 x86_sub_reg_membase: dest:i src1:i src2:b clob:1 len:13 x86_mul_reg_membase: dest:i src1:i src2:b clob:1 len:13 x86_and_reg_membase: dest:i src1:i src2:b clob:1 len:13 x86_or_reg_membase: dest:i src1:i src2:b clob:1 len:13 x86_xor_reg_membase: dest:i src1:i src2:b clob:1 len:13 amd64_test_null: src1:i len:5 amd64_icompare_membase_reg: src1:b src2:i len:8 amd64_icompare_membase_imm: src1:b len:13 amd64_icompare_reg_membase: src1:i src2:b len:8 amd64_set_xmmreg_r4: dest:f src1:f len:14 clob:m amd64_set_xmmreg_r8: dest:f src1:f len:14 clob:m amd64_save_sp_to_lmf: len:16 tls_get: dest:i len:32 tls_set: src1:i len:16 atomic_add_i4: src1:b src2:i dest:i len:32 atomic_add_i8: src1:b src2:i dest:i len:32 atomic_exchange_i4: src1:b src2:i dest:i len:12 atomic_exchange_i8: src1:b src2:i dest:i len:12 atomic_cas_i4: src1:b src2:i src3:a dest:a len:24 atomic_cas_i8: src1:b src2:i src3:a dest:a len:24 memory_barrier: len:3 atomic_load_i1: dest:c src1:b len:9 atomic_load_u1: dest:c src1:b len:9 atomic_load_i2: dest:i src1:b len:9 atomic_load_u2: dest:i src1:b len:9 atomic_load_i4: dest:i src1:b len:9 atomic_load_u4: dest:i src1:b len:9 atomic_load_i8: dest:i src1:b len:9 atomic_load_u8: dest:i src1:b len:9 atomic_load_r4: dest:f src1:b len:16 atomic_load_r8: dest:f src1:b len:16 atomic_store_i1: dest:b src1:c len:12 atomic_store_u1: dest:b src1:c len:12 atomic_store_i2: dest:b src1:i len:12 atomic_store_u2: dest:b src1:i len:12 atomic_store_i4: dest:b src1:i len:12 atomic_store_u4: dest:b src1:i len:12 atomic_store_i8: dest:b src1:i len:12 atomic_store_u8: dest:b src1:i len:12 atomic_store_r4: dest:b src1:f len:18 atomic_store_r8: dest:b src1:f len:13 adc: dest:i src1:i src2:i len:3 clob:1 addcc: dest:i src1:i src2:i len:3 clob:1 subcc: dest:i src1:i src2:i len:3 clob:1 adc_imm: dest:i src1:i len:8 clob:1 sbb: dest:i src1:i src2:i len:3 clob:1 sbb_imm: dest:i src1:i len:8 clob:1 br_reg: src1:i len:3 sin: dest:f src1:f len:32 cos: dest:f src1:f len:32 abs: dest:f src1:f clob:1 len:32 tan: dest:f src1:f len:59 atan: dest:f src1:f len:9 sqrt: dest:f src1:f len:32 sext_i1: dest:i src1:i len:4 sext_i2: dest:i src1:i len:4 sext_i4: dest:i src1:i len:8 laddcc: dest:i src1:i src2:i len:3 clob:1 lsubcc: dest:i src1:i src2:i len:3 clob:1 # 32 bit opcodes int_add: dest:i src1:i src2:i clob:1 len:4 int_sub: dest:i src1:i src2:i clob:1 len:4 int_mul: dest:i src1:i src2:i clob:1 len:4 int_mul_ovf: dest:i src1:i 
src2:i clob:1 len:32 int_mul_ovf_un: dest:i src1:i src2:i clob:1 len:32 int_div: dest:a src1:a src2:i clob:d len:32 int_div_un: dest:a src1:a src2:i clob:d len:32 int_rem: dest:d src1:a src2:i clob:a len:32 int_rem_un: dest:d src1:a src2:i clob:a len:32 int_and: dest:i src1:i src2:i clob:1 len:4 int_or: dest:i src1:i src2:i clob:1 len:4 int_xor: dest:i src1:i src2:i clob:1 len:4 int_shl: dest:i src1:i src2:s clob:1 len:4 int_shr: dest:i src1:i src2:s clob:1 len:4 int_shr_un: dest:i src1:i src2:s clob:1 len:4 int_adc: dest:i src1:i src2:i clob:1 len:4 int_adc_imm: dest:i src1:i clob:1 len:8 int_sbb: dest:i src1:i src2:i clob:1 len:4 int_sbb_imm: dest:i src1:i clob:1 len:8 int_addcc: dest:i src1:i src2:i clob:1 len:16 int_subcc: dest:i src1:i src2:i clob:1 len:16 int_add_imm: dest:i src1:i clob:1 len:8 int_sub_imm: dest:i src1:i clob:1 len:8 int_mul_imm: dest:i src1:i clob:1 len:32 int_div_imm: dest:a src1:i clob:d len:32 int_div_un_imm: dest:a src1:i clob:d len:32 int_rem_un_imm: dest:d src1:i clob:a len:32 int_and_imm: dest:i src1:i clob:1 len:8 int_or_imm: dest:i src1:i clob:1 len:8 int_xor_imm: dest:i src1:i clob:1 len:8 int_shl_imm: dest:i src1:i clob:1 len:8 int_shr_imm: dest:i src1:i clob:1 len:8 int_shr_un_imm: dest:i src1:i clob:1 len:8 int_min: dest:i src1:i src2:i len:16 clob:1 int_max: dest:i src1:i src2:i len:16 clob:1 int_min_un: dest:i src1:i src2:i len:16 clob:1 int_max_un: dest:i src1:i src2:i len:16 clob:1 int_neg: dest:i src1:i clob:1 len:4 int_not: dest:i src1:i clob:1 len:4 int_conv_to_r4: dest:f src1:i len:15 int_conv_to_r8: dest:f src1:i len:9 int_ceq: dest:c len:8 int_cgt: dest:c len:8 int_cgt_un: dest:c len:8 int_clt: dest:c len:8 int_clt_un: dest:c len:8 int_cneq: dest:c len:8 int_cge: dest:c len:8 int_cle: dest:c len:8 int_cge_un: dest:c len:8 int_cle_un: dest:c len:8 int_beq: len:8 int_bne_un: len:8 int_blt: len:8 int_blt_un: len:8 int_bgt: len:8 int_bgt_un: len:8 int_bge: len:8 int_bge_un: len:8 int_ble: len:8 int_ble_un: len:8 card_table_wbarrier: src1:a src2:i clob:d len:56 relaxed_nop: len:2 hard_nop: len:1 # Linear IR opcodes nop: len:0 dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_i8const: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 long_ceq: dest:c len:64 long_cgt: dest:c len:64 long_cgt_un: dest:c len:64 long_clt: dest:c len:64 long_clt_un: dest:c len:64 int_conv_to_i1: dest:i src1:i len:4 int_conv_to_i2: dest:i src1:i len:4 int_conv_to_i4: dest:i src1:i len:3 int_conv_to_i8: dest:i src1:i len:3 int_conv_to_u4: dest:i src1:i len:3 int_conv_to_u8: dest:i src1:i len:3 int_conv_to_u: dest:i src1:i len:4 int_conv_to_u2: dest:i src1:i len:4 int_conv_to_u1: dest:i src1:i len:4 int_conv_to_i: dest:i src1:i len:4 cond_exc_ieq: len:8 cond_exc_ine_un: len:8 cond_exc_ilt: len:8 cond_exc_ilt_un: len:8 cond_exc_igt: len:8 cond_exc_igt_un: len:8 cond_exc_ige: len:8 cond_exc_ige_un: len:8 cond_exc_ile: len:8 cond_exc_ile_un: len:8 cond_exc_ino: len:8 cond_exc_inc: len:8 x86_compare_membase8_imm: src1:b len:9 jump_table: dest:i len:18 cmov_ieq: dest:i src1:i src2:i len:16 clob:1 cmov_ige: dest:i src1:i src2:i len:16 clob:1 cmov_igt: dest:i src1:i src2:i len:16 clob:1 cmov_ile: dest:i src1:i src2:i len:16 clob:1 cmov_ilt: dest:i src1:i src2:i len:16 clob:1 cmov_ine_un: dest:i src1:i src2:i len:16 clob:1 cmov_ige_un: dest:i src1:i src2:i len:16 clob:1 cmov_igt_un: dest:i src1:i src2:i len:16 clob:1 cmov_ile_un: dest:i src1:i src2:i len:16 clob:1 cmov_ilt_un: dest:i src1:i src2:i len:16 clob:1 
cmov_leq: dest:i src1:i src2:i len:16 clob:1 cmov_lge: dest:i src1:i src2:i len:16 clob:1 cmov_lgt: dest:i src1:i src2:i len:16 clob:1 cmov_lle: dest:i src1:i src2:i len:16 clob:1 cmov_llt: dest:i src1:i src2:i len:16 clob:1 cmov_lne_un: dest:i src1:i src2:i len:16 clob:1 cmov_lge_un: dest:i src1:i src2:i len:16 clob:1 cmov_lgt_un: dest:i src1:i src2:i len:16 clob:1 cmov_lle_un: dest:i src1:i src2:i len:16 clob:1 cmov_llt_un: dest:i src1:i src2:i len:16 clob:1 long_add_imm: dest:i src1:i clob:1 len:12 long_sub_imm: dest:i src1:i clob:1 len:12 long_and_imm: dest:i src1:i clob:1 len:12 long_or_imm: dest:i src1:i clob:1 len:12 long_xor_imm: dest:i src1:i clob:1 len:12 lcompare_imm: src1:i len:13 amd64_compare_membase_reg: src1:b src2:i len:9 amd64_compare_membase_imm: src1:b len:14 amd64_compare_reg_membase: src1:i src2:b len:9 amd64_add_reg_membase: dest:i src1:i src2:b clob:1 len:14 amd64_sub_reg_membase: dest:i src1:i src2:b clob:1 len:14 amd64_and_reg_membase: dest:i src1:i src2:b clob:1 len:14 amd64_or_reg_membase: dest:i src1:i src2:b clob:1 len:14 amd64_xor_reg_membase: dest:i src1:i src2:b clob:1 len:14 amd64_add_membase_imm: src1:b len:16 amd64_sub_membase_imm: src1:b len:16 amd64_and_membase_imm: src1:b len:13 amd64_or_membase_imm: src1:b len:13 amd64_xor_membase_imm: src1:b len:13 x86_and_membase_imm: src1:b len:12 x86_or_membase_imm: src1:b len:12 x86_xor_membase_imm: src1:b len:12 x86_add_membase_reg: src1:b src2:i len:12 x86_sub_membase_reg: src1:b src2:i len:12 x86_and_membase_reg: src1:b src2:i len:12 x86_or_membase_reg: src1:b src2:i len:12 x86_xor_membase_reg: src1:b src2:i len:12 x86_mul_membase_reg: src1:b src2:i len:14 amd64_add_membase_reg: src1:b src2:i len:13 amd64_sub_membase_reg: src1:b src2:i len:13 amd64_and_membase_reg: src1:b src2:i len:13 amd64_or_membase_reg: src1:b src2:i len:13 amd64_xor_membase_reg: src1:b src2:i len:13 amd64_mul_membase_reg: src1:b src2:i len:15 float_conv_to_r4: dest:f src1:f len:17 vcall2: len:64 clob:c vcall2_reg: src1:i len:64 clob:c vcall2_membase: src1:b len:64 clob:c dyn_call: src1:i src2:i len:192 clob:c localloc_imm: dest:i len:120 load_mem: dest:i len:16 loadi8_mem: dest:i len:16 loadi4_mem: dest:i len:16 loadu1_mem: dest:i len:16 loadu2_mem: dest:i len:16 #SIMD addps: dest:x src1:x src2:x len:4 clob:1 divps: dest:x src1:x src2:x len:4 clob:1 mulps: dest:x src1:x src2:x len:4 clob:1 subps: dest:x src1:x src2:x len:4 clob:1 maxps: dest:x src1:x src2:x len:4 clob:1 minps: dest:x src1:x src2:x len:4 clob:1 compps: dest:x src1:x src2:x len:5 clob:1 andps: dest:x src1:x src2:x len:4 clob:1 andnps: dest:x src1:x src2:x len:4 clob:1 orps: dest:x src1:x src2:x len:4 clob:1 xorps: dest:x src1:x src2:x len:4 clob:1 haddps: dest:x src1:x src2:x len:5 clob:1 hsubps: dest:x src1:x src2:x len:5 clob:1 addsubps: dest:x src1:x src2:x len:5 clob:1 dupps_low: dest:x src1:x len:5 dupps_high: dest:x src1:x len:5 addpd: dest:x src1:x src2:x len:5 clob:1 divpd: dest:x src1:x src2:x len:5 clob:1 mulpd: dest:x src1:x src2:x len:5 clob:1 subpd: dest:x src1:x src2:x len:5 clob:1 maxpd: dest:x src1:x src2:x len:5 clob:1 minpd: dest:x src1:x src2:x len:5 clob:1 comppd: dest:x src1:x src2:x len:6 clob:1 andpd: dest:x src1:x src2:x len:5 clob:1 andnpd: dest:x src1:x src2:x len:5 clob:1 orpd: dest:x src1:x src2:x len:5 clob:1 xorpd: dest:x src1:x src2:x len:5 clob:1 sqrtpd: dest:x src1:x len:5 clob:1 haddpd: dest:x src1:x src2:x len:6 clob:1 hsubpd: dest:x src1:x src2:x len:6 clob:1 addsubpd: dest:x src1:x src2:x len:6 clob:1 duppd: dest:x src1:x len:6 pand: 
dest:x src1:x src2:x len:5 clob:1 pandn: dest:x src1:x src2:x len:5 clob:1 por: dest:x src1:x src2:x len:5 clob:1 pxor: dest:x src1:x src2:x len:5 clob:1 sqrtps: dest:x src1:x len:5 rsqrtps: dest:x src1:x len:5 rcpps: dest:x src1:x len:5 pshuflew_high: dest:x src1:x len:6 pshuflew_low: dest:x src1:x len:6 pshufled: dest:x src1:x len:6 shufps: dest:x src1:x src2:x len:5 clob:1 shufpd: dest:x src1:x src2:x len:6 clob:1 extract_mask: dest:i src1:x len:6 paddb: dest:x src1:x src2:x len:5 clob:1 paddw: dest:x src1:x src2:x len:5 clob:1 paddd: dest:x src1:x src2:x len:5 clob:1 paddq: dest:x src1:x src2:x len:5 clob:1 psubb: dest:x src1:x src2:x len:5 clob:1 psubw: dest:x src1:x src2:x len:5 clob:1 psubd: dest:x src1:x src2:x len:5 clob:1 psubq: dest:x src1:x src2:x len:5 clob:1 pmaxb_un: dest:x src1:x src2:x len:5 clob:1 pmaxw_un: dest:x src1:x src2:x len:6 clob:1 pmaxd_un: dest:x src1:x src2:x len:6 clob:1 pmaxb: dest:x src1:x src2:x len:6 clob:1 pmaxw: dest:x src1:x src2:x len:5 clob:1 pmaxd: dest:x src1:x src2:x len:6 clob:1 pavgb_un: dest:x src1:x src2:x len:5 clob:1 pavgw_un: dest:x src1:x src2:x len:5 clob:1 pminb_un: dest:x src1:x src2:x len:5 clob:1 pminw_un: dest:x src1:x src2:x len:6 clob:1 pmind_un: dest:x src1:x src2:x len:6 clob:1 pminb: dest:x src1:x src2:x len:6 clob:1 pminw: dest:x src1:x src2:x len:5 clob:1 pmind: dest:x src1:x src2:x len:6 clob:1 pcmpeqb: dest:x src1:x src2:x len:5 clob:1 pcmpeqw: dest:x src1:x src2:x len:5 clob:1 pcmpeqd: dest:x src1:x src2:x len:5 clob:1 pcmpeqq: dest:x src1:x src2:x len:6 clob:1 pcmpgtb: dest:x src1:x src2:x len:5 clob:1 pcmpgtw: dest:x src1:x src2:x len:5 clob:1 pcmpgtd: dest:x src1:x src2:x len:5 clob:1 pcmpgtq: dest:x src1:x src2:x len:6 clob:1 psum_abs_diff: dest:x src1:x src2:x len:5 clob:1 unpack_lowb: dest:x src1:x src2:x len:5 clob:1 unpack_loww: dest:x src1:x src2:x len:5 clob:1 unpack_lowd: dest:x src1:x src2:x len:5 clob:1 unpack_lowq: dest:x src1:x src2:x len:5 clob:1 unpack_lowps: dest:x src1:x src2:x len:5 clob:1 unpack_lowpd: dest:x src1:x src2:x len:5 clob:1 unpack_highb: dest:x src1:x src2:x len:5 clob:1 unpack_highw: dest:x src1:x src2:x len:5 clob:1 unpack_highd: dest:x src1:x src2:x len:5 clob:1 unpack_highq: dest:x src1:x src2:x len:5 clob:1 unpack_highps: dest:x src1:x src2:x len:5 clob:1 unpack_highpd: dest:x src1:x src2:x len:5 clob:1 packw: dest:x src1:x src2:x len:5 clob:1 packd: dest:x src1:x src2:x len:5 clob:1 packw_un: dest:x src1:x src2:x len:5 clob:1 packd_un: dest:x src1:x src2:x len:6 clob:1 paddb_sat: dest:x src1:x src2:x len:5 clob:1 paddb_sat_un: dest:x src1:x src2:x len:5 clob:1 paddw_sat: dest:x src1:x src2:x len:5 clob:1 paddw_sat_un: dest:x src1:x src2:x len:5 clob:1 psubb_sat: dest:x src1:x src2:x len:5 clob:1 psubb_sat_un: dest:x src1:x src2:x len:5 clob:1 psubw_sat: dest:x src1:x src2:x len:5 clob:1 psubw_sat_un: dest:x src1:x src2:x len:5 clob:1 pmulw: dest:x src1:x src2:x len:5 clob:1 pmuld: dest:x src1:x src2:x len:6 clob:1 pmulq: dest:x src1:x src2:x len:5 clob:1 pmulw_high_un: dest:x src1:x src2:x len:5 clob:1 pmulw_high: dest:x src1:x src2:x len:5 clob:1 pshrw: dest:x src1:x len:6 clob:1 pshrw_reg: dest:x src1:x src2:x len:5 clob:1 psarw: dest:x src1:x len:6 clob:1 psarw_reg: dest:x src1:x src2:x len:5 clob:1 pshlw: dest:x src1:x len:6 clob:1 pshlw_reg: dest:x src1:x src2:x len:5 clob:1 pshrd: dest:x src1:x len:6 clob:1 pshrd_reg: dest:x src1:x src2:x len:5 clob:1 psard: dest:x src1:x len:6 clob:1 psard_reg: dest:x src1:x src2:x len:5 clob:1 pshld: dest:x src1:x len:6 clob:1 pshld_reg: dest:x 
src1:x src2:x len:5 clob:1 pshrq: dest:x src1:x len:6 clob:1 pshrq_reg: dest:x src1:x src2:x len:5 clob:1 pshlq: dest:x src1:x len:6 clob:1 pshlq_reg: dest:x src1:x src2:x len:5 clob:1 cvtdq2pd: dest:x src1:x len:5 clob:1 cvtdq2ps: dest:x src1:x len:4 clob:1 cvtpd2dq: dest:x src1:x len:5 clob:1 cvtpd2ps: dest:x src1:x len:5 clob:1 cvtps2dq: dest:x src1:x len:5 clob:1 cvtps2pd: dest:x src1:x len:4 clob:1 cvttpd2dq: dest:x src1:x len:5 clob:1 cvttps2dq: dest:x src1:x len:5 clob:1 xmove: dest:x src1:x len:5 xzero: dest:x len:5 xones: dest:x len:5 iconv_to_x: dest:x src1:i len:5 extract_i4: dest:i src1:x len:5 extract_i8: dest:i src1:x len:9 extract_i2: dest:i src1:x len:13 extract_i1: dest:i src1:x len:13 extract_r8: dest:f src1:x len:5 iconv_to_r4_raw: dest:f src1:i len:10 insert_i2: dest:x src1:x src2:i len:6 clob:1 extractx_u2: dest:i src1:x len:6 insertx_u1_slow: dest:x src1:i src2:i len:18 clob:x insertx_i4_slow: dest:x src1:x src2:i len:16 clob:x insertx_i8_slow: dest:x src1:x src2:i len:13 insertx_r4_slow: dest:x src1:x src2:f len:24 insertx_r8_slow: dest:x src1:x src2:f len:24 loadx_membase: dest:x src1:b len:9 storex_membase: dest:b src1:x len:9 storex_membase_reg: dest:b src1:x len:9 loadx_aligned_membase: dest:x src1:b len:7 storex_aligned_membase_reg: dest:b src1:x len:7 storex_nta_membase_reg: dest:b src1:x len:7 fconv_to_r8_x: dest:x src1:f len:4 xconv_r8_to_i4: dest:y src1:x len:7 prefetch_membase: src1:b len:4 expand_i2: dest:x src1:i len:18 expand_i4: dest:x src1:i len:11 expand_i8: dest:x src1:i len:11 expand_r4: dest:x src1:f len:16 expand_r8: dest:x src1:f len:13 roundp: dest:x src1:x len:10 liverange_start: len:0 liverange_end: len:0 gc_liveness_def: len:0 gc_liveness_use: len:0 gc_spill_slot_liveness_def: len:0 gc_param_slot_liveness_def: len:0 generic_class_init: src1:A len:32 clob:c get_last_error: dest:i len:32 fill_prof_call_ctx: src1:i len:128 lzcnt32: dest:i src1:i len:16 lzcnt64: dest:i src1:i len:16 popcnt32: dest:i src1:i len:16 popcnt64: dest:i src1:i len:16
# -*- mode:text; -*- # x86-class cpu description file # this file is read by genmdesc to produce a table with all the relevant information # about the cpu instructions that may be used by the register allocator, the scheduler # and other parts of the arch-dependent part of mini. # # An opcode name is followed by a colon and optional specifiers. # A specifier has a name, a colon and a value. Specifiers are separated by white space. # Here is a description of the specifiers valid for this file and their possible values. # # dest:register describes the destination register of an instruction # src1:register describes the first source register of an instruction # src2:register describes the second source register of an instruction # # i integer register # b base register (used in address references) # f floating point register # a EAX register # d EDX register # l long reg (forced eax:edx) # s ECX register # c register which can be used as a byte register (RAX..RDX) # A - first arg reg (rdi/rcx) # # len:number describe the maximum length in bytes of the instruction # number is a positive integer. If the length is not specified # it defaults to zero. But lengths are only checked if the given opcode # is encountered during compilation. Some opcodes, like CONV_U4 are # transformed into other opcodes in the brg files, so they do not show up # during code generation. # # cost:number describe how many cycles are needed to complete the instruction (unused) # # clob:spec describe if the instruction clobbers registers or has special needs # # c clobbers caller-save registers # 1 clobbers the first source register # a EAX is clobbered # d EDX is clobbered # x both the source operands are clobbered (xchg) # m sets an XMM reg # # flags:spec describe if the instruction uses or sets the flags (unused) # # s sets the flags # u uses the flags # m uses and modifies the flags # # res:spec describe what units are used in the processor (unused) # # delay: describe delay slots (unused) # # the required specifiers are: len, clob (if registers are clobbered), the registers # specifiers if the registers are actually used, flags (when scheduling is implemented). # # See the code in mini-x86.c for more details on how the specifiers are used. # break: len:2 tailcall: len:255 clob:c tailcall_reg: src1:b len:255 clob:c tailcall_membase: src1:b len:255 clob:c # tailcall_parameter models the size of moving one parameter, # so that the required size of a branch around a tailcall can # be accurately estimated; something like: # void f1(volatile long *a) # { # a[large] = a[another large] # } # # If the offsets fit in 32bits, then len:14: # 48 8b 87 e0 04 00 00 movq 1248(%rdi), %rax # 48 89 87 00 08 00 00 movq %rax, 2048(%rdi) # # else 64bits: # 48 b8 e0 fc b3 c4 04 00 00 00 movabsq $20479999200, %rax # 48 8b 04 07 movq (%rdi,%rax), %rax # 48 b9 00 00 b4 c4 04 00 00 00 movabsq $20480000000, %rcx # 48 89 04 0f movq %rax, (%rdi,%rcx) # # Frame size is artificially limited to 1GB in mono_arch_tailcall_supported. # This is presently redundant with tailcall len:255, as the limit of # near branches is [-128, +127], after which the limit is # [-2GB, +2GB-1] # FIXME A fixed size sequence to move parameters would moot this. 
tailcall_parameter: len:14 br: len:6 label: len:0 seq_point: len:46 clob:c il_seq_point: len:0 long_add: dest:i src1:i src2:i len:3 clob:1 long_sub: dest:i src1:i src2:i len:3 clob:1 long_mul: dest:i src1:i src2:i len:4 clob:1 long_div: dest:a src1:a src2:i len:16 clob:d long_div_un: dest:a src1:a src2:i len:16 clob:d long_rem: dest:d src1:a src2:i len:16 clob:a long_rem_un: dest:d src1:a src2:i len:16 clob:a long_and: dest:i src1:i src2:i len:3 clob:1 long_or: dest:i src1:i src2:i len:3 clob:1 long_xor: dest:i src1:i src2:i len:3 clob:1 long_shl: dest:i src1:i src2:s clob:1 len:3 long_shr: dest:i src1:i src2:s clob:1 len:3 long_shr_un: dest:i src1:i src2:s clob:1 len:3 long_neg: dest:i src1:i len:3 clob:1 long_not: dest:i src1:i len:3 clob:1 long_conv_to_i1: dest:i src1:i len:4 long_conv_to_i2: dest:i src1:i len:4 long_conv_to_i4: dest:i src1:i len:3 long_conv_to_i8: dest:i src1:i len:3 long_conv_to_r4: dest:f src1:i len:15 long_conv_to_r8: dest:f src1:i len:9 long_conv_to_u4: dest:i src1:i len:3 long_conv_to_u8: dest:i src1:i len:3 long_conv_to_r_un: dest:f src1:i len:64 long_conv_to_ovf_i4_un: dest:i src1:i len:16 long_conv_to_ovf_u4: dest:i src1:i len:15 long_conv_to_u2: dest:i src1:i len:4 long_conv_to_u1: dest:i src1:i len:4 zext_i4: dest:i src1:i len:4 long_mul_imm: dest:i src1:i clob:1 len:16 long_min: dest:i src1:i src2:i len:16 clob:1 long_min_un: dest:i src1:i src2:i len:16 clob:1 long_max: dest:i src1:i src2:i len:16 clob:1 long_max_un: dest:i src1:i src2:i len:16 clob:1 throw: src1:i len:24 rethrow: src1:i len:24 start_handler: len:16 endfinally: len:9 endfilter: src1:a len:9 get_ex_obj: dest:a len:16 ckfinite: dest:f src1:f len:43 ceq: dest:c len:8 cgt: dest:c len:8 cgt_un: dest:c len:8 clt: dest:c len:8 clt_un: dest:c len:8 localloc: dest:i src1:i len:120 compare: src1:i src2:i len:3 lcompare: src1:i src2:i len:3 icompare: src1:i src2:i len:3 compare_imm: src1:i len:13 icompare_imm: src1:i len:8 fcompare: src1:f src2:f clob:a len:13 rcompare: src1:f src2:f clob:a len:13 arglist: src1:b len:11 check_this: src1:b len:5 call: dest:a clob:c len:32 voidcall: clob:c len:32 voidcall_reg: src1:i clob:c len:32 voidcall_membase: src1:b clob:c len:32 fcall: dest:f len:64 clob:c fcall_reg: dest:f src1:i len:64 clob:c fcall_membase: dest:f src1:b len:64 clob:c rcall: dest:f len:64 clob:c rcall_reg: dest:f src1:i len:64 clob:c rcall_membase: dest:f src1:b len:64 clob:c lcall: dest:a len:64 clob:c lcall_reg: dest:a src1:i len:64 clob:c lcall_membase: dest:a src1:b len:64 clob:c vcall: len:64 clob:c vcall_reg: src1:i len:64 clob:c vcall_membase: src1:b len:64 clob:c call_reg: dest:a src1:i len:32 clob:c call_membase: dest:a src1:b len:32 clob:c iconst: dest:i len:10 i8const: dest:i len:10 r4const: dest:f len:17 r8const: dest:f len:12 store_membase_imm: dest:b len:15 store_membase_reg: dest:b src1:i len:9 storei8_membase_reg: dest:b src1:i len:9 storei1_membase_imm: dest:b len:11 storei1_membase_reg: dest:b src1:c len:9 storei2_membase_imm: dest:b len:13 storei2_membase_reg: dest:b src1:i len:9 storei4_membase_imm: dest:b len:13 storei4_membase_reg: dest:b src1:i len:9 storei8_membase_imm: dest:b len:18 storer4_membase_reg: dest:b src1:f len:15 storer8_membase_reg: dest:b src1:f len:10 load_membase: dest:i src1:b len:8 loadi1_membase: dest:c src1:b len:9 loadu1_membase: dest:c src1:b len:9 loadi2_membase: dest:i src1:b len:9 loadu2_membase: dest:i src1:b len:9 loadi4_membase: dest:i src1:b len:9 loadu4_membase: dest:i src1:b len:9 loadi8_membase: dest:i src1:b len:18 loadr4_membase: dest:f 
src1:b len:16 loadr8_membase: dest:f src1:b len:16 loadu4_mem: dest:i len:10 amd64_loadi8_memindex: dest:i src1:i src2:i len:10 move: dest:i src1:i len:3 add_imm: dest:i src1:i len:8 clob:1 sub_imm: dest:i src1:i len:8 clob:1 mul_imm: dest:i src1:i len:12 and_imm: dest:i src1:i len:8 clob:1 or_imm: dest:i src1:i len:8 clob:1 xor_imm: dest:i src1:i len:8 clob:1 shl_imm: dest:i src1:i len:8 clob:1 shr_imm: dest:i src1:i len:8 clob:1 shr_un_imm: dest:i src1:i len:8 clob:1 cond_exc_eq: len:8 cond_exc_ne_un: len:8 cond_exc_lt: len:8 cond_exc_lt_un: len:8 cond_exc_gt: len:8 cond_exc_gt_un: len:8 cond_exc_ge: len:8 cond_exc_ge_un: len:8 cond_exc_le: len:8 cond_exc_le_un: len:8 cond_exc_ov: len:8 cond_exc_no: len:8 cond_exc_c: len:8 cond_exc_nc: len:8 cond_exc_iov: len:8 cond_exc_ic: len:8 long_mul_ovf: dest:i src1:i src2:i clob:1 len:16 long_mul_ovf_un: dest:i src1:i src2:i len:22 long_shr_imm: dest:i src1:i clob:1 len:11 long_shr_un_imm: dest:i src1:i clob:1 len:11 long_shl_imm: dest:i src1:i clob:1 len:11 long_beq: len:8 long_bge: len:8 long_bgt: len:8 long_ble: len:8 long_blt: len:8 long_bne_un: len:8 long_bge_un: len:8 long_bgt_un: len:8 long_ble_un: len:8 long_blt_un: len:8 float_beq: len:13 float_bne_un: len:18 float_blt: len:13 float_blt_un: len:30 float_bgt: len:13 float_bgt_un: len:30 float_bge: len:32 float_bge_un: len:13 float_ble: len:32 float_ble_un: len:13 float_add: dest:f src1:f src2:f clob:1 len:5 float_sub: dest:f src1:f src2:f clob:1 len:5 float_mul: dest:f src1:f src2:f clob:1 len:5 float_div: dest:f src1:f src2:f clob:1 len:5 float_div_un: dest:f src1:f src2:f clob:1 len:5 float_rem: dest:f src1:f src2:f clob:1 len:19 float_rem_un: dest:f src1:f src2:f clob:1 len:19 float_neg: dest:f src1:f clob:1 len:23 float_not: dest:f src1:f clob:1 len:3 float_conv_to_i1: dest:i src1:f len:49 float_conv_to_i2: dest:i src1:f len:49 float_conv_to_i4: dest:i src1:f len:49 float_conv_to_i8: dest:i src1:f len:49 float_conv_to_u4: dest:i src1:f len:49 float_conv_to_u8: dest:i src1:f len:49 float_conv_to_u2: dest:i src1:f len:49 float_conv_to_u1: dest:i src1:f len:49 float_conv_to_ovf_i: dest:a src1:f len:40 float_conv_to_ovd_u: dest:a src1:f len:40 float_mul_ovf: float_ceq: dest:i src1:f src2:f len:35 float_cgt: dest:i src1:f src2:f len:35 float_cgt_un: dest:i src1:f src2:f len:48 float_clt: dest:i src1:f src2:f len:35 float_clt_un: dest:i src1:f src2:f len:42 float_cneq: dest:i src1:f src2:f len:42 float_cge: dest:i src1:f src2:f len:35 float_cle: dest:i src1:f src2:f len:35 float_ceq_membase: dest:i src1:f src2:b len:35 float_cgt_membase: dest:i src1:f src2:b len:35 float_cgt_un_membase: dest:i src1:f src2:b len:48 float_clt_membase: dest:i src1:f src2:b len:35 float_clt_un_membase: dest:i src1:f src2:b len:42 # R4 opcodes r4_conv_to_i1: dest:i src1:f len:32 r4_conv_to_u1: dest:i src1:f len:32 r4_conv_to_i2: dest:i src1:f len:32 r4_conv_to_u2: dest:i src1:f len:32 r4_conv_to_i4: dest:i src1:f len:16 r4_conv_to_u4: dest:i src1:f len:32 r4_conv_to_i8: dest:i src1:f len:32 r4_conv_to_r8: dest:f src1:f len:17 r4_conv_to_r4: dest:f src1:f len:17 r4_add: dest:f src1:f src2:f clob:1 len:5 r4_sub: dest:f src1:f src2:f clob:1 len:5 r4_mul: dest:f src1:f src2:f clob:1 len:5 r4_div: dest:f src1:f src2:f clob:1 len:5 r4_neg: dest:f src1:f clob:1 len:23 r4_ceq: dest:i src1:f src2:f len:35 r4_cgt: dest:i src1:f src2:f len:35 r4_cgt_un: dest:i src1:f src2:f len:48 r4_clt: dest:i src1:f src2:f len:35 r4_clt_un: dest:i src1:f src2:f len:42 r4_cneq: dest:i src1:f src2:f len:42 r4_cge: dest:i src1:f src2:f 
len:35 r4_cle: dest:i src1:f src2:f len:35 fmove: dest:f src1:f len:8 rmove: dest:f src1:f len:8 move_f_to_i4: dest:i src1:f len:16 move_i4_to_f: dest:f src1:i len:16 move_f_to_i8: dest:i src1:f len:5 move_i8_to_f: dest:f src1:i len:5 call_handler: len:14 clob:c aotconst: dest:i len:10 gc_safe_point: clob:c src1:i len:40 x86_test_null: src1:i len:5 x86_compare_membase_reg: src1:b src2:i len:9 x86_compare_membase_imm: src1:b len:13 x86_compare_reg_membase: src1:i src2:b len:8 x86_inc_reg: dest:i src1:i clob:1 len:3 x86_inc_membase: src1:b len:8 x86_dec_reg: dest:i src1:i clob:1 len:3 x86_dec_membase: src1:b len:8 x86_add_membase_imm: src1:b len:13 x86_sub_membase_imm: src1:b len:13 x86_push: src1:i len:3 x86_push_imm: len:6 x86_push_membase: src1:b len:8 x86_push_obj: src1:b len:40 x86_lea: dest:i src1:i src2:i len:8 x86_lea_membase: dest:i src1:i len:11 amd64_lea_membase: dest:i src1:i len:11 x86_xchg: src1:i src2:i clob:x len:2 x86_fpop: src1:f len:3 x86_seteq_membase: src1:b len:9 x86_add_reg_membase: dest:i src1:i src2:b clob:1 len:13 x86_sub_reg_membase: dest:i src1:i src2:b clob:1 len:13 x86_mul_reg_membase: dest:i src1:i src2:b clob:1 len:13 x86_and_reg_membase: dest:i src1:i src2:b clob:1 len:13 x86_or_reg_membase: dest:i src1:i src2:b clob:1 len:13 x86_xor_reg_membase: dest:i src1:i src2:b clob:1 len:13 amd64_test_null: src1:i len:5 amd64_icompare_membase_reg: src1:b src2:i len:8 amd64_icompare_membase_imm: src1:b len:13 amd64_icompare_reg_membase: src1:i src2:b len:8 amd64_set_xmmreg_r4: dest:f src1:f len:14 clob:m amd64_set_xmmreg_r8: dest:f src1:f len:14 clob:m amd64_save_sp_to_lmf: len:16 tls_get: dest:i len:32 tls_set: src1:i len:16 atomic_add_i4: src1:b src2:i dest:i len:32 atomic_add_i8: src1:b src2:i dest:i len:32 atomic_exchange_i4: src1:b src2:i dest:i len:12 atomic_exchange_i8: src1:b src2:i dest:i len:12 atomic_cas_i4: src1:b src2:i src3:a dest:a len:24 atomic_cas_i8: src1:b src2:i src3:a dest:a len:24 memory_barrier: len:3 atomic_load_i1: dest:c src1:b len:9 atomic_load_u1: dest:c src1:b len:9 atomic_load_i2: dest:i src1:b len:9 atomic_load_u2: dest:i src1:b len:9 atomic_load_i4: dest:i src1:b len:9 atomic_load_u4: dest:i src1:b len:9 atomic_load_i8: dest:i src1:b len:9 atomic_load_u8: dest:i src1:b len:9 atomic_load_r4: dest:f src1:b len:16 atomic_load_r8: dest:f src1:b len:16 atomic_store_i1: dest:b src1:c len:12 atomic_store_u1: dest:b src1:c len:12 atomic_store_i2: dest:b src1:i len:12 atomic_store_u2: dest:b src1:i len:12 atomic_store_i4: dest:b src1:i len:12 atomic_store_u4: dest:b src1:i len:12 atomic_store_i8: dest:b src1:i len:12 atomic_store_u8: dest:b src1:i len:12 atomic_store_r4: dest:b src1:f len:18 atomic_store_r8: dest:b src1:f len:13 adc: dest:i src1:i src2:i len:3 clob:1 addcc: dest:i src1:i src2:i len:3 clob:1 subcc: dest:i src1:i src2:i len:3 clob:1 adc_imm: dest:i src1:i len:8 clob:1 sbb: dest:i src1:i src2:i len:3 clob:1 sbb_imm: dest:i src1:i len:8 clob:1 br_reg: src1:i len:3 sin: dest:f src1:f len:32 cos: dest:f src1:f len:32 abs: dest:f src1:f clob:1 len:32 tan: dest:f src1:f len:59 atan: dest:f src1:f len:9 sqrt: dest:f src1:f len:32 sext_i1: dest:i src1:i len:4 sext_i2: dest:i src1:i len:4 sext_i4: dest:i src1:i len:8 laddcc: dest:i src1:i src2:i len:3 clob:1 lsubcc: dest:i src1:i src2:i len:3 clob:1 # 32 bit opcodes int_add: dest:i src1:i src2:i clob:1 len:4 int_sub: dest:i src1:i src2:i clob:1 len:4 int_mul: dest:i src1:i src2:i clob:1 len:4 int_mul_ovf: dest:i src1:i src2:i clob:1 len:32 int_mul_ovf_un: dest:i src1:i src2:i clob:1 len:32 
int_div: dest:a src1:a src2:i clob:d len:32 int_div_un: dest:a src1:a src2:i clob:d len:32 int_rem: dest:d src1:a src2:i clob:a len:32 int_rem_un: dest:d src1:a src2:i clob:a len:32 int_and: dest:i src1:i src2:i clob:1 len:4 int_or: dest:i src1:i src2:i clob:1 len:4 int_xor: dest:i src1:i src2:i clob:1 len:4 int_shl: dest:i src1:i src2:s clob:1 len:4 int_shr: dest:i src1:i src2:s clob:1 len:4 int_shr_un: dest:i src1:i src2:s clob:1 len:4 int_adc: dest:i src1:i src2:i clob:1 len:4 int_adc_imm: dest:i src1:i clob:1 len:8 int_sbb: dest:i src1:i src2:i clob:1 len:4 int_sbb_imm: dest:i src1:i clob:1 len:8 int_addcc: dest:i src1:i src2:i clob:1 len:16 int_subcc: dest:i src1:i src2:i clob:1 len:16 int_add_imm: dest:i src1:i clob:1 len:8 int_sub_imm: dest:i src1:i clob:1 len:8 int_mul_imm: dest:i src1:i clob:1 len:32 int_div_imm: dest:a src1:i clob:d len:32 int_div_un_imm: dest:a src1:i clob:d len:32 int_rem_un_imm: dest:d src1:i clob:a len:32 int_and_imm: dest:i src1:i clob:1 len:8 int_or_imm: dest:i src1:i clob:1 len:8 int_xor_imm: dest:i src1:i clob:1 len:8 int_shl_imm: dest:i src1:i clob:1 len:8 int_shr_imm: dest:i src1:i clob:1 len:8 int_shr_un_imm: dest:i src1:i clob:1 len:8 int_min: dest:i src1:i src2:i len:16 clob:1 int_max: dest:i src1:i src2:i len:16 clob:1 int_min_un: dest:i src1:i src2:i len:16 clob:1 int_max_un: dest:i src1:i src2:i len:16 clob:1 int_neg: dest:i src1:i clob:1 len:4 int_not: dest:i src1:i clob:1 len:4 int_conv_to_r4: dest:f src1:i len:15 int_conv_to_r8: dest:f src1:i len:9 int_ceq: dest:c len:8 int_cgt: dest:c len:8 int_cgt_un: dest:c len:8 int_clt: dest:c len:8 int_clt_un: dest:c len:8 int_cneq: dest:c len:8 int_cge: dest:c len:8 int_cle: dest:c len:8 int_cge_un: dest:c len:8 int_cle_un: dest:c len:8 int_beq: len:8 int_bne_un: len:8 int_blt: len:8 int_blt_un: len:8 int_bgt: len:8 int_bgt_un: len:8 int_bge: len:8 int_bge_un: len:8 int_ble: len:8 int_ble_un: len:8 card_table_wbarrier: src1:a src2:i clob:d len:56 relaxed_nop: len:2 hard_nop: len:1 # Linear IR opcodes nop: len:0 dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_i8const: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 long_ceq: dest:c len:64 long_cgt: dest:c len:64 long_cgt_un: dest:c len:64 long_clt: dest:c len:64 long_clt_un: dest:c len:64 int_conv_to_i1: dest:i src1:i len:4 int_conv_to_i2: dest:i src1:i len:4 int_conv_to_i4: dest:i src1:i len:3 int_conv_to_i8: dest:i src1:i len:3 int_conv_to_u4: dest:i src1:i len:3 int_conv_to_u8: dest:i src1:i len:3 int_conv_to_u: dest:i src1:i len:4 int_conv_to_u2: dest:i src1:i len:4 int_conv_to_u1: dest:i src1:i len:4 int_conv_to_i: dest:i src1:i len:4 cond_exc_ieq: len:8 cond_exc_ine_un: len:8 cond_exc_ilt: len:8 cond_exc_ilt_un: len:8 cond_exc_igt: len:8 cond_exc_igt_un: len:8 cond_exc_ige: len:8 cond_exc_ige_un: len:8 cond_exc_ile: len:8 cond_exc_ile_un: len:8 cond_exc_ino: len:8 cond_exc_inc: len:8 x86_compare_membase8_imm: src1:b len:9 jump_table: dest:i len:18 cmov_ieq: dest:i src1:i src2:i len:16 clob:1 cmov_ige: dest:i src1:i src2:i len:16 clob:1 cmov_igt: dest:i src1:i src2:i len:16 clob:1 cmov_ile: dest:i src1:i src2:i len:16 clob:1 cmov_ilt: dest:i src1:i src2:i len:16 clob:1 cmov_ine_un: dest:i src1:i src2:i len:16 clob:1 cmov_ige_un: dest:i src1:i src2:i len:16 clob:1 cmov_igt_un: dest:i src1:i src2:i len:16 clob:1 cmov_ile_un: dest:i src1:i src2:i len:16 clob:1 cmov_ilt_un: dest:i src1:i src2:i len:16 clob:1 cmov_leq: dest:i src1:i src2:i len:16 clob:1 cmov_lge: dest:i src1:i src2:i 
len:16 clob:1 cmov_lgt: dest:i src1:i src2:i len:16 clob:1 cmov_lle: dest:i src1:i src2:i len:16 clob:1 cmov_llt: dest:i src1:i src2:i len:16 clob:1 cmov_lne_un: dest:i src1:i src2:i len:16 clob:1 cmov_lge_un: dest:i src1:i src2:i len:16 clob:1 cmov_lgt_un: dest:i src1:i src2:i len:16 clob:1 cmov_lle_un: dest:i src1:i src2:i len:16 clob:1 cmov_llt_un: dest:i src1:i src2:i len:16 clob:1 long_add_imm: dest:i src1:i clob:1 len:12 long_sub_imm: dest:i src1:i clob:1 len:12 long_and_imm: dest:i src1:i clob:1 len:12 long_or_imm: dest:i src1:i clob:1 len:12 long_xor_imm: dest:i src1:i clob:1 len:12 lcompare_imm: src1:i len:13 amd64_compare_membase_reg: src1:b src2:i len:9 amd64_compare_membase_imm: src1:b len:14 amd64_compare_reg_membase: src1:i src2:b len:9 amd64_add_reg_membase: dest:i src1:i src2:b clob:1 len:14 amd64_sub_reg_membase: dest:i src1:i src2:b clob:1 len:14 amd64_and_reg_membase: dest:i src1:i src2:b clob:1 len:14 amd64_or_reg_membase: dest:i src1:i src2:b clob:1 len:14 amd64_xor_reg_membase: dest:i src1:i src2:b clob:1 len:14 amd64_add_membase_imm: src1:b len:16 amd64_sub_membase_imm: src1:b len:16 amd64_and_membase_imm: src1:b len:13 amd64_or_membase_imm: src1:b len:13 amd64_xor_membase_imm: src1:b len:13 x86_and_membase_imm: src1:b len:12 x86_or_membase_imm: src1:b len:12 x86_xor_membase_imm: src1:b len:12 x86_add_membase_reg: src1:b src2:i len:12 x86_sub_membase_reg: src1:b src2:i len:12 x86_and_membase_reg: src1:b src2:i len:12 x86_or_membase_reg: src1:b src2:i len:12 x86_xor_membase_reg: src1:b src2:i len:12 x86_mul_membase_reg: src1:b src2:i len:14 amd64_add_membase_reg: src1:b src2:i len:13 amd64_sub_membase_reg: src1:b src2:i len:13 amd64_and_membase_reg: src1:b src2:i len:13 amd64_or_membase_reg: src1:b src2:i len:13 amd64_xor_membase_reg: src1:b src2:i len:13 amd64_mul_membase_reg: src1:b src2:i len:15 float_conv_to_r4: dest:f src1:f len:17 vcall2: len:64 clob:c vcall2_reg: src1:i len:64 clob:c vcall2_membase: src1:b len:64 clob:c dyn_call: src1:i src2:i len:192 clob:c localloc_imm: dest:i len:120 load_mem: dest:i len:16 loadi8_mem: dest:i len:16 loadi4_mem: dest:i len:16 loadu1_mem: dest:i len:16 loadu2_mem: dest:i len:16 #SIMD addps: dest:x src1:x src2:x len:4 clob:1 divps: dest:x src1:x src2:x len:4 clob:1 mulps: dest:x src1:x src2:x len:4 clob:1 subps: dest:x src1:x src2:x len:4 clob:1 maxps: dest:x src1:x src2:x len:4 clob:1 minps: dest:x src1:x src2:x len:4 clob:1 compps: dest:x src1:x src2:x len:5 clob:1 andps: dest:x src1:x src2:x len:4 clob:1 andnps: dest:x src1:x src2:x len:4 clob:1 orps: dest:x src1:x src2:x len:4 clob:1 xorps: dest:x src1:x src2:x len:4 clob:1 haddps: dest:x src1:x src2:x len:5 clob:1 hsubps: dest:x src1:x src2:x len:5 clob:1 addsubps: dest:x src1:x src2:x len:5 clob:1 dupps_low: dest:x src1:x len:5 dupps_high: dest:x src1:x len:5 addpd: dest:x src1:x src2:x len:5 clob:1 divpd: dest:x src1:x src2:x len:5 clob:1 mulpd: dest:x src1:x src2:x len:5 clob:1 subpd: dest:x src1:x src2:x len:5 clob:1 maxpd: dest:x src1:x src2:x len:5 clob:1 minpd: dest:x src1:x src2:x len:5 clob:1 comppd: dest:x src1:x src2:x len:6 clob:1 andpd: dest:x src1:x src2:x len:5 clob:1 andnpd: dest:x src1:x src2:x len:5 clob:1 orpd: dest:x src1:x src2:x len:5 clob:1 xorpd: dest:x src1:x src2:x len:5 clob:1 sqrtpd: dest:x src1:x len:5 clob:1 haddpd: dest:x src1:x src2:x len:6 clob:1 hsubpd: dest:x src1:x src2:x len:6 clob:1 addsubpd: dest:x src1:x src2:x len:6 clob:1 duppd: dest:x src1:x len:6 pand: dest:x src1:x src2:x len:5 clob:1 pandn: dest:x src1:x src2:x len:5 clob:1 
por: dest:x src1:x src2:x len:5 clob:1 pxor: dest:x src1:x src2:x len:5 clob:1 sqrtps: dest:x src1:x len:5 rsqrtps: dest:x src1:x len:5 rcpps: dest:x src1:x len:5 pshuflew_high: dest:x src1:x len:6 pshuflew_low: dest:x src1:x len:6 pshufled: dest:x src1:x len:6 shufps: dest:x src1:x src2:x len:5 clob:1 shufpd: dest:x src1:x src2:x len:6 clob:1 extract_mask: dest:i src1:x len:6 paddb: dest:x src1:x src2:x len:5 clob:1 paddw: dest:x src1:x src2:x len:5 clob:1 paddd: dest:x src1:x src2:x len:5 clob:1 paddq: dest:x src1:x src2:x len:5 clob:1 psubb: dest:x src1:x src2:x len:5 clob:1 psubw: dest:x src1:x src2:x len:5 clob:1 psubd: dest:x src1:x src2:x len:5 clob:1 psubq: dest:x src1:x src2:x len:5 clob:1 pmaxb_un: dest:x src1:x src2:x len:5 clob:1 pmaxw_un: dest:x src1:x src2:x len:6 clob:1 pmaxd_un: dest:x src1:x src2:x len:6 clob:1 pmaxb: dest:x src1:x src2:x len:6 clob:1 pmaxw: dest:x src1:x src2:x len:5 clob:1 pmaxd: dest:x src1:x src2:x len:6 clob:1 pavgb_un: dest:x src1:x src2:x len:5 clob:1 pavgw_un: dest:x src1:x src2:x len:5 clob:1 pminb_un: dest:x src1:x src2:x len:5 clob:1 pminw_un: dest:x src1:x src2:x len:6 clob:1 pmind_un: dest:x src1:x src2:x len:6 clob:1 pminb: dest:x src1:x src2:x len:6 clob:1 pminw: dest:x src1:x src2:x len:5 clob:1 pmind: dest:x src1:x src2:x len:6 clob:1 pcmpeqb: dest:x src1:x src2:x len:5 clob:1 pcmpeqw: dest:x src1:x src2:x len:5 clob:1 pcmpeqd: dest:x src1:x src2:x len:5 clob:1 pcmpeqq: dest:x src1:x src2:x len:6 clob:1 pcmpgtb: dest:x src1:x src2:x len:5 clob:1 pcmpgtw: dest:x src1:x src2:x len:5 clob:1 pcmpgtd: dest:x src1:x src2:x len:5 clob:1 pcmpgtq: dest:x src1:x src2:x len:6 clob:1 psum_abs_diff: dest:x src1:x src2:x len:5 clob:1 unpack_lowb: dest:x src1:x src2:x len:5 clob:1 unpack_loww: dest:x src1:x src2:x len:5 clob:1 unpack_lowd: dest:x src1:x src2:x len:5 clob:1 unpack_lowq: dest:x src1:x src2:x len:5 clob:1 unpack_lowps: dest:x src1:x src2:x len:5 clob:1 unpack_lowpd: dest:x src1:x src2:x len:5 clob:1 unpack_highb: dest:x src1:x src2:x len:5 clob:1 unpack_highw: dest:x src1:x src2:x len:5 clob:1 unpack_highd: dest:x src1:x src2:x len:5 clob:1 unpack_highq: dest:x src1:x src2:x len:5 clob:1 unpack_highps: dest:x src1:x src2:x len:5 clob:1 unpack_highpd: dest:x src1:x src2:x len:5 clob:1 packw: dest:x src1:x src2:x len:5 clob:1 packd: dest:x src1:x src2:x len:5 clob:1 packw_un: dest:x src1:x src2:x len:5 clob:1 packd_un: dest:x src1:x src2:x len:6 clob:1 paddb_sat: dest:x src1:x src2:x len:5 clob:1 paddb_sat_un: dest:x src1:x src2:x len:5 clob:1 paddw_sat: dest:x src1:x src2:x len:5 clob:1 paddw_sat_un: dest:x src1:x src2:x len:5 clob:1 psubb_sat: dest:x src1:x src2:x len:5 clob:1 psubb_sat_un: dest:x src1:x src2:x len:5 clob:1 psubw_sat: dest:x src1:x src2:x len:5 clob:1 psubw_sat_un: dest:x src1:x src2:x len:5 clob:1 pmulw: dest:x src1:x src2:x len:5 clob:1 pmuld: dest:x src1:x src2:x len:6 clob:1 pmulq: dest:x src1:x src2:x len:5 clob:1 pmulw_high_un: dest:x src1:x src2:x len:5 clob:1 pmulw_high: dest:x src1:x src2:x len:5 clob:1 pshrw: dest:x src1:x len:6 clob:1 pshrw_reg: dest:x src1:x src2:x len:5 clob:1 psarw: dest:x src1:x len:6 clob:1 psarw_reg: dest:x src1:x src2:x len:5 clob:1 pshlw: dest:x src1:x len:6 clob:1 pshlw_reg: dest:x src1:x src2:x len:5 clob:1 pshrd: dest:x src1:x len:6 clob:1 pshrd_reg: dest:x src1:x src2:x len:5 clob:1 psard: dest:x src1:x len:6 clob:1 psard_reg: dest:x src1:x src2:x len:5 clob:1 pshld: dest:x src1:x len:6 clob:1 pshld_reg: dest:x src1:x src2:x len:5 clob:1 pshrq: dest:x src1:x len:6 clob:1 pshrq_reg: 
dest:x src1:x src2:x len:5 clob:1 pshlq: dest:x src1:x len:6 clob:1 pshlq_reg: dest:x src1:x src2:x len:5 clob:1 cvtdq2pd: dest:x src1:x len:5 clob:1 cvtdq2ps: dest:x src1:x len:4 clob:1 cvtpd2dq: dest:x src1:x len:5 clob:1 cvtpd2ps: dest:x src1:x len:5 clob:1 cvtps2dq: dest:x src1:x len:5 clob:1 cvtps2pd: dest:x src1:x len:4 clob:1 cvttpd2dq: dest:x src1:x len:5 clob:1 cvttps2dq: dest:x src1:x len:5 clob:1 xmove: dest:x src1:x len:5 xzero: dest:x len:5 xones: dest:x len:5 iconv_to_x: dest:x src1:i len:5 extract_i4: dest:i src1:x len:5 extract_i8: dest:i src1:x len:9 extract_i2: dest:i src1:x len:13 extract_i1: dest:i src1:x len:13 extract_r8: dest:f src1:x len:5 iconv_to_r4_raw: dest:f src1:i len:10 insert_i2: dest:x src1:x src2:i len:6 clob:1 extractx_u2: dest:i src1:x len:6 insertx_u1_slow: dest:x src1:i src2:i len:18 clob:x insertx_i4_slow: dest:x src1:x src2:i len:16 clob:x insertx_i8_slow: dest:x src1:x src2:i len:13 insertx_r4_slow: dest:x src1:x src2:f len:24 insertx_r8_slow: dest:x src1:x src2:f len:24 loadx_membase: dest:x src1:b len:9 storex_membase: dest:b src1:x len:9 storex_membase_reg: dest:b src1:x len:9 loadx_aligned_membase: dest:x src1:b len:7 storex_aligned_membase_reg: dest:b src1:x len:7 storex_nta_membase_reg: dest:b src1:x len:7 fconv_to_r8_x: dest:x src1:f len:4 xconv_r8_to_i4: dest:y src1:x len:7 prefetch_membase: src1:b len:4 expand_i2: dest:x src1:i len:18 expand_i4: dest:x src1:i len:11 expand_i8: dest:x src1:i len:11 expand_r4: dest:x src1:f len:16 expand_r8: dest:x src1:f len:13 roundp: dest:x src1:x len:10 liverange_start: len:0 liverange_end: len:0 gc_liveness_def: len:0 gc_liveness_use: len:0 gc_spill_slot_liveness_def: len:0 gc_param_slot_liveness_def: len:0 generic_class_init: src1:A len:32 clob:c get_last_error: dest:i len:32 fill_prof_call_ctx: src1:i len:128 lzcnt32: dest:i src1:i len:16 lzcnt64: dest:i src1:i len:16 popcnt32: dest:i src1:i len:16 popcnt64: dest:i src1:i len:16
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/cpu-arm.md
# Copyright 2003-2011 Novell, Inc (http://www.novell.com) # Copyright 2011 Xamarin, Inc (http://www.xamarin.com) # Licensed under the MIT license. See LICENSE file in the project root for full license information. # arm cpu description file # this file is read by genmdesc to produce a table with all the relevant information # about the cpu instructions that may be used by the register allocator, the scheduler # and other parts of the arch-dependent part of mini. # # An opcode name is followed by a colon and optional specifiers. # A specifier has a name, a colon and a value. Specifiers are separated by white space. # Here is a description of the specifiers valid for this file and their possible values. # # dest:register describes the destination register of an instruction # src1:register describes the first source register of an instruction # src2:register describes the second source register of an instruction # # register may have the following values: # i integer register # a r0 register (first argument/result reg) # b base register (used in address references) # f floating point register # g floating point register returned in r0:r1 for soft-float mode # # len:number describes the maximum length in bytes of the instruction # number is a positive integer # # cost:number describes how many cycles are needed to complete the instruction (unused) # # clob:spec describes if the instruction clobbers registers or has special needs # # spec can be one of the following characters: # c clobbers caller-save registers # r 'reserves' the destination register until a later instruction unreserves it # used mostly to set output registers in function calls # # flags:spec describes if the instruction uses or sets the flags (unused) # # spec can be one of the following chars: # s sets the flags # u uses the flags # m uses and modifies the flags # # res:spec describes what units are used in the processor (unused) # # delay: describes delay slots (unused) # # the required specifiers are: len, clob (if registers are clobbered), the registers # specifiers if the registers are actually used, flags (when scheduling is implemented). # # See the code in mini-x86.c for more details on how the specifiers are used. # nop: len:4 relaxed_nop: len:4 break: len:4 br: len:16 switch: src1:i len:12 # See the comment in resume_from_signal_handler, we can't copy the fp regs from sigctx to MonoContext on linux, # since the corresponding sigctx structures are not well defined. 
seq_point: len:52 clob:c il_seq_point: len:0 throw: src1:i len:24 rethrow: src1:i len:20 start_handler: len:20 endfinally: len:32 call_handler: len:16 clob:c endfilter: src1:i len:16 get_ex_obj: dest:i len:16 ckfinite: dest:f src1:f len:112 ceq: dest:i len:12 cgt: dest:i len:12 cgt_un: dest:i len:12 clt: dest:i len:12 clt_un: dest:i len:12 localloc: dest:i src1:i len:60 compare: src1:i src2:i len:4 compare_imm: src1:i len:12 fcompare: src1:f src2:f len:12 rcompare: src1:f src2:f len:12 arglist: src1:i len:12 setlret: src1:i src2:i len:12 check_this: src1:b len:4 call: dest:a clob:c len:20 call_reg: dest:a src1:i len:8 clob:c call_membase: dest:a src1:b len:30 clob:c voidcall: len:20 clob:c voidcall_reg: src1:i len:8 clob:c voidcall_membase: src1:b len:24 clob:c fcall: dest:g len:28 clob:c fcall_reg: dest:g src1:i len:16 clob:c fcall_membase: dest:g src1:b len:30 clob:c rcall: dest:g len:28 clob:c rcall_reg: dest:g src1:i len:16 clob:c rcall_membase: dest:g src1:b len:30 clob:c lcall: dest:l len:20 clob:c lcall_reg: dest:l src1:i len:8 clob:c lcall_membase: dest:l src1:b len:24 clob:c vcall: len:64 clob:c vcall_reg: src1:i len:64 clob:c vcall_membase: src1:b len:70 clob:c tailcall: len:255 clob:c # FIXME len tailcall_membase: src1:b len:255 clob:c # FIXME len tailcall_reg: src1:b len:255 clob:c # FIXME len # tailcall_parameter models the size of moving one parameter, # so that the required size of a branch around a tailcall can # be accurately estimated; something like: # void f1(volatile long *a) # { # a[large] = a[another large] # } # # In current implementation with 4K limit this is typically # two full instructions, however raising the limit some # can lead to two instructions and two thumb instructions. # FIXME A fixed size sequence to move parameters would moot this. 
tailcall_parameter: len:12 iconst: dest:i len:16 r4const: dest:f len:24 r8const: dest:f len:20 label: len:0 store_membase_imm: dest:b len:20 store_membase_reg: dest:b src1:i len:20 storei1_membase_imm: dest:b len:20 storei1_membase_reg: dest:b src1:i len:12 storei2_membase_imm: dest:b len:20 storei2_membase_reg: dest:b src1:i len:12 storei4_membase_imm: dest:b len:20 storei4_membase_reg: dest:b src1:i len:20 storei8_membase_imm: dest:b storei8_membase_reg: dest:b src1:i storer4_membase_reg: dest:b src1:f len:60 storer8_membase_reg: dest:b src1:f len:24 store_memindex: dest:b src1:i src2:i len:4 storei1_memindex: dest:b src1:i src2:i len:4 storei2_memindex: dest:b src1:i src2:i len:4 storei4_memindex: dest:b src1:i src2:i len:4 load_membase: dest:i src1:b len:20 loadi1_membase: dest:i src1:b len:4 loadu1_membase: dest:i src1:b len:4 loadi2_membase: dest:i src1:b len:4 loadu2_membase: dest:i src1:b len:4 loadi4_membase: dest:i src1:b len:4 loadu4_membase: dest:i src1:b len:4 loadi8_membase: dest:i src1:b loadr4_membase: dest:f src1:b len:56 loadr8_membase: dest:f src1:b len:24 load_memindex: dest:i src1:b src2:i len:4 loadi1_memindex: dest:i src1:b src2:i len:4 loadu1_memindex: dest:i src1:b src2:i len:4 loadi2_memindex: dest:i src1:b src2:i len:4 loadu2_memindex: dest:i src1:b src2:i len:4 loadi4_memindex: dest:i src1:b src2:i len:4 loadu4_memindex: dest:i src1:b src2:i len:4 loadu4_mem: dest:i len:8 move: dest:i src1:i len:4 fmove: dest:f src1:f len:4 move_f_to_i4: dest:i src1:f len:28 move_i4_to_f: dest:f src1:i len:8 add_imm: dest:i src1:i len:12 sub_imm: dest:i src1:i len:12 mul_imm: dest:i src1:i len:12 and_imm: dest:i src1:i len:12 or_imm: dest:i src1:i len:12 xor_imm: dest:i src1:i len:12 shl_imm: dest:i src1:i len:8 shr_imm: dest:i src1:i len:8 shr_un_imm: dest:i src1:i len:8 cond_exc_eq: len:8 cond_exc_ne_un: len:8 cond_exc_lt: len:8 cond_exc_lt_un: len:8 cond_exc_gt: len:8 cond_exc_gt_un: len:8 cond_exc_ge: len:8 cond_exc_ge_un: len:8 cond_exc_le: len:8 cond_exc_le_un: len:8 cond_exc_ov: len:12 cond_exc_no: len:8 cond_exc_c: len:12 cond_exc_nc: len:8 #float_beq: src1:f src2:f len:20 #float_bne_un: src1:f src2:f len:20 #float_blt: src1:f src2:f len:20 #float_blt_un: src1:f src2:f len:20 #float_bgt: src1:f src2:f len:20 #float_bgt_un: src1:f src2:f len:20 #float_bge: src1:f src2:f len:20 #float_bge_un: src1:f src2:f len:20 #float_ble: src1:f src2:f len:20 #float_ble_un: src1:f src2:f len:20 float_add: dest:f src1:f src2:f len:4 float_sub: dest:f src1:f src2:f len:4 float_mul: dest:f src1:f src2:f len:4 float_div: dest:f src1:f src2:f len:4 float_div_un: dest:f src1:f src2:f len:4 float_rem: dest:f src1:f src2:f len:16 float_rem_un: dest:f src1:f src2:f len:16 float_neg: dest:f src1:f len:4 float_not: dest:f src1:f len:4 float_conv_to_i1: dest:i src1:f len:88 float_conv_to_i2: dest:i src1:f len:88 float_conv_to_i4: dest:i src1:f len:88 float_conv_to_i8: dest:l src1:f len:88 float_conv_to_r4: dest:f src1:f len:8 float_conv_to_u4: dest:i src1:f len:88 float_conv_to_u8: dest:l src1:f len:88 float_conv_to_u2: dest:i src1:f len:88 float_conv_to_u1: dest:i src1:f len:88 float_conv_to_i: dest:i src1:f len:40 float_ceq: dest:i src1:f src2:f len:16 float_cgt: dest:i src1:f src2:f len:16 float_cgt_un: dest:i src1:f src2:f len:20 float_clt: dest:i src1:f src2:f len:16 float_clt_un: dest:i src1:f src2:f len:20 float_cneq: dest:y src1:f src2:f len:20 float_cge: dest:y src1:f src2:f len:20 float_cle: dest:y src1:f src2:f len:20 # R4 opcodes rmove: dest:f src1:f len:4 r4_conv_to_i1: dest:i src1:f 
len:88 r4_conv_to_i2: dest:i src1:f len:88 r4_conv_to_i4: dest:i src1:f len:88 r4_conv_to_i: dest:i src1:f len:88 r4_conv_to_u1: dest:i src1:f len:88 r4_conv_to_u2: dest:i src1:f len:88 r4_conv_to_u4: dest:i src1:f len:88 r4_conv_to_r4: dest:f src1:f len:16 r4_conv_to_r8: dest:f src1:f len:16 r4_add: dest:f src1:f src2:f len:4 r4_sub: dest:f src1:f src2:f len:4 r4_mul: dest:f src1:f src2:f len:4 r4_div: dest:f src1:f src2:f len:4 r4_rem: dest:f src1:f src2:f len:16 r4_neg: dest:f src1:f len:4 r4_ceq: dest:i src1:f src2:f len:16 r4_cgt: dest:i src1:f src2:f len:16 r4_cgt_un: dest:i src1:f src2:f len:20 r4_clt: dest:i src1:f src2:f len:16 r4_clt_un: dest:i src1:f src2:f len:20 r4_cneq: dest:y src1:f src2:f len:20 r4_cge: dest:y src1:f src2:f len:20 r4_cle: dest:y src1:f src2:f len:20 setfret: src1:f len:12 aotconst: dest:i len:16 objc_get_selector: dest:i len:32 sqrt: dest:f src1:f len:4 adc: dest:i src1:i src2:i len:4 addcc: dest:i src1:i src2:i len:4 subcc: dest:i src1:i src2:i len:4 adc_imm: dest:i src1:i len:12 addcc_imm: dest:i src1:i len:12 subcc_imm: dest:i src1:i len:12 sbb: dest:i src1:i src2:i len:4 sbb_imm: dest:i src1:i len:12 br_reg: src1:i len:8 bigmul: len:8 dest:l src1:i src2:i bigmul_un: len:8 dest:l src1:i src2:i tls_get: len:16 dest:i tls_set: len:16 src1:i clob:c # 32 bit opcodes int_add: dest:i src1:i src2:i len:4 int_sub: dest:i src1:i src2:i len:4 int_mul: dest:i src1:i src2:i len:4 int_div: dest:i src1:i src2:i len:4 int_div_un: dest:i src1:i src2:i len:4 int_rem: dest:i src1:i src2:i len:8 int_rem_un: dest:i src1:i src2:i len:8 int_and: dest:i src1:i src2:i len:4 int_or: dest:i src1:i src2:i len:4 int_xor: dest:i src1:i src2:i len:4 int_shl: dest:i src1:i src2:i len:4 int_shr: dest:i src1:i src2:i len:4 int_shr_un: dest:i src1:i src2:i len:4 int_neg: dest:i src1:i len:4 int_not: dest:i src1:i len:4 int_conv_to_i1: dest:i src1:i len:8 int_conv_to_i2: dest:i src1:i len:8 int_conv_to_i4: dest:i src1:i len:4 int_conv_to_r4: dest:f src1:i len:84 int_conv_to_r8: dest:f src1:i len:84 int_conv_to_u4: dest:i src1:i int_conv_to_r_un: dest:f src1:i len:56 int_conv_to_u2: dest:i src1:i len:8 int_conv_to_u1: dest:i src1:i len:4 int_beq: len:16 int_bge: len:16 int_bgt: len:16 int_ble: len:16 int_blt: len:16 int_bne_un: len:16 int_bge_un: len:16 int_bgt_un: len:16 int_ble_un: len:16 int_blt_un: len:16 int_add_ovf: dest:i src1:i src2:i len:16 int_add_ovf_un: dest:i src1:i src2:i len:16 int_mul_ovf: dest:i src1:i src2:i len:16 int_mul_ovf_un: dest:i src1:i src2:i len:16 int_sub_ovf: dest:i src1:i src2:i len:16 int_sub_ovf_un: dest:i src1:i src2:i len:16 add_ovf_carry: dest:i src1:i src2:i len:16 sub_ovf_carry: dest:i src1:i src2:i len:16 add_ovf_un_carry: dest:i src1:i src2:i len:16 sub_ovf_un_carry: dest:i src1:i src2:i len:16 arm_rsbs_imm: dest:i src1:i len:4 arm_rsc_imm: dest:i src1:i len:4 # Linear IR opcodes dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 int_adc: dest:i src1:i src2:i len:4 int_addcc: dest:i src1:i src2:i len:4 int_subcc: dest:i src1:i src2:i len:4 int_sbb: dest:i src1:i src2:i len:4 int_adc_imm: dest:i src1:i len:12 int_sbb_imm: dest:i src1:i len:12 int_add_imm: dest:i src1:i len:12 int_sub_imm: dest:i src1:i len:12 int_mul_imm: dest:i src1:i len:12 int_div_imm: dest:i src1:i len:20 int_div_un_imm: dest:i src1:i len:12 int_rem_imm: dest:i src1:i len:28 int_rem_un_imm: dest:i src1:i len:16 int_and_imm: dest:i src1:i len:12 int_or_imm: dest:i src1:i len:12 
int_xor_imm: dest:i src1:i len:12 int_shl_imm: dest:i src1:i len:8 int_shr_imm: dest:i src1:i len:8 int_shr_un_imm: dest:i src1:i len:8 int_ceq: dest:i len:12 int_cgt: dest:i len:12 int_cgt_un: dest:i len:12 int_clt: dest:i len:12 int_clt_un: dest:i len:12 int_cneq: dest:i len:12 int_cge: dest:i len:12 int_cle: dest:i len:12 int_cge_un: dest:i len:12 int_cle_un: dest:i len:12 cond_exc_ieq: len:16 cond_exc_ine_un: len:16 cond_exc_ilt: len:16 cond_exc_ilt_un: len:16 cond_exc_igt: len:16 cond_exc_igt_un: len:16 cond_exc_ige: len:16 cond_exc_ige_un: len:16 cond_exc_ile: len:16 cond_exc_ile_un: len:16 cond_exc_iov: len:20 cond_exc_ino: len:16 cond_exc_ic: len:20 cond_exc_inc: len:16 icompare: src1:i src2:i len:4 icompare_imm: src1:i len:12 long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:36 vcall2: len:64 clob:c vcall2_reg: src1:i len:64 clob:c vcall2_membase: src1:b len:64 clob:c dyn_call: src1:i src2:i len:252 clob:c # This is different from the original JIT opcodes float_beq: len:32 float_bne_un: len:32 float_blt: len:32 float_blt_un: len:32 float_bgt: len:32 float_bgt_un: len:32 float_bge: len:32 float_bge_un: len:32 float_ble: len:32 float_ble_un: len:32 liverange_start: len:0 liverange_end: len:0 gc_liveness_def: len:0 gc_liveness_use: len:0 gc_spill_slot_liveness_def: len:0 gc_param_slot_liveness_def: len:0 gc_safe_point: clob:c src1:i len:40 atomic_add_i4: dest:i src1:i src2:i len:64 atomic_exchange_i4: dest:i src1:i src2:i len:64 atomic_cas_i4: dest:i src1:i src2:i src3:i len:64 memory_barrier: len:8 clob:a atomic_load_i1: dest:i src1:b len:28 atomic_load_u1: dest:i src1:b len:28 atomic_load_i2: dest:i src1:b len:28 atomic_load_u2: dest:i src1:b len:28 atomic_load_i4: dest:i src1:b len:28 atomic_load_u4: dest:i src1:b len:28 atomic_load_r4: dest:f src1:b len:80 atomic_load_r8: dest:f src1:b len:32 atomic_store_i1: dest:b src1:i len:28 atomic_store_u1: dest:b src1:i len:28 atomic_store_i2: dest:b src1:i len:28 atomic_store_u2: dest:b src1:i len:28 atomic_store_i4: dest:b src1:i len:28 atomic_store_u4: dest:b src1:i len:28 atomic_store_r4: dest:b src1:f len:80 atomic_store_r8: dest:b src1:f len:32 generic_class_init: src1:a len:44 clob:c fill_prof_call_ctx: src1:i len:128
# Copyright 2003-2011 Novell, Inc (http://www.novell.com) # Copyright 2011 Xamarin, Inc (http://www.xamarin.com) # Licensed under the MIT license. See LICENSE file in the project root for full license information. # arm cpu description file # this file is read by genmdesc to produce a table with all the relevant information # about the cpu instructions that may be used by the register allocator, the scheduler # and other parts of the arch-dependent part of mini. # # An opcode name is followed by a colon and optional specifiers. # A specifier has a name, a colon and a value. Specifiers are separated by white space. # Here is a description of the specifiers valid for this file and their possible values. # # dest:register describes the destination register of an instruction # src1:register describes the first source register of an instruction # src2:register describes the second source register of an instruction # # register may have the following values: # i integer register # a r0 register (first argument/result reg) # b base register (used in address references) # f floating point register # g floating point register returned in r0:r1 for soft-float mode # # len:number describes the maximum length in bytes of the instruction # number is a positive integer # # cost:number describes how many cycles are needed to complete the instruction (unused) # # clob:spec describes if the instruction clobbers registers or has special needs # # spec can be one of the following characters: # c clobbers caller-save registers # r 'reserves' the destination register until a later instruction unreserves it # used mostly to set output registers in function calls # # flags:spec describes if the instruction uses or sets the flags (unused) # # spec can be one of the following chars: # s sets the flags # u uses the flags # m uses and modifies the flags # # res:spec describes what units are used in the processor (unused) # # delay: describes delay slots (unused) # # the required specifiers are: len, clob (if registers are clobbered), the registers # specifiers if the registers are actually used, flags (when scheduling is implemented). # # See the code in mini-x86.c for more details on how the specifiers are used. # nop: len:4 relaxed_nop: len:4 break: len:4 br: len:16 switch: src1:i len:12 # See the comment in resume_from_signal_handler, we can't copy the fp regs from sigctx to MonoContext on linux, # since the corresponding sigctx structures are not well defined. 
seq_point: len:52 clob:c il_seq_point: len:0 throw: src1:i len:24 rethrow: src1:i len:20 start_handler: len:20 endfinally: len:32 call_handler: len:16 clob:c endfilter: src1:i len:16 get_ex_obj: dest:i len:16 ckfinite: dest:f src1:f len:112 ceq: dest:i len:12 cgt: dest:i len:12 cgt_un: dest:i len:12 clt: dest:i len:12 clt_un: dest:i len:12 localloc: dest:i src1:i len:60 compare: src1:i src2:i len:4 compare_imm: src1:i len:12 fcompare: src1:f src2:f len:12 rcompare: src1:f src2:f len:12 arglist: src1:i len:12 setlret: src1:i src2:i len:12 check_this: src1:b len:4 call: dest:a clob:c len:20 call_reg: dest:a src1:i len:8 clob:c call_membase: dest:a src1:b len:30 clob:c voidcall: len:20 clob:c voidcall_reg: src1:i len:8 clob:c voidcall_membase: src1:b len:24 clob:c fcall: dest:g len:28 clob:c fcall_reg: dest:g src1:i len:16 clob:c fcall_membase: dest:g src1:b len:30 clob:c rcall: dest:g len:28 clob:c rcall_reg: dest:g src1:i len:16 clob:c rcall_membase: dest:g src1:b len:30 clob:c lcall: dest:l len:20 clob:c lcall_reg: dest:l src1:i len:8 clob:c lcall_membase: dest:l src1:b len:24 clob:c vcall: len:64 clob:c vcall_reg: src1:i len:64 clob:c vcall_membase: src1:b len:70 clob:c tailcall: len:255 clob:c # FIXME len tailcall_membase: src1:b len:255 clob:c # FIXME len tailcall_reg: src1:b len:255 clob:c # FIXME len # tailcall_parameter models the size of moving one parameter, # so that the required size of a branch around a tailcall can # be accurately estimated; something like: # void f1(volatile long *a) # { # a[large] = a[another large] # } # # In current implementation with 4K limit this is typically # two full instructions, however raising the limit some # can lead to two instructions and two thumb instructions. # FIXME A fixed size sequence to move parameters would moot this. 
tailcall_parameter: len:12 iconst: dest:i len:16 r4const: dest:f len:24 r8const: dest:f len:20 label: len:0 store_membase_imm: dest:b len:20 store_membase_reg: dest:b src1:i len:20 storei1_membase_imm: dest:b len:20 storei1_membase_reg: dest:b src1:i len:12 storei2_membase_imm: dest:b len:20 storei2_membase_reg: dest:b src1:i len:12 storei4_membase_imm: dest:b len:20 storei4_membase_reg: dest:b src1:i len:20 storei8_membase_imm: dest:b storei8_membase_reg: dest:b src1:i storer4_membase_reg: dest:b src1:f len:60 storer8_membase_reg: dest:b src1:f len:24 store_memindex: dest:b src1:i src2:i len:4 storei1_memindex: dest:b src1:i src2:i len:4 storei2_memindex: dest:b src1:i src2:i len:4 storei4_memindex: dest:b src1:i src2:i len:4 load_membase: dest:i src1:b len:20 loadi1_membase: dest:i src1:b len:4 loadu1_membase: dest:i src1:b len:4 loadi2_membase: dest:i src1:b len:4 loadu2_membase: dest:i src1:b len:4 loadi4_membase: dest:i src1:b len:4 loadu4_membase: dest:i src1:b len:4 loadi8_membase: dest:i src1:b loadr4_membase: dest:f src1:b len:56 loadr8_membase: dest:f src1:b len:24 load_memindex: dest:i src1:b src2:i len:4 loadi1_memindex: dest:i src1:b src2:i len:4 loadu1_memindex: dest:i src1:b src2:i len:4 loadi2_memindex: dest:i src1:b src2:i len:4 loadu2_memindex: dest:i src1:b src2:i len:4 loadi4_memindex: dest:i src1:b src2:i len:4 loadu4_memindex: dest:i src1:b src2:i len:4 loadu4_mem: dest:i len:8 move: dest:i src1:i len:4 fmove: dest:f src1:f len:4 move_f_to_i4: dest:i src1:f len:28 move_i4_to_f: dest:f src1:i len:8 add_imm: dest:i src1:i len:12 sub_imm: dest:i src1:i len:12 mul_imm: dest:i src1:i len:12 and_imm: dest:i src1:i len:12 or_imm: dest:i src1:i len:12 xor_imm: dest:i src1:i len:12 shl_imm: dest:i src1:i len:8 shr_imm: dest:i src1:i len:8 shr_un_imm: dest:i src1:i len:8 cond_exc_eq: len:8 cond_exc_ne_un: len:8 cond_exc_lt: len:8 cond_exc_lt_un: len:8 cond_exc_gt: len:8 cond_exc_gt_un: len:8 cond_exc_ge: len:8 cond_exc_ge_un: len:8 cond_exc_le: len:8 cond_exc_le_un: len:8 cond_exc_ov: len:12 cond_exc_no: len:8 cond_exc_c: len:12 cond_exc_nc: len:8 #float_beq: src1:f src2:f len:20 #float_bne_un: src1:f src2:f len:20 #float_blt: src1:f src2:f len:20 #float_blt_un: src1:f src2:f len:20 #float_bgt: src1:f src2:f len:20 #float_bgt_un: src1:f src2:f len:20 #float_bge: src1:f src2:f len:20 #float_bge_un: src1:f src2:f len:20 #float_ble: src1:f src2:f len:20 #float_ble_un: src1:f src2:f len:20 float_add: dest:f src1:f src2:f len:4 float_sub: dest:f src1:f src2:f len:4 float_mul: dest:f src1:f src2:f len:4 float_div: dest:f src1:f src2:f len:4 float_div_un: dest:f src1:f src2:f len:4 float_rem: dest:f src1:f src2:f len:16 float_rem_un: dest:f src1:f src2:f len:16 float_neg: dest:f src1:f len:4 float_not: dest:f src1:f len:4 float_conv_to_i1: dest:i src1:f len:88 float_conv_to_i2: dest:i src1:f len:88 float_conv_to_i4: dest:i src1:f len:88 float_conv_to_i8: dest:l src1:f len:88 float_conv_to_r4: dest:f src1:f len:8 float_conv_to_u4: dest:i src1:f len:88 float_conv_to_u8: dest:l src1:f len:88 float_conv_to_u2: dest:i src1:f len:88 float_conv_to_u1: dest:i src1:f len:88 float_ceq: dest:i src1:f src2:f len:16 float_cgt: dest:i src1:f src2:f len:16 float_cgt_un: dest:i src1:f src2:f len:20 float_clt: dest:i src1:f src2:f len:16 float_clt_un: dest:i src1:f src2:f len:20 float_cneq: dest:y src1:f src2:f len:20 float_cge: dest:y src1:f src2:f len:20 float_cle: dest:y src1:f src2:f len:20 # R4 opcodes rmove: dest:f src1:f len:4 r4_conv_to_i1: dest:i src1:f len:88 r4_conv_to_i2: dest:i src1:f 
len:88 r4_conv_to_i4: dest:i src1:f len:88 r4_conv_to_u1: dest:i src1:f len:88 r4_conv_to_u2: dest:i src1:f len:88 r4_conv_to_u4: dest:i src1:f len:88 r4_conv_to_r4: dest:f src1:f len:16 r4_conv_to_r8: dest:f src1:f len:16 r4_add: dest:f src1:f src2:f len:4 r4_sub: dest:f src1:f src2:f len:4 r4_mul: dest:f src1:f src2:f len:4 r4_div: dest:f src1:f src2:f len:4 r4_rem: dest:f src1:f src2:f len:16 r4_neg: dest:f src1:f len:4 r4_ceq: dest:i src1:f src2:f len:16 r4_cgt: dest:i src1:f src2:f len:16 r4_cgt_un: dest:i src1:f src2:f len:20 r4_clt: dest:i src1:f src2:f len:16 r4_clt_un: dest:i src1:f src2:f len:20 r4_cneq: dest:y src1:f src2:f len:20 r4_cge: dest:y src1:f src2:f len:20 r4_cle: dest:y src1:f src2:f len:20 setfret: src1:f len:12 aotconst: dest:i len:16 objc_get_selector: dest:i len:32 sqrt: dest:f src1:f len:4 adc: dest:i src1:i src2:i len:4 addcc: dest:i src1:i src2:i len:4 subcc: dest:i src1:i src2:i len:4 adc_imm: dest:i src1:i len:12 addcc_imm: dest:i src1:i len:12 subcc_imm: dest:i src1:i len:12 sbb: dest:i src1:i src2:i len:4 sbb_imm: dest:i src1:i len:12 br_reg: src1:i len:8 bigmul: len:8 dest:l src1:i src2:i bigmul_un: len:8 dest:l src1:i src2:i tls_get: len:16 dest:i tls_set: len:16 src1:i clob:c # 32 bit opcodes int_add: dest:i src1:i src2:i len:4 int_sub: dest:i src1:i src2:i len:4 int_mul: dest:i src1:i src2:i len:4 int_div: dest:i src1:i src2:i len:4 int_div_un: dest:i src1:i src2:i len:4 int_rem: dest:i src1:i src2:i len:8 int_rem_un: dest:i src1:i src2:i len:8 int_and: dest:i src1:i src2:i len:4 int_or: dest:i src1:i src2:i len:4 int_xor: dest:i src1:i src2:i len:4 int_shl: dest:i src1:i src2:i len:4 int_shr: dest:i src1:i src2:i len:4 int_shr_un: dest:i src1:i src2:i len:4 int_neg: dest:i src1:i len:4 int_not: dest:i src1:i len:4 int_conv_to_i1: dest:i src1:i len:8 int_conv_to_i2: dest:i src1:i len:8 int_conv_to_i4: dest:i src1:i len:4 int_conv_to_r4: dest:f src1:i len:84 int_conv_to_r8: dest:f src1:i len:84 int_conv_to_u4: dest:i src1:i int_conv_to_r_un: dest:f src1:i len:56 int_conv_to_u2: dest:i src1:i len:8 int_conv_to_u1: dest:i src1:i len:4 int_beq: len:16 int_bge: len:16 int_bgt: len:16 int_ble: len:16 int_blt: len:16 int_bne_un: len:16 int_bge_un: len:16 int_bgt_un: len:16 int_ble_un: len:16 int_blt_un: len:16 int_add_ovf: dest:i src1:i src2:i len:16 int_add_ovf_un: dest:i src1:i src2:i len:16 int_mul_ovf: dest:i src1:i src2:i len:16 int_mul_ovf_un: dest:i src1:i src2:i len:16 int_sub_ovf: dest:i src1:i src2:i len:16 int_sub_ovf_un: dest:i src1:i src2:i len:16 add_ovf_carry: dest:i src1:i src2:i len:16 sub_ovf_carry: dest:i src1:i src2:i len:16 add_ovf_un_carry: dest:i src1:i src2:i len:16 sub_ovf_un_carry: dest:i src1:i src2:i len:16 arm_rsbs_imm: dest:i src1:i len:4 arm_rsc_imm: dest:i src1:i len:4 # Linear IR opcodes dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 int_adc: dest:i src1:i src2:i len:4 int_addcc: dest:i src1:i src2:i len:4 int_subcc: dest:i src1:i src2:i len:4 int_sbb: dest:i src1:i src2:i len:4 int_adc_imm: dest:i src1:i len:12 int_sbb_imm: dest:i src1:i len:12 int_add_imm: dest:i src1:i len:12 int_sub_imm: dest:i src1:i len:12 int_mul_imm: dest:i src1:i len:12 int_div_imm: dest:i src1:i len:20 int_div_un_imm: dest:i src1:i len:12 int_rem_imm: dest:i src1:i len:28 int_rem_un_imm: dest:i src1:i len:16 int_and_imm: dest:i src1:i len:12 int_or_imm: dest:i src1:i len:12 int_xor_imm: dest:i src1:i len:12 int_shl_imm: dest:i src1:i len:8 
int_shr_imm: dest:i src1:i len:8 int_shr_un_imm: dest:i src1:i len:8 int_ceq: dest:i len:12 int_cgt: dest:i len:12 int_cgt_un: dest:i len:12 int_clt: dest:i len:12 int_clt_un: dest:i len:12 int_cneq: dest:i len:12 int_cge: dest:i len:12 int_cle: dest:i len:12 int_cge_un: dest:i len:12 int_cle_un: dest:i len:12 cond_exc_ieq: len:16 cond_exc_ine_un: len:16 cond_exc_ilt: len:16 cond_exc_ilt_un: len:16 cond_exc_igt: len:16 cond_exc_igt_un: len:16 cond_exc_ige: len:16 cond_exc_ige_un: len:16 cond_exc_ile: len:16 cond_exc_ile_un: len:16 cond_exc_iov: len:20 cond_exc_ino: len:16 cond_exc_ic: len:20 cond_exc_inc: len:16 icompare: src1:i src2:i len:4 icompare_imm: src1:i len:12 long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:36 vcall2: len:64 clob:c vcall2_reg: src1:i len:64 clob:c vcall2_membase: src1:b len:64 clob:c dyn_call: src1:i src2:i len:252 clob:c # This is different from the original JIT opcodes float_beq: len:32 float_bne_un: len:32 float_blt: len:32 float_blt_un: len:32 float_bgt: len:32 float_bgt_un: len:32 float_bge: len:32 float_bge_un: len:32 float_ble: len:32 float_ble_un: len:32 liverange_start: len:0 liverange_end: len:0 gc_liveness_def: len:0 gc_liveness_use: len:0 gc_spill_slot_liveness_def: len:0 gc_param_slot_liveness_def: len:0 gc_safe_point: clob:c src1:i len:40 atomic_add_i4: dest:i src1:i src2:i len:64 atomic_exchange_i4: dest:i src1:i src2:i len:64 atomic_cas_i4: dest:i src1:i src2:i src3:i len:64 memory_barrier: len:8 clob:a atomic_load_i1: dest:i src1:b len:28 atomic_load_u1: dest:i src1:b len:28 atomic_load_i2: dest:i src1:b len:28 atomic_load_u2: dest:i src1:b len:28 atomic_load_i4: dest:i src1:b len:28 atomic_load_u4: dest:i src1:b len:28 atomic_load_r4: dest:f src1:b len:80 atomic_load_r8: dest:f src1:b len:32 atomic_store_i1: dest:b src1:i len:28 atomic_store_u1: dest:b src1:i len:28 atomic_store_i2: dest:b src1:i len:28 atomic_store_u2: dest:b src1:i len:28 atomic_store_i4: dest:b src1:i len:28 atomic_store_u4: dest:b src1:i len:28 atomic_store_r4: dest:b src1:f len:80 atomic_store_r8: dest:b src1:f len:32 generic_class_init: src1:a len:44 clob:c fill_prof_call_ctx: src1:i len:128
1
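Between the before_content and after_content of the cpu-arm.md record above, exactly two entries disappear, matching the PR title:

float_conv_to_i: dest:i src1:f len:40
r4_conv_to_i: dest:i src1:f len:88

A minimal sketch of the front-end side of the change follows. It is an illustration under stated assumptions, not the actual patch: the helper name lower_conv_to_i and its placement are hypothetical, while OP_FCONV_TO_I, OP_RCONV_TO_I, the _I4/_I8 opcode forms, MonoInst, and TARGET_SIZEOF_VOID_P are existing mono names (mini.h context assumed).

/* Hypothetical sketch: rewrite the pointer-sized float/r4 conversions into
 * their fixed-width forms once in the front end, so back ends no longer
 * need machine descriptions for OP_FCONV_TO_I / OP_RCONV_TO_I. */
static void
lower_conv_to_i (MonoInst *ins)
{
	switch (ins->opcode) {
	case OP_FCONV_TO_I:
		/* native int is 8 bytes on 64-bit targets, 4 bytes on 32-bit */
#if TARGET_SIZEOF_VOID_P == 8
		ins->opcode = OP_FCONV_TO_I8;
#else
		ins->opcode = OP_FCONV_TO_I4;
#endif
		break;
	case OP_RCONV_TO_I:
#if TARGET_SIZEOF_VOID_P == 8
		ins->opcode = OP_RCONV_TO_I8;
#else
		ins->opcode = OP_RCONV_TO_I4;
#endif
		break;
	default:
		break;
	}
}

With the rewrite done this early, per-architecture files such as cpu-arm.md only need entries for the fixed-width conversion forms they already list.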
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/cpu-arm64.md
# Copyright 2011-2013 Xamarin, Inc (http://www.xamarin.com) # Copyright 2003-2011 Novell, Inc (http://www.novell.com) # Licensed under the MIT license. See LICENSE file in the project root for full license information. # arm64 cpu description file # this file is read by genmdesc to produce a table with all the relevant information # about the cpu instructions that may be used by the register allocator, the scheduler # and other parts of the arch-dependent part of mini. # # An opcode name is followed by a colon and optional specifiers. # A specifier has a name, a colon and a value. Specifiers are separated by white space. # Here is a description of the specifiers valid for this file and their possible values. # # dest:register describes the destination register of an instruction # src1:register describes the first source register of an instruction # src2:register describes the second source register of an instruction # # register may have the following values: # i integer register # a r3 register (output from calls) # b base register (used in address references) # f floating point register # g floating point register returned in r0:r1 for soft-float mode # # len:number describes the maximum length in bytes of the instruction # number is a positive integer # # cost:number describes how many cycles are needed to complete the instruction (unused) # # clob:spec describes if the instruction clobbers registers or has special needs # # spec can be one of the following characters: # c clobbers caller-save registers # r 'reserves' the destination register until a later instruction unreserves it # used mostly to set output registers in function calls # # flags:spec describes if the instruction uses or sets the flags (unused) # # spec can be one of the following chars: # s sets the flags # u uses the flags # m uses and modifies the flags # # res:spec describes what units are used in the processor (unused) # # delay: describes delay slots (unused) # # the required specifiers are: len, clob (if registers are clobbered), the registers # specifiers if the registers are actually used, flags (when scheduling is implemented). # # See the code in mini-x86.c for more details on how the specifiers are used. # nop: len:4 relaxed_nop: len:4 break: len:20 br: len:16 switch: src1:i len:12 # See the comment in resume_from_signal_handler, we can't copy the fp regs from sigctx to MonoContext on linux, # since the corresponding sigctx structures are not well defined. 
seq_point: len:40 clob:c il_seq_point: len:0 throw: src1:i len:24 rethrow: src1:i len:20 start_handler: len:32 endfinally: len:32 call_handler: len:16 clob:c endfilter: src1:i len:32 get_ex_obj: dest:i len:16 ckfinite: dest:f src1:f len:64 ceq: dest:i len:12 cgt: dest:i len:12 cgt_un: dest:i len:12 clt: dest:i len:12 clt_un: dest:i len:12 localloc: dest:i src1:i len:96 compare: src1:i src2:i len:4 compare_imm: src1:i len:20 fcompare: src1:f src2:f len:12 rcompare: src1:f src2:f len:12 arglist: src1:i len:12 setlret: src1:i src2:i len:12 check_this: src1:b len:4 call: dest:a clob:c len:32 call_reg: dest:a src1:i len:32 clob:c call_membase: dest:a src1:b len:32 clob:c voidcall: len:32 clob:c voidcall_reg: src1:i len:32 clob:c voidcall_membase: src1:b len:32 clob:c fcall: dest:f len:32 clob:c fcall_reg: dest:f src1:i len:32 clob:c fcall_membase: dest:f src1:b len:32 clob:c rcall: dest:f len:32 clob:c rcall_reg: dest:f src1:i len:32 clob:c rcall_membase: dest:f src1:b len:32 clob:c lcall: dest:l len:32 clob:c lcall_reg: dest:l src1:i len:32 clob:c lcall_membase: dest:l src1:b len:32 clob:c vcall: len:32 clob:c vcall_reg: src1:i len:32 clob:c vcall_membase: src1:b len:32 clob:c tailcall: len:255 clob:c # FIXME len tailcall_membase: src1:b len:255 clob:c # FIXME len tailcall_reg: src1:b len:255 clob:c # FIXME len # tailcall_parameter models the size of moving one parameter, # so that the required size of a branch around a tailcall can # be accurately estimated; something like: # void f1(volatile long *a) # { # a[large] = a[another large] # } # # This is two instructions typically, but can be 6 for frames larger than 32K. # FIXME A fixed size sequence to move parameters would moot this. tailcall_parameter: len:24 iconst: dest:i len:16 r4const: dest:f len:24 r8const: dest:f len:20 label: len:0 store_membase_imm: dest:b len:20 store_membase_reg: dest:b src1:i len:20 storei1_membase_imm: dest:b len:20 storei1_membase_reg: dest:b src1:i len:12 storei2_membase_imm: dest:b len:20 storei2_membase_reg: dest:b src1:i len:12 storei4_membase_imm: dest:b len:20 storei4_membase_reg: dest:b src1:i len:20 storer4_membase_reg: dest:b src1:f len:20 storer8_membase_reg: dest:b src1:f len:24 store_memindex: dest:b src1:i src2:i len:4 storei1_memindex: dest:b src1:i src2:i len:4 storei2_memindex: dest:b src1:i src2:i len:4 storei4_memindex: dest:b src1:i src2:i len:4 load_membase: dest:i src1:b len:20 loadi1_membase: dest:i src1:b len:32 loadu1_membase: dest:i src1:b len:32 loadi2_membase: dest:i src1:b len:32 loadu2_membase: dest:i src1:b len:32 loadi4_membase: dest:i src1:b len:32 loadu4_membase: dest:i src1:b len:32 loadr4_membase: dest:f src1:b len:32 loadr8_membase: dest:f src1:b len:32 load_memindex: dest:i src1:b src2:i len:4 loadi1_memindex: dest:i src1:b src2:i len:4 loadu1_memindex: dest:i src1:b src2:i len:4 loadi2_memindex: dest:i src1:b src2:i len:4 loadu2_memindex: dest:i src1:b src2:i len:4 loadi4_memindex: dest:i src1:b src2:i len:4 loadu4_memindex: dest:i src1:b src2:i len:4 loadu4_mem: dest:i len:8 move: dest:i src1:i len:4 fmove: dest:f src1:f len:4 rmove: dest:f src1:f len:4 move_f_to_i4: dest:i src1:f len:8 move_i4_to_f: dest:f src1:i len:8 move_f_to_i8: dest:i src1:f len:4 move_i8_to_f: dest:f src1:i len:4 add_imm: dest:i src1:i len:12 sub_imm: dest:i src1:i len:12 mul_imm: dest:i src1:i len:12 and_imm: dest:i src1:i len:12 or_imm: dest:i src1:i len:12 xor_imm: dest:i src1:i len:12 shl_imm: dest:i src1:i len:8 shr_imm: dest:i src1:i len:8 shr_un_imm: dest:i src1:i len:8 cond_exc_eq: len:8 
cond_exc_ne_un: len:8 cond_exc_lt: len:8 cond_exc_lt_un: len:8 cond_exc_gt: len:8 cond_exc_gt_un: len:8 cond_exc_ge: len:8 cond_exc_ge_un: len:8 cond_exc_le: len:8 cond_exc_le_un: len:8 cond_exc_ov: len:12 cond_exc_no: len:8 cond_exc_c: len:12 cond_exc_nc: len:8 #float_beq: src1:f src2:f len:20 #float_bne_un: src1:f src2:f len:20 #float_blt: src1:f src2:f len:20 #float_blt_un: src1:f src2:f len:20 #float_bgt: src1:f src2:f len:20 #float_bgt_un: src1:f src2:f len:20 #float_bge: src1:f src2:f len:20 #float_bge_un: src1:f src2:f len:20 #float_ble: src1:f src2:f len:20 #float_ble_un: src1:f src2:f len:20 float_add: dest:f src1:f src2:f len:4 float_sub: dest:f src1:f src2:f len:4 float_mul: dest:f src1:f src2:f len:4 float_div: dest:f src1:f src2:f len:4 float_div_un: dest:f src1:f src2:f len:4 float_rem: dest:f src1:f src2:f len:16 float_rem_un: dest:f src1:f src2:f len:16 float_neg: dest:f src1:f len:4 float_not: dest:f src1:f len:4 float_conv_to_i1: dest:i src1:f len:40 float_conv_to_i2: dest:i src1:f len:40 float_conv_to_i4: dest:i src1:f len:40 float_conv_to_i8: dest:l src1:f len:40 float_conv_to_r4: dest:f src1:f len:8 float_conv_to_u4: dest:i src1:f len:40 float_conv_to_u8: dest:l src1:f len:40 float_conv_to_u2: dest:i src1:f len:40 float_conv_to_u1: dest:i src1:f len:40 float_conv_to_i: dest:i src1:f len:40 float_ceq: dest:i src1:f src2:f len:16 float_cgt: dest:i src1:f src2:f len:16 float_cgt_un: dest:i src1:f src2:f len:20 float_clt: dest:i src1:f src2:f len:16 float_clt_un: dest:i src1:f src2:f len:20 float_cneq: dest:i src1:f src2:f len:20 float_cge: dest:i src1:f src2:f len:20 float_cle: dest:i src1:f src2:f len:20 setfret: src1:f len:12 # R4 opcodes r4_conv_to_i1: dest:i src1:f len:8 r4_conv_to_u1: dest:i src1:f len:8 r4_conv_to_i2: dest:i src1:f len:8 r4_conv_to_u2: dest:i src1:f len:8 r4_conv_to_i4: dest:i src1:f len:8 r4_conv_to_u4: dest:i src1:f len:8 r4_conv_to_i8: dest:l src1:f len:8 r4_conv_to_i: dest:l src1:f len:8 r4_conv_to_u8: dest:l src1:f len:8 r4_conv_to_r4: dest:f src1:f len:4 r4_conv_to_r8: dest:f src1:f len:4 r4_add: dest:f src1:f src2:f len:4 r4_sub: dest:f src1:f src2:f len:4 r4_mul: dest:f src1:f src2:f len:4 r4_div: dest:f src1:f src2:f len:4 r4_rem: dest:f src1:f src2:f len:16 r4_neg: dest:f src1:f len:4 r4_ceq: dest:i src1:f src2:f len:16 r4_cgt: dest:i src1:f src2:f len:16 r4_cgt_un: dest:i src1:f src2:f len:20 r4_clt: dest:i src1:f src2:f len:16 r4_clt_un: dest:i src1:f src2:f len:20 r4_cneq: dest:i src1:f src2:f len:20 r4_cge: dest:i src1:f src2:f len:20 r4_cle: dest:i src1:f src2:f len:20 aotconst: dest:i len:16 objc_get_selector: dest:i len:32 sqrt: dest:f src1:f len:4 adc: dest:i src1:i src2:i len:4 addcc: dest:i src1:i src2:i len:4 subcc: dest:i src1:i src2:i len:4 adc_imm: dest:i src1:i len:12 addcc_imm: dest:i src1:i len:12 subcc_imm: dest:i src1:i len:12 sbb: dest:i src1:i src2:i len:4 sbb_imm: dest:i src1:i len:12 br_reg: src1:i len:8 bigmul: len:8 dest:l src1:i src2:i bigmul_un: len:8 dest:l src1:i src2:i tls_get: dest:i len:32 tls_set: src1:i len:32 # 32 bit opcodes int_add: dest:i src1:i src2:i len:4 int_sub: dest:i src1:i src2:i len:4 int_mul: dest:i src1:i src2:i len:4 int_div: dest:i src1:i src2:i len:72 int_div_un: dest:i src1:i src2:i len:72 int_rem: dest:i src1:i src2:i len:72 int_rem_un: dest:i src1:i src2:i len:72 int_and: dest:i src1:i src2:i len:4 int_or: dest:i src1:i src2:i len:4 int_xor: dest:i src1:i src2:i len:4 int_shl: dest:i src1:i src2:i len:4 int_shr: dest:i src1:i src2:i len:4 int_shr_un: dest:i src1:i src2:i len:4 int_neg: 
dest:i src1:i len:4 int_not: dest:i src1:i len:4 int_conv_to_i1: dest:i src1:i len:8 int_conv_to_i2: dest:i src1:i len:8 int_conv_to_i4: dest:i src1:i len:4 int_conv_to_r4: dest:f src1:i len:36 int_conv_to_r8: dest:f src1:i len:36 int_conv_to_u4: dest:i src1:i int_conv_to_r_un: dest:f src1:i len:56 int_conv_to_u2: dest:i src1:i len:8 int_conv_to_u1: dest:i src1:i len:4 int_beq: len:16 int_bge: len:16 int_bgt: len:16 int_ble: len:16 int_blt: len:16 int_bne_un: len:16 int_bge_un: len:16 int_bgt_un: len:16 int_ble_un: len:16 int_blt_un: len:16 int_add_ovf: dest:i src1:i src2:i len:16 int_add_ovf_un: dest:i src1:i src2:i len:16 int_mul_ovf: dest:i src1:i src2:i len:16 int_mul_ovf_un: dest:i src1:i src2:i len:16 int_sub_ovf: dest:i src1:i src2:i len:16 int_sub_ovf_un: dest:i src1:i src2:i len:16 add_ovf_carry: dest:i src1:i src2:i len:16 sub_ovf_carry: dest:i src1:i src2:i len:16 add_ovf_un_carry: dest:i src1:i src2:i len:16 sub_ovf_un_carry: dest:i src1:i src2:i len:16 arm_rsbs_imm: dest:i src1:i len:4 arm_rsc_imm: dest:i src1:i len:4 # Linear IR opcodes dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_i8const: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 int_adc: dest:i src1:i src2:i len:4 int_addcc: dest:i src1:i src2:i len:4 int_subcc: dest:i src1:i src2:i len:4 int_sbb: dest:i src1:i src2:i len:4 int_adc_imm: dest:i src1:i len:12 int_sbb_imm: dest:i src1:i len:12 int_add_imm: dest:i src1:i len:12 int_sub_imm: dest:i src1:i len:12 int_mul_imm: dest:i src1:i len:12 int_div_imm: dest:i src1:i len:20 int_div_un_imm: dest:i src1:i len:12 int_rem_imm: dest:i src1:i len:28 int_rem_un_imm: dest:i src1:i len:16 int_and_imm: dest:i src1:i len:12 int_or_imm: dest:i src1:i len:12 int_xor_imm: dest:i src1:i len:12 int_shl_imm: dest:i src1:i len:8 int_shr_imm: dest:i src1:i len:8 int_shr_un_imm: dest:i src1:i len:8 int_ceq: dest:i len:12 int_cgt: dest:i len:12 int_cgt_un: dest:i len:12 int_clt: dest:i len:12 int_clt_un: dest:i len:12 int_cneq: dest:i len:12 int_cge: dest:i len:12 int_cle: dest:i len:12 int_cge_un: dest:i len:12 int_cle_un: dest:i len:12 cond_exc_ieq: len:16 cond_exc_ine_un: len:16 cond_exc_ilt: len:16 cond_exc_ilt_un: len:16 cond_exc_igt: len:16 cond_exc_igt_un: len:16 cond_exc_ige: len:16 cond_exc_ige_un: len:16 cond_exc_ile: len:16 cond_exc_ile_un: len:16 cond_exc_iov: len:20 cond_exc_ino: len:16 cond_exc_ic: len:20 cond_exc_inc: len:16 icompare: src1:i src2:i len:4 icompare_imm: src1:i len:12 long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:36 vcall2: len:40 clob:c vcall2_reg: src1:i len:40 clob:c vcall2_membase: src1:b len:40 clob:c dyn_call: src1:i src2:i len:216 clob:c # This is different from the original JIT opcodes float_beq: len:32 float_bne_un: len:32 float_blt: len:32 float_blt_un: len:32 float_bgt: len:32 float_bgt_un: len:32 float_bge: len:32 float_bge_un: len:32 float_ble: len:32 float_ble_un: len:32 liverange_start: len:0 liverange_end: len:0 gc_liveness_def: len:0 gc_liveness_use: len:0 gc_spill_slot_liveness_def: len:0 gc_param_slot_liveness_def: len:0 # 64 bit opcodes i8const: dest:i len:16 sext_i4: dest:i src1:i len:4 zext_i4: dest:i src1:i len:4 jump_table: dest:i len:16 long_add: dest:i src1:i src2:i len:4 long_sub: dest:i src1:i src2:i len:4 long_mul: dest:i src1:i src2:i len:4 long_div: dest:i src1:i src2:i len:80 long_div_un: dest:i src1:i src2:i len:64 long_rem: dest:i src1:i src2:i len:80 long_rem_un: dest:i src1:i src2:i len:64 long_and: dest:i src1:i src2:i len:4 long_or: dest:i src1:i 
src2:i len:4 long_xor: dest:i src1:i src2:i len:4 long_shl: dest:i src1:i src2:i len:4 long_shr: dest:i src1:i src2:i len:4 long_shr_un: dest:i src1:i src2:i len:4 long_neg: dest:i src1:i len:4 long_not: dest:i src1:i len:4 long_add_imm: dest:i src1:i len:12 long_sub_imm: dest:i src1:i len:12 long_mul_imm: dest:i src1:i len:12 long_and_imm: dest:i src1:i len:12 long_or_imm: dest:i src1:i len:12 long_xor_imm: dest:i src1:i len:12 long_shl_imm: dest:i src1:i len:12 long_shr_imm: dest:i src1:i len:12 long_shr_un_imm: dest:i src1:i len:12 long_add_ovf: dest:i src1:i src2:i len:16 long_add_ovf_un: dest:i src1:i src2:i len:16 long_mul_ovf: dest:i src1:i src2:i len:16 long_mul_ovf_un: dest:i src1:i src2:i len:16 long_sub_ovf: dest:i src1:i src2:i len:16 long_sub_ovf_un: dest:i src1:i src2:i len:16 lcompare: src1:i src2:i len:4 lcompare_imm: src1:i len:20 long_beq: len:4 long_bge: len:4 long_bgt: len:4 long_ble: len:4 long_blt: len:4 long_bne_un: len:4 long_bge_un: len:4 long_bgt_un: len:4 long_ble_un: len:4 long_blt_un: len:4 long_ceq: dest:i len:12 long_cgt: dest:i len:12 long_cgt_un: dest:i len:12 long_clt: dest:i len:12 long_clt_un: dest:i len:12 long_conv_to_i1: dest:i src1:i len:4 long_conv_to_i2: dest:i src1:i len:4 long_conv_to_u1: dest:i src1:i len:4 long_conv_to_u2: dest:i src1:i len:4 long_conv_to_r8: dest:f src1:i len:8 long_conv_to_r4: dest:f src1:i len:12 loadi8_membase: dest:i src1:b len:12 storei8_membase_imm: dest:b len:20 storei8_membase_reg: dest:b src1:i len:12 long_conv_to_r_un: dest:f src1:i len:8 arm_setfreg_r4: dest:f src1:f len:8 localloc_imm: dest:i len:64 arm64_cbzw: src1:i len:16 arm64_cbzx: src1:i len:16 arm64_cbnzw: src1:i len:16 arm64_cbnzx: src1:i len:16 atomic_add_i4: dest:i src1:i src2:i len:32 atomic_add_i8: dest:i src1:i src2:i len:32 atomic_exchange_i4: dest:i src1:i src2:i len:32 atomic_exchange_i8: dest:i src1:i src2:i len:32 atomic_cas_i4: dest:i src1:i src2:i src3:i len:32 atomic_cas_i8: dest:i src1:i src2:i src3:i len:32 memory_barrier: len:8 clob:a atomic_load_i1: dest:i src1:b len:24 atomic_load_u1: dest:i src1:b len:24 atomic_load_i2: dest:i src1:b len:24 atomic_load_u2: dest:i src1:b len:24 atomic_load_i4: dest:i src1:b len:24 atomic_load_u4: dest:i src1:b len:24 atomic_load_i8: dest:i src1:b len:20 atomic_load_u8: dest:i src1:b len:20 atomic_load_r4: dest:f src1:b len:28 atomic_load_r8: dest:f src1:b len:24 atomic_store_i1: dest:b src1:i len:20 atomic_store_u1: dest:b src1:i len:20 atomic_store_i2: dest:b src1:i len:20 atomic_store_u2: dest:b src1:i len:20 atomic_store_i4: dest:b src1:i len:20 atomic_store_u4: dest:b src1:i len:20 atomic_store_i8: dest:b src1:i len:20 atomic_store_u8: dest:b src1:i len:20 atomic_store_r4: dest:b src1:f len:28 atomic_store_r8: dest:b src1:f len:24 generic_class_init: src1:a len:44 clob:c gc_safe_point: src1:i len:12 clob:c fill_prof_call_ctx: src1:i len:128
# Copyright 2011-2013 Xamarin, Inc (http://www.xamarin.com) # Copyright 2003-2011 Novell, Inc (http://www.novell.com) # Licensed under the MIT license. See LICENSE file in the project root for full license information. # arm64 cpu description file # this file is read by genmdesc to produce a table with all the relevant information # about the cpu instructions that may be used by the register allocator, the scheduler # and other parts of the arch-dependent part of mini. # # An opcode name is followed by a colon and optional specifiers. # A specifier has a name, a colon and a value. Specifiers are separated by white space. # Here is a description of the specifiers valid for this file and their possible values. # # dest:register describes the destination register of an instruction # src1:register describes the first source register of an instruction # src2:register describes the second source register of an instruction # # register may have the following values: # i integer register # a r3 register (output from calls) # b base register (used in address references) # f floating point register # g floating point register returned in r0:r1 for soft-float mode # # len:number describes the maximum length in bytes of the instruction # number is a positive integer # # cost:number describes how many cycles are needed to complete the instruction (unused) # # clob:spec describes if the instruction clobbers registers or has special needs # # spec can be one of the following characters: # c clobbers caller-save registers # r 'reserves' the destination register until a later instruction unreserves it # used mostly to set output registers in function calls # # flags:spec describes if the instruction uses or sets the flags (unused) # # spec can be one of the following chars: # s sets the flags # u uses the flags # m uses and modifies the flags # # res:spec describes what units are used in the processor (unused) # # delay: describes delay slots (unused) # # the required specifiers are: len, clob (if registers are clobbered), the registers # specifiers if the registers are actually used, flags (when scheduling is implemented). # # See the code in mini-x86.c for more details on how the specifiers are used. # nop: len:4 relaxed_nop: len:4 break: len:20 br: len:16 switch: src1:i len:12 # See the comment in resume_from_signal_handler, we can't copy the fp regs from sigctx to MonoContext on linux, # since the corresponding sigctx structures are not well defined. 
seq_point: len:40 clob:c il_seq_point: len:0 throw: src1:i len:24 rethrow: src1:i len:20 start_handler: len:32 endfinally: len:32 call_handler: len:16 clob:c endfilter: src1:i len:32 get_ex_obj: dest:i len:16 ckfinite: dest:f src1:f len:64 ceq: dest:i len:12 cgt: dest:i len:12 cgt_un: dest:i len:12 clt: dest:i len:12 clt_un: dest:i len:12 localloc: dest:i src1:i len:96 compare: src1:i src2:i len:4 compare_imm: src1:i len:20 fcompare: src1:f src2:f len:12 rcompare: src1:f src2:f len:12 arglist: src1:i len:12 setlret: src1:i src2:i len:12 check_this: src1:b len:4 call: dest:a clob:c len:32 call_reg: dest:a src1:i len:32 clob:c call_membase: dest:a src1:b len:32 clob:c voidcall: len:32 clob:c voidcall_reg: src1:i len:32 clob:c voidcall_membase: src1:b len:32 clob:c fcall: dest:f len:32 clob:c fcall_reg: dest:f src1:i len:32 clob:c fcall_membase: dest:f src1:b len:32 clob:c rcall: dest:f len:32 clob:c rcall_reg: dest:f src1:i len:32 clob:c rcall_membase: dest:f src1:b len:32 clob:c lcall: dest:l len:32 clob:c lcall_reg: dest:l src1:i len:32 clob:c lcall_membase: dest:l src1:b len:32 clob:c vcall: len:32 clob:c vcall_reg: src1:i len:32 clob:c vcall_membase: src1:b len:32 clob:c tailcall: len:255 clob:c # FIXME len tailcall_membase: src1:b len:255 clob:c # FIXME len tailcall_reg: src1:b len:255 clob:c # FIXME len # tailcall_parameter models the size of moving one parameter, # so that the required size of a branch around a tailcall can # be accurately estimated; something like: # void f1(volatile long *a) # { # a[large] = a[another large] # } # # This is two instructions typically, but can be 6 for frames larger than 32K. # FIXME A fixed size sequence to move parameters would moot this. tailcall_parameter: len:24 iconst: dest:i len:16 r4const: dest:f len:24 r8const: dest:f len:20 label: len:0 store_membase_imm: dest:b len:20 store_membase_reg: dest:b src1:i len:20 storei1_membase_imm: dest:b len:20 storei1_membase_reg: dest:b src1:i len:12 storei2_membase_imm: dest:b len:20 storei2_membase_reg: dest:b src1:i len:12 storei4_membase_imm: dest:b len:20 storei4_membase_reg: dest:b src1:i len:20 storer4_membase_reg: dest:b src1:f len:20 storer8_membase_reg: dest:b src1:f len:24 store_memindex: dest:b src1:i src2:i len:4 storei1_memindex: dest:b src1:i src2:i len:4 storei2_memindex: dest:b src1:i src2:i len:4 storei4_memindex: dest:b src1:i src2:i len:4 load_membase: dest:i src1:b len:20 loadi1_membase: dest:i src1:b len:32 loadu1_membase: dest:i src1:b len:32 loadi2_membase: dest:i src1:b len:32 loadu2_membase: dest:i src1:b len:32 loadi4_membase: dest:i src1:b len:32 loadu4_membase: dest:i src1:b len:32 loadr4_membase: dest:f src1:b len:32 loadr8_membase: dest:f src1:b len:32 load_memindex: dest:i src1:b src2:i len:4 loadi1_memindex: dest:i src1:b src2:i len:4 loadu1_memindex: dest:i src1:b src2:i len:4 loadi2_memindex: dest:i src1:b src2:i len:4 loadu2_memindex: dest:i src1:b src2:i len:4 loadi4_memindex: dest:i src1:b src2:i len:4 loadu4_memindex: dest:i src1:b src2:i len:4 loadu4_mem: dest:i len:8 move: dest:i src1:i len:4 fmove: dest:f src1:f len:4 rmove: dest:f src1:f len:4 move_f_to_i4: dest:i src1:f len:8 move_i4_to_f: dest:f src1:i len:8 move_f_to_i8: dest:i src1:f len:4 move_i8_to_f: dest:f src1:i len:4 add_imm: dest:i src1:i len:12 sub_imm: dest:i src1:i len:12 mul_imm: dest:i src1:i len:12 and_imm: dest:i src1:i len:12 or_imm: dest:i src1:i len:12 xor_imm: dest:i src1:i len:12 shl_imm: dest:i src1:i len:8 shr_imm: dest:i src1:i len:8 shr_un_imm: dest:i src1:i len:8 cond_exc_eq: len:8 
cond_exc_ne_un: len:8 cond_exc_lt: len:8 cond_exc_lt_un: len:8 cond_exc_gt: len:8 cond_exc_gt_un: len:8 cond_exc_ge: len:8 cond_exc_ge_un: len:8 cond_exc_le: len:8 cond_exc_le_un: len:8 cond_exc_ov: len:12 cond_exc_no: len:8 cond_exc_c: len:12 cond_exc_nc: len:8 #float_beq: src1:f src2:f len:20 #float_bne_un: src1:f src2:f len:20 #float_blt: src1:f src2:f len:20 #float_blt_un: src1:f src2:f len:20 #float_bgt: src1:f src2:f len:20 #float_bgt_un: src1:f src2:f len:20 #float_bge: src1:f src2:f len:20 #float_bge_un: src1:f src2:f len:20 #float_ble: src1:f src2:f len:20 #float_ble_un: src1:f src2:f len:20 float_add: dest:f src1:f src2:f len:4 float_sub: dest:f src1:f src2:f len:4 float_mul: dest:f src1:f src2:f len:4 float_div: dest:f src1:f src2:f len:4 float_div_un: dest:f src1:f src2:f len:4 float_rem: dest:f src1:f src2:f len:16 float_rem_un: dest:f src1:f src2:f len:16 float_neg: dest:f src1:f len:4 float_not: dest:f src1:f len:4 float_conv_to_i1: dest:i src1:f len:40 float_conv_to_i2: dest:i src1:f len:40 float_conv_to_i4: dest:i src1:f len:40 float_conv_to_i8: dest:l src1:f len:40 float_conv_to_r4: dest:f src1:f len:8 float_conv_to_u4: dest:i src1:f len:40 float_conv_to_u8: dest:l src1:f len:40 float_conv_to_u2: dest:i src1:f len:40 float_conv_to_u1: dest:i src1:f len:40 float_ceq: dest:i src1:f src2:f len:16 float_cgt: dest:i src1:f src2:f len:16 float_cgt_un: dest:i src1:f src2:f len:20 float_clt: dest:i src1:f src2:f len:16 float_clt_un: dest:i src1:f src2:f len:20 float_cneq: dest:i src1:f src2:f len:20 float_cge: dest:i src1:f src2:f len:20 float_cle: dest:i src1:f src2:f len:20 setfret: src1:f len:12 # R4 opcodes r4_conv_to_i1: dest:i src1:f len:8 r4_conv_to_u1: dest:i src1:f len:8 r4_conv_to_i2: dest:i src1:f len:8 r4_conv_to_u2: dest:i src1:f len:8 r4_conv_to_i4: dest:i src1:f len:8 r4_conv_to_u4: dest:i src1:f len:8 r4_conv_to_i8: dest:l src1:f len:8 r4_conv_to_u8: dest:l src1:f len:8 r4_conv_to_r4: dest:f src1:f len:4 r4_conv_to_r8: dest:f src1:f len:4 r4_add: dest:f src1:f src2:f len:4 r4_sub: dest:f src1:f src2:f len:4 r4_mul: dest:f src1:f src2:f len:4 r4_div: dest:f src1:f src2:f len:4 r4_rem: dest:f src1:f src2:f len:16 r4_neg: dest:f src1:f len:4 r4_ceq: dest:i src1:f src2:f len:16 r4_cgt: dest:i src1:f src2:f len:16 r4_cgt_un: dest:i src1:f src2:f len:20 r4_clt: dest:i src1:f src2:f len:16 r4_clt_un: dest:i src1:f src2:f len:20 r4_cneq: dest:i src1:f src2:f len:20 r4_cge: dest:i src1:f src2:f len:20 r4_cle: dest:i src1:f src2:f len:20 aotconst: dest:i len:16 objc_get_selector: dest:i len:32 sqrt: dest:f src1:f len:4 adc: dest:i src1:i src2:i len:4 addcc: dest:i src1:i src2:i len:4 subcc: dest:i src1:i src2:i len:4 adc_imm: dest:i src1:i len:12 addcc_imm: dest:i src1:i len:12 subcc_imm: dest:i src1:i len:12 sbb: dest:i src1:i src2:i len:4 sbb_imm: dest:i src1:i len:12 br_reg: src1:i len:8 bigmul: len:8 dest:l src1:i src2:i bigmul_un: len:8 dest:l src1:i src2:i tls_get: dest:i len:32 tls_set: src1:i len:32 # 32 bit opcodes int_add: dest:i src1:i src2:i len:4 int_sub: dest:i src1:i src2:i len:4 int_mul: dest:i src1:i src2:i len:4 int_div: dest:i src1:i src2:i len:72 int_div_un: dest:i src1:i src2:i len:72 int_rem: dest:i src1:i src2:i len:72 int_rem_un: dest:i src1:i src2:i len:72 int_and: dest:i src1:i src2:i len:4 int_or: dest:i src1:i src2:i len:4 int_xor: dest:i src1:i src2:i len:4 int_shl: dest:i src1:i src2:i len:4 int_shr: dest:i src1:i src2:i len:4 int_shr_un: dest:i src1:i src2:i len:4 int_neg: dest:i src1:i len:4 int_not: dest:i src1:i len:4 int_conv_to_i1: dest:i 
src1:i len:8 int_conv_to_i2: dest:i src1:i len:8 int_conv_to_i4: dest:i src1:i len:4 int_conv_to_r4: dest:f src1:i len:36 int_conv_to_r8: dest:f src1:i len:36 int_conv_to_u4: dest:i src1:i int_conv_to_r_un: dest:f src1:i len:56 int_conv_to_u2: dest:i src1:i len:8 int_conv_to_u1: dest:i src1:i len:4 int_beq: len:16 int_bge: len:16 int_bgt: len:16 int_ble: len:16 int_blt: len:16 int_bne_un: len:16 int_bge_un: len:16 int_bgt_un: len:16 int_ble_un: len:16 int_blt_un: len:16 int_add_ovf: dest:i src1:i src2:i len:16 int_add_ovf_un: dest:i src1:i src2:i len:16 int_mul_ovf: dest:i src1:i src2:i len:16 int_mul_ovf_un: dest:i src1:i src2:i len:16 int_sub_ovf: dest:i src1:i src2:i len:16 int_sub_ovf_un: dest:i src1:i src2:i len:16 add_ovf_carry: dest:i src1:i src2:i len:16 sub_ovf_carry: dest:i src1:i src2:i len:16 add_ovf_un_carry: dest:i src1:i src2:i len:16 sub_ovf_un_carry: dest:i src1:i src2:i len:16 arm_rsbs_imm: dest:i src1:i len:4 arm_rsc_imm: dest:i src1:i len:4 # Linear IR opcodes dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_i8const: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 int_adc: dest:i src1:i src2:i len:4 int_addcc: dest:i src1:i src2:i len:4 int_subcc: dest:i src1:i src2:i len:4 int_sbb: dest:i src1:i src2:i len:4 int_adc_imm: dest:i src1:i len:12 int_sbb_imm: dest:i src1:i len:12 int_add_imm: dest:i src1:i len:12 int_sub_imm: dest:i src1:i len:12 int_mul_imm: dest:i src1:i len:12 int_div_imm: dest:i src1:i len:20 int_div_un_imm: dest:i src1:i len:12 int_rem_imm: dest:i src1:i len:28 int_rem_un_imm: dest:i src1:i len:16 int_and_imm: dest:i src1:i len:12 int_or_imm: dest:i src1:i len:12 int_xor_imm: dest:i src1:i len:12 int_shl_imm: dest:i src1:i len:8 int_shr_imm: dest:i src1:i len:8 int_shr_un_imm: dest:i src1:i len:8 int_ceq: dest:i len:12 int_cgt: dest:i len:12 int_cgt_un: dest:i len:12 int_clt: dest:i len:12 int_clt_un: dest:i len:12 int_cneq: dest:i len:12 int_cge: dest:i len:12 int_cle: dest:i len:12 int_cge_un: dest:i len:12 int_cle_un: dest:i len:12 cond_exc_ieq: len:16 cond_exc_ine_un: len:16 cond_exc_ilt: len:16 cond_exc_ilt_un: len:16 cond_exc_igt: len:16 cond_exc_igt_un: len:16 cond_exc_ige: len:16 cond_exc_ige_un: len:16 cond_exc_ile: len:16 cond_exc_ile_un: len:16 cond_exc_iov: len:20 cond_exc_ino: len:16 cond_exc_ic: len:20 cond_exc_inc: len:16 icompare: src1:i src2:i len:4 icompare_imm: src1:i len:12 long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:36 vcall2: len:40 clob:c vcall2_reg: src1:i len:40 clob:c vcall2_membase: src1:b len:40 clob:c dyn_call: src1:i src2:i len:216 clob:c # This is different from the original JIT opcodes float_beq: len:32 float_bne_un: len:32 float_blt: len:32 float_blt_un: len:32 float_bgt: len:32 float_bgt_un: len:32 float_bge: len:32 float_bge_un: len:32 float_ble: len:32 float_ble_un: len:32 liverange_start: len:0 liverange_end: len:0 gc_liveness_def: len:0 gc_liveness_use: len:0 gc_spill_slot_liveness_def: len:0 gc_param_slot_liveness_def: len:0 # 64 bit opcodes i8const: dest:i len:16 sext_i4: dest:i src1:i len:4 zext_i4: dest:i src1:i len:4 jump_table: dest:i len:16 long_add: dest:i src1:i src2:i len:4 long_sub: dest:i src1:i src2:i len:4 long_mul: dest:i src1:i src2:i len:4 long_div: dest:i src1:i src2:i len:80 long_div_un: dest:i src1:i src2:i len:64 long_rem: dest:i src1:i src2:i len:80 long_rem_un: dest:i src1:i src2:i len:64 long_and: dest:i src1:i src2:i len:4 long_or: dest:i src1:i src2:i len:4 long_xor: dest:i src1:i src2:i len:4 long_shl: dest:i 
src1:i src2:i len:4 long_shr: dest:i src1:i src2:i len:4 long_shr_un: dest:i src1:i src2:i len:4 long_neg: dest:i src1:i len:4 long_not: dest:i src1:i len:4 long_add_imm: dest:i src1:i len:12 long_sub_imm: dest:i src1:i len:12 long_mul_imm: dest:i src1:i len:12 long_and_imm: dest:i src1:i len:12 long_or_imm: dest:i src1:i len:12 long_xor_imm: dest:i src1:i len:12 long_shl_imm: dest:i src1:i len:12 long_shr_imm: dest:i src1:i len:12 long_shr_un_imm: dest:i src1:i len:12 long_add_ovf: dest:i src1:i src2:i len:16 long_add_ovf_un: dest:i src1:i src2:i len:16 long_mul_ovf: dest:i src1:i src2:i len:16 long_mul_ovf_un: dest:i src1:i src2:i len:16 long_sub_ovf: dest:i src1:i src2:i len:16 long_sub_ovf_un: dest:i src1:i src2:i len:16 lcompare: src1:i src2:i len:4 lcompare_imm: src1:i len:20 long_beq: len:4 long_bge: len:4 long_bgt: len:4 long_ble: len:4 long_blt: len:4 long_bne_un: len:4 long_bge_un: len:4 long_bgt_un: len:4 long_ble_un: len:4 long_blt_un: len:4 long_ceq: dest:i len:12 long_cgt: dest:i len:12 long_cgt_un: dest:i len:12 long_clt: dest:i len:12 long_clt_un: dest:i len:12 long_conv_to_i1: dest:i src1:i len:4 long_conv_to_i2: dest:i src1:i len:4 long_conv_to_u1: dest:i src1:i len:4 long_conv_to_u2: dest:i src1:i len:4 long_conv_to_r8: dest:f src1:i len:8 long_conv_to_r4: dest:f src1:i len:12 loadi8_membase: dest:i src1:b len:12 storei8_membase_imm: dest:b len:20 storei8_membase_reg: dest:b src1:i len:12 long_conv_to_r_un: dest:f src1:i len:8 arm_setfreg_r4: dest:f src1:f len:8 localloc_imm: dest:i len:64 arm64_cbzw: src1:i len:16 arm64_cbzx: src1:i len:16 arm64_cbnzw: src1:i len:16 arm64_cbnzx: src1:i len:16 atomic_add_i4: dest:i src1:i src2:i len:32 atomic_add_i8: dest:i src1:i src2:i len:32 atomic_exchange_i4: dest:i src1:i src2:i len:32 atomic_exchange_i8: dest:i src1:i src2:i len:32 atomic_cas_i4: dest:i src1:i src2:i src3:i len:32 atomic_cas_i8: dest:i src1:i src2:i src3:i len:32 memory_barrier: len:8 clob:a atomic_load_i1: dest:i src1:b len:24 atomic_load_u1: dest:i src1:b len:24 atomic_load_i2: dest:i src1:b len:24 atomic_load_u2: dest:i src1:b len:24 atomic_load_i4: dest:i src1:b len:24 atomic_load_u4: dest:i src1:b len:24 atomic_load_i8: dest:i src1:b len:20 atomic_load_u8: dest:i src1:b len:20 atomic_load_r4: dest:f src1:b len:28 atomic_load_r8: dest:f src1:b len:24 atomic_store_i1: dest:b src1:i len:20 atomic_store_u1: dest:b src1:i len:20 atomic_store_i2: dest:b src1:i len:20 atomic_store_u2: dest:b src1:i len:20 atomic_store_i4: dest:b src1:i len:20 atomic_store_u4: dest:b src1:i len:20 atomic_store_i8: dest:b src1:i len:20 atomic_store_u8: dest:b src1:i len:20 atomic_store_r4: dest:b src1:f len:28 atomic_store_r8: dest:b src1:f len:24 generic_class_init: src1:a len:44 clob:c gc_safe_point: src1:i len:12 clob:c fill_prof_call_ctx: src1:i len:128
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/cpu-mips.md
# mips cpu description file # this file is read by genmdesc to produce a table with all the relevant # information about the cpu instructions that may be used by the register # allocator, the scheduler and other parts of the arch-dependent part of mini. # # An opcode name is followed by a colon and optional specifiers. # A specifier has a name, a colon and a value. # Specifiers are separated by white space. # Here is a description of the specifiers valid for this file and their # possible values. # # dest:register describes the destination register of an instruction # src1:register describes the first source register of an instruction # src2:register describes the second source register of an instruction # # register may have the following values: # i integer register # l integer register pair # v v0 register (output from calls) # V v0/v1 register pair (output from calls) # a at register # b base register (used in address references) # f floating point register (pair - always) # g floating point register return pair (f0/f1) # # len:number describes the maximum length in bytes of the instruction # number is a positive integer # # cost:number describes how many cycles are needed to complete the instruction (unused) # # clob:spec describes if the instruction clobbers registers or has special needs # # spec can be one of the following characters: # c clobbers caller-save registers # r 'reserves' the destination register until a later instruction unreserves it # used mostly to set output registers in function calls # # flags:spec describes if the instruction uses or sets the flags (unused) # # spec can be one of the following chars: # s sets the flags # u uses the flags # m uses and modifies the flags # # res:spec describes what units are used in the processor (unused) # # delay: describes delay slots (unused) # # the required specifiers are: len, clob (if registers are clobbered), the registers # specifiers if the registers are actually used, flags (when scheduling is implemented). # # See the code in mini-x86.c for more details on how the specifiers are used. 
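# As a concrete reading of the grammar above: the entry int_add: dest:i src1:i src2:i len:4 (from the 32 bit opcode section below) declares an opcode named int_add whose result is written to an integer register, whose two operands are read from integer registers, and whose emitted code occupies at most 4 bytes.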
# memory_barrier: len:4 nop: len:4 relaxed_nop: len:4 break: len:16 call: dest:v clob:c len:20 br: len:16 switch: src1:i len:40 seq_point: len:24 il_seq_point: len:0 int_conv_to_r_un: dest:f src1:i len:32 throw: src1:i len:24 rethrow: src1:i len:24 ckfinite: dest:f src1:f len:52 start_handler: len:16 endfinally: len:12 ceq: dest:i len:16 cgt: dest:i len:16 cgt_un: dest:i len:16 clt: dest:i len:16 clt_un: dest:i len:16 localloc: dest:i src1:i len:60 compare: src1:i src2:i len:20 compare_imm: src1:i len:20 fcompare: src1:f src2:f len:12 arglist: src1:i len:12 setlret: src1:i src2:i len:12 check_this: src1:b len:4 voidcall: len:20 clob:c voidcall_reg: src1:i len:20 clob:c voidcall_membase: src1:b len:20 clob:c fcall: dest:g len:20 clob:c fcall_reg: dest:g src1:i len:20 clob:c fcall_membase: dest:g src1:b len:20 clob:c lcall: dest:V len:28 clob:c lcall_reg: dest:V src1:i len:28 clob:c lcall_membase: dest:V src1:b len:28 clob:c call_reg: dest:v src1:i len:20 clob:c call_membase: dest:v src1:b len:20 clob:c vcall: len:16 clob:c vcall_reg: src1:i len:20 clob:c vcall_membase: src1:b len:20 clob:c vcall2: len:16 clob:c vcall2_reg: src1:i len:20 clob:c vcall2_membase: src1:b len:20 clob:c jump_table: dest:i len:8 iconst: dest:i len:12 i8const: dest:l len:24 r4const: dest:f len:20 r8const: dest:f len:28 label: len:0 store_membase_imm: dest:b len:20 store_membase_reg: dest:b src1:i len:20 storei1_membase_imm: dest:b len:20 storei1_membase_reg: dest:b src1:i len:20 storei2_membase_imm: dest:b len:20 storei2_membase_reg: dest:b src1:i len:20 storei4_membase_imm: dest:b len:20 storei4_membase_reg: dest:b src1:i len:20 storei8_membase_imm: dest:b storei8_membase_reg: dest:b src1:i len:20 storer4_membase_reg: dest:b src1:f len:20 storer8_membase_reg: dest:b src1:f len:20 load_membase: dest:i src1:b len:20 loadi1_membase: dest:i src1:b len:20 loadu1_membase: dest:i src1:b len:20 loadi2_membase: dest:i src1:b len:20 loadu2_membase: dest:i src1:b len:20 loadi4_membase: dest:i src1:b len:20 loadu4_membase: dest:i src1:b len:20 loadi8_membase: dest:i src1:b len:20 loadr4_membase: dest:f src1:b len:20 loadr8_membase: dest:f src1:b len:20 load_memindex: dest:i src1:b src2:i len:4 loadi1_memindex: dest:i src1:b src2:i len:12 loadu1_memindex: dest:i src1:b src2:i len:12 loadi2_memindex: dest:i src1:b src2:i len:12 loadu2_memindex: dest:i src1:b src2:i len:12 loadi4_memindex: dest:i src1:b src2:i len:12 loadu4_memindex: dest:i src1:b src2:i len:12 loadr4_memindex: dest:f src1:b src2:i len:12 loadr8_memindex: dest:f src1:b src2:i len:12 store_memindex: dest:b src1:i src2:i len:12 storei1_memindex: dest:b src1:i src2:i len:12 storei2_memindex: dest:b src1:i src2:i len:12 storei4_memindex: dest:b src1:i src2:i len:12 storer4_memindex: dest:b src1:f src2:i len:12 storer8_memindex: dest:b src1:f src2:i len:12 loadu4_mem: dest:i len:8 move: dest:i src1:i len:4 fmove: dest:f src1:f len:8 move_f_to_i4: dest:i src1:f len:4 move_i4_to_f: dest:f src1:i len:4 add_imm: dest:i src1:i len:12 sub_imm: dest:i src1:i len:12 mul_imm: dest:i src1:i len:20 # there is no actual support for division or reminder by immediate # we simulate them, though (but we need to change the burg rules # to allocate a symbolic reg for src2) div_imm: dest:i src1:i src2:i len:20 div_un_imm: dest:i src1:i src2:i len:12 rem_imm: dest:i src1:i src2:i len:28 rem_un_imm: dest:i src1:i src2:i len:16 and_imm: dest:i src1:i len:12 or_imm: dest:i src1:i len:12 xor_imm: dest:i src1:i len:12 shl_imm: dest:i src1:i len:8 shr_imm: dest:i src1:i len:8 shr_un_imm: 
dest:i src1:i len:8 # Linear IR opcodes dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_i8const: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 # 32 bit opcodes int_add: dest:i src1:i src2:i len:4 int_sub: dest:i src1:i src2:i len:4 int_mul: dest:i src1:i src2:i len:16 int_div: dest:i src1:i src2:i len:84 int_div_un: dest:i src1:i src2:i len:40 int_rem: dest:i src1:i src2:i len:84 int_rem_un: dest:i src1:i src2:i len:40 int_and: dest:i src1:i src2:i len:4 int_or: dest:i src1:i src2:i len:4 int_xor: dest:i src1:i src2:i len:4 int_shl: dest:i src1:i src2:i len:4 int_shr: dest:i src1:i src2:i len:4 int_shr_un: dest:i src1:i src2:i len:4 int_neg: dest:i src1:i len:4 int_not: dest:i src1:i len:4 int_conv_to_i1: dest:i src1:i len:8 int_conv_to_i2: dest:i src1:i len:8 int_conv_to_i4: dest:i src1:i len:4 int_conv_to_r4: dest:f src1:i len:36 int_conv_to_r8: dest:f src1:i len:36 int_conv_to_u4: dest:i src1:i int_conv_to_u2: dest:i src1:i len:8 int_conv_to_u1: dest:i src1:i len:4 int_beq: len:8 int_bge: len:8 int_bgt: len:8 int_ble: len:8 int_blt: len:8 int_bne_un: len:8 int_bge_un: len:8 int_bgt_un: len:8 int_ble_un: len:8 int_blt_un: len:8 int_add_ovf: dest:i src1:i src2:i len:16 int_add_ovf_un: dest:i src1:i src2:i len:16 int_mul_ovf: dest:i src1:i src2:i len:56 int_mul_ovf_un: dest:i src1:i src2:i len:56 int_sub_ovf: dest:i src1:i src2:i len:16 int_sub_ovf_un: dest:i src1:i src2:i len:16 int_adc: dest:i src1:i src2:i len:4 int_addcc: dest:i src1:i src2:i len:4 int_subcc: dest:i src1:i src2:i len:4 int_sbb: dest:i src1:i src2:i len:4 int_adc_imm: dest:i src1:i len:12 int_sbb_imm: dest:i src1:i len:12 int_add_imm: dest:i src1:i len:12 int_sub_imm: dest:i src1:i len:12 int_mul_imm: dest:i src1:i len:12 int_div_imm: dest:i src1:i len:20 int_div_un_imm: dest:i src1:i len:12 int_rem_imm: dest:i src1:i len:28 int_rem_un_imm: dest:i src1:i len:16 int_and_imm: dest:i src1:i len:12 int_or_imm: dest:i src1:i len:12 int_xor_imm: dest:i src1:i len:12 int_shl_imm: dest:i src1:i len:8 int_shr_imm: dest:i src1:i len:8 int_shr_un_imm: dest:i src1:i len:8 int_ceq: dest:i len:16 int_cgt: dest:i len:16 int_cgt_un: dest:i len:16 int_clt: dest:i len:16 int_clt_un: dest:i len:16 cond_exc_eq: len:32 cond_exc_ne_un: len:32 cond_exc_lt: len:32 cond_exc_lt_un: len:32 cond_exc_gt: len:32 cond_exc_gt_un: len:32 cond_exc_ge: len:32 cond_exc_ge_un: len:32 cond_exc_le: len:32 cond_exc_le_un: len:32 cond_exc_ov: len:32 cond_exc_no: len:32 cond_exc_c: len:32 cond_exc_nc: len:32 cond_exc_ieq: len:32 cond_exc_ine_un: len:32 cond_exc_ilt: len:32 cond_exc_ilt_un: len:32 cond_exc_igt: len:32 cond_exc_igt_un: len:32 cond_exc_ige: len:32 cond_exc_ige_un: len:32 cond_exc_ile: len:32 cond_exc_ile_un: len:32 cond_exc_iov: len:12 cond_exc_ino: len:32 cond_exc_ic: len:12 cond_exc_inc: len:32 icompare: src1:i src2:i len:4 icompare_imm: src1:i len:12 # 64 bit opcodes long_add: dest:i src1:i src2:i len:4 long_sub: dest:i src1:i src2:i len:4 long_mul: dest:i src1:i src2:i len:32 long_mul_imm: dest:i src1:i len:4 long_div: dest:i src1:i src2:i len:40 long_div_un: dest:i src1:i src2:i len:16 long_rem: dest:i src1:i src2:i len:48 long_rem_un: dest:i src1:i src2:i len:24 long_and: dest:i src1:i src2:i len:4 long_or: dest:i src1:i src2:i len:4 long_xor: dest:i src1:i src2:i len:4 long_shl: dest:i src1:i src2:i len:4 long_shl_imm: dest:i src1:i len:4 long_shr: dest:i src1:i src2:i len:4 long_shr_un: dest:i src1:i src2:i len:4 long_shr_imm: dest:i src1:i len:4 long_shr_un_imm: 
dest:i src1:i len:4 long_neg: dest:i src1:i len:4 long_not: dest:i src1:i len:4 long_conv_to_i1: dest:i src1:l len:32 long_conv_to_i2: dest:i src1:l len:32 long_conv_to_i4: dest:i src1:l len:32 long_conv_to_r4: dest:f src1:l len:32 long_conv_to_r8: dest:f src1:l len:32 long_conv_to_u4: dest:i src1:l len:32 long_conv_to_u8: dest:l src1:l len:32 long_conv_to_u2: dest:i src1:l len:32 long_conv_to_u1: dest:i src1:l len:32 long_conv_to_i: dest:i src1:l len:32 long_conv_to_ovf_i: dest:i src1:i src2:i len:32 long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:32 zext_i4: dest:i src1:i len:16 sext_i4: dest:i src1:i len:16 long_beq: len:8 long_bge: len:8 long_bgt: len:8 long_ble: len:8 long_blt: len:8 long_bne_un: len:8 long_bge_un: len:8 long_bgt_un: len:8 long_ble_un: len:8 long_blt_un: len:8 long_add_ovf: dest:i src1:i src2:i len:16 long_add_ovf_un: dest:i src1:i src2:i len:16 long_mul_ovf: dest:i src1:i src2:i len:16 long_mul_ovf_un: dest:i src1:i src2:i len:16 long_sub_ovf: dest:i src1:i src2:i len:16 long_sub_ovf_un: dest:i src1:i src2:i len:16 long_ceq: dest:i len:12 long_cgt: dest:i len:12 long_cgt_un: dest:i len:12 long_clt: dest:i len:12 long_clt_un: dest:i len:12 long_add_imm: dest:i src1:i clob:1 len:4 long_sub_imm: dest:i src1:i clob:1 len:4 long_and_imm: dest:i src1:i clob:1 len:4 long_or_imm: dest:i src1:i clob:1 len:4 long_xor_imm: dest:i src1:i clob:1 len:4 lcompare: src1:i src2:i len:4 lcompare_imm: src1:i len:12 long_conv_to_r_un: dest:f src1:i src2:i len:37 float_beq: len:16 float_bne_un: len:16 float_blt: len:16 float_blt_un: len:16 float_bgt: len:16 float_bgt_un: len:16 float_bge: len:16 float_bge_un: len:16 float_ble: len:16 float_ble_un: len:16 float_add: dest:f src1:f src2:f len:4 float_sub: dest:f src1:f src2:f len:4 float_mul: dest:f src1:f src2:f len:4 float_div: dest:f src1:f src2:f len:4 float_div_un: dest:f src1:f src2:f len:4 float_rem: dest:f src1:f src2:f len:16 float_rem_un: dest:f src1:f src2:f len:16 float_neg: dest:f src1:f len:4 float_not: dest:f src1:f len:4 float_conv_to_i1: dest:i src1:f len:40 float_conv_to_i2: dest:i src1:f len:40 float_conv_to_i4: dest:i src1:f len:40 float_conv_to_i8: dest:l src1:f len:40 float_conv_to_r4: dest:f src1:f len:8 float_conv_to_u4: dest:i src1:f len:40 float_conv_to_u8: dest:l src1:f len:40 float_conv_to_u2: dest:i src1:f len:40 float_conv_to_u1: dest:i src1:f len:40 float_conv_to_i: dest:i src1:f len:40 float_ceq: dest:i src1:f src2:f len:20 float_cgt: dest:i src1:f src2:f len:20 float_cgt_un: dest:i src1:f src2:f len:20 float_clt: dest:i src1:f src2:f len:20 float_clt_un: dest:i src1:f src2:f len:20 call_handler: len:20 clob:c endfilter: src1:i len:16 aotconst: dest:i len:8 sqrt: dest:f src1:f len:4 adc: dest:i src1:i src2:i len:4 addcc: dest:i src1:i src2:i len:4 subcc: dest:i src1:i src2:i len:4 adc_imm: dest:i src1:i len:12 addcc_imm: dest:i src1:i len:12 subcc_imm: dest:i src1:i len:12 sbb: dest:i src1:i src2:i len:4 sbb_imm: dest:i src1:i len:12 br_reg: src1:i len:8 #ppc_subfic: dest:i src1:i len:4 #ppc_subfze: dest:i src1:i len:4 bigmul: len:52 dest:l src1:i src2:i bigmul_un: len:52 dest:l src1:i src2:i mips_beq: src1:i src2:i len:24 mips_bgez: src1:i len:24 mips_bgtz: src1:i len:24 mips_blez: src1:i len:24 mips_bltz: src1:i len:24 mips_bne: src1:i src2:i len:24 mips_cvtsd: dest:f src1:f len:8 mips_fbeq: src1:f src2:f len:16 mips_fbge: src1:f src2:f len:32 mips_fbge_un: src1:f src2:f len:16 mips_fbgt: src1:f src2:f len:32 mips_fbgt_un: src1:f src2:f len:16 mips_fble: src1:f src2:f len:32 mips_fble_un: src1:f src2:f 
len:16 mips_fblt: src1:f src2:f len:32 mips_fblt_un: src1:f src2:f len:16 mips_fbne: src1:f src2:f len:16 mips_lwc1: dest:f src1:b len:16 mips_mtc1_s: dest:f src1:i len:8 mips_mtc1_s2: dest:f src1:i src2:i len:8 mips_mfc1_s: dest:i src1:f len:8 mips_mtc1_d: dest:f src1:i len:8 mips_mfc1_d: dest:i src1:f len:8 mips_slti: dest:i src1:i len:4 mips_slt: dest:i src1:i src2:i len:4 mips_sltiu: dest:i src1:i len:4 mips_sltu: dest:i src1:i src2:i len:4 mips_cond_exc_eq: src1:i src2:i len:44 mips_cond_exc_ge: src1:i src2:i len:44 mips_cond_exc_gt: src1:i src2:i len:44 mips_cond_exc_le: src1:i src2:i len:44 mips_cond_exc_lt: src1:i src2:i len:44 mips_cond_exc_ne_un: src1:i src2:i len:44 mips_cond_exc_ge_un: src1:i src2:i len:44 mips_cond_exc_gt_un: src1:i src2:i len:44 mips_cond_exc_le_un: src1:i src2:i len:44 mips_cond_exc_lt_un: src1:i src2:i len:44 mips_cond_exc_ov: src1:i src2:i len:44 mips_cond_exc_no: src1:i src2:i len:44 mips_cond_exc_c: src1:i src2:i len:44 mips_cond_exc_nc: src1:i src2:i len:44 mips_cond_exc_ieq: src1:i src2:i len:44 mips_cond_exc_ige: src1:i src2:i len:44 mips_cond_exc_igt: src1:i src2:i len:44 mips_cond_exc_ile: src1:i src2:i len:44 mips_cond_exc_ilt: src1:i src2:i len:44 mips_cond_exc_ine_un: src1:i src2:i len:44 mips_cond_exc_ige_un: src1:i src2:i len:44 mips_cond_exc_igt_un: src1:i src2:i len:44 mips_cond_exc_ile_un: src1:i src2:i len:44 mips_cond_exc_ilt_un: src1:i src2:i len:44 mips_cond_exc_iov: src1:i src2:i len:44 mips_cond_exc_ino: src1:i src2:i len:44 mips_cond_exc_ic: src1:i src2:i len:44 mips_cond_exc_inc: src1:i src2:i len:44 liverange_start: len:0 liverange_end: len:0 gc_safe_point: len:0
# mips cpu description file # this file is read by genmdesc to produce a table with all the relevant # information about the cpu instructions that may be used by the register # allocator, the scheduler and other parts of the arch-dependent part of mini. # # An opcode name is followed by a colon and optional specifiers. # A specifier has a name, a colon and a value. # Specifiers are separated by white space. # Here is a description of the specifiers valid for this file and their # possible values. # # dest:register describes the destination register of an instruction # src1:register describes the first source register of an instruction # src2:register describes the second source register of an instruction # # register may have the following values: # i integer register # l integer register pair # v v0 register (output from calls) # V v0/v1 register pair (output from calls) # a at register # b base register (used in address references) # f floating point register (pair - always) # g floating point register return pair (f0/f1) # # len:number describes the maximum length in bytes of the instruction # number is a positive integer # # cost:number describes how many cycles are needed to complete the instruction (unused) # # clob:spec describes if the instruction clobbers registers or has special needs # # spec can be one of the following characters: # c clobbers caller-save registers # r 'reserves' the destination register until a later instruction unreserves it # used mostly to set output registers in function calls # # flags:spec describes if the instruction uses or sets the flags (unused) # # spec can be one of the following chars: # s sets the flags # u uses the flags # m uses and modifies the flags # # res:spec describes what units are used in the processor (unused) # # delay: describes delay slots (unused) # # the required specifiers are: len, clob (if registers are clobbered), the registers # specifiers if the registers are actually used, flags (when scheduling is implemented). # # See the code in mini-x86.c for more details on how the specifiers are used. 
# memory_barrier: len:4 nop: len:4 relaxed_nop: len:4 break: len:16 call: dest:v clob:c len:20 br: len:16 switch: src1:i len:40 seq_point: len:24 il_seq_point: len:0 int_conv_to_r_un: dest:f src1:i len:32 throw: src1:i len:24 rethrow: src1:i len:24 ckfinite: dest:f src1:f len:52 start_handler: len:16 endfinally: len:12 ceq: dest:i len:16 cgt: dest:i len:16 cgt_un: dest:i len:16 clt: dest:i len:16 clt_un: dest:i len:16 localloc: dest:i src1:i len:60 compare: src1:i src2:i len:20 compare_imm: src1:i len:20 fcompare: src1:f src2:f len:12 arglist: src1:i len:12 setlret: src1:i src2:i len:12 check_this: src1:b len:4 voidcall: len:20 clob:c voidcall_reg: src1:i len:20 clob:c voidcall_membase: src1:b len:20 clob:c fcall: dest:g len:20 clob:c fcall_reg: dest:g src1:i len:20 clob:c fcall_membase: dest:g src1:b len:20 clob:c lcall: dest:V len:28 clob:c lcall_reg: dest:V src1:i len:28 clob:c lcall_membase: dest:V src1:b len:28 clob:c call_reg: dest:v src1:i len:20 clob:c call_membase: dest:v src1:b len:20 clob:c vcall: len:16 clob:c vcall_reg: src1:i len:20 clob:c vcall_membase: src1:b len:20 clob:c vcall2: len:16 clob:c vcall2_reg: src1:i len:20 clob:c vcall2_membase: src1:b len:20 clob:c jump_table: dest:i len:8 iconst: dest:i len:12 i8const: dest:l len:24 r4const: dest:f len:20 r8const: dest:f len:28 label: len:0 store_membase_imm: dest:b len:20 store_membase_reg: dest:b src1:i len:20 storei1_membase_imm: dest:b len:20 storei1_membase_reg: dest:b src1:i len:20 storei2_membase_imm: dest:b len:20 storei2_membase_reg: dest:b src1:i len:20 storei4_membase_imm: dest:b len:20 storei4_membase_reg: dest:b src1:i len:20 storei8_membase_imm: dest:b storei8_membase_reg: dest:b src1:i len:20 storer4_membase_reg: dest:b src1:f len:20 storer8_membase_reg: dest:b src1:f len:20 load_membase: dest:i src1:b len:20 loadi1_membase: dest:i src1:b len:20 loadu1_membase: dest:i src1:b len:20 loadi2_membase: dest:i src1:b len:20 loadu2_membase: dest:i src1:b len:20 loadi4_membase: dest:i src1:b len:20 loadu4_membase: dest:i src1:b len:20 loadi8_membase: dest:i src1:b len:20 loadr4_membase: dest:f src1:b len:20 loadr8_membase: dest:f src1:b len:20 load_memindex: dest:i src1:b src2:i len:4 loadi1_memindex: dest:i src1:b src2:i len:12 loadu1_memindex: dest:i src1:b src2:i len:12 loadi2_memindex: dest:i src1:b src2:i len:12 loadu2_memindex: dest:i src1:b src2:i len:12 loadi4_memindex: dest:i src1:b src2:i len:12 loadu4_memindex: dest:i src1:b src2:i len:12 loadr4_memindex: dest:f src1:b src2:i len:12 loadr8_memindex: dest:f src1:b src2:i len:12 store_memindex: dest:b src1:i src2:i len:12 storei1_memindex: dest:b src1:i src2:i len:12 storei2_memindex: dest:b src1:i src2:i len:12 storei4_memindex: dest:b src1:i src2:i len:12 storer4_memindex: dest:b src1:f src2:i len:12 storer8_memindex: dest:b src1:f src2:i len:12 loadu4_mem: dest:i len:8 move: dest:i src1:i len:4 fmove: dest:f src1:f len:8 move_f_to_i4: dest:i src1:f len:4 move_i4_to_f: dest:f src1:i len:4 add_imm: dest:i src1:i len:12 sub_imm: dest:i src1:i len:12 mul_imm: dest:i src1:i len:20 # there is no actual support for division or reminder by immediate # we simulate them, though (but we need to change the burg rules # to allocate a symbolic reg for src2) div_imm: dest:i src1:i src2:i len:20 div_un_imm: dest:i src1:i src2:i len:12 rem_imm: dest:i src1:i src2:i len:28 rem_un_imm: dest:i src1:i src2:i len:16 and_imm: dest:i src1:i len:12 or_imm: dest:i src1:i len:12 xor_imm: dest:i src1:i len:12 shl_imm: dest:i src1:i len:8 shr_imm: dest:i src1:i len:8 shr_un_imm: 
dest:i src1:i len:8 # Linear IR opcodes dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_i8const: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 # 32 bit opcodes int_add: dest:i src1:i src2:i len:4 int_sub: dest:i src1:i src2:i len:4 int_mul: dest:i src1:i src2:i len:16 int_div: dest:i src1:i src2:i len:84 int_div_un: dest:i src1:i src2:i len:40 int_rem: dest:i src1:i src2:i len:84 int_rem_un: dest:i src1:i src2:i len:40 int_and: dest:i src1:i src2:i len:4 int_or: dest:i src1:i src2:i len:4 int_xor: dest:i src1:i src2:i len:4 int_shl: dest:i src1:i src2:i len:4 int_shr: dest:i src1:i src2:i len:4 int_shr_un: dest:i src1:i src2:i len:4 int_neg: dest:i src1:i len:4 int_not: dest:i src1:i len:4 int_conv_to_i1: dest:i src1:i len:8 int_conv_to_i2: dest:i src1:i len:8 int_conv_to_i4: dest:i src1:i len:4 int_conv_to_r4: dest:f src1:i len:36 int_conv_to_r8: dest:f src1:i len:36 int_conv_to_u4: dest:i src1:i int_conv_to_u2: dest:i src1:i len:8 int_conv_to_u1: dest:i src1:i len:4 int_beq: len:8 int_bge: len:8 int_bgt: len:8 int_ble: len:8 int_blt: len:8 int_bne_un: len:8 int_bge_un: len:8 int_bgt_un: len:8 int_ble_un: len:8 int_blt_un: len:8 int_add_ovf: dest:i src1:i src2:i len:16 int_add_ovf_un: dest:i src1:i src2:i len:16 int_mul_ovf: dest:i src1:i src2:i len:56 int_mul_ovf_un: dest:i src1:i src2:i len:56 int_sub_ovf: dest:i src1:i src2:i len:16 int_sub_ovf_un: dest:i src1:i src2:i len:16 int_adc: dest:i src1:i src2:i len:4 int_addcc: dest:i src1:i src2:i len:4 int_subcc: dest:i src1:i src2:i len:4 int_sbb: dest:i src1:i src2:i len:4 int_adc_imm: dest:i src1:i len:12 int_sbb_imm: dest:i src1:i len:12 int_add_imm: dest:i src1:i len:12 int_sub_imm: dest:i src1:i len:12 int_mul_imm: dest:i src1:i len:12 int_div_imm: dest:i src1:i len:20 int_div_un_imm: dest:i src1:i len:12 int_rem_imm: dest:i src1:i len:28 int_rem_un_imm: dest:i src1:i len:16 int_and_imm: dest:i src1:i len:12 int_or_imm: dest:i src1:i len:12 int_xor_imm: dest:i src1:i len:12 int_shl_imm: dest:i src1:i len:8 int_shr_imm: dest:i src1:i len:8 int_shr_un_imm: dest:i src1:i len:8 int_ceq: dest:i len:16 int_cgt: dest:i len:16 int_cgt_un: dest:i len:16 int_clt: dest:i len:16 int_clt_un: dest:i len:16 cond_exc_eq: len:32 cond_exc_ne_un: len:32 cond_exc_lt: len:32 cond_exc_lt_un: len:32 cond_exc_gt: len:32 cond_exc_gt_un: len:32 cond_exc_ge: len:32 cond_exc_ge_un: len:32 cond_exc_le: len:32 cond_exc_le_un: len:32 cond_exc_ov: len:32 cond_exc_no: len:32 cond_exc_c: len:32 cond_exc_nc: len:32 cond_exc_ieq: len:32 cond_exc_ine_un: len:32 cond_exc_ilt: len:32 cond_exc_ilt_un: len:32 cond_exc_igt: len:32 cond_exc_igt_un: len:32 cond_exc_ige: len:32 cond_exc_ige_un: len:32 cond_exc_ile: len:32 cond_exc_ile_un: len:32 cond_exc_iov: len:12 cond_exc_ino: len:32 cond_exc_ic: len:12 cond_exc_inc: len:32 icompare: src1:i src2:i len:4 icompare_imm: src1:i len:12 # 64 bit opcodes long_add: dest:i src1:i src2:i len:4 long_sub: dest:i src1:i src2:i len:4 long_mul: dest:i src1:i src2:i len:32 long_mul_imm: dest:i src1:i len:4 long_div: dest:i src1:i src2:i len:40 long_div_un: dest:i src1:i src2:i len:16 long_rem: dest:i src1:i src2:i len:48 long_rem_un: dest:i src1:i src2:i len:24 long_and: dest:i src1:i src2:i len:4 long_or: dest:i src1:i src2:i len:4 long_xor: dest:i src1:i src2:i len:4 long_shl: dest:i src1:i src2:i len:4 long_shl_imm: dest:i src1:i len:4 long_shr: dest:i src1:i src2:i len:4 long_shr_un: dest:i src1:i src2:i len:4 long_shr_imm: dest:i src1:i len:4 long_shr_un_imm: 
dest:i src1:i len:4 long_neg: dest:i src1:i len:4 long_not: dest:i src1:i len:4 long_conv_to_i1: dest:i src1:l len:32 long_conv_to_i2: dest:i src1:l len:32 long_conv_to_i4: dest:i src1:l len:32 long_conv_to_r4: dest:f src1:l len:32 long_conv_to_r8: dest:f src1:l len:32 long_conv_to_u4: dest:i src1:l len:32 long_conv_to_u8: dest:l src1:l len:32 long_conv_to_u2: dest:i src1:l len:32 long_conv_to_u1: dest:i src1:l len:32 long_conv_to_i: dest:i src1:l len:32 long_conv_to_ovf_i: dest:i src1:i src2:i len:32 long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:32 zext_i4: dest:i src1:i len:16 sext_i4: dest:i src1:i len:16 long_beq: len:8 long_bge: len:8 long_bgt: len:8 long_ble: len:8 long_blt: len:8 long_bne_un: len:8 long_bge_un: len:8 long_bgt_un: len:8 long_ble_un: len:8 long_blt_un: len:8 long_add_ovf: dest:i src1:i src2:i len:16 long_add_ovf_un: dest:i src1:i src2:i len:16 long_mul_ovf: dest:i src1:i src2:i len:16 long_mul_ovf_un: dest:i src1:i src2:i len:16 long_sub_ovf: dest:i src1:i src2:i len:16 long_sub_ovf_un: dest:i src1:i src2:i len:16 long_ceq: dest:i len:12 long_cgt: dest:i len:12 long_cgt_un: dest:i len:12 long_clt: dest:i len:12 long_clt_un: dest:i len:12 long_add_imm: dest:i src1:i clob:1 len:4 long_sub_imm: dest:i src1:i clob:1 len:4 long_and_imm: dest:i src1:i clob:1 len:4 long_or_imm: dest:i src1:i clob:1 len:4 long_xor_imm: dest:i src1:i clob:1 len:4 lcompare: src1:i src2:i len:4 lcompare_imm: src1:i len:12 long_conv_to_r_un: dest:f src1:i src2:i len:37 float_beq: len:16 float_bne_un: len:16 float_blt: len:16 float_blt_un: len:16 float_bgt: len:16 float_bgt_un: len:16 float_bge: len:16 float_bge_un: len:16 float_ble: len:16 float_ble_un: len:16 float_add: dest:f src1:f src2:f len:4 float_sub: dest:f src1:f src2:f len:4 float_mul: dest:f src1:f src2:f len:4 float_div: dest:f src1:f src2:f len:4 float_div_un: dest:f src1:f src2:f len:4 float_rem: dest:f src1:f src2:f len:16 float_rem_un: dest:f src1:f src2:f len:16 float_neg: dest:f src1:f len:4 float_not: dest:f src1:f len:4 float_conv_to_i1: dest:i src1:f len:40 float_conv_to_i2: dest:i src1:f len:40 float_conv_to_i4: dest:i src1:f len:40 float_conv_to_i8: dest:l src1:f len:40 float_conv_to_r4: dest:f src1:f len:8 float_conv_to_u4: dest:i src1:f len:40 float_conv_to_u8: dest:l src1:f len:40 float_conv_to_u2: dest:i src1:f len:40 float_conv_to_u1: dest:i src1:f len:40 float_ceq: dest:i src1:f src2:f len:20 float_cgt: dest:i src1:f src2:f len:20 float_cgt_un: dest:i src1:f src2:f len:20 float_clt: dest:i src1:f src2:f len:20 float_clt_un: dest:i src1:f src2:f len:20 call_handler: len:20 clob:c endfilter: src1:i len:16 aotconst: dest:i len:8 sqrt: dest:f src1:f len:4 adc: dest:i src1:i src2:i len:4 addcc: dest:i src1:i src2:i len:4 subcc: dest:i src1:i src2:i len:4 adc_imm: dest:i src1:i len:12 addcc_imm: dest:i src1:i len:12 subcc_imm: dest:i src1:i len:12 sbb: dest:i src1:i src2:i len:4 sbb_imm: dest:i src1:i len:12 br_reg: src1:i len:8 #ppc_subfic: dest:i src1:i len:4 #ppc_subfze: dest:i src1:i len:4 bigmul: len:52 dest:l src1:i src2:i bigmul_un: len:52 dest:l src1:i src2:i mips_beq: src1:i src2:i len:24 mips_bgez: src1:i len:24 mips_bgtz: src1:i len:24 mips_blez: src1:i len:24 mips_bltz: src1:i len:24 mips_bne: src1:i src2:i len:24 mips_cvtsd: dest:f src1:f len:8 mips_fbeq: src1:f src2:f len:16 mips_fbge: src1:f src2:f len:32 mips_fbge_un: src1:f src2:f len:16 mips_fbgt: src1:f src2:f len:32 mips_fbgt_un: src1:f src2:f len:16 mips_fble: src1:f src2:f len:32 mips_fble_un: src1:f src2:f len:16 mips_fblt: src1:f src2:f len:32 
mips_fblt_un: src1:f src2:f len:16 mips_fbne: src1:f src2:f len:16 mips_lwc1: dest:f src1:b len:16 mips_mtc1_s: dest:f src1:i len:8 mips_mtc1_s2: dest:f src1:i src2:i len:8 mips_mfc1_s: dest:i src1:f len:8 mips_mtc1_d: dest:f src1:i len:8 mips_mfc1_d: dest:i src1:f len:8 mips_slti: dest:i src1:i len:4 mips_slt: dest:i src1:i src2:i len:4 mips_sltiu: dest:i src1:i len:4 mips_sltu: dest:i src1:i src2:i len:4 mips_cond_exc_eq: src1:i src2:i len:44 mips_cond_exc_ge: src1:i src2:i len:44 mips_cond_exc_gt: src1:i src2:i len:44 mips_cond_exc_le: src1:i src2:i len:44 mips_cond_exc_lt: src1:i src2:i len:44 mips_cond_exc_ne_un: src1:i src2:i len:44 mips_cond_exc_ge_un: src1:i src2:i len:44 mips_cond_exc_gt_un: src1:i src2:i len:44 mips_cond_exc_le_un: src1:i src2:i len:44 mips_cond_exc_lt_un: src1:i src2:i len:44 mips_cond_exc_ov: src1:i src2:i len:44 mips_cond_exc_no: src1:i src2:i len:44 mips_cond_exc_c: src1:i src2:i len:44 mips_cond_exc_nc: src1:i src2:i len:44 mips_cond_exc_ieq: src1:i src2:i len:44 mips_cond_exc_ige: src1:i src2:i len:44 mips_cond_exc_igt: src1:i src2:i len:44 mips_cond_exc_ile: src1:i src2:i len:44 mips_cond_exc_ilt: src1:i src2:i len:44 mips_cond_exc_ine_un: src1:i src2:i len:44 mips_cond_exc_ige_un: src1:i src2:i len:44 mips_cond_exc_igt_un: src1:i src2:i len:44 mips_cond_exc_ile_un: src1:i src2:i len:44 mips_cond_exc_ilt_un: src1:i src2:i len:44 mips_cond_exc_iov: src1:i src2:i len:44 mips_cond_exc_ino: src1:i src2:i len:44 mips_cond_exc_ic: src1:i src2:i len:44 mips_cond_exc_inc: src1:i src2:i len:44 liverange_start: len:0 liverange_end: len:0 gc_safe_point: len:0
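The specifier grammar spelled out in the header comments of these description files is simple enough to decode mechanically. Below is a minimal illustrative sketch in Python; it is not the genmdesc tool itself (that lives alongside these files in the mono build), just a reading aid for entries like the ones in this record, assuming one already-flattened entry per call:

def parse_mdesc_entry(line):
    # Drop any trailing '#' comment, as in the description files themselves.
    line = line.split("#", 1)[0].strip()
    if not line:
        return None
    # The opcode name ends at the first colon; what follows is a
    # whitespace-separated list of name:value specifiers.
    name, _, rest = line.partition(":")
    specs = {}
    for token in rest.split():
        key, _, value = token.partition(":")
        specs[key] = value
    return name.strip(), specs

# int_add on mips: integer destination, two integer sources,
# at most 4 bytes of generated code.
print(parse_mdesc_entry("int_add: dest:i src1:i src2:i len:4"))
# ('int_add', {'dest': 'i', 'src1': 'i', 'src2': 'i', 'len': '4'})

Entries such as bigmul: len:52 dest:l src1:i src2:i parse the same way, since specifier order does not appear to be significant in these files.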
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/cpu-ppc.md
# powerpc cpu description file # this file is read by genmdesc to produce a table with all the relevant information # about the cpu instructions that may be used by the register allocator, the scheduler # and other parts of the arch-dependent part of mini. # # An opcode name is followed by a colon and optional specifiers. # A specifier has a name, a colon and a value. Specifiers are separated by white space. # Here is a description of the specifiers valid for this file and their possible values. # # dest:register describes the destination register of an instruction # src1:register describes the first source register of an instruction # src2:register describes the second source register of an instruction # # register may have the following values: # i integer register # a r3 register (output from calls) # b base register (used in address references) # f floating point register # # len:number describes the maximum length in bytes of the instruction # number is a positive integer # # cost:number describes how many cycles are needed to complete the instruction (unused) # # clob:spec describes if the instruction clobbers registers or has special needs # # spec can be one of the following characters: # c clobbers caller-save registers # r 'reserves' the destination register until a later instruction unreserves it # used mostly to set output registers in function calls # # flags:spec describes if the instruction uses or sets the flags (unused) # # spec can be one of the following chars: # s sets the flags # u uses the flags # m uses and modifies the flags # # res:spec describes what units are used in the processor (unused) # # delay: describes delay slots (unused) # # the required specifiers are: len, clob (if registers are clobbered), the registers # specifiers if the registers are actually used, flags (when scheduling is implemented). # # See the code in mini-x86.c for more details on how the specifiers are used. # memory_barrier: len:4 nop: len:4 relaxed_nop: len:4 break: len:32 seq_point: len:24 il_seq_point: len:0 tailcall: len:120 clob:c # PowerPC outputs a nice fixed size memcpy loop for larger stack_usage, so 0. 
tailcall_parameter: len:0 call: dest:a clob:c len:16 br: len:4 throw: src1:i len:20 rethrow: src1:i len:20 ckfinite: dest:f src1:f ppc_check_finite: src1:i len:16 add_ovf_carry: dest:i src1:i src2:i len:16 sub_ovf_carry: dest:i src1:i src2:i len:16 add_ovf_un_carry: dest:i src1:i src2:i len:16 sub_ovf_un_carry: dest:i src1:i src2:i len:16 start_handler: len:32 endfinally: len:28 ceq: dest:i len:12 cgt: dest:i len:12 cgt_un: dest:i len:12 clt: dest:i len:12 clt_un: dest:i len:12 localloc: dest:i src1:i len:60 compare: src1:i src2:i len:4 compare_imm: src1:i len:12 fcompare: src1:f src2:f len:12 arglist: src1:i len:12 setlret: src1:i src2:i len:12 check_this: src1:b len:4 voidcall: len:16 clob:c voidcall_reg: src1:i len:16 clob:c voidcall_membase: src1:b len:16 clob:c fcall: dest:g len:16 clob:c fcall_reg: dest:g src1:i len:16 clob:c fcall_membase: dest:g src1:b len:16 clob:c lcall: dest:l len:16 clob:c lcall_reg: dest:l src1:i len:16 clob:c lcall_membase: dest:l src1:b len:16 clob:c vcall: len:16 clob:c vcall_reg: src1:i len:16 clob:c vcall_membase: src1:b len:16 clob:c call_reg: dest:a src1:i len:16 clob:c call_membase: dest:a src1:b len:16 clob:c iconst: dest:i len:8 r4const: dest:f len:12 r8const: dest:f len:24 label: len:0 store_membase_reg: dest:b src1:i len:12 storei1_membase_reg: dest:b src1:i len:12 storei2_membase_reg: dest:b src1:i len:12 storei4_membase_reg: dest:b src1:i len:12 storer4_membase_reg: dest:b src1:f len:16 storer8_membase_reg: dest:b src1:f len:12 load_membase: dest:i src1:b len:12 loadi1_membase: dest:i src1:b len:16 loadu1_membase: dest:i src1:b len:12 loadi2_membase: dest:i src1:b len:12 loadu2_membase: dest:i src1:b len:12 loadi4_membase: dest:i src1:b len:12 loadu4_membase: dest:i src1:b len:12 loadr4_membase: dest:f src1:b len:12 loadr8_membase: dest:f src1:b len:12 load_memindex: dest:i src1:b src2:i len:4 loadi1_memindex: dest:i src1:b src2:i len:8 loadu1_memindex: dest:i src1:b src2:i len:4 loadi2_memindex: dest:i src1:b src2:i len:4 loadu2_memindex: dest:i src1:b src2:i len:4 loadi4_memindex: dest:i src1:b src2:i len:4 loadu4_memindex: dest:i src1:b src2:i len:4 loadr4_memindex: dest:f src1:b src2:i len:4 loadr8_memindex: dest:f src1:b src2:i len:4 store_memindex: dest:b src1:i src2:i len:4 storei1_memindex: dest:b src1:i src2:i len:4 storei2_memindex: dest:b src1:i src2:i len:4 storei4_memindex: dest:b src1:i src2:i len:4 storer4_memindex: dest:b src1:i src2:i len:8 storer8_memindex: dest:b src1:i src2:i len:4 loadu4_mem: dest:i len:8 move: dest:i src1:i len:4 fmove: dest:f src1:f len:4 move_f_to_i4: dest:i src1:f len:8 move_i4_to_f: dest:f src1:i len:8 add_imm: dest:i src1:i len:4 sub_imm: dest:i src1:i len:4 mul_imm: dest:i src1:i len:4 # there is no actual support for division or reminder by immediate # we simulate them, though (but we need to change the burg rules # to allocate a symbolic reg for src2) div_imm: dest:i src1:i src2:i len:20 div_un_imm: dest:i src1:i src2:i len:12 rem_imm: dest:i src1:i src2:i len:28 rem_un_imm: dest:i src1:i src2:i len:16 and_imm: dest:i src1:i len:4 or_imm: dest:i src1:i len:4 xor_imm: dest:i src1:i len:4 shl_imm: dest:i src1:i len:4 shr_imm: dest:i src1:i len:4 shr_un_imm: dest:i src1:i len:4 cond_exc_eq: len:8 cond_exc_ne_un: len:8 cond_exc_lt: len:8 cond_exc_lt_un: len:8 cond_exc_gt: len:8 cond_exc_gt_un: len:8 cond_exc_ge: len:8 cond_exc_ge_un: len:8 cond_exc_le: len:8 cond_exc_le_un: len:8 cond_exc_ov: len:12 cond_exc_no: len:8 cond_exc_c: len:12 cond_exc_nc: len:8 long_conv_to_ovf_i: dest:i src1:i src2:i len:32 
long_mul_ovf: long_conv_to_r_un: dest:f src1:i src2:i len:37 float_beq: len:8 float_bne_un: len:8 float_blt: len:8 float_blt_un: len:8 float_bgt: len:8 float_bgt_un: len:8 float_bge: len:8 float_bge_un: len:8 float_ble: len:8 float_ble_un: len:8 float_add: dest:f src1:f src2:f len:4 float_sub: dest:f src1:f src2:f len:4 float_mul: dest:f src1:f src2:f len:4 float_div: dest:f src1:f src2:f len:4 float_div_un: dest:f src1:f src2:f len:4 float_rem: dest:f src1:f src2:f len:16 float_rem_un: dest:f src1:f src2:f len:16 float_neg: dest:f src1:f len:4 float_not: dest:f src1:f len:4 float_conv_to_i1: dest:i src1:f len:40 float_conv_to_i2: dest:i src1:f len:40 float_conv_to_i4: dest:i src1:f len:40 float_conv_to_i8: dest:l src1:f len:40 float_conv_to_r4: dest:f src1:f len:4 float_conv_to_u4: dest:i src1:f len:40 float_conv_to_u8: dest:l src1:f len:40 float_conv_to_u2: dest:i src1:f len:40 float_conv_to_u1: dest:i src1:f len:40 float_conv_to_i: dest:i src1:f len:40 float_ceq: dest:i src1:f src2:f len:16 float_cgt: dest:i src1:f src2:f len:16 float_cgt_un: dest:i src1:f src2:f len:20 float_clt: dest:i src1:f src2:f len:16 float_clt_un: dest:i src1:f src2:f len:20 float_cneq: dest:i src1:f src2:f len:16 float_cge: dest:i src1:f src2:f len:16 float_cle: dest:i src1:f src2:f len:16 call_handler: len:12 clob:c endfilter: src1:i len:32 aotconst: dest:i len:8 load_gotaddr: dest:i len:32 got_entry: dest:i src1:b len:32 abs: dest:f src1:f len:4 sqrt: dest:f src1:f len:4 sqrtf: dest:f src1:f len:4 round: dest:f src1:f len:4 ppc_trunc: dest:f src1:f len:4 ppc_ceil: dest:f src1:f len:4 ppc_floor: dest:f src1:f len:4 adc: dest:i src1:i src2:i len:4 addcc: dest:i src1:i src2:i len:4 subcc: dest:i src1:i src2:i len:4 addcc_imm: dest:i src1:i len:4 sbb: dest:i src1:i src2:i len:4 br_reg: src1:i len:8 ppc_subfic: dest:i src1:i len:4 ppc_subfze: dest:i src1:i len:4 bigmul: len:12 dest:l src1:i src2:i bigmul_un: len:12 dest:l src1:i src2:i # Linear IR opcodes dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 # 32 bit opcodes int_add: dest:i src1:i src2:i len:4 int_sub: dest:i src1:i src2:i len:4 int_mul: dest:i src1:i src2:i len:4 int_div: dest:i src1:i src2:i len:40 int_div_un: dest:i src1:i src2:i len:16 int_rem: dest:i src1:i src2:i len:48 int_rem_un: dest:i src1:i src2:i len:24 int_and: dest:i src1:i src2:i len:4 int_or: dest:i src1:i src2:i len:4 int_xor: dest:i src1:i src2:i len:4 int_shl: dest:i src1:i src2:i len:4 int_shr: dest:i src1:i src2:i len:4 int_shr_un: dest:i src1:i src2:i len:4 int_neg: dest:i src1:i len:4 int_not: dest:i src1:i len:4 int_conv_to_i1: dest:i src1:i len:8 int_conv_to_i2: dest:i src1:i len:8 int_conv_to_i4: dest:i src1:i len:4 int_conv_to_r4: dest:f src1:i len:36 int_conv_to_r8: dest:f src1:i len:36 int_conv_to_u4: dest:i src1:i int_conv_to_u2: dest:i src1:i len:8 int_conv_to_u1: dest:i src1:i len:4 int_beq: len:8 int_bge: len:8 int_bgt: len:8 int_ble: len:8 int_blt: len:8 int_bne_un: len:8 int_bge_un: len:8 int_bgt_un: len:8 int_ble_un: len:8 int_blt_un: len:8 int_add_ovf: dest:i src1:i src2:i len:16 int_add_ovf_un: dest:i src1:i src2:i len:16 int_mul_ovf: dest:i src1:i src2:i len:16 int_mul_ovf_un: dest:i src1:i src2:i len:16 int_sub_ovf: dest:i src1:i src2:i len:16 int_sub_ovf_un: dest:i src1:i src2:i len:16 int_adc: dest:i src1:i src2:i len:4 int_addcc: dest:i src1:i src2:i len:4 int_subcc: dest:i src1:i src2:i len:4 int_sbb: dest:i src1:i src2:i len:4 int_adc_imm: dest:i src1:i len:12 
int_sbb_imm: dest:i src1:i len:12 int_add_imm: dest:i src1:i len:12 int_sub_imm: dest:i src1:i len:12 int_mul_imm: dest:i src1:i len:12 int_div_imm: dest:i src1:i len:20 int_div_un_imm: dest:i src1:i len:12 int_rem_imm: dest:i src1:i len:28 int_rem_un_imm: dest:i src1:i len:16 int_and_imm: dest:i src1:i len:12 int_or_imm: dest:i src1:i len:12 int_xor_imm: dest:i src1:i len:12 int_shl_imm: dest:i src1:i len:8 int_shr_imm: dest:i src1:i len:8 int_shr_un_imm: dest:i src1:i len:8 int_ceq: dest:i len:12 int_cgt: dest:i len:12 int_cgt_un: dest:i len:12 int_clt: dest:i len:12 int_clt_un: dest:i len:12 int_cneq: dest:i len:12 int_cge: dest:i len:12 int_cle: dest:i len:12 int_cge_un: dest:i len:12 int_cle_un: dest:i len:12 cond_exc_ieq: len:8 cond_exc_ine_un: len:8 cond_exc_ilt: len:8 cond_exc_ilt_un: len:8 cond_exc_igt: len:8 cond_exc_igt_un: len:8 cond_exc_ige: len:8 cond_exc_ige_un: len:8 cond_exc_ile: len:8 cond_exc_ile_un: len:8 cond_exc_iov: len:12 cond_exc_ino: len:8 cond_exc_ic: len:12 cond_exc_inc: len:8 icompare: src1:i src2:i len:4 icompare_imm: src1:i len:12 long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:32 # shouldn't use long stuff on ppc32 #long_min: dest:i src1:i src2:i len:8 clob:1 #long_min_un: dest:i src1:i src2:i len:8 clob:1 #long_max: dest:i src1:i src2:i len:8 clob:1 #long_max_un: dest:i src1:i src2:i len:8 clob:1 int_min: dest:i src1:i src2:i len:8 clob:1 int_max: dest:i src1:i src2:i len:8 clob:1 int_min_un: dest:i src1:i src2:i len:8 clob:1 int_max_un: dest:i src1:i src2:i len:8 clob:1 vcall2: len:20 clob:c vcall2_reg: src1:i len:8 clob:c vcall2_membase: src1:b len:16 clob:c jump_table: dest:i len:8 atomic_add_i4: src1:b src2:i dest:i len:28 atomic_cas_i4: src1:b src2:i src3:i dest:i len:38 liverange_start: len:0 liverange_end: len:0 gc_safe_point: len:0
# powerpc cpu description file # this file is read by genmdesc to produce a table with all the relevant information # about the cpu instructions that may be used by the register allocator, the scheduler # and other parts of the arch-dependent part of mini. # # An opcode name is followed by a colon and optional specifiers. # A specifier has a name, a colon and a value. Specifiers are separated by white space. # Here is a description of the specifiers valid for this file and their possible values. # # dest:register describes the destination register of an instruction # src1:register describes the first source register of an instruction # src2:register describes the second source register of an instruction # # register may have the following values: # i integer register # a r3 register (output from calls) # b base register (used in address references) # f floating point register # # len:number describes the maximum length in bytes of the instruction # number is a positive integer # # cost:number describes how many cycles are needed to complete the instruction (unused) # # clob:spec describes if the instruction clobbers registers or has special needs # # spec can be one of the following characters: # c clobbers caller-save registers # r 'reserves' the destination register until a later instruction unreserves it # used mostly to set output registers in function calls # # flags:spec describes if the instruction uses or sets the flags (unused) # # spec can be one of the following chars: # s sets the flags # u uses the flags # m uses and modifies the flags # # res:spec describes what units are used in the processor (unused) # # delay: describes delay slots (unused) # # the required specifiers are: len, clob (if registers are clobbered), the registers # specifiers if the registers are actually used, flags (when scheduling is implemented). # # See the code in mini-x86.c for more details on how the specifiers are used. # memory_barrier: len:4 nop: len:4 relaxed_nop: len:4 break: len:32 seq_point: len:24 il_seq_point: len:0 tailcall: len:120 clob:c # PowerPC outputs a nice fixed size memcpy loop for larger stack_usage, so 0. 
tailcall_parameter: len:0 call: dest:a clob:c len:16 br: len:4 throw: src1:i len:20 rethrow: src1:i len:20 ckfinite: dest:f src1:f ppc_check_finite: src1:i len:16 add_ovf_carry: dest:i src1:i src2:i len:16 sub_ovf_carry: dest:i src1:i src2:i len:16 add_ovf_un_carry: dest:i src1:i src2:i len:16 sub_ovf_un_carry: dest:i src1:i src2:i len:16 start_handler: len:32 endfinally: len:28 ceq: dest:i len:12 cgt: dest:i len:12 cgt_un: dest:i len:12 clt: dest:i len:12 clt_un: dest:i len:12 localloc: dest:i src1:i len:60 compare: src1:i src2:i len:4 compare_imm: src1:i len:12 fcompare: src1:f src2:f len:12 arglist: src1:i len:12 setlret: src1:i src2:i len:12 check_this: src1:b len:4 voidcall: len:16 clob:c voidcall_reg: src1:i len:16 clob:c voidcall_membase: src1:b len:16 clob:c fcall: dest:g len:16 clob:c fcall_reg: dest:g src1:i len:16 clob:c fcall_membase: dest:g src1:b len:16 clob:c lcall: dest:l len:16 clob:c lcall_reg: dest:l src1:i len:16 clob:c lcall_membase: dest:l src1:b len:16 clob:c vcall: len:16 clob:c vcall_reg: src1:i len:16 clob:c vcall_membase: src1:b len:16 clob:c call_reg: dest:a src1:i len:16 clob:c call_membase: dest:a src1:b len:16 clob:c iconst: dest:i len:8 r4const: dest:f len:12 r8const: dest:f len:24 label: len:0 store_membase_reg: dest:b src1:i len:12 storei1_membase_reg: dest:b src1:i len:12 storei2_membase_reg: dest:b src1:i len:12 storei4_membase_reg: dest:b src1:i len:12 storer4_membase_reg: dest:b src1:f len:16 storer8_membase_reg: dest:b src1:f len:12 load_membase: dest:i src1:b len:12 loadi1_membase: dest:i src1:b len:16 loadu1_membase: dest:i src1:b len:12 loadi2_membase: dest:i src1:b len:12 loadu2_membase: dest:i src1:b len:12 loadi4_membase: dest:i src1:b len:12 loadu4_membase: dest:i src1:b len:12 loadr4_membase: dest:f src1:b len:12 loadr8_membase: dest:f src1:b len:12 load_memindex: dest:i src1:b src2:i len:4 loadi1_memindex: dest:i src1:b src2:i len:8 loadu1_memindex: dest:i src1:b src2:i len:4 loadi2_memindex: dest:i src1:b src2:i len:4 loadu2_memindex: dest:i src1:b src2:i len:4 loadi4_memindex: dest:i src1:b src2:i len:4 loadu4_memindex: dest:i src1:b src2:i len:4 loadr4_memindex: dest:f src1:b src2:i len:4 loadr8_memindex: dest:f src1:b src2:i len:4 store_memindex: dest:b src1:i src2:i len:4 storei1_memindex: dest:b src1:i src2:i len:4 storei2_memindex: dest:b src1:i src2:i len:4 storei4_memindex: dest:b src1:i src2:i len:4 storer4_memindex: dest:b src1:i src2:i len:8 storer8_memindex: dest:b src1:i src2:i len:4 loadu4_mem: dest:i len:8 move: dest:i src1:i len:4 fmove: dest:f src1:f len:4 move_f_to_i4: dest:i src1:f len:8 move_i4_to_f: dest:f src1:i len:8 add_imm: dest:i src1:i len:4 sub_imm: dest:i src1:i len:4 mul_imm: dest:i src1:i len:4 # there is no actual support for division or reminder by immediate # we simulate them, though (but we need to change the burg rules # to allocate a symbolic reg for src2) div_imm: dest:i src1:i src2:i len:20 div_un_imm: dest:i src1:i src2:i len:12 rem_imm: dest:i src1:i src2:i len:28 rem_un_imm: dest:i src1:i src2:i len:16 and_imm: dest:i src1:i len:4 or_imm: dest:i src1:i len:4 xor_imm: dest:i src1:i len:4 shl_imm: dest:i src1:i len:4 shr_imm: dest:i src1:i len:4 shr_un_imm: dest:i src1:i len:4 cond_exc_eq: len:8 cond_exc_ne_un: len:8 cond_exc_lt: len:8 cond_exc_lt_un: len:8 cond_exc_gt: len:8 cond_exc_gt_un: len:8 cond_exc_ge: len:8 cond_exc_ge_un: len:8 cond_exc_le: len:8 cond_exc_le_un: len:8 cond_exc_ov: len:12 cond_exc_no: len:8 cond_exc_c: len:12 cond_exc_nc: len:8 long_conv_to_ovf_i: dest:i src1:i src2:i len:32 
long_mul_ovf: long_conv_to_r_un: dest:f src1:i src2:i len:37 float_beq: len:8 float_bne_un: len:8 float_blt: len:8 float_blt_un: len:8 float_bgt: len:8 float_bgt_un: len:8 float_bge: len:8 float_bge_un: len:8 float_ble: len:8 float_ble_un: len:8 float_add: dest:f src1:f src2:f len:4 float_sub: dest:f src1:f src2:f len:4 float_mul: dest:f src1:f src2:f len:4 float_div: dest:f src1:f src2:f len:4 float_div_un: dest:f src1:f src2:f len:4 float_rem: dest:f src1:f src2:f len:16 float_rem_un: dest:f src1:f src2:f len:16 float_neg: dest:f src1:f len:4 float_not: dest:f src1:f len:4 float_conv_to_i1: dest:i src1:f len:40 float_conv_to_i2: dest:i src1:f len:40 float_conv_to_i4: dest:i src1:f len:40 float_conv_to_i8: dest:l src1:f len:40 float_conv_to_r4: dest:f src1:f len:4 float_conv_to_u4: dest:i src1:f len:40 float_conv_to_u8: dest:l src1:f len:40 float_conv_to_u2: dest:i src1:f len:40 float_conv_to_u1: dest:i src1:f len:40 float_ceq: dest:i src1:f src2:f len:16 float_cgt: dest:i src1:f src2:f len:16 float_cgt_un: dest:i src1:f src2:f len:20 float_clt: dest:i src1:f src2:f len:16 float_clt_un: dest:i src1:f src2:f len:20 float_cneq: dest:i src1:f src2:f len:16 float_cge: dest:i src1:f src2:f len:16 float_cle: dest:i src1:f src2:f len:16 call_handler: len:12 clob:c endfilter: src1:i len:32 aotconst: dest:i len:8 load_gotaddr: dest:i len:32 got_entry: dest:i src1:b len:32 abs: dest:f src1:f len:4 sqrt: dest:f src1:f len:4 sqrtf: dest:f src1:f len:4 round: dest:f src1:f len:4 ppc_trunc: dest:f src1:f len:4 ppc_ceil: dest:f src1:f len:4 ppc_floor: dest:f src1:f len:4 adc: dest:i src1:i src2:i len:4 addcc: dest:i src1:i src2:i len:4 subcc: dest:i src1:i src2:i len:4 addcc_imm: dest:i src1:i len:4 sbb: dest:i src1:i src2:i len:4 br_reg: src1:i len:8 ppc_subfic: dest:i src1:i len:4 ppc_subfze: dest:i src1:i len:4 bigmul: len:12 dest:l src1:i src2:i bigmul_un: len:12 dest:l src1:i src2:i # Linear IR opcodes dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 # 32 bit opcodes int_add: dest:i src1:i src2:i len:4 int_sub: dest:i src1:i src2:i len:4 int_mul: dest:i src1:i src2:i len:4 int_div: dest:i src1:i src2:i len:40 int_div_un: dest:i src1:i src2:i len:16 int_rem: dest:i src1:i src2:i len:48 int_rem_un: dest:i src1:i src2:i len:24 int_and: dest:i src1:i src2:i len:4 int_or: dest:i src1:i src2:i len:4 int_xor: dest:i src1:i src2:i len:4 int_shl: dest:i src1:i src2:i len:4 int_shr: dest:i src1:i src2:i len:4 int_shr_un: dest:i src1:i src2:i len:4 int_neg: dest:i src1:i len:4 int_not: dest:i src1:i len:4 int_conv_to_i1: dest:i src1:i len:8 int_conv_to_i2: dest:i src1:i len:8 int_conv_to_i4: dest:i src1:i len:4 int_conv_to_r4: dest:f src1:i len:36 int_conv_to_r8: dest:f src1:i len:36 int_conv_to_u4: dest:i src1:i int_conv_to_u2: dest:i src1:i len:8 int_conv_to_u1: dest:i src1:i len:4 int_beq: len:8 int_bge: len:8 int_bgt: len:8 int_ble: len:8 int_blt: len:8 int_bne_un: len:8 int_bge_un: len:8 int_bgt_un: len:8 int_ble_un: len:8 int_blt_un: len:8 int_add_ovf: dest:i src1:i src2:i len:16 int_add_ovf_un: dest:i src1:i src2:i len:16 int_mul_ovf: dest:i src1:i src2:i len:16 int_mul_ovf_un: dest:i src1:i src2:i len:16 int_sub_ovf: dest:i src1:i src2:i len:16 int_sub_ovf_un: dest:i src1:i src2:i len:16 int_adc: dest:i src1:i src2:i len:4 int_addcc: dest:i src1:i src2:i len:4 int_subcc: dest:i src1:i src2:i len:4 int_sbb: dest:i src1:i src2:i len:4 int_adc_imm: dest:i src1:i len:12 int_sbb_imm: dest:i src1:i len:12 
int_add_imm: dest:i src1:i len:12 int_sub_imm: dest:i src1:i len:12 int_mul_imm: dest:i src1:i len:12 int_div_imm: dest:i src1:i len:20 int_div_un_imm: dest:i src1:i len:12 int_rem_imm: dest:i src1:i len:28 int_rem_un_imm: dest:i src1:i len:16 int_and_imm: dest:i src1:i len:12 int_or_imm: dest:i src1:i len:12 int_xor_imm: dest:i src1:i len:12 int_shl_imm: dest:i src1:i len:8 int_shr_imm: dest:i src1:i len:8 int_shr_un_imm: dest:i src1:i len:8 int_ceq: dest:i len:12 int_cgt: dest:i len:12 int_cgt_un: dest:i len:12 int_clt: dest:i len:12 int_clt_un: dest:i len:12 int_cneq: dest:i len:12 int_cge: dest:i len:12 int_cle: dest:i len:12 int_cge_un: dest:i len:12 int_cle_un: dest:i len:12 cond_exc_ieq: len:8 cond_exc_ine_un: len:8 cond_exc_ilt: len:8 cond_exc_ilt_un: len:8 cond_exc_igt: len:8 cond_exc_igt_un: len:8 cond_exc_ige: len:8 cond_exc_ige_un: len:8 cond_exc_ile: len:8 cond_exc_ile_un: len:8 cond_exc_iov: len:12 cond_exc_ino: len:8 cond_exc_ic: len:12 cond_exc_inc: len:8 icompare: src1:i src2:i len:4 icompare_imm: src1:i len:12 long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:32 # shouldn't use long stuff on ppc32 #long_min: dest:i src1:i src2:i len:8 clob:1 #long_min_un: dest:i src1:i src2:i len:8 clob:1 #long_max: dest:i src1:i src2:i len:8 clob:1 #long_max_un: dest:i src1:i src2:i len:8 clob:1 int_min: dest:i src1:i src2:i len:8 clob:1 int_max: dest:i src1:i src2:i len:8 clob:1 int_min_un: dest:i src1:i src2:i len:8 clob:1 int_max_un: dest:i src1:i src2:i len:8 clob:1 vcall2: len:20 clob:c vcall2_reg: src1:i len:8 clob:c vcall2_membase: src1:b len:16 clob:c jump_table: dest:i len:8 atomic_add_i4: src1:b src2:i dest:i len:28 atomic_cas_i4: src1:b src2:i src3:i dest:i len:38 liverange_start: len:0 liverange_end: len:0 gc_safe_point: len:0
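Consistent with the PR title, the visible change between the before and after content in both the cpu-mips.md and cpu-ppc.md records is the removal of the float_conv_to_i entry. A small sketch of how one might confirm that mechanically; the file names are hypothetical local copies of the two snapshots, and comment words ending in a colon would be false positives in isolation but cancel out in the set difference since both snapshots share the same comments:

def opcode_names(text):
    # In the flattened content, opcode names are the tokens that end
    # with ':' (specifiers like dest:i carry their value after the colon).
    return {tok[:-1] for tok in text.split() if tok.endswith(":")}

before = open("cpu-ppc.before.md").read()  # hypothetical local snapshot
after = open("cpu-ppc.after.md").read()    # hypothetical local snapshot
print(sorted(opcode_names(before) - opcode_names(after)))  # expected: ['float_conv_to_i']
print(sorted(opcode_names(after) - opcode_names(before)))  # expected: []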
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/cpu-ppc64.md
# powerpc cpu description file # this file is read by genmdesc to produce a table with all the relevant information # about the cpu instructions that may be used by the register allocator, the scheduler # and other parts of the arch-dependent part of mini. # # An opcode name is followed by a colon and optional specifiers. # A specifier has a name, a colon and a value. Specifiers are separated by white space. # Here is a description of the specifiers valid for this file and their possible values. # # dest:register describes the destination register of an instruction # src1:register describes the first source register of an instruction # src2:register describes the second source register of an instruction # # register may have the following values: # i integer register # a r3 register (output from calls) # b base register (used in address references) # f floating point register # # len:number describes the maximum length in bytes of the instruction # number is a positive integer # # cost:number describes how many cycles are needed to complete the instruction (unused) # # clob:spec describes if the instruction clobbers registers or has special needs # # spec can be one of the following characters: # c clobbers caller-save registers # r 'reserves' the destination register until a later instruction unreserves it # used mostly to set output registers in function calls # # flags:spec describes if the instruction uses or sets the flags (unused) # # spec can be one of the following chars: # s sets the flags # u uses the flags # m uses and modifies the flags # # res:spec describes what units are used in the processor (unused) # # delay: describes delay slots (unused) # # the required specifiers are: len, clob (if registers are clobbered), the registers # specifiers if the registers are actually used, flags (when scheduling is implemented). # # See the code in mini-x86.c for more details on how the specifiers are used. # tailcall: len:124 clob:c tailcall_parameter: len:0 # PowerPC outputs a nice fixed size memcpy loop for larger stack_usage, so 0. 
memory_barrier: len:4 nop: len:4 relaxed_nop: len:4 break: len:40 seq_point: len:48 il_seq_point: len:0 call: dest:a clob:c len:36 br: len:4 throw: src1:i len:40 rethrow: src1:i len:40 ckfinite: dest:f src1:f ppc_check_finite: src1:i len:16 add_ovf_carry: dest:i src1:i src2:i len:16 sub_ovf_carry: dest:i src1:i src2:i len:16 add_ovf_un_carry: dest:i src1:i src2:i len:16 sub_ovf_un_carry: dest:i src1:i src2:i len:16 start_handler: len:16 endfinally: len:20 ceq: dest:i len:12 cgt: dest:i len:12 cgt_un: dest:i len:12 clt: dest:i len:12 clt_un: dest:i len:12 localloc: dest:i src1:i len:60 compare: src1:i src2:i len:4 compare_imm: src1:i len:12 fcompare: src1:f src2:f len:12 arglist: src1:i len:12 setlret: src1:i src2:i len:12 check_this: src1:b len:4 voidcall: len:36 clob:c voidcall_reg: src1:i len:16 clob:c voidcall_membase: src1:b len:16 clob:c fcall: dest:g len:36 clob:c fcall_reg: dest:g src1:i len:16 clob:c fcall_membase: dest:g src1:b len:16 clob:c lcall: dest:a len:36 clob:c lcall_reg: dest:a src1:i len:16 clob:c lcall_membase: dest:a src1:b len:16 clob:c vcall: len:16 clob:c vcall_reg: src1:i len:16 clob:c vcall_membase: src1:b len:12 clob:c call_reg: dest:a src1:i len:16 clob:c call_membase: dest:a src1:b len:16 clob:c iconst: dest:i len:20 i8const: dest:i len:20 r4const: dest:f len:12 r8const: dest:f len:24 label: len:0 store_membase_reg: dest:b src1:i len:12 storei1_membase_reg: dest:b src1:i len:12 storei2_membase_reg: dest:b src1:i len:12 storei4_membase_reg: dest:b src1:i len:12 storei8_membase_reg: dest:b src1:i len:12 storer4_membase_reg: dest:b src1:f len:16 storer8_membase_reg: dest:b src1:f len:12 load_membase: dest:i src1:b len:12 loadi1_membase: dest:i src1:b len:16 loadu1_membase: dest:i src1:b len:12 loadi2_membase: dest:i src1:b len:12 loadu2_membase: dest:i src1:b len:12 loadi4_membase: dest:i src1:b len:12 loadu4_membase: dest:i src1:b len:12 loadi8_membase: dest:i src1:b len:12 loadr4_membase: dest:f src1:b len:12 loadr8_membase: dest:f src1:b len:12 load_memindex: dest:i src1:b src2:i len:4 loadi1_memindex: dest:i src1:b src2:i len:8 loadu1_memindex: dest:i src1:b src2:i len:4 loadi2_memindex: dest:i src1:b src2:i len:4 loadu2_memindex: dest:i src1:b src2:i len:4 loadi4_memindex: dest:i src1:b src2:i len:4 loadu4_memindex: dest:i src1:b src2:i len:4 loadi8_memindex: dest:i src1:b src2:i len:4 loadr4_memindex: dest:f src1:b src2:i len:4 loadr8_memindex: dest:f src1:b src2:i len:4 store_memindex: dest:b src1:i src2:i len:4 storei1_memindex: dest:b src1:i src2:i len:4 storei2_memindex: dest:b src1:i src2:i len:4 storei4_memindex: dest:b src1:i src2:i len:4 storei8_memindex: dest:b src1:i src2:i len:4 storer4_memindex: dest:b src1:i src2:i len:8 storer8_memindex: dest:b src1:i src2:i len:4 loadu4_mem: dest:i len:8 move: dest:i src1:i len:4 fmove: dest:f src1:f len:4 move_f_to_i4: dest:i src1:f len:8 move_i4_to_f: dest:f src1:i len:8 move_f_to_i8: dest:i src1:f len:8 move_i8_to_f: dest:f src1:i len:8 add_imm: dest:i src1:i len:4 sub_imm: dest:i src1:i len:4 mul_imm: dest:i src1:i len:4 # there is no actual support for division or reminder by immediate # we simulate them, though (but we need to change the burg rules # to allocate a symbolic reg for src2) div_imm: dest:i src1:i src2:i len:20 div_un_imm: dest:i src1:i src2:i len:12 rem_imm: dest:i src1:i src2:i len:28 rem_un_imm: dest:i src1:i src2:i len:16 and_imm: dest:i src1:i len:4 or_imm: dest:i src1:i len:4 xor_imm: dest:i src1:i len:4 shl_imm: dest:i src1:i len:4 shr_imm: dest:i src1:i len:4 shr_un_imm: dest:i src1:i 
len:4 cond_exc_eq: len:8 cond_exc_ne_un: len:8 cond_exc_lt: len:8 cond_exc_lt_un: len:8 cond_exc_gt: len:8 cond_exc_gt_un: len:8 cond_exc_ge: len:8 cond_exc_ge_un: len:8 cond_exc_le: len:8 cond_exc_le_un: len:8 cond_exc_ov: len:12 cond_exc_no: len:8 cond_exc_c: len:12 cond_exc_nc: len:8 long_conv_to_ovf_i: dest:i src1:i src2:i len:32 #long_mul_ovf: long_conv_to_r_un: dest:f src1:i src2:i len:37 float_beq: len:8 float_bne_un: len:8 float_blt: len:8 float_blt_un: len:8 float_bgt: len:8 float_bgt_un: len:8 float_bge: len:8 float_bge_un: len:8 float_ble: len:8 float_ble_un: len:8 float_add: dest:f src1:f src2:f len:4 float_sub: dest:f src1:f src2:f len:4 float_mul: dest:f src1:f src2:f len:4 float_div: dest:f src1:f src2:f len:4 float_div_un: dest:f src1:f src2:f len:4 float_rem: dest:f src1:f src2:f len:16 float_rem_un: dest:f src1:f src2:f len:16 float_neg: dest:f src1:f len:4 float_not: dest:f src1:f len:4 float_conv_to_i1: dest:i src1:f len:40 float_conv_to_i2: dest:i src1:f len:40 float_conv_to_i4: dest:i src1:f len:40 float_conv_to_i8: dest:i src1:f len:40 float_conv_to_r4: dest:f src1:f len:4 float_conv_to_u4: dest:i src1:f len:40 float_conv_to_u8: dest:i src1:f len:40 float_conv_to_u2: dest:i src1:f len:40 float_conv_to_u1: dest:i src1:f len:40 float_conv_to_i: dest:i src1:f len:40 float_ceq: dest:i src1:f src2:f len:16 float_cgt: dest:i src1:f src2:f len:16 float_cgt_un: dest:i src1:f src2:f len:20 float_clt: dest:i src1:f src2:f len:16 float_clt_un: dest:i src1:f src2:f len:20 float_cneq: dest:i src1:f src2:f len:16 float_cge: dest:i src1:f src2:f len:16 float_cle: dest:i src1:f src2:f len:16 call_handler: len:12 clob:c endfilter: src1:i len:20 aotconst: dest:i len:8 load_gotaddr: dest:i len:32 got_entry: dest:i src1:b len:32 abs: dest:f src1:f len:4 sqrt: dest:f src1:f len:4 sqrtf: dest:f src1:f len:4 round: dest:f src1:f len:4 ppc_trunc: dest:f src1:f len:4 ppc_ceil: dest:f src1:f len:4 ppc_floor: dest:f src1:f len:4 adc: dest:i src1:i src2:i len:4 addcc: dest:i src1:i src2:i len:4 subcc: dest:i src1:i src2:i len:4 addcc_imm: dest:i src1:i len:4 sbb: dest:i src1:i src2:i len:4 br_reg: src1:i len:8 ppc_subfic: dest:i src1:i len:4 ppc_subfze: dest:i src1:i len:4 bigmul: len:12 dest:i src1:i src2:i bigmul_un: len:12 dest:i src1:i src2:i # Linear IR opcodes dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_i8const: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 # 32 bit opcodes int_add: dest:i src1:i src2:i len:4 int_sub: dest:i src1:i src2:i len:4 int_mul: dest:i src1:i src2:i len:4 int_div: dest:i src1:i src2:i len:40 int_div_un: dest:i src1:i src2:i len:16 int_rem: dest:i src1:i src2:i len:48 int_rem_un: dest:i src1:i src2:i len:24 int_and: dest:i src1:i src2:i len:4 int_or: dest:i src1:i src2:i len:4 int_xor: dest:i src1:i src2:i len:4 int_shl: dest:i src1:i src2:i len:4 int_shr: dest:i src1:i src2:i len:4 int_shr_un: dest:i src1:i src2:i len:4 int_neg: dest:i src1:i len:4 int_not: dest:i src1:i len:4 int_conv_to_i1: dest:i src1:i len:8 int_conv_to_i2: dest:i src1:i len:8 int_conv_to_i4: dest:i src1:i len:4 sext_i4: dest:i src1:i len:4 int_conv_to_r4: dest:f src1:i len:20 int_conv_to_r8: dest:f src1:i len:16 int_conv_to_u4: dest:i src1:i len:4 int_conv_to_u2: dest:i src1:i len:8 int_conv_to_u1: dest:i src1:i len:4 int_beq: len:8 int_bge: len:8 int_bgt: len:8 int_ble: len:8 int_blt: len:8 int_bne_un: len:8 int_bge_un: len:8 int_bgt_un: len:8 int_ble_un: len:8 int_blt_un: len:8 int_add_ovf: dest:i src1:i src2:i 
len:16 int_add_ovf_un: dest:i src1:i src2:i len:16 int_mul_ovf: dest:i src1:i src2:i len:16 int_mul_ovf_un: dest:i src1:i src2:i len:16 int_sub_ovf: dest:i src1:i src2:i len:16 int_sub_ovf_un: dest:i src1:i src2:i len:16 int_adc: dest:i src1:i src2:i len:4 int_addcc: dest:i src1:i src2:i len:4 int_subcc: dest:i src1:i src2:i len:4 int_sbb: dest:i src1:i src2:i len:4 int_adc_imm: dest:i src1:i len:12 int_sbb_imm: dest:i src1:i len:12 int_add_imm: dest:i src1:i len:4 int_sub_imm: dest:i src1:i len:12 int_mul_imm: dest:i src1:i len:12 int_div_imm: dest:i src1:i len:20 int_div_un_imm: dest:i src1:i len:12 int_rem_imm: dest:i src1:i len:28 int_rem_un_imm: dest:i src1:i len:16 int_and_imm: dest:i src1:i len:12 int_or_imm: dest:i src1:i len:12 int_xor_imm: dest:i src1:i len:12 int_shl_imm: dest:i src1:i len:8 int_shr_imm: dest:i src1:i len:8 int_shr_un_imm: dest:i src1:i len:8 int_ceq: dest:i len:12 int_cgt: dest:i len:12 int_cgt_un: dest:i len:12 int_clt: dest:i len:12 int_clt_un: dest:i len:12 int_cneq: dest:i len:12 int_cge: dest:i len:12 int_cle: dest:i len:12 int_cge_un: dest:i len:12 int_cle_un: dest:i len:12 cond_exc_ieq: len:8 cond_exc_ine_un: len:8 cond_exc_ilt: len:8 cond_exc_ilt_un: len:8 cond_exc_igt: len:8 cond_exc_igt_un: len:8 cond_exc_ige: len:8 cond_exc_ige_un: len:8 cond_exc_ile: len:8 cond_exc_ile_un: len:8 cond_exc_iov: len:12 cond_exc_ino: len:8 cond_exc_ic: len:12 cond_exc_inc: len:8 icompare: src1:i src2:i len:4 icompare_imm: src1:i len:12 # 64 bit opcodes long_add: dest:i src1:i src2:i len:4 long_sub: dest:i src1:i src2:i len:4 long_mul: dest:i src1:i src2:i len:4 long_mul_imm: dest:i src1:i len:4 long_div: dest:i src1:i src2:i len:40 long_div_un: dest:i src1:i src2:i len:16 long_rem: dest:i src1:i src2:i len:48 long_rem_un: dest:i src1:i src2:i len:24 long_and: dest:i src1:i src2:i len:4 long_or: dest:i src1:i src2:i len:4 long_xor: dest:i src1:i src2:i len:4 long_shl: dest:i src1:i src2:i len:4 long_shl_imm: dest:i src1:i len:4 long_shr: dest:i src1:i src2:i len:4 long_shr_un: dest:i src1:i src2:i len:4 long_shr_imm: dest:i src1:i len:4 long_shr_un_imm: dest:i src1:i len:4 long_neg: dest:i src1:i len:4 long_not: dest:i src1:i len:4 long_conv_to_i1: dest:i src1:i len:4 long_conv_to_i2: dest:i src1:i len:4 long_conv_to_i4: dest:i src1:i len:4 long_conv_to_r4: dest:f src1:i len:16 long_conv_to_r8: dest:f src1:i len:12 long_conv_to_u4: dest:i src1:i long_conv_to_u2: dest:i src1:i len:4 long_conv_to_u1: dest:i src1:i len:4 zext_i4: dest:i src1:i len:4 long_beq: len:8 long_bge: len:8 long_bgt: len:8 long_ble: len:8 long_blt: len:8 long_bne_un: len:8 long_bge_un: len:8 long_bgt_un: len:8 long_ble_un: len:8 long_blt_un: len:8 long_add_ovf: dest:i src1:i src2:i len:16 long_add_ovf_un: dest:i src1:i src2:i len:16 long_mul_ovf: dest:i src1:i src2:i len:16 long_mul_ovf_un: dest:i src1:i src2:i len:16 long_sub_ovf: dest:i src1:i src2:i len:16 long_sub_ovf_un: dest:i src1:i src2:i len:16 long_ceq: dest:i len:12 long_cgt: dest:i len:12 long_cgt_un: dest:i len:12 long_clt: dest:i len:12 long_clt_un: dest:i len:12 long_add_imm: dest:i src1:i clob:1 len:4 long_sub_imm: dest:i src1:i clob:1 len:4 long_and_imm: dest:i src1:i clob:1 len:4 long_or_imm: dest:i src1:i clob:1 len:4 long_xor_imm: dest:i src1:i clob:1 len:4 lcompare: src1:i src2:i len:4 lcompare_imm: src1:i len:12 long_min: dest:i src1:i src2:i len:8 clob:1 long_min_un: dest:i src1:i src2:i len:8 clob:1 long_max: dest:i src1:i src2:i len:8 clob:1 long_max_un: dest:i src1:i src2:i len:8 clob:1 int_min: dest:i src1:i src2:i len:8 
clob:1 int_max: dest:i src1:i src2:i len:8 clob:1 int_min_un: dest:i src1:i src2:i len:8 clob:1 int_max_un: dest:i src1:i src2:i len:8 clob:1 #long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:30 vcall2: len:36 clob:c vcall2_reg: src1:i len:16 clob:c vcall2_membase: src1:b len:16 clob:c jump_table: dest:i len:20 atomic_add_i4: src1:b src2:i dest:i len:28 atomic_add_i8: src1:b src2:i dest:i len:28 atomic_cas_i4: src1:b src2:i src3:i dest:i len:38 atomic_cas_i8: src1:b src2:i src3:i dest:i len:38 liverange_start: len:0 liverange_end: len:0 gc_safe_point: len:0
# powerpc cpu description file # this file is read by genmdesc to produce a table with all the relevant information # about the cpu instructions that may be used by the register allocator, the scheduler # and other parts of the arch-dependent part of mini. # # An opcode name is followed by a colon and optional specifiers. # A specifier has a name, a colon and a value. Specifiers are separated by white space. # Here is a description of the specifiers valid for this file and their possible values. # # dest:register describes the destination register of an instruction # src1:register describes the first source register of an instruction # src2:register describes the second source register of an instruction # # register may have the following values: # i integer register # a r3 register (output from calls) # b base register (used in address references) # f floating point register # # len:number describes the maximum length in bytes of the instruction # number is a positive integer # # cost:number describes how many cycles are needed to complete the instruction (unused) # # clob:spec describes if the instruction clobbers registers or has special needs # # spec can be one of the following characters: # c clobbers caller-save registers # r 'reserves' the destination register until a later instruction unreserves it # used mostly to set output registers in function calls # # flags:spec describes if the instruction uses or sets the flags (unused) # # spec can be one of the following chars: # s sets the flags # u uses the flags # m uses and modifies the flags # # res:spec describes what units are used in the processor (unused) # # delay: describes delay slots (unused) # # the required specifiers are: len, clob (if registers are clobbered), the registers # specifiers if the registers are actually used, flags (when scheduling is implemented). # # See the code in mini-x86.c for more details on how the specifiers are used. # tailcall: len:124 clob:c tailcall_parameter: len:0 # PowerPC outputs a nice fixed size memcpy loop for larger stack_usage, so 0. 
memory_barrier: len:4 nop: len:4 relaxed_nop: len:4 break: len:40 seq_point: len:48 il_seq_point: len:0 call: dest:a clob:c len:36 br: len:4 throw: src1:i len:40 rethrow: src1:i len:40 ckfinite: dest:f src1:f ppc_check_finite: src1:i len:16 add_ovf_carry: dest:i src1:i src2:i len:16 sub_ovf_carry: dest:i src1:i src2:i len:16 add_ovf_un_carry: dest:i src1:i src2:i len:16 sub_ovf_un_carry: dest:i src1:i src2:i len:16 start_handler: len:16 endfinally: len:20 ceq: dest:i len:12 cgt: dest:i len:12 cgt_un: dest:i len:12 clt: dest:i len:12 clt_un: dest:i len:12 localloc: dest:i src1:i len:60 compare: src1:i src2:i len:4 compare_imm: src1:i len:12 fcompare: src1:f src2:f len:12 arglist: src1:i len:12 setlret: src1:i src2:i len:12 check_this: src1:b len:4 voidcall: len:36 clob:c voidcall_reg: src1:i len:16 clob:c voidcall_membase: src1:b len:16 clob:c fcall: dest:g len:36 clob:c fcall_reg: dest:g src1:i len:16 clob:c fcall_membase: dest:g src1:b len:16 clob:c lcall: dest:a len:36 clob:c lcall_reg: dest:a src1:i len:16 clob:c lcall_membase: dest:a src1:b len:16 clob:c vcall: len:16 clob:c vcall_reg: src1:i len:16 clob:c vcall_membase: src1:b len:12 clob:c call_reg: dest:a src1:i len:16 clob:c call_membase: dest:a src1:b len:16 clob:c iconst: dest:i len:20 i8const: dest:i len:20 r4const: dest:f len:12 r8const: dest:f len:24 label: len:0 store_membase_reg: dest:b src1:i len:12 storei1_membase_reg: dest:b src1:i len:12 storei2_membase_reg: dest:b src1:i len:12 storei4_membase_reg: dest:b src1:i len:12 storei8_membase_reg: dest:b src1:i len:12 storer4_membase_reg: dest:b src1:f len:16 storer8_membase_reg: dest:b src1:f len:12 load_membase: dest:i src1:b len:12 loadi1_membase: dest:i src1:b len:16 loadu1_membase: dest:i src1:b len:12 loadi2_membase: dest:i src1:b len:12 loadu2_membase: dest:i src1:b len:12 loadi4_membase: dest:i src1:b len:12 loadu4_membase: dest:i src1:b len:12 loadi8_membase: dest:i src1:b len:12 loadr4_membase: dest:f src1:b len:12 loadr8_membase: dest:f src1:b len:12 load_memindex: dest:i src1:b src2:i len:4 loadi1_memindex: dest:i src1:b src2:i len:8 loadu1_memindex: dest:i src1:b src2:i len:4 loadi2_memindex: dest:i src1:b src2:i len:4 loadu2_memindex: dest:i src1:b src2:i len:4 loadi4_memindex: dest:i src1:b src2:i len:4 loadu4_memindex: dest:i src1:b src2:i len:4 loadi8_memindex: dest:i src1:b src2:i len:4 loadr4_memindex: dest:f src1:b src2:i len:4 loadr8_memindex: dest:f src1:b src2:i len:4 store_memindex: dest:b src1:i src2:i len:4 storei1_memindex: dest:b src1:i src2:i len:4 storei2_memindex: dest:b src1:i src2:i len:4 storei4_memindex: dest:b src1:i src2:i len:4 storei8_memindex: dest:b src1:i src2:i len:4 storer4_memindex: dest:b src1:i src2:i len:8 storer8_memindex: dest:b src1:i src2:i len:4 loadu4_mem: dest:i len:8 move: dest:i src1:i len:4 fmove: dest:f src1:f len:4 move_f_to_i4: dest:i src1:f len:8 move_i4_to_f: dest:f src1:i len:8 move_f_to_i8: dest:i src1:f len:8 move_i8_to_f: dest:f src1:i len:8 add_imm: dest:i src1:i len:4 sub_imm: dest:i src1:i len:4 mul_imm: dest:i src1:i len:4 # there is no actual support for division or reminder by immediate # we simulate them, though (but we need to change the burg rules # to allocate a symbolic reg for src2) div_imm: dest:i src1:i src2:i len:20 div_un_imm: dest:i src1:i src2:i len:12 rem_imm: dest:i src1:i src2:i len:28 rem_un_imm: dest:i src1:i src2:i len:16 and_imm: dest:i src1:i len:4 or_imm: dest:i src1:i len:4 xor_imm: dest:i src1:i len:4 shl_imm: dest:i src1:i len:4 shr_imm: dest:i src1:i len:4 shr_un_imm: dest:i src1:i 
len:4 cond_exc_eq: len:8 cond_exc_ne_un: len:8 cond_exc_lt: len:8 cond_exc_lt_un: len:8 cond_exc_gt: len:8 cond_exc_gt_un: len:8 cond_exc_ge: len:8 cond_exc_ge_un: len:8 cond_exc_le: len:8 cond_exc_le_un: len:8 cond_exc_ov: len:12 cond_exc_no: len:8 cond_exc_c: len:12 cond_exc_nc: len:8 long_conv_to_ovf_i: dest:i src1:i src2:i len:32 #long_mul_ovf: long_conv_to_r_un: dest:f src1:i src2:i len:37 float_beq: len:8 float_bne_un: len:8 float_blt: len:8 float_blt_un: len:8 float_bgt: len:8 float_bgt_un: len:8 float_bge: len:8 float_bge_un: len:8 float_ble: len:8 float_ble_un: len:8 float_add: dest:f src1:f src2:f len:4 float_sub: dest:f src1:f src2:f len:4 float_mul: dest:f src1:f src2:f len:4 float_div: dest:f src1:f src2:f len:4 float_div_un: dest:f src1:f src2:f len:4 float_rem: dest:f src1:f src2:f len:16 float_rem_un: dest:f src1:f src2:f len:16 float_neg: dest:f src1:f len:4 float_not: dest:f src1:f len:4 float_conv_to_i1: dest:i src1:f len:40 float_conv_to_i2: dest:i src1:f len:40 float_conv_to_i4: dest:i src1:f len:40 float_conv_to_i8: dest:i src1:f len:40 float_conv_to_r4: dest:f src1:f len:4 float_conv_to_u4: dest:i src1:f len:40 float_conv_to_u8: dest:i src1:f len:40 float_conv_to_u2: dest:i src1:f len:40 float_conv_to_u1: dest:i src1:f len:40 float_ceq: dest:i src1:f src2:f len:16 float_cgt: dest:i src1:f src2:f len:16 float_cgt_un: dest:i src1:f src2:f len:20 float_clt: dest:i src1:f src2:f len:16 float_clt_un: dest:i src1:f src2:f len:20 float_cneq: dest:i src1:f src2:f len:16 float_cge: dest:i src1:f src2:f len:16 float_cle: dest:i src1:f src2:f len:16 call_handler: len:12 clob:c endfilter: src1:i len:20 aotconst: dest:i len:8 load_gotaddr: dest:i len:32 got_entry: dest:i src1:b len:32 abs: dest:f src1:f len:4 sqrt: dest:f src1:f len:4 sqrtf: dest:f src1:f len:4 round: dest:f src1:f len:4 ppc_trunc: dest:f src1:f len:4 ppc_ceil: dest:f src1:f len:4 ppc_floor: dest:f src1:f len:4 adc: dest:i src1:i src2:i len:4 addcc: dest:i src1:i src2:i len:4 subcc: dest:i src1:i src2:i len:4 addcc_imm: dest:i src1:i len:4 sbb: dest:i src1:i src2:i len:4 br_reg: src1:i len:8 ppc_subfic: dest:i src1:i len:4 ppc_subfze: dest:i src1:i len:4 bigmul: len:12 dest:i src1:i src2:i bigmul_un: len:12 dest:i src1:i src2:i # Linear IR opcodes dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_i8const: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 # 32 bit opcodes int_add: dest:i src1:i src2:i len:4 int_sub: dest:i src1:i src2:i len:4 int_mul: dest:i src1:i src2:i len:4 int_div: dest:i src1:i src2:i len:40 int_div_un: dest:i src1:i src2:i len:16 int_rem: dest:i src1:i src2:i len:48 int_rem_un: dest:i src1:i src2:i len:24 int_and: dest:i src1:i src2:i len:4 int_or: dest:i src1:i src2:i len:4 int_xor: dest:i src1:i src2:i len:4 int_shl: dest:i src1:i src2:i len:4 int_shr: dest:i src1:i src2:i len:4 int_shr_un: dest:i src1:i src2:i len:4 int_neg: dest:i src1:i len:4 int_not: dest:i src1:i len:4 int_conv_to_i1: dest:i src1:i len:8 int_conv_to_i2: dest:i src1:i len:8 int_conv_to_i4: dest:i src1:i len:4 sext_i4: dest:i src1:i len:4 int_conv_to_r4: dest:f src1:i len:20 int_conv_to_r8: dest:f src1:i len:16 int_conv_to_u4: dest:i src1:i len:4 int_conv_to_u2: dest:i src1:i len:8 int_conv_to_u1: dest:i src1:i len:4 int_beq: len:8 int_bge: len:8 int_bgt: len:8 int_ble: len:8 int_blt: len:8 int_bne_un: len:8 int_bge_un: len:8 int_bgt_un: len:8 int_ble_un: len:8 int_blt_un: len:8 int_add_ovf: dest:i src1:i src2:i len:16 int_add_ovf_un: dest:i src1:i 
src2:i len:16 int_mul_ovf: dest:i src1:i src2:i len:16 int_mul_ovf_un: dest:i src1:i src2:i len:16 int_sub_ovf: dest:i src1:i src2:i len:16 int_sub_ovf_un: dest:i src1:i src2:i len:16 int_adc: dest:i src1:i src2:i len:4 int_addcc: dest:i src1:i src2:i len:4 int_subcc: dest:i src1:i src2:i len:4 int_sbb: dest:i src1:i src2:i len:4 int_adc_imm: dest:i src1:i len:12 int_sbb_imm: dest:i src1:i len:12 int_add_imm: dest:i src1:i len:4 int_sub_imm: dest:i src1:i len:12 int_mul_imm: dest:i src1:i len:12 int_div_imm: dest:i src1:i len:20 int_div_un_imm: dest:i src1:i len:12 int_rem_imm: dest:i src1:i len:28 int_rem_un_imm: dest:i src1:i len:16 int_and_imm: dest:i src1:i len:12 int_or_imm: dest:i src1:i len:12 int_xor_imm: dest:i src1:i len:12 int_shl_imm: dest:i src1:i len:8 int_shr_imm: dest:i src1:i len:8 int_shr_un_imm: dest:i src1:i len:8 int_ceq: dest:i len:12 int_cgt: dest:i len:12 int_cgt_un: dest:i len:12 int_clt: dest:i len:12 int_clt_un: dest:i len:12 int_cneq: dest:i len:12 int_cge: dest:i len:12 int_cle: dest:i len:12 int_cge_un: dest:i len:12 int_cle_un: dest:i len:12 cond_exc_ieq: len:8 cond_exc_ine_un: len:8 cond_exc_ilt: len:8 cond_exc_ilt_un: len:8 cond_exc_igt: len:8 cond_exc_igt_un: len:8 cond_exc_ige: len:8 cond_exc_ige_un: len:8 cond_exc_ile: len:8 cond_exc_ile_un: len:8 cond_exc_iov: len:12 cond_exc_ino: len:8 cond_exc_ic: len:12 cond_exc_inc: len:8 icompare: src1:i src2:i len:4 icompare_imm: src1:i len:12 # 64 bit opcodes long_add: dest:i src1:i src2:i len:4 long_sub: dest:i src1:i src2:i len:4 long_mul: dest:i src1:i src2:i len:4 long_mul_imm: dest:i src1:i len:4 long_div: dest:i src1:i src2:i len:40 long_div_un: dest:i src1:i src2:i len:16 long_rem: dest:i src1:i src2:i len:48 long_rem_un: dest:i src1:i src2:i len:24 long_and: dest:i src1:i src2:i len:4 long_or: dest:i src1:i src2:i len:4 long_xor: dest:i src1:i src2:i len:4 long_shl: dest:i src1:i src2:i len:4 long_shl_imm: dest:i src1:i len:4 long_shr: dest:i src1:i src2:i len:4 long_shr_un: dest:i src1:i src2:i len:4 long_shr_imm: dest:i src1:i len:4 long_shr_un_imm: dest:i src1:i len:4 long_neg: dest:i src1:i len:4 long_not: dest:i src1:i len:4 long_conv_to_i1: dest:i src1:i len:4 long_conv_to_i2: dest:i src1:i len:4 long_conv_to_i4: dest:i src1:i len:4 long_conv_to_r4: dest:f src1:i len:16 long_conv_to_r8: dest:f src1:i len:12 long_conv_to_u4: dest:i src1:i long_conv_to_u2: dest:i src1:i len:4 long_conv_to_u1: dest:i src1:i len:4 zext_i4: dest:i src1:i len:4 long_beq: len:8 long_bge: len:8 long_bgt: len:8 long_ble: len:8 long_blt: len:8 long_bne_un: len:8 long_bge_un: len:8 long_bgt_un: len:8 long_ble_un: len:8 long_blt_un: len:8 long_add_ovf: dest:i src1:i src2:i len:16 long_add_ovf_un: dest:i src1:i src2:i len:16 long_mul_ovf: dest:i src1:i src2:i len:16 long_mul_ovf_un: dest:i src1:i src2:i len:16 long_sub_ovf: dest:i src1:i src2:i len:16 long_sub_ovf_un: dest:i src1:i src2:i len:16 long_ceq: dest:i len:12 long_cgt: dest:i len:12 long_cgt_un: dest:i len:12 long_clt: dest:i len:12 long_clt_un: dest:i len:12 long_add_imm: dest:i src1:i clob:1 len:4 long_sub_imm: dest:i src1:i clob:1 len:4 long_and_imm: dest:i src1:i clob:1 len:4 long_or_imm: dest:i src1:i clob:1 len:4 long_xor_imm: dest:i src1:i clob:1 len:4 lcompare: src1:i src2:i len:4 lcompare_imm: src1:i len:12 long_min: dest:i src1:i src2:i len:8 clob:1 long_min_un: dest:i src1:i src2:i len:8 clob:1 long_max: dest:i src1:i src2:i len:8 clob:1 long_max_un: dest:i src1:i src2:i len:8 clob:1 int_min: dest:i src1:i src2:i len:8 clob:1 int_max: dest:i src1:i src2:i 
len:8 clob:1 int_min_un: dest:i src1:i src2:i len:8 clob:1 int_max_un: dest:i src1:i src2:i len:8 clob:1 #long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:30 vcall2: len:36 clob:c vcall2_reg: src1:i len:16 clob:c vcall2_membase: src1:b len:16 clob:c jump_table: dest:i len:20 atomic_add_i4: src1:b src2:i dest:i len:28 atomic_add_i8: src1:b src2:i dest:i len:28 atomic_cas_i4: src1:b src2:i src3:i dest:i len:38 atomic_cas_i8: src1:b src2:i src3:i dest:i len:38 liverange_start: len:0 liverange_end: len:0 gc_safe_point: len:0
1
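The header comments in the snapshots above spell out the genmdesc entry grammar: an opcode name terminated by a colon, then whitespace-separated name:value specifiers such as dest:i or len:4. A small sketch of a parser for one such entry line, assuming comment-free input (the helper name parse_entry is invented for illustration):

def parse_entry(line: str) -> tuple[str, dict[str, str]]:
    """Split 'int_add: dest:i src1:i src2:i len:4' into ('int_add',
    {'dest': 'i', 'src1': 'i', 'src2': 'i', 'len': '4'})."""
    name, _, rest = line.partition(":")
    specs = {}
    for token in rest.split():
        key, _, value = token.partition(":")
        specs[key] = value
    return name.strip(), specs

opcode, specs = parse_entry("int_add: dest:i src1:i src2:i len:4")
assert opcode == "int_add" and specs["len"] == "4"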
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/cpu-s390x.md
# S/390 cpu description file # this file is read by genmdesc to produce a table with all the relevant information # about the cpu instructions that may be used by the register allocator, the scheduler # and other parts of the arch-dependent part of mini. # # An opcode name is followed by a colon and optional specifiers. # A specifier has a name, a colon and a value. Specifiers are separated by white space. # Here is a description of the specifiers valid for this file and their possible values. # # dest:register describes the destination register of an instruction # src1:register describes the first source register of an instruction # src2:register describes the second source register of an instruction # # register may have the following values: # i integer register # a r3 register (output from calls) # b base register (used in address references) # f floating point register # # len:number describes the maximum length in bytes of the instruction # number is a positive integer # # cost:number describes how many cycles are needed to complete the instruction (unused) # # clob:spec describes if the instruction clobbers registers or has special needs # # spec can be one of the following characters: # c clobbers caller-save registers # r 'reserves' the destination register until a later instruction unreserves it # used mostly to set output registers in function calls # # flags:spec describes if the instruction uses or sets the flags (unused) # # spec can be one of the following chars: # s sets the flags # u uses the flags # m uses and modifies the flags # # res:spec describes what units are used in the processor (unused) # # delay: describes delay slots (unused) # # the required specifiers are: len, clob (if registers are clobbered), the registers # specifiers if the registers are actually used, flags (when scheduling is implemented). # # See the code in mini-x86.c for more details on how the specifiers are used. 
# adc: dest:i src1:i src2:i len:6 adc_imm: dest:i src1:i len:14 add_imm: dest:i src1:i len:24 add_ovf_carry: dest:i src1:1 src2:i len:28 add_ovf_un_carry: dest:i src1:1 src2:i len:12 addcc: dest:i src1:i src2:i len:12 and_imm: dest:i src1:i len:24 aotconst: dest:i len:8 atomic_add_i4: src1:b src2:i dest:i len:28 atomic_add_i8: src1:b src2:i dest:i len:30 atomic_exchange_i4: src1:b src2:i dest:i len:18 atomic_exchange_i8: src1:b src2:i dest:i len:24 br: len:6 br_reg: src1:i len:8 break: len:22 call: dest:o clob:c len:26 call_handler: len:12 clob:c call_membase: dest:o src1:b len:12 clob:c call_reg: dest:o src1:i len:8 clob:c ceq: dest:i len:12 cgt_un: dest:i len:12 cgt: dest:i len:12 check_this: src1:b len:16 ckfinite: dest:f src1:f len:22 clt_un: dest:i len:12 clt: dest:i len:12 compare: src1:i src2:i len:4 compare_imm: src1:i len:20 cond_exc_c: len:8 cond_exc_eq: len:8 cond_exc_ge: len:8 cond_exc_ge_un: len:8 cond_exc_gt: len:8 cond_exc_gt_un: len:8 cond_exc_le: len:8 cond_exc_le_un: len:8 cond_exc_lt: len:8 cond_exc_lt_un: len:8 cond_exc_nc: len:8 cond_exc_ne_un: len:8 cond_exc_no: len:8 cond_exc_ov: len:8 div_imm: dest:i src1:i len:24 div_un_imm: dest:i src1:i len:24 endfinally: len:8 fcall: dest:g len:26 clob:c fcall_membase: dest:g src1:b len:14 clob:c fcall_reg: dest:g src1:i len:10 clob:c fcompare: src1:f src2:f len:14 rcompare: src1:f src2:f len:14 float_add: dest:f src1:f src2:f len:8 float_beq: len:10 float_bge: len:10 float_bge_un: len:8 float_bgt: len:10 float_ble: len:10 float_ble_un: len:8 float_blt: len:10 float_blt_un: len:8 float_bne_un: len:8 float_bgt_un: len:8 float_ceq: dest:i src1:f src2:f len:16 float_cgt: dest:i src1:f src2:f len:16 float_cgt_un: dest:i src1:f src2:f len:16 float_clt: dest:i src1:f src2:f len:16 float_clt_un: dest:i src1:f src2:f len:16 float_cneq: dest:y src1:f src2:f len:16 float_cge: dest:y src1:f src2:f len:16 float_cle: dest:y src1:f src2:f len:16 float_conv_to_i1: dest:i src1:f len:50 float_conv_to_i2: dest:i src1:f len:50 float_conv_to_i4: dest:i src1:f len:50 float_conv_to_i8: dest:l src1:f len:50 float_conv_to_i: dest:i src1:f len:52 float_conv_to_r4: dest:f src1:f len:8 float_conv_to_u1: dest:i src1:f len:72 float_conv_to_u2: dest:i src1:f len:72 float_conv_to_u4: dest:i src1:f len:72 float_conv_to_u8: dest:i src1:f len:72 float_div: dest:f src1:f src2:f len:24 float_div_un: dest:f src1:f src2:f len:30 float_mul: dest:f src1:f src2:f len:8 float_neg: dest:f src1:f len:8 float_not: dest:f src1:f len:8 float_rem: dest:f src1:f src2:f len:24 float_rem_un: dest:f src1:f src2:f len:30 float_sub: dest:f src1:f src2:f len:24 # R4 opcodes r4_conv_to_i1: dest:i src1:f len:32 r4_conv_to_u1: dest:i src1:f len:32 r4_conv_to_i2: dest:i src1:f len:32 r4_conv_to_u2: dest:i src1:f len:32 r4_conv_to_i4: dest:i src1:f len:16 r4_conv_to_i: dest:i src1:f len:16 r4_conv_to_u4: dest:i src1:f len:32 r4_conv_to_i8: dest:i src1:f len:32 r4_conv_to_r8: dest:f src1:f len:17 r4_conv_to_u8: dest:i src1:f len:17 r4_conv_to_r4: dest:f src1:f len:17 r4_add: dest:f src1:f src2:f clob:1 len:8 r4_sub: dest:f src1:f src2:f clob:1 len:20 r4_mul: dest:f src1:f src2:f clob:1 len:8 r4_div: dest:f src1:f src2:f clob:1 len:20 r4_rem: dest:f src1:f src2:f clob:1 len:24 r4_neg: dest:f src1:f clob:1 len:23 r4_ceq: dest:i src1:f src2:f len:35 r4_cgt: dest:i src1:f src2:f len:35 r4_cgt_un: dest:i src1:f src2:f len:48 r4_clt: dest:i src1:f src2:f len:35 r4_clt_un: dest:i src1:f src2:f len:42 r4_cneq: dest:i src1:f src2:f len:42 r4_cge: dest:i src1:f src2:f len:35 r4_cle: dest:i src1:f 
src2:f len:35 rmove: dest:f src1:f len:4 fmove: dest:f src1:f len:4 move_f_to_i4: dest:i src1:f len:14 move_i4_to_f: dest:f src1:i len:14 move_f_to_i8: dest:i src1:f len:4 move_i8_to_f: dest:f src1:i len:8 i8const: dest:i len:20 icompare: src1:i src2:i len:4 icompare_imm: src1:i len:18 iconst: dest:i len:40 label: len:0 lcall: dest:o len:22 clob:c lcall_membase: dest:o src1:b len:12 clob:c lcall_reg: dest:o src1:i len:8 clob:c lcompare: src1:i src2:i len:4 load_membase: dest:i src1:b len:30 loadi1_membase: dest:i src1:b len:40 loadi2_membase: dest:i src1:b len:30 loadi4_membase: dest:i src1:b len:30 loadi8_membase: dest:i src1:b len:30 loadr4_membase: dest:f src1:b len:28 loadr8_membase: dest:f src1:b len:28 loadu1_membase: dest:i src1:b len:30 loadu2_membase: dest:i src1:b len:30 loadu4_mem: dest:i len:8 loadu4_membase: dest:i src1:b len:30 localloc: dest:i src1:i len:180 memory_barrier: len:10 move: dest:i src1:i len:4 mul_imm: dest:i src1:i len:24 nop: len:4 popcnt32: dest:i src1:i len:38 popcnt64: dest:i src1:i len:34 relaxed_nop: len:4 arglist: src1:i len:28 bigmul: len:2 dest:i src1:a src2:i bigmul_un: len:2 dest:i src1:a src2:i endfilter: src1:i len:28 rethrow: src1:i len:26 or_imm: dest:i src1:i len:24 r4const: dest:f len:26 r8const: dest:f len:24 rem_imm: dest:i src1:i len:24 rcall: dest:f len:26 clob:c rcall_reg: dest:f src1:i len:8 clob:c rcall_membase: dest:f src1:b len:12 clob:c rem_un_imm: dest:i src1:i len:24 s390_bkchain: len:8 dest:i src1:i s390_move: len:48 src2:b src1:b s390_setf4ret: dest:f src1:f len:4 sbb: dest:i src1:i src2:i len:6 sbb_imm: dest:i src1:i len:14 seq_point: len:64 il_seq_point: len:0 sext_i4: dest:i src1:i len:4 zext_i4: dest:i src1:i len:4 shl_imm: dest:i src1:i len:10 shr_imm: dest:i src1:i len:10 shr_un_imm: dest:i src1:i len:10 abs: dest:f src1:f len:4 absf: dest:f src1:f len:4 ceil: dest:f src1:f len:4 ceilf: dest:f src1:f len:4 floor: dest:f src1:f len:4 floorf: dest:f src1:f len:4 round: dest:f src1:f len:4 sqrt: dest:f src1:f len:4 sqrtf: dest:f src1:f len:4 trunc: dest:f src1:f len:4 truncf: dest:f src1:f len:4 fcopysign: dest:f src1:f src2:f len:4 start_handler: len:26 store_membase_imm: dest:b len:46 store_membase_reg: dest:b src1:i len:26 storei1_membase_imm: dest:b len:46 storei1_membase_reg: dest:b src1:i len:26 storei2_membase_imm: dest:b len:46 storei2_membase_reg: dest:b src1:i len:26 storei4_membase_imm: dest:b len:46 storei4_membase_reg: dest:b src1:i len:26 storei8_membase_imm: dest:b len:46 storei8_membase_reg: dest:b src1:i len:26 storer4_membase_reg: dest:b src1:f len:28 storer8_membase_reg: dest:b src1:f len:24 sub_imm: dest:i src1:i len:18 sub_ovf_carry: dest:i src1:1 src2:i len:28 sub_ovf_un_carry: dest:i src1:1 src2:i len:12 subcc: dest:i src1:i src2:i len:12 tailcall: len:32 clob:c tailcall_reg: src1:b len:32 clob:c tailcall_membase: src1:b len:32 clob:c # Tailcall parameters are moved with one instruction per 256 bytes, # of stacked parameters. Zero and six are the most common # totals. Division is not possible. Allocate an instruction per parameter. 
tailcall_parameter: len:6 throw: src1:i len:26 tls_get: dest:1 len:32 tls_set: src1:1 len:32 vcall: len:22 clob:c vcall_membase: src1:b len:12 clob:c vcall_reg: src1:i len:8 clob:c voidcall: len:22 clob:c voidcall_membase: src1:b len:12 clob:c voidcall_reg: src1:i len:8 clob:c xor_imm: dest:i src1:i len:20 # 32 bit opcodes int_adc: dest:i src1:i src2:i len:12 int_adc_imm: dest:i src1:i len:14 int_addcc: dest:i src1:i src2:i len:12 int_add: dest:i src1:i src2:i len:12 int_add_imm: dest:i src1:i len:20 int_and: dest:i src1:i src2:i len:12 int_and_imm: dest:i src1:i len:24 int_beq: len:8 int_bge: len:8 int_bge_un: len:8 int_bgt: len:8 int_bgt_un: len:8 int_ble: len:8 int_ble_un: len:8 int_blt: len:8 int_blt_un: len:8 int_bne_un: len:8 int_ceq: dest:i len:12 int_cgt: dest:i len:12 int_cgt_un: dest:i len:12 int_clt: dest:i len:12 int_clt_un: dest:i len:12 int_cneq: dest:i len:12 int_cge: dest:i len:12 int_cle: dest:i len:12 int_cge_un: dest:i len:12 int_cle_un: dest:i len:12 int_div: dest:a src1:i src2:i len:16 int_div_imm: dest:a src1:i len:24 int_div_un: dest:a src1:i src2:i len:16 int_div_un_imm: dest:a src1:i len:24 int_mul: dest:i src1:i src2:i len:16 int_mul_imm: dest:i src1:i len:24 int_mul_ovf: dest:i src1:i src2:i len:44 int_mul_ovf_un: dest:i src1:i src2:i len:22 int_add_ovf: dest:i src1:i src2:i len:32 int_add_ovf_un: dest:i src1:i src2:i len:32 int_sub_ovf: dest:i src1:i src2:i len:32 int_sub_ovf_un: dest:i src1:i src2:i len:32 int_neg: dest:i src1:i len:12 int_not: dest:i src1:i len:12 int_or: dest:i src1:i src2:i len:12 int_or_imm: dest:i src1:i len:24 int_rem: dest:d src1:i src2:i len:16 int_rem_imm: dest:d src1:i len:24 int_rem_un: dest:d src1:i src2:i len:16 int_rem_un_imm: dest:d src1:i len:24 int_sbb: dest:i src1:i src2:i len:6 int_sbb_imm: dest:i src1:i len:14 int_shl: dest:i src1:i src2:i clob:s len:12 int_shl_imm: dest:i src1:i len:10 int_shr: dest:i src1:i src2:i clob:s len:12 int_shr_imm: dest:i src1:i len:10 int_shr_un: dest:i src1:i src2:i clob:s len:12 int_shr_un_imm: dest:i src1:i len:10 int_subcc: dest:i src1:i src2:i len:12 int_sub: dest:i src1:i src2:i len:12 int_sub_imm: dest:i src1:i len:20 int_xor: dest:i src1:i src2:i len:12 int_xor_imm: dest:i src1:i len:24 int_conv_to_r4: dest:f src1:i len:16 int_conv_to_r8: dest:f src1:i len:16 # 64 bit opcodes long_add: dest:i src1:i src2:i len:12 long_sub: dest:i src1:i src2:i len:12 long_add_ovf: dest:i src1:i src2:i len:32 long_add_ovf_un: dest:i src1:i src2:i len:32 long_div: dest:i src1:i src2:i len:12 long_div_un: dest:i src1:i src2:i len:16 long_mul: dest:i src1:i src2:i len:12 long_mul_imm: dest:i src1:i len:20 long_mul_ovf: dest:i src1:i src2:i len:56 long_mul_ovf_un: dest:i src1:i src2:i len:64 long_and: dest:i src1:i src2:i len:8 long_or: dest:i src1:i src2:i len:8 long_xor: dest:i src1:i src2:i len:8 long_neg: dest:i src1:i len:6 long_not: dest:i src1:i len:12 long_rem: dest:i src1:i src2:i len:12 long_rem_imm: dest:i src1:i len:12 long_rem_un: dest:i src1:i src2:i len:16 long_shl: dest:i src1:i src2:i len:14 long_shl_imm: dest:i src1:i len:14 long_shr_un: dest:i src1:i src2:i len:14 long_shr: dest:i src1:i src2:i len:14 long_shr_imm: dest:i src1:i len:14 long_shr_un_imm: dest:i src1:i len:14 long_sub_imm: dest:i src1:i len:16 long_sub_ovf: dest:i src1:i src2:i len:16 long_sub_ovf_un: dest:i src1:i src2:i len:28 long_conv_to_i1: dest:i src1:i len:12 long_conv_to_i2: dest:i src1:i len:12 long_conv_to_i4: dest:i src1:i len:4 long_conv_to_i8: dest:i src1:i len:4 long_conv_to_i: dest:i src1:i len:4 
long_conv_to_ovf_i: dest:i src1:i len:44 long_conv_to_ovf_i4_un: dest:i src1:i len:50 long_conv_to_ovf_u4: dest:i src1:i len:48 long_conv_to_ovf_u8_un: dest:i src1:i len:4 long_conv_to_r4: dest:f src1:i len:16 long_conv_to_r8: dest:f src1:i len:16 long_conv_to_u1: dest:i src1:i len:16 long_conv_to_u2: dest:i src1:i len:24 long_conv_to_u4: dest:i src1:i len:4 long_conv_to_u8: dest:i src1:i len:4 long_conv_to_u: dest:i src1:i len:4 long_conv_to_r_un: dest:f src1:i len:37 long_beq: len:8 long_bge_un: len:8 long_bge: len:8 long_bgt_un: len:8 long_bgt: len:8 long_ble_un: len:8 long_ble: len:8 long_blt_un: len:8 long_blt: len:8 long_bne_un: len:8 # Linear IR opcodes dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_i8const: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 jump_table: dest:i len:24 int_conv_to_i1: dest:i src1:i len:12 int_conv_to_i2: dest:i src1:i len:12 int_conv_to_i4: dest:i src1:i len:4 int_conv_to_i: dest:i src1:i len:4 int_conv_to_u1: dest:i src1:i len:10 int_conv_to_u2: dest:i src1:i len:16 int_conv_to_u4: dest:i src1:i len:4 int_conv_to_r_un: dest:f src1:i len:37 cond_exc_ic: len:8 cond_exc_ieq: len:8 cond_exc_ige: len:8 cond_exc_ige_un: len:8 cond_exc_igt: len:8 cond_exc_igt_un: len:8 cond_exc_ile: len:8 cond_exc_ile_un: len:8 cond_exc_ilt: len:8 cond_exc_ilt_un: len:8 cond_exc_inc: len:8 cond_exc_ine_un: len:8 cond_exc_ino: len:8 cond_exc_iov: len:8 lcompare_imm: src1:i len:20 long_add_imm: dest:i src1:i len:20 long_ceq: dest:i len:12 long_cgt_un: dest:i len:12 long_cgt: dest:i len:12 long_clt_un: dest:i len:12 long_clt: dest:i len:12 vcall2: len:22 clob:c vcall2_membase: src1:b len:12 clob:c vcall2_reg: src1:i len:8 clob:c s390_int_add_ovf: len:32 dest:i src1:i src2:i s390_int_add_ovf_un: len:32 dest:i src1:i src2:i s390_int_sub_ovf: len:32 dest:i src1:i src2:i s390_int_sub_ovf_un: len:32 dest:i src1:i src2:i s390_long_add_ovf: dest:i src1:i src2:i len:32 s390_long_add_ovf_un: dest:i src1:i src2:i len:32 s390_long_sub_ovf: dest:i src1:i src2:i len:32 s390_long_sub_ovf_un: dest:i src1:i src2:i len:32 liverange_start: len:0 liverange_end: len:0 gc_liveness_def: len:0 gc_liveness_use: len:0 gc_spill_slot_liveness_def: len:0 gc_param_slot_liveness_def: len:0 gc_safe_point: clob:c src1:i len:32 generic_class_init: src1:A len:32 clob:c s390_crj: src1:i src2:i len:24 s390_crj_un: src1:i src2:i len:24 s390_cgrj: src1:i src2:i len:24 s390_cgrj_un: src1:i src2:i len:24 s390_cij: len:24 s390_cij_un: src1:i len:24 s390_cgij: len:24 s390_cgij_un: len:24
# S/390 cpu description file # this file is read by genmdesc to produce a table with all the relevant information # about the cpu instructions that may be used by the register allocator, the scheduler # and other parts of the arch-dependent part of mini. # # An opcode name is followed by a colon and optional specifiers. # A specifier has a name, a colon and a value. Specifiers are separated by white space. # Here is a description of the specifiers valid for this file and their possible values. # # dest:register describes the destination register of an instruction # src1:register describes the first source register of an instruction # src2:register describes the second source register of an instruction # # register may have the following values: # i integer register # a r3 register (output from calls) # b base register (used in address references) # f floating point register # # len:number describes the maximum length in bytes of the instruction # number is a positive integer # # cost:number describes how many cycles are needed to complete the instruction (unused) # # clob:spec describes if the instruction clobbers registers or has special needs # # spec can be one of the following characters: # c clobbers caller-save registers # r 'reserves' the destination register until a later instruction unreserves it # used mostly to set output registers in function calls # # flags:spec describes if the instruction uses or sets the flags (unused) # # spec can be one of the following chars: # s sets the flags # u uses the flags # m uses and modifies the flags # # res:spec describes what units are used in the processor (unused) # # delay: describes delay slots (unused) # # the required specifiers are: len, clob (if registers are clobbered), the registers # specifiers if the registers are actually used, flags (when scheduling is implemented). # # See the code in mini-x86.c for more details on how the specifiers are used. 
# adc: dest:i src1:i src2:i len:6 adc_imm: dest:i src1:i len:14 add_imm: dest:i src1:i len:24 add_ovf_carry: dest:i src1:1 src2:i len:28 add_ovf_un_carry: dest:i src1:1 src2:i len:12 addcc: dest:i src1:i src2:i len:12 and_imm: dest:i src1:i len:24 aotconst: dest:i len:8 atomic_add_i4: src1:b src2:i dest:i len:28 atomic_add_i8: src1:b src2:i dest:i len:30 atomic_exchange_i4: src1:b src2:i dest:i len:18 atomic_exchange_i8: src1:b src2:i dest:i len:24 br: len:6 br_reg: src1:i len:8 break: len:22 call: dest:o clob:c len:26 call_handler: len:12 clob:c call_membase: dest:o src1:b len:12 clob:c call_reg: dest:o src1:i len:8 clob:c ceq: dest:i len:12 cgt_un: dest:i len:12 cgt: dest:i len:12 check_this: src1:b len:16 ckfinite: dest:f src1:f len:22 clt_un: dest:i len:12 clt: dest:i len:12 compare: src1:i src2:i len:4 compare_imm: src1:i len:20 cond_exc_c: len:8 cond_exc_eq: len:8 cond_exc_ge: len:8 cond_exc_ge_un: len:8 cond_exc_gt: len:8 cond_exc_gt_un: len:8 cond_exc_le: len:8 cond_exc_le_un: len:8 cond_exc_lt: len:8 cond_exc_lt_un: len:8 cond_exc_nc: len:8 cond_exc_ne_un: len:8 cond_exc_no: len:8 cond_exc_ov: len:8 div_imm: dest:i src1:i len:24 div_un_imm: dest:i src1:i len:24 endfinally: len:8 fcall: dest:g len:26 clob:c fcall_membase: dest:g src1:b len:14 clob:c fcall_reg: dest:g src1:i len:10 clob:c fcompare: src1:f src2:f len:14 rcompare: src1:f src2:f len:14 float_add: dest:f src1:f src2:f len:8 float_beq: len:10 float_bge: len:10 float_bge_un: len:8 float_bgt: len:10 float_ble: len:10 float_ble_un: len:8 float_blt: len:10 float_blt_un: len:8 float_bne_un: len:8 float_bgt_un: len:8 float_ceq: dest:i src1:f src2:f len:16 float_cgt: dest:i src1:f src2:f len:16 float_cgt_un: dest:i src1:f src2:f len:16 float_clt: dest:i src1:f src2:f len:16 float_clt_un: dest:i src1:f src2:f len:16 float_cneq: dest:y src1:f src2:f len:16 float_cge: dest:y src1:f src2:f len:16 float_cle: dest:y src1:f src2:f len:16 float_conv_to_i1: dest:i src1:f len:50 float_conv_to_i2: dest:i src1:f len:50 float_conv_to_i4: dest:i src1:f len:50 float_conv_to_i8: dest:l src1:f len:50 float_conv_to_r4: dest:f src1:f len:8 float_conv_to_u1: dest:i src1:f len:72 float_conv_to_u2: dest:i src1:f len:72 float_conv_to_u4: dest:i src1:f len:72 float_conv_to_u8: dest:i src1:f len:72 float_div: dest:f src1:f src2:f len:24 float_div_un: dest:f src1:f src2:f len:30 float_mul: dest:f src1:f src2:f len:8 float_neg: dest:f src1:f len:8 float_not: dest:f src1:f len:8 float_rem: dest:f src1:f src2:f len:24 float_rem_un: dest:f src1:f src2:f len:30 float_sub: dest:f src1:f src2:f len:24 # R4 opcodes r4_conv_to_i1: dest:i src1:f len:32 r4_conv_to_u1: dest:i src1:f len:32 r4_conv_to_i2: dest:i src1:f len:32 r4_conv_to_u2: dest:i src1:f len:32 r4_conv_to_i4: dest:i src1:f len:16 r4_conv_to_u4: dest:i src1:f len:32 r4_conv_to_i8: dest:i src1:f len:32 r4_conv_to_r8: dest:f src1:f len:17 r4_conv_to_u8: dest:i src1:f len:17 r4_conv_to_r4: dest:f src1:f len:17 r4_add: dest:f src1:f src2:f clob:1 len:8 r4_sub: dest:f src1:f src2:f clob:1 len:20 r4_mul: dest:f src1:f src2:f clob:1 len:8 r4_div: dest:f src1:f src2:f clob:1 len:20 r4_rem: dest:f src1:f src2:f clob:1 len:24 r4_neg: dest:f src1:f clob:1 len:23 r4_ceq: dest:i src1:f src2:f len:35 r4_cgt: dest:i src1:f src2:f len:35 r4_cgt_un: dest:i src1:f src2:f len:48 r4_clt: dest:i src1:f src2:f len:35 r4_clt_un: dest:i src1:f src2:f len:42 r4_cneq: dest:i src1:f src2:f len:42 r4_cge: dest:i src1:f src2:f len:35 r4_cle: dest:i src1:f src2:f len:35 rmove: dest:f src1:f len:4 fmove: dest:f src1:f len:4 
move_f_to_i4: dest:i src1:f len:14 move_i4_to_f: dest:f src1:i len:14 move_f_to_i8: dest:i src1:f len:4 move_i8_to_f: dest:f src1:i len:8 i8const: dest:i len:20 icompare: src1:i src2:i len:4 icompare_imm: src1:i len:18 iconst: dest:i len:40 label: len:0 lcall: dest:o len:22 clob:c lcall_membase: dest:o src1:b len:12 clob:c lcall_reg: dest:o src1:i len:8 clob:c lcompare: src1:i src2:i len:4 load_membase: dest:i src1:b len:30 loadi1_membase: dest:i src1:b len:40 loadi2_membase: dest:i src1:b len:30 loadi4_membase: dest:i src1:b len:30 loadi8_membase: dest:i src1:b len:30 loadr4_membase: dest:f src1:b len:28 loadr8_membase: dest:f src1:b len:28 loadu1_membase: dest:i src1:b len:30 loadu2_membase: dest:i src1:b len:30 loadu4_mem: dest:i len:8 loadu4_membase: dest:i src1:b len:30 localloc: dest:i src1:i len:180 memory_barrier: len:10 move: dest:i src1:i len:4 mul_imm: dest:i src1:i len:24 nop: len:4 popcnt32: dest:i src1:i len:38 popcnt64: dest:i src1:i len:34 relaxed_nop: len:4 arglist: src1:i len:28 bigmul: len:2 dest:i src1:a src2:i bigmul_un: len:2 dest:i src1:a src2:i endfilter: src1:i len:28 rethrow: src1:i len:26 or_imm: dest:i src1:i len:24 r4const: dest:f len:26 r8const: dest:f len:24 rem_imm: dest:i src1:i len:24 rcall: dest:f len:26 clob:c rcall_reg: dest:f src1:i len:8 clob:c rcall_membase: dest:f src1:b len:12 clob:c rem_un_imm: dest:i src1:i len:24 s390_bkchain: len:8 dest:i src1:i s390_move: len:48 src2:b src1:b s390_setf4ret: dest:f src1:f len:4 sbb: dest:i src1:i src2:i len:6 sbb_imm: dest:i src1:i len:14 seq_point: len:64 il_seq_point: len:0 sext_i4: dest:i src1:i len:4 zext_i4: dest:i src1:i len:4 shl_imm: dest:i src1:i len:10 shr_imm: dest:i src1:i len:10 shr_un_imm: dest:i src1:i len:10 abs: dest:f src1:f len:4 absf: dest:f src1:f len:4 ceil: dest:f src1:f len:4 ceilf: dest:f src1:f len:4 floor: dest:f src1:f len:4 floorf: dest:f src1:f len:4 round: dest:f src1:f len:4 sqrt: dest:f src1:f len:4 sqrtf: dest:f src1:f len:4 trunc: dest:f src1:f len:4 truncf: dest:f src1:f len:4 fcopysign: dest:f src1:f src2:f len:4 start_handler: len:26 store_membase_imm: dest:b len:46 store_membase_reg: dest:b src1:i len:26 storei1_membase_imm: dest:b len:46 storei1_membase_reg: dest:b src1:i len:26 storei2_membase_imm: dest:b len:46 storei2_membase_reg: dest:b src1:i len:26 storei4_membase_imm: dest:b len:46 storei4_membase_reg: dest:b src1:i len:26 storei8_membase_imm: dest:b len:46 storei8_membase_reg: dest:b src1:i len:26 storer4_membase_reg: dest:b src1:f len:28 storer8_membase_reg: dest:b src1:f len:24 sub_imm: dest:i src1:i len:18 sub_ovf_carry: dest:i src1:1 src2:i len:28 sub_ovf_un_carry: dest:i src1:1 src2:i len:12 subcc: dest:i src1:i src2:i len:12 tailcall: len:32 clob:c tailcall_reg: src1:b len:32 clob:c tailcall_membase: src1:b len:32 clob:c # Tailcall parameters are moved with one instruction per 256 bytes, # of stacked parameters. Zero and six are the most common # totals. Division is not possible. Allocate an instruction per parameter. 
tailcall_parameter: len:6 throw: src1:i len:26 tls_get: dest:1 len:32 tls_set: src1:1 len:32 vcall: len:22 clob:c vcall_membase: src1:b len:12 clob:c vcall_reg: src1:i len:8 clob:c voidcall: len:22 clob:c voidcall_membase: src1:b len:12 clob:c voidcall_reg: src1:i len:8 clob:c xor_imm: dest:i src1:i len:20 # 32 bit opcodes int_adc: dest:i src1:i src2:i len:12 int_adc_imm: dest:i src1:i len:14 int_addcc: dest:i src1:i src2:i len:12 int_add: dest:i src1:i src2:i len:12 int_add_imm: dest:i src1:i len:20 int_and: dest:i src1:i src2:i len:12 int_and_imm: dest:i src1:i len:24 int_beq: len:8 int_bge: len:8 int_bge_un: len:8 int_bgt: len:8 int_bgt_un: len:8 int_ble: len:8 int_ble_un: len:8 int_blt: len:8 int_blt_un: len:8 int_bne_un: len:8 int_ceq: dest:i len:12 int_cgt: dest:i len:12 int_cgt_un: dest:i len:12 int_clt: dest:i len:12 int_clt_un: dest:i len:12 int_cneq: dest:i len:12 int_cge: dest:i len:12 int_cle: dest:i len:12 int_cge_un: dest:i len:12 int_cle_un: dest:i len:12 int_div: dest:a src1:i src2:i len:16 int_div_imm: dest:a src1:i len:24 int_div_un: dest:a src1:i src2:i len:16 int_div_un_imm: dest:a src1:i len:24 int_mul: dest:i src1:i src2:i len:16 int_mul_imm: dest:i src1:i len:24 int_mul_ovf: dest:i src1:i src2:i len:44 int_mul_ovf_un: dest:i src1:i src2:i len:22 int_add_ovf: dest:i src1:i src2:i len:32 int_add_ovf_un: dest:i src1:i src2:i len:32 int_sub_ovf: dest:i src1:i src2:i len:32 int_sub_ovf_un: dest:i src1:i src2:i len:32 int_neg: dest:i src1:i len:12 int_not: dest:i src1:i len:12 int_or: dest:i src1:i src2:i len:12 int_or_imm: dest:i src1:i len:24 int_rem: dest:d src1:i src2:i len:16 int_rem_imm: dest:d src1:i len:24 int_rem_un: dest:d src1:i src2:i len:16 int_rem_un_imm: dest:d src1:i len:24 int_sbb: dest:i src1:i src2:i len:6 int_sbb_imm: dest:i src1:i len:14 int_shl: dest:i src1:i src2:i clob:s len:12 int_shl_imm: dest:i src1:i len:10 int_shr: dest:i src1:i src2:i clob:s len:12 int_shr_imm: dest:i src1:i len:10 int_shr_un: dest:i src1:i src2:i clob:s len:12 int_shr_un_imm: dest:i src1:i len:10 int_subcc: dest:i src1:i src2:i len:12 int_sub: dest:i src1:i src2:i len:12 int_sub_imm: dest:i src1:i len:20 int_xor: dest:i src1:i src2:i len:12 int_xor_imm: dest:i src1:i len:24 int_conv_to_r4: dest:f src1:i len:16 int_conv_to_r8: dest:f src1:i len:16 # 64 bit opcodes long_add: dest:i src1:i src2:i len:12 long_sub: dest:i src1:i src2:i len:12 long_add_ovf: dest:i src1:i src2:i len:32 long_add_ovf_un: dest:i src1:i src2:i len:32 long_div: dest:i src1:i src2:i len:12 long_div_un: dest:i src1:i src2:i len:16 long_mul: dest:i src1:i src2:i len:12 long_mul_imm: dest:i src1:i len:20 long_mul_ovf: dest:i src1:i src2:i len:56 long_mul_ovf_un: dest:i src1:i src2:i len:64 long_and: dest:i src1:i src2:i len:8 long_or: dest:i src1:i src2:i len:8 long_xor: dest:i src1:i src2:i len:8 long_neg: dest:i src1:i len:6 long_not: dest:i src1:i len:12 long_rem: dest:i src1:i src2:i len:12 long_rem_imm: dest:i src1:i len:12 long_rem_un: dest:i src1:i src2:i len:16 long_shl: dest:i src1:i src2:i len:14 long_shl_imm: dest:i src1:i len:14 long_shr_un: dest:i src1:i src2:i len:14 long_shr: dest:i src1:i src2:i len:14 long_shr_imm: dest:i src1:i len:14 long_shr_un_imm: dest:i src1:i len:14 long_sub_imm: dest:i src1:i len:16 long_sub_ovf: dest:i src1:i src2:i len:16 long_sub_ovf_un: dest:i src1:i src2:i len:28 long_conv_to_i1: dest:i src1:i len:12 long_conv_to_i2: dest:i src1:i len:12 long_conv_to_i4: dest:i src1:i len:4 long_conv_to_i8: dest:i src1:i len:4 long_conv_to_i: dest:i src1:i len:4 
long_conv_to_ovf_i: dest:i src1:i len:44 long_conv_to_ovf_i4_un: dest:i src1:i len:50 long_conv_to_ovf_u4: dest:i src1:i len:48 long_conv_to_ovf_u8_un: dest:i src1:i len:4 long_conv_to_r4: dest:f src1:i len:16 long_conv_to_r8: dest:f src1:i len:16 long_conv_to_u1: dest:i src1:i len:16 long_conv_to_u2: dest:i src1:i len:24 long_conv_to_u4: dest:i src1:i len:4 long_conv_to_u8: dest:i src1:i len:4 long_conv_to_u: dest:i src1:i len:4 long_conv_to_r_un: dest:f src1:i len:37 long_beq: len:8 long_bge_un: len:8 long_bge: len:8 long_bgt_un: len:8 long_bgt: len:8 long_ble_un: len:8 long_ble: len:8 long_blt_un: len:8 long_blt: len:8 long_bne_un: len:8 # Linear IR opcodes dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_i8const: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 jump_table: dest:i len:24 int_conv_to_i1: dest:i src1:i len:12 int_conv_to_i2: dest:i src1:i len:12 int_conv_to_i4: dest:i src1:i len:4 int_conv_to_i: dest:i src1:i len:4 int_conv_to_u1: dest:i src1:i len:10 int_conv_to_u2: dest:i src1:i len:16 int_conv_to_u4: dest:i src1:i len:4 int_conv_to_r_un: dest:f src1:i len:37 cond_exc_ic: len:8 cond_exc_ieq: len:8 cond_exc_ige: len:8 cond_exc_ige_un: len:8 cond_exc_igt: len:8 cond_exc_igt_un: len:8 cond_exc_ile: len:8 cond_exc_ile_un: len:8 cond_exc_ilt: len:8 cond_exc_ilt_un: len:8 cond_exc_inc: len:8 cond_exc_ine_un: len:8 cond_exc_ino: len:8 cond_exc_iov: len:8 lcompare_imm: src1:i len:20 long_add_imm: dest:i src1:i len:20 long_ceq: dest:i len:12 long_cgt_un: dest:i len:12 long_cgt: dest:i len:12 long_clt_un: dest:i len:12 long_clt: dest:i len:12 vcall2: len:22 clob:c vcall2_membase: src1:b len:12 clob:c vcall2_reg: src1:i len:8 clob:c s390_int_add_ovf: len:32 dest:i src1:i src2:i s390_int_add_ovf_un: len:32 dest:i src1:i src2:i s390_int_sub_ovf: len:32 dest:i src1:i src2:i s390_int_sub_ovf_un: len:32 dest:i src1:i src2:i s390_long_add_ovf: dest:i src1:i src2:i len:32 s390_long_add_ovf_un: dest:i src1:i src2:i len:32 s390_long_sub_ovf: dest:i src1:i src2:i len:32 s390_long_sub_ovf_un: dest:i src1:i src2:i len:32 liverange_start: len:0 liverange_end: len:0 gc_liveness_def: len:0 gc_liveness_use: len:0 gc_spill_slot_liveness_def: len:0 gc_param_slot_liveness_def: len:0 gc_safe_point: clob:c src1:i len:32 generic_class_init: src1:A len:32 clob:c s390_crj: src1:i src2:i len:24 s390_crj_un: src1:i src2:i len:24 s390_cgrj: src1:i src2:i len:24 s390_cgrj_un: src1:i src2:i len:24 s390_cij: len:24 s390_cij_un: src1:i len:24 s390_cgij: len:24 s390_cgij_un: len:24
1
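Setting the s390x before and after snapshots side by side shows the same deletion pattern as on ppc64: float_conv_to_i (and its R4 counterpart r4_conv_to_i) vanish from the table while everything else is untouched. A hedged helper for surfacing such removals from two flattened description texts (illustrative tooling only, with a deliberately loose regex; specifier names like dest or len also match, but they appear in both texts and cancel out in the set difference):

import re

def entry_names(text: str) -> set[str]:
    # Heuristic: any identifier followed by ':' that is preceded by
    # whitespace or start-of-text; commented-out entries like '#foo:' are
    # skipped because '#' is glued to the name.
    return {m.group(1) for m in re.finditer(r"(?:^|\s)([a-z0-9_]+):", text)}

def removed_entries(before: str, after: str) -> set[str]:
    return entry_names(before) - entry_names(after)

# e.g. removed_entries(s390x_before, s390x_after) is expected to contain
# 'float_conv_to_i' and 'r4_conv_to_i'.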
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/cpu-sparc.md
# sparc32 cpu description file # this file is read by genmdesc to produce a table with all the relevant information # about the cpu instructions that may be used by the register allocator, the scheduler # and other parts of the arch-dependent part of mini. # # An opcode name is followed by a colon and optional specifiers. # A specifier has a name, a colon and a value. Specifiers are separated by white space. # Here is a description of the specifiers valid for this file and their possible values. # # dest:register describes the destination register of an instruction # src1:register describes the first source register of an instruction # src2:register describes the second source register of an instruction # # register may have the following values: # i integer register # b base register (used in address references) # f floating point register # L register pair (same as 'i' on v9) # l %o0:%o1 register pair (same as 'i' on v9) # o %o0 # # len:number describes the maximum length in bytes of the instruction # number is a positive integer # # cost:number describes how many cycles are needed to complete the instruction (unused) # # clob:spec describes if the instruction clobbers registers or has special needs # # spec can be one of the following characters: # c clobbers caller-save registers # r 'reserves' the destination register until a later instruction unreserves it # used mostly to set output registers in function calls # # flags:spec describes if the instruction uses or sets the flags (unused) # # spec can be one of the following chars: # s sets the flags # u uses the flags # m uses and modifies the flags # # res:spec describes what units are used in the processor (unused) # # delay: describes delay slots (unused) # # the required specifiers are: len, clob (if registers are clobbered), the registers # specifiers if the registers are actually used, flags (when scheduling is implemented). # # See the code in mini-sparc32.c for more details on how the specifiers are used. 
#
label: len:0
break: len:64
br: len:8
throw: src1:i len:64
rethrow: src1:i len:64
start_handler: len:64
endfinally: len:64
endfilter: src1:i len:64
ckfinite: dest:f src1:f len:40
ceq: dest:i len:64
cgt: dest:i len:64
cgt_un: dest:i len:64
clt: dest:i len:64
clt_un: dest:i len:64
localloc: dest:i src1:i len:64
localloc_imm: dest:i len:64
compare: src1:i src2:i len:4
icompare: src1:i src2:i len:4
compare_imm: src1:i len:64
icompare_imm: src1:i len:64
fcompare: src1:f src2:f len:64
lcompare: src1:i src2:i len:4
setfret: dest:f src1:f len:8
check_this: src1:b len:4
arglist: src1:i len:64
call: dest:o clob:c len:40
call_reg: dest:o src1:i len:64 clob:c
call_membase: dest:o src1:b len:64 clob:c
voidcall: len:64 clob:c
voidcall_reg: src1:i len:64 clob:c
voidcall_membase: src1:b len:64 clob:c
fcall: dest:f len:64 clob:c
fcall_reg: dest:f src1:i len:64 clob:c
fcall_membase: dest:f src1:b len:64 clob:c
lcall: dest:l len:42 clob:c
lcall_reg: dest:l src1:i len:64 clob:c
lcall_membase: dest:l src1:b len:64 clob:c
vcall: len:40 clob:c
vcall_reg: src1:i len:64 clob:c
vcall_membase: src1:b len:64 clob:c
iconst: dest:i len:64
i8const: dest:i len:64
r4const: dest:f len:64
r8const: dest:f len:64
store_membase_imm: dest:b len:64
store_membase_reg: dest:b src1:i len:64
storei1_membase_imm: dest:b len:64
storei1_membase_reg: dest:b src1:i len:64
storei2_membase_imm: dest:b len:64
storei2_membase_reg: dest:b src1:i len:64
storei4_membase_imm: dest:b len:64
storei4_membase_reg: dest:b src1:i len:64
storei8_membase_imm: dest:b len:64 len:64
storei8_membase_reg: dest:b src1:i len:64
storer4_membase_reg: dest:b src1:f len:64
storer8_membase_reg: dest:b src1:f len:64
load_membase: dest:i src1:b len:64
loadi1_membase: dest:i src1:b len:64
loadu1_membase: dest:i src1:b len:64
loadi2_membase: dest:i src1:b len:64
loadu2_membase: dest:i src1:b len:64
loadi4_membase: dest:i src1:b len:64
loadu4_membase: dest:i src1:b len:64
loadi8_membase: dest:i src1:b len:64
loadr4_membase: dest:f src1:b len:64
loadr8_membase: dest:f src1:b len:64
loadu4_mem: dest:i len:8
move: dest:i src1:i len:4
add_imm: dest:i src1:i len:64
addcc_imm: dest:i src1:i len:64
sub_imm: dest:i src1:i len:64
subcc_imm: dest:i src1:i len:64
mul_imm: dest:i src1:i len:64
div_imm: dest:a src1:i src2:i len:64
div_un_imm: dest:a src1:i src2:i len:64
rem_imm: dest:d src1:i src2:i len:64
rem_un_imm: dest:d src1:i src2:i len:64
and_imm: dest:i src1:i len:64
or_imm: dest:i src1:i len:64
xor_imm: dest:i src1:i len:64
shl_imm: dest:i src1:i len:64
shr_imm: dest:i src1:i len:64
shr_un_imm: dest:i src1:i len:64
cond_exc_eq: len:64
cond_exc_ne_un: len:64
cond_exc_lt: len:64
cond_exc_lt_un: len:64
cond_exc_gt: len:64
cond_exc_gt_un: len:64
cond_exc_ge: len:64
cond_exc_ge_un: len:64
cond_exc_le: len:64
cond_exc_le_un: len:64
cond_exc_ov: len:64
cond_exc_no: len:64
cond_exc_c: len:64
cond_exc_nc: len:64
float_beq: len:8
float_bne_un: len:64
float_blt: len:8
float_blt_un: len:64
float_bgt: len:8
float_bgt_un: len:64
float_bge: len:64
float_bge_un: len:64
float_ble: len:64
float_ble_un: len:64
float_add: dest:f src1:f src2:f len:4
float_sub: dest:f src1:f src2:f len:4
float_mul: dest:f src1:f src2:f len:4
float_div: dest:f src1:f src2:f len:4
float_div_un: dest:f src1:f src2:f len:4
float_rem: dest:f src1:f src2:f len:64
float_rem_un: dest:f src1:f src2:f len:64
float_neg: dest:f src1:f len:4
float_not: dest:f src1:f len:4
float_conv_to_i1: dest:i src1:f len:40
float_conv_to_i2: dest:i src1:f len:40
float_conv_to_i4: dest:i src1:f len:40
float_conv_to_i8: dest:L src1:f len:40
float_conv_to_r4: dest:f src1:f len:8
float_conv_to_u4: dest:i src1:f len:40
float_conv_to_u8: dest:L src1:f len:40
float_conv_to_u2: dest:i src1:f len:40
float_conv_to_u1: dest:i src1:f len:40
float_conv_to_i: dest:i src1:f len:40
float_ceq: dest:i src1:f src2:f len:64
float_cgt: dest:i src1:f src2:f len:64
float_cgt_un: dest:i src1:f src2:f len:64
float_clt: dest:i src1:f src2:f len:64
float_clt_un: dest:i src1:f src2:f len:64
call_handler: len:64 clob:c
aotconst: dest:i len:64
adc: dest:i src1:i src2:i len:4
addcc: dest:i src1:i src2:i len:4
subcc: dest:i src1:i src2:i len:4
adc_imm: dest:i src1:i len:64
sbb: dest:i src1:i src2:i len:4
sbb_imm: dest:i src1:i len:64
br_reg: src1:i len:8
bigmul: len:2 dest:L src1:a src2:i
bigmul_un: len:2 dest:L src1:a src2:i
fmove: dest:f src1:f len:8

# 32 bit opcodes
int_add: dest:i src1:i src2:i len:64
int_sub: dest:i src1:i src2:i len:64
int_mul: dest:i src1:i src2:i len:64
int_div: dest:i src1:i src2:i len:64
int_div_un: dest:i src1:i src2:i len:64
int_rem: dest:i src1:i src2:i len:64
int_rem_un: dest:i src1:i src2:i len:64
int_and: dest:i src1:i src2:i len:64
int_or: dest:i src1:i src2:i len:64
int_xor: dest:i src1:i src2:i len:64
int_shl: dest:i src1:i src2:i len:64
int_shr: dest:i src1:i src2:i len:64
int_shr_un: dest:i src1:i src2:i len:64
int_adc: dest:i src1:i src2:i len:64
int_adc_imm: dest:i src1:i len:64
int_sbb: dest:i src1:i src2:i len:64
int_sbb_imm: dest:i src1:i len:64
int_addcc: dest:i src1:i src2:i len:64
int_subcc: dest:i src1:i src2:i len:64
int_add_imm: dest:i src1:i len:64
int_sub_imm: dest:i src1:i len:64
int_mul_imm: dest:i src1:i len:64
int_div_imm: dest:i src1:i len:64
int_div_un_imm: dest:i src1:i len:64
int_rem_imm: dest:i src1:i len:64
int_rem_un_imm: dest:i src1:i len:64
int_and_imm: dest:i src1:i len:64
int_or_imm: dest:i src1:i len:64
int_xor_imm: dest:i src1:i len:64
int_shl_imm: dest:i src1:i len:64
int_shr_imm: dest:i src1:i len:64
int_shr_un_imm: dest:i src1:i len:64
int_mul_ovf: dest:i src1:i src2:i len:64
int_mul_ovf_un: dest:i src1:i src2:i len:64
int_conv_to_i1: dest:i src1:i len:8
int_conv_to_i2: dest:i src1:i len:8
int_conv_to_i4: dest:i src1:i len:4
int_conv_to_i8: dest:i src1:i len:4
int_conv_to_r4: dest:f src1:i len:64
int_conv_to_r8: dest:f src1:i len:64
int_conv_to_u4: dest:i src1:i len:4
int_conv_to_u8: dest:i src1:i len:4
int_conv_to_u2: dest:i src1:i len:8
int_conv_to_u1: dest:i src1:i len:4
int_conv_to_i: dest:i src1:i len:4
int_neg: dest:i src1:i len:64
int_not: dest:i src1:i len:64
int_ceq: dest:i len:64
int_cgt: dest:i len:64
int_cgt_un: dest:i len:64
int_clt: dest:i len:64
int_clt_un: dest:i len:64
int_beq: len:8
int_bge: len:8
int_bgt: len:8
int_ble: len:8
int_blt: len:8
int_bne_un: len:64
int_bge_un: len:64
int_bgt_un: len:64
int_ble_un: len:64
int_blt_un: len:64

# 64 bit opcodes
long_shl: dest:i src1:i src2:i len:64
long_shr: dest:i src1:i src2:i len:64
long_shr_un: dest:i src1:i src2:i len:64
long_conv_to_ovf_i: dest:i src1:i src2:i len:48
long_mul_ovf:
long_conv_to_r_un: dest:f src1:i src2:i len:64
long_shr_imm: dest:i src1:i len:64
long_shr_un_imm: dest:i src1:i len:64
long_shl_imm: dest:i src1:i len:64
memory_barrier: len:4
sparc_brz: src1:i len: 8
sparc_brlez: src1:i len: 8
sparc_brlz: src1:i len: 8
sparc_brnz: src1:i len: 8
sparc_brgz: src1:i len: 8
sparc_brgez: src1:i len: 8
sparc_cond_exc_eqz: src1:i len:64
sparc_cond_exc_nez: src1:i len:64
sparc_cond_exc_ltz: src1:i len:64
sparc_cond_exc_gtz: src1:i len:64
sparc_cond_exc_gez: src1:i len:64
sparc_cond_exc_lez: src1:i len:64
relaxed_nop: len:0

# Linear IR opcodes
nop: len:0
dummy_use: src1:i len:0
dummy_iconst: dest:i len:0
dummy_i8const: dest:i len:0
dummy_r8const: dest:f len:0
dummy_r4const: dest:f len:0
not_reached: len:0
not_null: src1:i len:0
jump_table: dest:i len:64
cond_exc_ieq: len:64
cond_exc_ine_un: len:64
cond_exc_ilt: len:64
cond_exc_ilt_un: len:64
cond_exc_igt: len:64
cond_exc_igt_un: len:64
cond_exc_ige: len:64
cond_exc_ige_un: len:64
cond_exc_ile: len:64
cond_exc_ile_un: len:64
cond_exc_iov: len:64
cond_exc_ino: len:64
cond_exc_ic: len:64
cond_exc_inc: len:64
long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:48
vcall2: len:40 clob:c
vcall2_reg: src1:i len:64 clob:c
vcall2_membase: src1:b len:64 clob:c
liverange_start: len:0
liverange_end: len:0
gc_safe_point: len:0
# sparc32 cpu description file
# this file is read by genmdesc to produce a table with all the relevant information
# about the cpu instructions that may be used by the register allocator, the scheduler
# and other parts of the arch-dependent part of mini.
#
# An opcode name is followed by a colon and optional specifiers.
# A specifier has a name, a colon and a value. Specifiers are separated by white space.
# Here is a description of the specifiers valid for this file and their possible values.
#
# dest:register  describes the destination register of an instruction
# src1:register  describes the first source register of an instruction
# src2:register  describes the second source register of an instruction
#
# register may have the following values:
#     i  integer register
#     b  base register (used in address references)
#     f  floating point register
#     L  register pair (same as 'i' on v9)
#     l  %o0:%o1 register pair (same as 'i' on v9)
#     o  %o0
#
# len:number  describes the maximum length in bytes of the instruction
#     number is a positive integer
#
# cost:number  describes how many cycles are needed to complete the instruction (unused)
#
# clob:spec  describes whether the instruction clobbers registers or has special needs
#
#     spec can be one of the following characters:
#     c  clobbers caller-save registers
#     r  'reserves' the destination register until a later instruction unreserves it
#        used mostly to set output registers in function calls
#
# flags:spec  describes whether the instruction uses or sets the flags (unused)
#
#     spec can be one of the following chars:
#     s  sets the flags
#     u  uses the flags
#     m  uses and modifies the flags
#
# res:spec  describes what units are used in the processor (unused)
#
# delay:  describes delay slots (unused)
#
# the required specifiers are: len, clob (if registers are clobbered), the register
# specifiers (if the registers are actually used) and flags (when scheduling is implemented).
#
# See the code in mini-sparc32.c for more details on how the specifiers are used.
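# Reading aid for the pair specifiers above (summary added for clarity):
# 'l' names the fixed %o0:%o1 output pair, which is why lcall below uses
# dest:l, while 'L' is a generic register pair; on v9 both are treated as
# a plain 'i' register.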
#
label: len:0
break: len:64
br: len:8
throw: src1:i len:64
rethrow: src1:i len:64
start_handler: len:64
endfinally: len:64
endfilter: src1:i len:64
ckfinite: dest:f src1:f len:40
ceq: dest:i len:64
cgt: dest:i len:64
cgt_un: dest:i len:64
clt: dest:i len:64
clt_un: dest:i len:64
localloc: dest:i src1:i len:64
localloc_imm: dest:i len:64
compare: src1:i src2:i len:4
icompare: src1:i src2:i len:4
compare_imm: src1:i len:64
icompare_imm: src1:i len:64
fcompare: src1:f src2:f len:64
lcompare: src1:i src2:i len:4
setfret: dest:f src1:f len:8
check_this: src1:b len:4
arglist: src1:i len:64
call: dest:o clob:c len:40
call_reg: dest:o src1:i len:64 clob:c
call_membase: dest:o src1:b len:64 clob:c
voidcall: len:64 clob:c
voidcall_reg: src1:i len:64 clob:c
voidcall_membase: src1:b len:64 clob:c
fcall: dest:f len:64 clob:c
fcall_reg: dest:f src1:i len:64 clob:c
fcall_membase: dest:f src1:b len:64 clob:c
lcall: dest:l len:42 clob:c
lcall_reg: dest:l src1:i len:64 clob:c
lcall_membase: dest:l src1:b len:64 clob:c
vcall: len:40 clob:c
vcall_reg: src1:i len:64 clob:c
vcall_membase: src1:b len:64 clob:c
iconst: dest:i len:64
i8const: dest:i len:64
r4const: dest:f len:64
r8const: dest:f len:64
store_membase_imm: dest:b len:64
store_membase_reg: dest:b src1:i len:64
storei1_membase_imm: dest:b len:64
storei1_membase_reg: dest:b src1:i len:64
storei2_membase_imm: dest:b len:64
storei2_membase_reg: dest:b src1:i len:64
storei4_membase_imm: dest:b len:64
storei4_membase_reg: dest:b src1:i len:64
storei8_membase_imm: dest:b len:64 len:64
storei8_membase_reg: dest:b src1:i len:64
storer4_membase_reg: dest:b src1:f len:64
storer8_membase_reg: dest:b src1:f len:64
load_membase: dest:i src1:b len:64
loadi1_membase: dest:i src1:b len:64
loadu1_membase: dest:i src1:b len:64
loadi2_membase: dest:i src1:b len:64
loadu2_membase: dest:i src1:b len:64
loadi4_membase: dest:i src1:b len:64
loadu4_membase: dest:i src1:b len:64
loadi8_membase: dest:i src1:b len:64
loadr4_membase: dest:f src1:b len:64
loadr8_membase: dest:f src1:b len:64
loadu4_mem: dest:i len:8
move: dest:i src1:i len:4
add_imm: dest:i src1:i len:64
addcc_imm: dest:i src1:i len:64
sub_imm: dest:i src1:i len:64
subcc_imm: dest:i src1:i len:64
mul_imm: dest:i src1:i len:64
div_imm: dest:a src1:i src2:i len:64
div_un_imm: dest:a src1:i src2:i len:64
rem_imm: dest:d src1:i src2:i len:64
rem_un_imm: dest:d src1:i src2:i len:64
and_imm: dest:i src1:i len:64
or_imm: dest:i src1:i len:64
xor_imm: dest:i src1:i len:64
shl_imm: dest:i src1:i len:64
shr_imm: dest:i src1:i len:64
shr_un_imm: dest:i src1:i len:64
cond_exc_eq: len:64
cond_exc_ne_un: len:64
cond_exc_lt: len:64
cond_exc_lt_un: len:64
cond_exc_gt: len:64
cond_exc_gt_un: len:64
cond_exc_ge: len:64
cond_exc_ge_un: len:64
cond_exc_le: len:64
cond_exc_le_un: len:64
cond_exc_ov: len:64
cond_exc_no: len:64
cond_exc_c: len:64
cond_exc_nc: len:64
float_beq: len:8
float_bne_un: len:64
float_blt: len:8
float_blt_un: len:64
float_bgt: len:8
float_bgt_un: len:64
float_bge: len:64
float_bge_un: len:64
float_ble: len:64
float_ble_un: len:64
float_add: dest:f src1:f src2:f len:4
float_sub: dest:f src1:f src2:f len:4
float_mul: dest:f src1:f src2:f len:4
float_div: dest:f src1:f src2:f len:4
float_div_un: dest:f src1:f src2:f len:4
float_rem: dest:f src1:f src2:f len:64
float_rem_un: dest:f src1:f src2:f len:64
float_neg: dest:f src1:f len:4
float_not: dest:f src1:f len:4
float_conv_to_i1: dest:i src1:f len:40
float_conv_to_i2: dest:i src1:f len:40
float_conv_to_i4: dest:i src1:f len:40
float_conv_to_i8: dest:L src1:f len:40
float_conv_to_r4: dest:f src1:f len:8
float_conv_to_u4: dest:i src1:f len:40
float_conv_to_u8: dest:L src1:f len:40
float_conv_to_u2: dest:i src1:f len:40
float_conv_to_u1: dest:i src1:f len:40
float_ceq: dest:i src1:f src2:f len:64
float_cgt: dest:i src1:f src2:f len:64
float_cgt_un: dest:i src1:f src2:f len:64
float_clt: dest:i src1:f src2:f len:64
float_clt_un: dest:i src1:f src2:f len:64
call_handler: len:64 clob:c
aotconst: dest:i len:64
adc: dest:i src1:i src2:i len:4
addcc: dest:i src1:i src2:i len:4
subcc: dest:i src1:i src2:i len:4
adc_imm: dest:i src1:i len:64
sbb: dest:i src1:i src2:i len:4
sbb_imm: dest:i src1:i len:64
br_reg: src1:i len:8
bigmul: len:2 dest:L src1:a src2:i
bigmul_un: len:2 dest:L src1:a src2:i
fmove: dest:f src1:f len:8

# 32 bit opcodes
int_add: dest:i src1:i src2:i len:64
int_sub: dest:i src1:i src2:i len:64
int_mul: dest:i src1:i src2:i len:64
int_div: dest:i src1:i src2:i len:64
int_div_un: dest:i src1:i src2:i len:64
int_rem: dest:i src1:i src2:i len:64
int_rem_un: dest:i src1:i src2:i len:64
int_and: dest:i src1:i src2:i len:64
int_or: dest:i src1:i src2:i len:64
int_xor: dest:i src1:i src2:i len:64
int_shl: dest:i src1:i src2:i len:64
int_shr: dest:i src1:i src2:i len:64
int_shr_un: dest:i src1:i src2:i len:64
int_adc: dest:i src1:i src2:i len:64
int_adc_imm: dest:i src1:i len:64
int_sbb: dest:i src1:i src2:i len:64
int_sbb_imm: dest:i src1:i len:64
int_addcc: dest:i src1:i src2:i len:64
int_subcc: dest:i src1:i src2:i len:64
int_add_imm: dest:i src1:i len:64
int_sub_imm: dest:i src1:i len:64
int_mul_imm: dest:i src1:i len:64
int_div_imm: dest:i src1:i len:64
int_div_un_imm: dest:i src1:i len:64
int_rem_imm: dest:i src1:i len:64
int_rem_un_imm: dest:i src1:i len:64
int_and_imm: dest:i src1:i len:64
int_or_imm: dest:i src1:i len:64
int_xor_imm: dest:i src1:i len:64
int_shl_imm: dest:i src1:i len:64
int_shr_imm: dest:i src1:i len:64
int_shr_un_imm: dest:i src1:i len:64
int_mul_ovf: dest:i src1:i src2:i len:64
int_mul_ovf_un: dest:i src1:i src2:i len:64
int_conv_to_i1: dest:i src1:i len:8
int_conv_to_i2: dest:i src1:i len:8
int_conv_to_i4: dest:i src1:i len:4
int_conv_to_i8: dest:i src1:i len:4
int_conv_to_r4: dest:f src1:i len:64
int_conv_to_r8: dest:f src1:i len:64
int_conv_to_u4: dest:i src1:i len:4
int_conv_to_u8: dest:i src1:i len:4
int_conv_to_u2: dest:i src1:i len:8
int_conv_to_u1: dest:i src1:i len:4
int_conv_to_i: dest:i src1:i len:4
int_neg: dest:i src1:i len:64
int_not: dest:i src1:i len:64
int_ceq: dest:i len:64
int_cgt: dest:i len:64
int_cgt_un: dest:i len:64
int_clt: dest:i len:64
int_clt_un: dest:i len:64
int_beq: len:8
int_bge: len:8
int_bgt: len:8
int_ble: len:8
int_blt: len:8
int_bne_un: len:64
int_bge_un: len:64
int_bgt_un: len:64
int_ble_un: len:64
int_blt_un: len:64

# 64 bit opcodes
long_shl: dest:i src1:i src2:i len:64
long_shr: dest:i src1:i src2:i len:64
long_shr_un: dest:i src1:i src2:i len:64
long_conv_to_ovf_i: dest:i src1:i src2:i len:48
long_mul_ovf:
long_conv_to_r_un: dest:f src1:i src2:i len:64
long_shr_imm: dest:i src1:i len:64
long_shr_un_imm: dest:i src1:i len:64
long_shl_imm: dest:i src1:i len:64
memory_barrier: len:4
sparc_brz: src1:i len: 8
sparc_brlez: src1:i len: 8
sparc_brlz: src1:i len: 8
sparc_brnz: src1:i len: 8
sparc_brgz: src1:i len: 8
sparc_brgez: src1:i len: 8
sparc_cond_exc_eqz: src1:i len:64
sparc_cond_exc_nez: src1:i len:64
sparc_cond_exc_ltz: src1:i len:64
sparc_cond_exc_gtz: src1:i len:64
sparc_cond_exc_gez: src1:i len:64
sparc_cond_exc_lez: src1:i len:64
relaxed_nop: len:0

# Linear IR opcodes
nop: len:0
dummy_use: src1:i len:0
dummy_iconst: dest:i len:0
dummy_i8const: dest:i len:0
dummy_r8const: dest:f len:0
dummy_r4const: dest:f len:0
not_reached: len:0
not_null: src1:i len:0
jump_table: dest:i len:64
cond_exc_ieq: len:64
cond_exc_ine_un: len:64
cond_exc_ilt: len:64
cond_exc_ilt_un: len:64
cond_exc_igt: len:64
cond_exc_igt_un: len:64
cond_exc_ige: len:64
cond_exc_ige_un: len:64
cond_exc_ile: len:64
cond_exc_ile_un: len:64
cond_exc_iov: len:64
cond_exc_ino: len:64
cond_exc_ic: len:64
cond_exc_inc: len:64
long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:48
vcall2: len:40 clob:c
vcall2_reg: src1:i len:64 clob:c
vcall2_membase: src1:b len:64 clob:c
liverange_start: len:0
liverange_end: len:0
gc_safe_point: len:0
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/cpu-x86.md
# x86-class cpu description file
# this file is read by genmdesc to produce a table with all the relevant information
# about the cpu instructions that may be used by the register allocator, the scheduler
# and other parts of the arch-dependent part of mini.
#
# An opcode name is followed by a colon and optional specifiers.
# A specifier has a name, a colon and a value. Specifiers are separated by white space.
# Here is a description of the specifiers valid for this file and their possible values.
#
# dest:register  describes the destination register of an instruction
# src1:register  describes the first source register of an instruction
# src2:register  describes the second source register of an instruction
#
# register may have the following values:
#     i  integer register
#     b  base register (used in address references)
#     f  floating point register
#     a  EAX register
#     d  EDX register
#     s  ECX register
#     l  long reg (forced eax:edx)
#     L  long reg (dynamic)
#     y  the reg needs to be one of EAX,EBX,ECX,EDX (sete opcodes)
#     x  XMM reg (XMM0 - X007)
#
# len:number  describes the maximum length in bytes of the instruction
#     number is a positive integer. If the length is not specified
#     it defaults to zero. But lengths are only checked if the given opcode
#     is encountered during compilation. Some opcodes, like CONV_U4, are
#     transformed into other opcodes in the brg files, so they do not show up
#     during code generation.
#
# cost:number  describes how many cycles are needed to complete the instruction (unused)
#
# clob:spec  describes whether the instruction clobbers registers or has special needs
#
#     spec can be one of the following characters:
#     c  clobbers caller-save registers
#     1  clobbers the first source register
#     a  EAX is clobbered
#     d  EDX is clobbered
#     x  both the source operands are clobbered (xchg)
#
# flags:spec  describes whether the instruction uses or sets the flags (unused)
#
#     spec can be one of the following chars:
#     s  sets the flags
#     u  uses the flags
#     m  uses and modifies the flags
#
# res:spec  describes what units are used in the processor (unused)
#
# delay:  describes delay slots (unused)
#
# the required specifiers are: len, clob (if registers are clobbered), the register
# specifiers (if the registers are actually used) and flags (when scheduling is implemented).
#
# Templates can be defined by using the 'template' keyword instead of an opcode name.
# The template name is assigned from a (required) 'name' specifier.
# To apply a template to an opcode, just use the template:template_name specifier: any value
# defined by the template can be overridden by adding more specifiers after the template.
#
# See the code in mini-x86.c for more details on how the specifiers are used.
#
break: len:1
call: dest:a clob:c len:17
tailcall: len:255 clob:c
tailcall_membase: src1:b len:255 clob:c # FIXME len
tailcall_reg: src1:b len:255 clob:c # FIXME len

# tailcall_parameter models the size of moving one parameter,
# so that the required size of a branch around a tailcall can
# be accurately estimated; something like:
# void f1(volatile long *a)
# {
#     a[large] = a[another large]
# }
#
# This is like amd64 but without the rex bytes.
#
# Frame size is artificially limited to 1GB in mono_arch_tailcall_supported.
# This is presently redundant with tailcall len:255, as the limit of
# near branches is [-128, +127], after which the limit is
# [-2GB, +2GB-1].
# FIXME A fixed size sequence to move parameters would moot this.
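# To make the estimate above concrete (illustrative arithmetic, not a
# measurement): with tailcall_parameter at len:12 below, a tailcall that
# moves 11 parameters can need 132 bytes of moves, already outside the
# [-128, +127] range of a near branch, so the branch around the tailcall
# has to be sized conservatively.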
tailcall_parameter: len:12 br: len:5 seq_point: len:26 clob:c il_seq_point: len:0 int_beq: len:6 int_bge: len:6 int_bgt: len:6 int_ble: len:6 int_blt: len:6 int_bne_un: len:6 int_bge_un: len:6 int_bgt_un: len:6 int_ble_un: len:6 int_blt_un: len:6 label: len:0 #template: name:ibalu int_add: dest:i src1:i src2:i clob:1 len:2 int_sub: dest:i src1:i src2:i clob:1 len:2 int_mul: dest:i src1:i src2:i clob:1 len:3 int_div: dest:a src1:a src2:i len:15 clob:d int_div_un: dest:a src1:a src2:i len:15 clob:d int_rem: dest:d src1:a src2:i len:15 clob:a int_rem_un: dest:d src1:a src2:i len:15 clob:a int_and: dest:i src1:i src2:i clob:1 len:2 int_or: dest:i src1:i src2:i clob:1 len:2 int_xor: dest:i src1:i src2:i clob:1 len:2 int_shl: dest:i src1:i src2:s clob:1 len:2 int_shr: dest:i src1:i src2:s clob:1 len:2 int_shr_un: dest:i src1:i src2:s clob:1 len:2 int_min: dest:i src1:i src2:i len:16 clob:1 int_min_un: dest:i src1:i src2:i len:16 clob:1 int_max: dest:i src1:i src2:i len:16 clob:1 int_max_un: dest:i src1:i src2:i len:16 clob:1 int_neg: dest:i src1:i len:2 clob:1 int_not: dest:i src1:i len:2 clob:1 int_conv_to_i1: dest:i src1:y len:3 int_conv_to_i2: dest:i src1:i len:3 int_conv_to_i4: dest:i src1:i len:2 int_conv_to_r4: dest:f src1:i len:13 int_conv_to_r8: dest:f src1:i len:7 int_conv_to_u4: dest:i src1:i int_conv_to_u2: dest:i src1:i len:3 int_conv_to_u1: dest:i src1:y len:3 int_conv_to_i: dest:i src1:i len:3 int_mul_ovf: dest:i src1:i src2:i clob:1 len:9 int_mul_ovf_un: dest:i src1:i src2:i len:16 throw: src1:i len:13 rethrow: src1:i len:13 start_handler: len:16 endfinally: len:16 endfilter: src1:a len:16 get_ex_obj: dest:a len:16 ckfinite: dest:f src1:f len:32 ceq: dest:y len:6 cgt: dest:y len:6 cgt_un: dest:y len:6 clt: dest:y len:6 clt_un: dest:y len:6 localloc: dest:i src1:i len:120 compare: src1:i src2:i len:2 compare_imm: src1:i len:6 fcompare: src1:f src2:f clob:a len:9 arglist: src1:b len:10 check_this: src1:b len:3 voidcall: len:17 clob:c voidcall_reg: src1:i len:11 clob:c voidcall_membase: src1:b len:16 clob:c fcall: dest:f len:17 clob:c fcall_reg: dest:f src1:i len:11 clob:c fcall_membase: dest:f src1:b len:16 clob:c lcall: dest:l len:17 clob:c lcall_reg: dest:l src1:i len:11 clob:c lcall_membase: dest:l src1:b len:16 clob:c vcall: len:17 clob:c vcall_reg: src1:i len:11 clob:c vcall_membase: src1:b len:16 clob:c call_reg: dest:a src1:i len:11 clob:c call_membase: dest:a src1:b len:16 clob:c iconst: dest:i len:5 r4const: dest:f len:15 r8const: dest:f len:16 store_membase_imm: dest:b len:11 store_membase_reg: dest:b src1:i len:7 storei1_membase_imm: dest:b len:10 storei1_membase_reg: dest:b src1:y len:7 storei2_membase_imm: dest:b len:11 storei2_membase_reg: dest:b src1:i len:7 storei4_membase_imm: dest:b len:10 storei4_membase_reg: dest:b src1:i len:7 storei8_membase_imm: dest:b storei8_membase_reg: dest:b src1:i storer4_membase_reg: dest:b src1:f len:7 storer8_membase_reg: dest:b src1:f len:7 load_membase: dest:i src1:b len:7 loadi1_membase: dest:y src1:b len:7 loadu1_membase: dest:y src1:b len:7 loadi2_membase: dest:i src1:b len:7 loadu2_membase: dest:i src1:b len:7 loadi4_membase: dest:i src1:b len:7 loadu4_membase: dest:i src1:b len:7 loadi8_membase: dest:i src1:b loadr4_membase: dest:f src1:b len:7 loadr8_membase: dest:f src1:b len:7 loadu4_mem: dest:i len:9 move: dest:i src1:i len:2 addcc_imm: dest:i src1:i len:6 clob:1 add_imm: dest:i src1:i len:6 clob:1 subcc_imm: dest:i src1:i len:6 clob:1 sub_imm: dest:i src1:i len:6 clob:1 mul_imm: dest:i src1:i len:9 and_imm: dest:i src1:i 
len:6 clob:1 or_imm: dest:i src1:i len:6 clob:1 xor_imm: dest:i src1:i len:6 clob:1 shl_imm: dest:i src1:i len:6 clob:1 shr_imm: dest:i src1:i len:6 clob:1 shr_un_imm: dest:i src1:i len:6 clob:1 cond_exc_eq: len:6 cond_exc_ne_un: len:6 cond_exc_lt: len:6 cond_exc_lt_un: len:6 cond_exc_gt: len:6 cond_exc_gt_un: len:6 cond_exc_ge: len:6 cond_exc_ge_un: len:6 cond_exc_le: len:6 cond_exc_le_un: len:6 cond_exc_ov: len:6 cond_exc_no: len:6 cond_exc_c: len:6 cond_exc_nc: len:6 long_shl: dest:L src1:L src2:s clob:1 len:21 long_shr: dest:L src1:L src2:s clob:1 len:22 long_shr_un: dest:L src1:L src2:s clob:1 len:22 long_shr_imm: dest:L src1:L clob:1 len:10 long_shr_un_imm: dest:L src1:L clob:1 len:10 long_shl_imm: dest:L src1:L clob:1 len:10 float_beq: len:12 float_bne_un: len:18 float_blt: len:12 float_blt_un: len:20 float_bgt: len:12 float_bgt_un: len:20 float_bge: len:22 float_bge_un: len:12 float_ble: len:22 float_ble_un: len:12 float_add: dest:f src1:f src2:f len:2 float_sub: dest:f src1:f src2:f len:2 float_mul: dest:f src1:f src2:f len:2 float_div: dest:f src1:f src2:f len:2 float_div_un: dest:f src1:f src2:f len:2 float_rem: dest:f src1:f src2:f len:17 float_rem_un: dest:f src1:f src2:f len:17 float_neg: dest:f src1:f len:2 float_not: dest:f src1:f len:2 float_conv_to_i1: dest:y src1:f len:39 float_conv_to_i2: dest:y src1:f len:39 float_conv_to_i4: dest:i src1:f len:39 float_conv_to_i8: dest:L src1:f len:39 float_conv_to_u4: dest:i src1:f len:39 float_conv_to_u8: dest:L src1:f len:39 float_conv_to_u2: dest:y src1:f len:39 float_conv_to_u1: dest:y src1:f len:39 float_conv_to_i: dest:i src1:f len:39 float_conv_to_ovf_i: dest:a src1:f len:30 float_conv_to_ovd_u: dest:a src1:f len:30 float_mul_ovf: float_ceq: dest:y src1:f src2:f len:25 float_cgt: dest:y src1:f src2:f len:25 float_cgt_un: dest:y src1:f src2:f len:37 float_clt: dest:y src1:f src2:f len:25 float_clt_un: dest:y src1:f src2:f len:32 float_cneq: dest:y src1:f src2:f len:25 float_cge: dest:y src1:f src2:f len:37 float_cle: dest:y src1:f src2:f len:37 call_handler: len:11 clob:c aotconst: dest:i len:5 load_gotaddr: dest:i len:64 got_entry: dest:i src1:b len:7 gc_safe_point: clob:c src1:i len:20 x86_test_null: src1:i len:2 x86_compare_membase_reg: src1:b src2:i len:7 x86_compare_membase_imm: src1:b len:11 x86_compare_membase8_imm: src1:b len:8 x86_compare_mem_imm: len:11 x86_compare_reg_membase: src1:i src2:b len:7 x86_inc_reg: dest:i src1:i clob:1 len:1 x86_inc_membase: src1:b len:7 x86_dec_reg: dest:i src1:i clob:1 len:1 x86_dec_membase: src1:b len:7 x86_add_membase_imm: src1:b len:11 x86_sub_membase_imm: src1:b len:11 x86_and_membase_imm: src1:b len:11 x86_or_membase_imm: src1:b len:11 x86_xor_membase_imm: src1:b len:11 x86_push: src1:i len:1 x86_push_imm: len:5 x86_push_membase: src1:b len:7 x86_push_obj: src1:b len:30 x86_push_got_entry: src1:b len:7 x86_lea: dest:i src1:i src2:i len:7 x86_lea_membase: dest:i src1:i len:10 x86_xchg: src1:i src2:i clob:x len:1 x86_fpop: src1:f len:2 x86_fp_load_i8: dest:f src1:b len:7 x86_fp_load_i4: dest:f src1:b len:7 x86_seteq_membase: src1:b len:7 x86_setne_membase: src1:b len:7 x86_add_reg_membase: dest:i src1:i src2:b clob:1 len:11 x86_sub_reg_membase: dest:i src1:i src2:b clob:1 len:11 x86_mul_reg_membase: dest:i src1:i src2:b clob:1 len:13 adc: dest:i src1:i src2:i len:2 clob:1 addcc: dest:i src1:i src2:i len:2 clob:1 subcc: dest:i src1:i src2:i len:2 clob:1 adc_imm: dest:i src1:i len:6 clob:1 sbb: dest:i src1:i src2:i len:2 clob:1 sbb_imm: dest:i src1:i len:6 clob:1 br_reg: src1:i len:2 
sin: dest:f src1:f len:6 cos: dest:f src1:f len:6 abs: dest:f src1:f len:2 tan: dest:f src1:f len:49 atan: dest:f src1:f len:8 sqrt: dest:f src1:f len:2 round: dest:f src1:f len:2 bigmul: len:2 dest:l src1:a src2:i bigmul_un: len:2 dest:l src1:a src2:i sext_i1: dest:i src1:y len:3 sext_i2: dest:i src1:y len:3 tls_get: dest:i len:32 tls_set: src1:i len:20 atomic_add_i4: src1:b src2:i dest:i len:16 atomic_exchange_i4: src1:b src2:i dest:a len:24 atomic_cas_i4: src1:b src2:i src3:a dest:a len:24 memory_barrier: len:16 atomic_load_i1: dest:y src1:b len:7 atomic_load_u1: dest:y src1:b len:7 atomic_load_i2: dest:i src1:b len:7 atomic_load_u2: dest:i src1:b len:7 atomic_load_i4: dest:i src1:b len:7 atomic_load_u4: dest:i src1:b len:7 atomic_load_r4: dest:f src1:b len:10 atomic_load_r8: dest:f src1:b len:10 atomic_store_i1: dest:b src1:y len:10 atomic_store_u1: dest:b src1:y len:10 atomic_store_i2: dest:b src1:i len:10 atomic_store_u2: dest:b src1:i len:10 atomic_store_i4: dest:b src1:i len:10 atomic_store_u4: dest:b src1:i len:10 atomic_store_r4: dest:b src1:f len:10 atomic_store_r8: dest:b src1:f len:10 card_table_wbarrier: src1:a src2:i clob:d len:34 relaxed_nop: len:2 hard_nop: len:1 # Linear IR opcodes nop: len:0 dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 jump_table: dest:i len:5 int_adc: dest:i src1:i src2:i len:2 clob:1 int_addcc: dest:i src1:i src2:i len:2 clob:1 int_subcc: dest:i src1:i src2:i len:2 clob:1 int_sbb: dest:i src1:i src2:i len:2 clob:1 int_add_imm: dest:i src1:i len:6 clob:1 int_sub_imm: dest:i src1:i len:6 clob:1 int_mul_imm: dest:i src1:i len:9 int_div_imm: dest:a src1:a len:15 clob:d int_div_un_imm: dest:a src1:a len:15 clob:d int_rem_imm: dest:a src1:a len:15 clob:d int_rem_un_imm: dest:d src1:a len:15 clob:a int_and_imm: dest:i src1:i len:6 clob:1 int_or_imm: dest:i src1:i len:6 clob:1 int_xor_imm: dest:i src1:i len:6 clob:1 int_shl_imm: dest:i src1:i len:6 clob:1 int_shr_imm: dest:i src1:i len:6 clob:1 int_shr_un_imm: dest:i src1:i len:6 clob:1 int_conv_to_r_un: dest:f src1:i len:32 int_ceq: dest:y len:6 int_cgt: dest:y len:6 int_cgt_un: dest:y len:6 int_clt: dest:y len:6 int_clt_un: dest:y len:6 int_cneq: dest:y len:6 int_cge: dest:y len:6 int_cle: dest:y len:6 int_cge_un: dest:y len:6 int_cle_un: dest:y len:6 cond_exc_ieq: len:6 cond_exc_ine_un: len:6 cond_exc_ilt: len:6 cond_exc_ilt_un: len:6 cond_exc_igt: len:6 cond_exc_igt_un: len:6 cond_exc_ige: len:6 cond_exc_ige_un: len:6 cond_exc_ile: len:6 cond_exc_ile_un: len:6 cond_exc_iov: len:6 cond_exc_ino: len:6 cond_exc_ic: len:6 cond_exc_inc: len:6 icompare: src1:i src2:i len:2 icompare_imm: src1:i len:6 cmov_ieq: dest:i src1:i src2:i len:16 clob:1 cmov_ige: dest:i src1:i src2:i len:16 clob:1 cmov_igt: dest:i src1:i src2:i len:16 clob:1 cmov_ile: dest:i src1:i src2:i len:16 clob:1 cmov_ilt: dest:i src1:i src2:i len:16 clob:1 cmov_ine_un: dest:i src1:i src2:i len:16 clob:1 cmov_ige_un: dest:i src1:i src2:i len:16 clob:1 cmov_igt_un: dest:i src1:i src2:i len:16 clob:1 cmov_ile_un: dest:i src1:i src2:i len:16 clob:1 cmov_ilt_un: dest:i src1:i src2:i len:16 clob:1 long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:30 long_conv_to_r8_2: dest:f src1:i src2:i len:14 long_conv_to_r4_2: dest:f src1:i src2:i len:14 long_conv_to_r_un_2: dest:f src1:i src2:i len:40 fmove: dest:f src1:f move_f_to_i4: dest:i src1:f len:17 move_i4_to_f: dest:f src1:i len:17 float_conv_to_r4: dest:f src1:f len:12 load_mem: dest:i len:9 loadi4_mem: 
dest:i len:9 loadu1_mem: dest:i len:9 loadu2_mem: dest:i len:9 vcall2: len:17 clob:c vcall2_reg: src1:i len:11 clob:c vcall2_membase: src1:b len:16 clob:c localloc_imm: dest:i len:120 x86_add_membase_reg: src1:b src2:i len:11 x86_sub_membase_reg: src1:b src2:i len:11 x86_and_membase_reg: src1:b src2:i len:11 x86_or_membase_reg: src1:b src2:i len:11 x86_xor_membase_reg: src1:b src2:i len:11 x86_mul_membase_reg: src1:b src2:i len:13 x86_and_reg_membase: dest:i src1:i src2:b clob:1 len:6 x86_or_reg_membase: dest:i src1:i src2:b clob:1 len:6 x86_xor_reg_membase: dest:i src1:i src2:b clob:1 len:6 x86_fxch: len:2 addps: dest:x src1:x src2:x len:3 clob:1 divps: dest:x src1:x src2:x len:3 clob:1 mulps: dest:x src1:x src2:x len:3 clob:1 subps: dest:x src1:x src2:x len:3 clob:1 maxps: dest:x src1:x src2:x len:3 clob:1 minps: dest:x src1:x src2:x len:3 clob:1 compps: dest:x src1:x src2:x len:4 clob:1 andps: dest:x src1:x src2:x len:3 clob:1 andnps: dest:x src1:x src2:x len:3 clob:1 orps: dest:x src1:x src2:x len:3 clob:1 xorps: dest:x src1:x src2:x len:3 clob:1 haddps: dest:x src1:x src2:x len:4 clob:1 hsubps: dest:x src1:x src2:x len:4 clob:1 addsubps: dest:x src1:x src2:x len:4 clob:1 dupps_low: dest:x src1:x len:4 dupps_high: dest:x src1:x len:4 addpd: dest:x src1:x src2:x len:4 clob:1 divpd: dest:x src1:x src2:x len:4 clob:1 mulpd: dest:x src1:x src2:x len:4 clob:1 subpd: dest:x src1:x src2:x len:4 clob:1 maxpd: dest:x src1:x src2:x len:4 clob:1 minpd: dest:x src1:x src2:x len:4 clob:1 comppd: dest:x src1:x src2:x len:5 clob:1 andpd: dest:x src1:x src2:x len:4 clob:1 andnpd: dest:x src1:x src2:x len:4 clob:1 orpd: dest:x src1:x src2:x len:4 clob:1 xorpd: dest:x src1:x src2:x len:4 clob:1 sqrtpd: dest:x src1:x len:4 clob:1 haddpd: dest:x src1:x src2:x len:5 clob:1 hsubpd: dest:x src1:x src2:x len:5 clob:1 addsubpd: dest:x src1:x src2:x len:5 clob:1 duppd: dest:x src1:x len:5 pand: dest:x src1:x src2:x len:4 clob:1 por: dest:x src1:x src2:x len:4 clob:1 pxor: dest:x src1:x src2:x len:4 clob:1 sqrtps: dest:x src1:x len:4 rsqrtps: dest:x src1:x len:4 rcpps: dest:x src1:x len:4 pshuflew_high: dest:x src1:x len:5 pshuflew_low: dest:x src1:x len:5 pshufled: dest:x src1:x len:5 shufps: dest:x src1:x src2:x len:4 clob:1 shufpd: dest:x src1:x src2:x len:5 clob:1 extract_mask: dest:i src1:x len:4 paddb: dest:x src1:x src2:x len:4 clob:1 paddw: dest:x src1:x src2:x len:4 clob:1 paddd: dest:x src1:x src2:x len:4 clob:1 paddq: dest:x src1:x src2:x len:4 clob:1 psubb: dest:x src1:x src2:x len:4 clob:1 psubw: dest:x src1:x src2:x len:4 clob:1 psubd: dest:x src1:x src2:x len:4 clob:1 psubq: dest:x src1:x src2:x len:4 clob:1 pmaxb_un: dest:x src1:x src2:x len:4 clob:1 pmaxw_un: dest:x src1:x src2:x len:5 clob:1 pmaxd_un: dest:x src1:x src2:x len:5 clob:1 pmaxb: dest:x src1:x src2:x len:5 clob:1 pmaxw: dest:x src1:x src2:x len:4 clob:1 pmaxd: dest:x src1:x src2:x len:5 clob:1 pavgb_un: dest:x src1:x src2:x len:4 clob:1 pavgw_un: dest:x src1:x src2:x len:4 clob:1 pminb_un: dest:x src1:x src2:x len:4 clob:1 pminw_un: dest:x src1:x src2:x len:5 clob:1 pmind_un: dest:x src1:x src2:x len:5 clob:1 pminb: dest:x src1:x src2:x len:5 clob:1 pminw: dest:x src1:x src2:x len:4 clob:1 pmind: dest:x src1:x src2:x len:5 clob:1 pcmpeqb: dest:x src1:x src2:x len:4 clob:1 pcmpeqw: dest:x src1:x src2:x len:4 clob:1 pcmpeqd: dest:x src1:x src2:x len:4 clob:1 pcmpeqq: dest:x src1:x src2:x len:5 clob:1 pcmpgtb: dest:x src1:x src2:x len:4 clob:1 pcmpgtw: dest:x src1:x src2:x len:4 clob:1 pcmpgtd: dest:x src1:x src2:x len:4 clob:1 
pcmpgtq: dest:x src1:x src2:x len:5 clob:1 psum_abs_diff: dest:x src1:x src2:x len:4 clob:1 unpack_lowb: dest:x src1:x src2:x len:4 clob:1 unpack_loww: dest:x src1:x src2:x len:4 clob:1 unpack_lowd: dest:x src1:x src2:x len:4 clob:1 unpack_lowq: dest:x src1:x src2:x len:4 clob:1 unpack_lowps: dest:x src1:x src2:x len:3 clob:1 unpack_lowpd: dest:x src1:x src2:x len:4 clob:1 unpack_highb: dest:x src1:x src2:x len:4 clob:1 unpack_highw: dest:x src1:x src2:x len:4 clob:1 unpack_highd: dest:x src1:x src2:x len:4 clob:1 unpack_highq: dest:x src1:x src2:x len:4 clob:1 unpack_highps: dest:x src1:x src2:x len:3 clob:1 unpack_highpd: dest:x src1:x src2:x len:4 clob:1 packw: dest:x src1:x src2:x len:4 clob:1 packd: dest:x src1:x src2:x len:4 clob:1 packw_un: dest:x src1:x src2:x len:4 clob:1 packd_un: dest:x src1:x src2:x len:5 clob:1 paddb_sat: dest:x src1:x src2:x len:4 clob:1 paddb_sat_un: dest:x src1:x src2:x len:4 clob:1 paddw_sat: dest:x src1:x src2:x len:4 clob:1 paddw_sat_un: dest:x src1:x src2:x len:4 clob:1 psubb_sat: dest:x src1:x src2:x len:4 clob:1 psubb_sat_un: dest:x src1:x src2:x len:4 clob:1 psubw_sat: dest:x src1:x src2:x len:4 clob:1 psubw_sat_un: dest:x src1:x src2:x len:4 clob:1 pmulw: dest:x src1:x src2:x len:4 clob:1 pmuld: dest:x src1:x src2:x len:5 clob:1 pmulq: dest:x src1:x src2:x len:4 clob:1 pmulw_high_un: dest:x src1:x src2:x len:4 clob:1 pmulw_high: dest:x src1:x src2:x len:4 clob:1 pshrw: dest:x src1:x len:5 clob:1 pshrw_reg: dest:x src1:x src2:x len:4 clob:1 psarw: dest:x src1:x len:5 clob:1 psarw_reg: dest:x src1:x src2:x len:4 clob:1 pshlw: dest:x src1:x len:5 clob:1 pshlw_reg: dest:x src1:x src2:x len:4 clob:1 pshrd: dest:x src1:x len:5 clob:1 pshrd_reg: dest:x src1:x src2:x len:4 clob:1 psard: dest:x src1:x len:5 clob:1 psard_reg: dest:x src1:x src2:x len:4 clob:1 pshld: dest:x src1:x len:5 clob:1 pshld_reg: dest:x src1:x src2:x len:4 clob:1 pshrq: dest:x src1:x len:5 clob:1 pshrq_reg: dest:x src1:x src2:x len:4 clob:1 pshlq: dest:x src1:x len:5 clob:1 pshlq_reg: dest:x src1:x src2:x len:4 clob:1 cvtdq2pd: dest:x src1:x len:4 clob:1 cvtdq2ps: dest:x src1:x len:3 clob:1 cvtpd2dq: dest:x src1:x len:4 clob:1 cvtpd2ps: dest:x src1:x len:4 clob:1 cvtps2dq: dest:x src1:x len:4 clob:1 cvtps2pd: dest:x src1:x len:3 clob:1 cvttpd2dq: dest:x src1:x len:4 clob:1 cvttps2dq: dest:x src1:x len:4 clob:1 xmove: dest:x src1:x len:4 xzero: dest:x len:4 xones: dest:x len:4 iconv_to_x: dest:x src1:i len:4 extract_i4: dest:i src1:x len:4 extract_i2: dest:i src1:x len:10 extract_i1: dest:i src1:x len:10 extract_r8: dest:f src1:x len:8 insert_i2: dest:x src1:x src2:i len:5 clob:1 extractx_u2: dest:i src1:x len:5 insertx_u1_slow: dest:x src1:i src2:i len:16 clob:x insertx_i4_slow: dest:x src1:x src2:i len:13 clob:x insertx_r4_slow: dest:x src1:x src2:f len:24 clob:1 insertx_r8_slow: dest:x src1:x src2:f len:24 clob:1 loadx_membase: dest:x src1:b len:7 storex_membase: dest:b src1:x len:7 storex_membase_reg: dest:b src1:x len:7 loadx_aligned_membase: dest:x src1:b len:7 storex_aligned_membase_reg: dest:b src1:x len:7 storex_nta_membase_reg: dest:b src1:x len:7 fconv_to_r8_x: dest:x src1:f len:14 xconv_r8_to_i4: dest:y src1:x len:7 prefetch_membase: src1:b len:4 expand_i2: dest:x src1:i len:15 expand_i4: dest:x src1:i len:9 expand_r4: dest:x src1:f len:20 expand_r8: dest:x src1:f len:20 liverange_start: len:0 liverange_end: len:0 gc_liveness_def: len:0 gc_liveness_use: len:0 gc_spill_slot_liveness_def: len:0 gc_param_slot_liveness_def: len:0 get_sp: dest:i len:6 set_sp: src1:i len:6 
fill_prof_call_ctx: src1:i len:128
get_last_error: dest:i len:32
# x86-class cpu description file
# this file is read by genmdesc to produce a table with all the relevant information
# about the cpu instructions that may be used by the register allocator, the scheduler
# and other parts of the arch-dependent part of mini.
#
# An opcode name is followed by a colon and optional specifiers.
# A specifier has a name, a colon and a value. Specifiers are separated by white space.
# Here is a description of the specifiers valid for this file and their possible values.
#
# dest:register  describes the destination register of an instruction
# src1:register  describes the first source register of an instruction
# src2:register  describes the second source register of an instruction
#
# register may have the following values:
#     i  integer register
#     b  base register (used in address references)
#     f  floating point register
#     a  EAX register
#     d  EDX register
#     s  ECX register
#     l  long reg (forced eax:edx)
#     L  long reg (dynamic)
#     y  the reg needs to be one of EAX,EBX,ECX,EDX (sete opcodes)
#     x  XMM reg (XMM0 - X007)
#
# len:number  describes the maximum length in bytes of the instruction
#     number is a positive integer. If the length is not specified
#     it defaults to zero. But lengths are only checked if the given opcode
#     is encountered during compilation. Some opcodes, like CONV_U4, are
#     transformed into other opcodes in the brg files, so they do not show up
#     during code generation.
#
# cost:number  describes how many cycles are needed to complete the instruction (unused)
#
# clob:spec  describes whether the instruction clobbers registers or has special needs
#
#     spec can be one of the following characters:
#     c  clobbers caller-save registers
#     1  clobbers the first source register
#     a  EAX is clobbered
#     d  EDX is clobbered
#     x  both the source operands are clobbered (xchg)
#
# flags:spec  describes whether the instruction uses or sets the flags (unused)
#
#     spec can be one of the following chars:
#     s  sets the flags
#     u  uses the flags
#     m  uses and modifies the flags
#
# res:spec  describes what units are used in the processor (unused)
#
# delay:  describes delay slots (unused)
#
# the required specifiers are: len, clob (if registers are clobbered), the register
# specifiers (if the registers are actually used) and flags (when scheduling is implemented).
#
# Templates can be defined by using the 'template' keyword instead of an opcode name.
# The template name is assigned from a (required) 'name' specifier.
# To apply a template to an opcode, just use the template:template_name specifier: any value
# defined by the template can be overridden by adding more specifiers after the template.
#
# See the code in mini-x86.c for more details on how the specifiers are used.
#
break: len:1
call: dest:a clob:c len:17
tailcall: len:255 clob:c
tailcall_membase: src1:b len:255 clob:c # FIXME len
tailcall_reg: src1:b len:255 clob:c # FIXME len

# tailcall_parameter models the size of moving one parameter,
# so that the required size of a branch around a tailcall can
# be accurately estimated; something like:
# void f1(volatile long *a)
# {
#     a[large] = a[another large]
# }
#
# This is like amd64 but without the rex bytes.
#
# Frame size is artificially limited to 1GB in mono_arch_tailcall_supported.
# This is presently redundant with tailcall len:255, as the limit of
# near branches is [-128, +127], after which the limit is
# [-2GB, +2GB-1].
# FIXME A fixed size sequence to move parameters would moot this.
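# A hypothetical template, to illustrate the mechanism described above
# (the only template actually present in this table is the commented-out
# 'ibalu' below):
#
#     template: name:mytmpl dest:i src1:i len:6
#     my_op: template:mytmpl clob:1
#
# my_op would inherit dest:i, src1:i and len:6 from the template and add
# its own clob:1.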
tailcall_parameter: len:12 br: len:5 seq_point: len:26 clob:c il_seq_point: len:0 int_beq: len:6 int_bge: len:6 int_bgt: len:6 int_ble: len:6 int_blt: len:6 int_bne_un: len:6 int_bge_un: len:6 int_bgt_un: len:6 int_ble_un: len:6 int_blt_un: len:6 label: len:0 #template: name:ibalu int_add: dest:i src1:i src2:i clob:1 len:2 int_sub: dest:i src1:i src2:i clob:1 len:2 int_mul: dest:i src1:i src2:i clob:1 len:3 int_div: dest:a src1:a src2:i len:15 clob:d int_div_un: dest:a src1:a src2:i len:15 clob:d int_rem: dest:d src1:a src2:i len:15 clob:a int_rem_un: dest:d src1:a src2:i len:15 clob:a int_and: dest:i src1:i src2:i clob:1 len:2 int_or: dest:i src1:i src2:i clob:1 len:2 int_xor: dest:i src1:i src2:i clob:1 len:2 int_shl: dest:i src1:i src2:s clob:1 len:2 int_shr: dest:i src1:i src2:s clob:1 len:2 int_shr_un: dest:i src1:i src2:s clob:1 len:2 int_min: dest:i src1:i src2:i len:16 clob:1 int_min_un: dest:i src1:i src2:i len:16 clob:1 int_max: dest:i src1:i src2:i len:16 clob:1 int_max_un: dest:i src1:i src2:i len:16 clob:1 int_neg: dest:i src1:i len:2 clob:1 int_not: dest:i src1:i len:2 clob:1 int_conv_to_i1: dest:i src1:y len:3 int_conv_to_i2: dest:i src1:i len:3 int_conv_to_i4: dest:i src1:i len:2 int_conv_to_r4: dest:f src1:i len:13 int_conv_to_r8: dest:f src1:i len:7 int_conv_to_u4: dest:i src1:i int_conv_to_u2: dest:i src1:i len:3 int_conv_to_u1: dest:i src1:y len:3 int_conv_to_i: dest:i src1:i len:3 int_mul_ovf: dest:i src1:i src2:i clob:1 len:9 int_mul_ovf_un: dest:i src1:i src2:i len:16 throw: src1:i len:13 rethrow: src1:i len:13 start_handler: len:16 endfinally: len:16 endfilter: src1:a len:16 get_ex_obj: dest:a len:16 ckfinite: dest:f src1:f len:32 ceq: dest:y len:6 cgt: dest:y len:6 cgt_un: dest:y len:6 clt: dest:y len:6 clt_un: dest:y len:6 localloc: dest:i src1:i len:120 compare: src1:i src2:i len:2 compare_imm: src1:i len:6 fcompare: src1:f src2:f clob:a len:9 arglist: src1:b len:10 check_this: src1:b len:3 voidcall: len:17 clob:c voidcall_reg: src1:i len:11 clob:c voidcall_membase: src1:b len:16 clob:c fcall: dest:f len:17 clob:c fcall_reg: dest:f src1:i len:11 clob:c fcall_membase: dest:f src1:b len:16 clob:c lcall: dest:l len:17 clob:c lcall_reg: dest:l src1:i len:11 clob:c lcall_membase: dest:l src1:b len:16 clob:c vcall: len:17 clob:c vcall_reg: src1:i len:11 clob:c vcall_membase: src1:b len:16 clob:c call_reg: dest:a src1:i len:11 clob:c call_membase: dest:a src1:b len:16 clob:c iconst: dest:i len:5 r4const: dest:f len:15 r8const: dest:f len:16 store_membase_imm: dest:b len:11 store_membase_reg: dest:b src1:i len:7 storei1_membase_imm: dest:b len:10 storei1_membase_reg: dest:b src1:y len:7 storei2_membase_imm: dest:b len:11 storei2_membase_reg: dest:b src1:i len:7 storei4_membase_imm: dest:b len:10 storei4_membase_reg: dest:b src1:i len:7 storei8_membase_imm: dest:b storei8_membase_reg: dest:b src1:i storer4_membase_reg: dest:b src1:f len:7 storer8_membase_reg: dest:b src1:f len:7 load_membase: dest:i src1:b len:7 loadi1_membase: dest:y src1:b len:7 loadu1_membase: dest:y src1:b len:7 loadi2_membase: dest:i src1:b len:7 loadu2_membase: dest:i src1:b len:7 loadi4_membase: dest:i src1:b len:7 loadu4_membase: dest:i src1:b len:7 loadi8_membase: dest:i src1:b loadr4_membase: dest:f src1:b len:7 loadr8_membase: dest:f src1:b len:7 loadu4_mem: dest:i len:9 move: dest:i src1:i len:2 addcc_imm: dest:i src1:i len:6 clob:1 add_imm: dest:i src1:i len:6 clob:1 subcc_imm: dest:i src1:i len:6 clob:1 sub_imm: dest:i src1:i len:6 clob:1 mul_imm: dest:i src1:i len:9 and_imm: dest:i src1:i 
len:6 clob:1 or_imm: dest:i src1:i len:6 clob:1 xor_imm: dest:i src1:i len:6 clob:1 shl_imm: dest:i src1:i len:6 clob:1 shr_imm: dest:i src1:i len:6 clob:1 shr_un_imm: dest:i src1:i len:6 clob:1 cond_exc_eq: len:6 cond_exc_ne_un: len:6 cond_exc_lt: len:6 cond_exc_lt_un: len:6 cond_exc_gt: len:6 cond_exc_gt_un: len:6 cond_exc_ge: len:6 cond_exc_ge_un: len:6 cond_exc_le: len:6 cond_exc_le_un: len:6 cond_exc_ov: len:6 cond_exc_no: len:6 cond_exc_c: len:6 cond_exc_nc: len:6 long_shl: dest:L src1:L src2:s clob:1 len:21 long_shr: dest:L src1:L src2:s clob:1 len:22 long_shr_un: dest:L src1:L src2:s clob:1 len:22 long_shr_imm: dest:L src1:L clob:1 len:10 long_shr_un_imm: dest:L src1:L clob:1 len:10 long_shl_imm: dest:L src1:L clob:1 len:10 float_beq: len:12 float_bne_un: len:18 float_blt: len:12 float_blt_un: len:20 float_bgt: len:12 float_bgt_un: len:20 float_bge: len:22 float_bge_un: len:12 float_ble: len:22 float_ble_un: len:12 float_add: dest:f src1:f src2:f len:2 float_sub: dest:f src1:f src2:f len:2 float_mul: dest:f src1:f src2:f len:2 float_div: dest:f src1:f src2:f len:2 float_div_un: dest:f src1:f src2:f len:2 float_rem: dest:f src1:f src2:f len:17 float_rem_un: dest:f src1:f src2:f len:17 float_neg: dest:f src1:f len:2 float_not: dest:f src1:f len:2 float_conv_to_i1: dest:y src1:f len:39 float_conv_to_i2: dest:y src1:f len:39 float_conv_to_i4: dest:i src1:f len:39 float_conv_to_i8: dest:L src1:f len:39 float_conv_to_u4: dest:i src1:f len:39 float_conv_to_u8: dest:L src1:f len:39 float_conv_to_u2: dest:y src1:f len:39 float_conv_to_u1: dest:y src1:f len:39 float_conv_to_ovf_i: dest:a src1:f len:30 float_conv_to_ovd_u: dest:a src1:f len:30 float_mul_ovf: float_ceq: dest:y src1:f src2:f len:25 float_cgt: dest:y src1:f src2:f len:25 float_cgt_un: dest:y src1:f src2:f len:37 float_clt: dest:y src1:f src2:f len:25 float_clt_un: dest:y src1:f src2:f len:32 float_cneq: dest:y src1:f src2:f len:25 float_cge: dest:y src1:f src2:f len:37 float_cle: dest:y src1:f src2:f len:37 call_handler: len:11 clob:c aotconst: dest:i len:5 load_gotaddr: dest:i len:64 got_entry: dest:i src1:b len:7 gc_safe_point: clob:c src1:i len:20 x86_test_null: src1:i len:2 x86_compare_membase_reg: src1:b src2:i len:7 x86_compare_membase_imm: src1:b len:11 x86_compare_membase8_imm: src1:b len:8 x86_compare_mem_imm: len:11 x86_compare_reg_membase: src1:i src2:b len:7 x86_inc_reg: dest:i src1:i clob:1 len:1 x86_inc_membase: src1:b len:7 x86_dec_reg: dest:i src1:i clob:1 len:1 x86_dec_membase: src1:b len:7 x86_add_membase_imm: src1:b len:11 x86_sub_membase_imm: src1:b len:11 x86_and_membase_imm: src1:b len:11 x86_or_membase_imm: src1:b len:11 x86_xor_membase_imm: src1:b len:11 x86_push: src1:i len:1 x86_push_imm: len:5 x86_push_membase: src1:b len:7 x86_push_obj: src1:b len:30 x86_push_got_entry: src1:b len:7 x86_lea: dest:i src1:i src2:i len:7 x86_lea_membase: dest:i src1:i len:10 x86_xchg: src1:i src2:i clob:x len:1 x86_fpop: src1:f len:2 x86_fp_load_i8: dest:f src1:b len:7 x86_fp_load_i4: dest:f src1:b len:7 x86_seteq_membase: src1:b len:7 x86_setne_membase: src1:b len:7 x86_add_reg_membase: dest:i src1:i src2:b clob:1 len:11 x86_sub_reg_membase: dest:i src1:i src2:b clob:1 len:11 x86_mul_reg_membase: dest:i src1:i src2:b clob:1 len:13 adc: dest:i src1:i src2:i len:2 clob:1 addcc: dest:i src1:i src2:i len:2 clob:1 subcc: dest:i src1:i src2:i len:2 clob:1 adc_imm: dest:i src1:i len:6 clob:1 sbb: dest:i src1:i src2:i len:2 clob:1 sbb_imm: dest:i src1:i len:6 clob:1 br_reg: src1:i len:2 sin: dest:f src1:f len:6 cos: dest:f 
src1:f len:6 abs: dest:f src1:f len:2 tan: dest:f src1:f len:49 atan: dest:f src1:f len:8 sqrt: dest:f src1:f len:2 round: dest:f src1:f len:2 bigmul: len:2 dest:l src1:a src2:i bigmul_un: len:2 dest:l src1:a src2:i sext_i1: dest:i src1:y len:3 sext_i2: dest:i src1:y len:3 tls_get: dest:i len:32 tls_set: src1:i len:20 atomic_add_i4: src1:b src2:i dest:i len:16 atomic_exchange_i4: src1:b src2:i dest:a len:24 atomic_cas_i4: src1:b src2:i src3:a dest:a len:24 memory_barrier: len:16 atomic_load_i1: dest:y src1:b len:7 atomic_load_u1: dest:y src1:b len:7 atomic_load_i2: dest:i src1:b len:7 atomic_load_u2: dest:i src1:b len:7 atomic_load_i4: dest:i src1:b len:7 atomic_load_u4: dest:i src1:b len:7 atomic_load_r4: dest:f src1:b len:10 atomic_load_r8: dest:f src1:b len:10 atomic_store_i1: dest:b src1:y len:10 atomic_store_u1: dest:b src1:y len:10 atomic_store_i2: dest:b src1:i len:10 atomic_store_u2: dest:b src1:i len:10 atomic_store_i4: dest:b src1:i len:10 atomic_store_u4: dest:b src1:i len:10 atomic_store_r4: dest:b src1:f len:10 atomic_store_r8: dest:b src1:f len:10 card_table_wbarrier: src1:a src2:i clob:d len:34 relaxed_nop: len:2 hard_nop: len:1 # Linear IR opcodes nop: len:0 dummy_use: src1:i len:0 dummy_iconst: dest:i len:0 dummy_r8const: dest:f len:0 dummy_r4const: dest:f len:0 not_reached: len:0 not_null: src1:i len:0 jump_table: dest:i len:5 int_adc: dest:i src1:i src2:i len:2 clob:1 int_addcc: dest:i src1:i src2:i len:2 clob:1 int_subcc: dest:i src1:i src2:i len:2 clob:1 int_sbb: dest:i src1:i src2:i len:2 clob:1 int_add_imm: dest:i src1:i len:6 clob:1 int_sub_imm: dest:i src1:i len:6 clob:1 int_mul_imm: dest:i src1:i len:9 int_div_imm: dest:a src1:a len:15 clob:d int_div_un_imm: dest:a src1:a len:15 clob:d int_rem_imm: dest:a src1:a len:15 clob:d int_rem_un_imm: dest:d src1:a len:15 clob:a int_and_imm: dest:i src1:i len:6 clob:1 int_or_imm: dest:i src1:i len:6 clob:1 int_xor_imm: dest:i src1:i len:6 clob:1 int_shl_imm: dest:i src1:i len:6 clob:1 int_shr_imm: dest:i src1:i len:6 clob:1 int_shr_un_imm: dest:i src1:i len:6 clob:1 int_conv_to_r_un: dest:f src1:i len:32 int_ceq: dest:y len:6 int_cgt: dest:y len:6 int_cgt_un: dest:y len:6 int_clt: dest:y len:6 int_clt_un: dest:y len:6 int_cneq: dest:y len:6 int_cge: dest:y len:6 int_cle: dest:y len:6 int_cge_un: dest:y len:6 int_cle_un: dest:y len:6 cond_exc_ieq: len:6 cond_exc_ine_un: len:6 cond_exc_ilt: len:6 cond_exc_ilt_un: len:6 cond_exc_igt: len:6 cond_exc_igt_un: len:6 cond_exc_ige: len:6 cond_exc_ige_un: len:6 cond_exc_ile: len:6 cond_exc_ile_un: len:6 cond_exc_iov: len:6 cond_exc_ino: len:6 cond_exc_ic: len:6 cond_exc_inc: len:6 icompare: src1:i src2:i len:2 icompare_imm: src1:i len:6 cmov_ieq: dest:i src1:i src2:i len:16 clob:1 cmov_ige: dest:i src1:i src2:i len:16 clob:1 cmov_igt: dest:i src1:i src2:i len:16 clob:1 cmov_ile: dest:i src1:i src2:i len:16 clob:1 cmov_ilt: dest:i src1:i src2:i len:16 clob:1 cmov_ine_un: dest:i src1:i src2:i len:16 clob:1 cmov_ige_un: dest:i src1:i src2:i len:16 clob:1 cmov_igt_un: dest:i src1:i src2:i len:16 clob:1 cmov_ile_un: dest:i src1:i src2:i len:16 clob:1 cmov_ilt_un: dest:i src1:i src2:i len:16 clob:1 long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:30 long_conv_to_r8_2: dest:f src1:i src2:i len:14 long_conv_to_r4_2: dest:f src1:i src2:i len:14 long_conv_to_r_un_2: dest:f src1:i src2:i len:40 fmove: dest:f src1:f move_f_to_i4: dest:i src1:f len:17 move_i4_to_f: dest:f src1:i len:17 float_conv_to_r4: dest:f src1:f len:12 load_mem: dest:i len:9 loadi4_mem: dest:i len:9 loadu1_mem: dest:i len:9 
loadu2_mem: dest:i len:9 vcall2: len:17 clob:c vcall2_reg: src1:i len:11 clob:c vcall2_membase: src1:b len:16 clob:c localloc_imm: dest:i len:120 x86_add_membase_reg: src1:b src2:i len:11 x86_sub_membase_reg: src1:b src2:i len:11 x86_and_membase_reg: src1:b src2:i len:11 x86_or_membase_reg: src1:b src2:i len:11 x86_xor_membase_reg: src1:b src2:i len:11 x86_mul_membase_reg: src1:b src2:i len:13 x86_and_reg_membase: dest:i src1:i src2:b clob:1 len:6 x86_or_reg_membase: dest:i src1:i src2:b clob:1 len:6 x86_xor_reg_membase: dest:i src1:i src2:b clob:1 len:6 x86_fxch: len:2 addps: dest:x src1:x src2:x len:3 clob:1 divps: dest:x src1:x src2:x len:3 clob:1 mulps: dest:x src1:x src2:x len:3 clob:1 subps: dest:x src1:x src2:x len:3 clob:1 maxps: dest:x src1:x src2:x len:3 clob:1 minps: dest:x src1:x src2:x len:3 clob:1 compps: dest:x src1:x src2:x len:4 clob:1 andps: dest:x src1:x src2:x len:3 clob:1 andnps: dest:x src1:x src2:x len:3 clob:1 orps: dest:x src1:x src2:x len:3 clob:1 xorps: dest:x src1:x src2:x len:3 clob:1 haddps: dest:x src1:x src2:x len:4 clob:1 hsubps: dest:x src1:x src2:x len:4 clob:1 addsubps: dest:x src1:x src2:x len:4 clob:1 dupps_low: dest:x src1:x len:4 dupps_high: dest:x src1:x len:4 addpd: dest:x src1:x src2:x len:4 clob:1 divpd: dest:x src1:x src2:x len:4 clob:1 mulpd: dest:x src1:x src2:x len:4 clob:1 subpd: dest:x src1:x src2:x len:4 clob:1 maxpd: dest:x src1:x src2:x len:4 clob:1 minpd: dest:x src1:x src2:x len:4 clob:1 comppd: dest:x src1:x src2:x len:5 clob:1 andpd: dest:x src1:x src2:x len:4 clob:1 andnpd: dest:x src1:x src2:x len:4 clob:1 orpd: dest:x src1:x src2:x len:4 clob:1 xorpd: dest:x src1:x src2:x len:4 clob:1 sqrtpd: dest:x src1:x len:4 clob:1 haddpd: dest:x src1:x src2:x len:5 clob:1 hsubpd: dest:x src1:x src2:x len:5 clob:1 addsubpd: dest:x src1:x src2:x len:5 clob:1 duppd: dest:x src1:x len:5 pand: dest:x src1:x src2:x len:4 clob:1 por: dest:x src1:x src2:x len:4 clob:1 pxor: dest:x src1:x src2:x len:4 clob:1 sqrtps: dest:x src1:x len:4 rsqrtps: dest:x src1:x len:4 rcpps: dest:x src1:x len:4 pshuflew_high: dest:x src1:x len:5 pshuflew_low: dest:x src1:x len:5 pshufled: dest:x src1:x len:5 shufps: dest:x src1:x src2:x len:4 clob:1 shufpd: dest:x src1:x src2:x len:5 clob:1 extract_mask: dest:i src1:x len:4 paddb: dest:x src1:x src2:x len:4 clob:1 paddw: dest:x src1:x src2:x len:4 clob:1 paddd: dest:x src1:x src2:x len:4 clob:1 paddq: dest:x src1:x src2:x len:4 clob:1 psubb: dest:x src1:x src2:x len:4 clob:1 psubw: dest:x src1:x src2:x len:4 clob:1 psubd: dest:x src1:x src2:x len:4 clob:1 psubq: dest:x src1:x src2:x len:4 clob:1 pmaxb_un: dest:x src1:x src2:x len:4 clob:1 pmaxw_un: dest:x src1:x src2:x len:5 clob:1 pmaxd_un: dest:x src1:x src2:x len:5 clob:1 pmaxb: dest:x src1:x src2:x len:5 clob:1 pmaxw: dest:x src1:x src2:x len:4 clob:1 pmaxd: dest:x src1:x src2:x len:5 clob:1 pavgb_un: dest:x src1:x src2:x len:4 clob:1 pavgw_un: dest:x src1:x src2:x len:4 clob:1 pminb_un: dest:x src1:x src2:x len:4 clob:1 pminw_un: dest:x src1:x src2:x len:5 clob:1 pmind_un: dest:x src1:x src2:x len:5 clob:1 pminb: dest:x src1:x src2:x len:5 clob:1 pminw: dest:x src1:x src2:x len:4 clob:1 pmind: dest:x src1:x src2:x len:5 clob:1 pcmpeqb: dest:x src1:x src2:x len:4 clob:1 pcmpeqw: dest:x src1:x src2:x len:4 clob:1 pcmpeqd: dest:x src1:x src2:x len:4 clob:1 pcmpeqq: dest:x src1:x src2:x len:5 clob:1 pcmpgtb: dest:x src1:x src2:x len:4 clob:1 pcmpgtw: dest:x src1:x src2:x len:4 clob:1 pcmpgtd: dest:x src1:x src2:x len:4 clob:1 pcmpgtq: dest:x src1:x src2:x len:5 clob:1 
psum_abs_diff: dest:x src1:x src2:x len:4 clob:1
unpack_lowb: dest:x src1:x src2:x len:4 clob:1
unpack_loww: dest:x src1:x src2:x len:4 clob:1
unpack_lowd: dest:x src1:x src2:x len:4 clob:1
unpack_lowq: dest:x src1:x src2:x len:4 clob:1
unpack_lowps: dest:x src1:x src2:x len:3 clob:1
unpack_lowpd: dest:x src1:x src2:x len:4 clob:1
unpack_highb: dest:x src1:x src2:x len:4 clob:1
unpack_highw: dest:x src1:x src2:x len:4 clob:1
unpack_highd: dest:x src1:x src2:x len:4 clob:1
unpack_highq: dest:x src1:x src2:x len:4 clob:1
unpack_highps: dest:x src1:x src2:x len:3 clob:1
unpack_highpd: dest:x src1:x src2:x len:4 clob:1
packw: dest:x src1:x src2:x len:4 clob:1
packd: dest:x src1:x src2:x len:4 clob:1
packw_un: dest:x src1:x src2:x len:4 clob:1
packd_un: dest:x src1:x src2:x len:5 clob:1
paddb_sat: dest:x src1:x src2:x len:4 clob:1
paddb_sat_un: dest:x src1:x src2:x len:4 clob:1
paddw_sat: dest:x src1:x src2:x len:4 clob:1
paddw_sat_un: dest:x src1:x src2:x len:4 clob:1
psubb_sat: dest:x src1:x src2:x len:4 clob:1
psubb_sat_un: dest:x src1:x src2:x len:4 clob:1
psubw_sat: dest:x src1:x src2:x len:4 clob:1
psubw_sat_un: dest:x src1:x src2:x len:4 clob:1
pmulw: dest:x src1:x src2:x len:4 clob:1
pmuld: dest:x src1:x src2:x len:5 clob:1
pmulq: dest:x src1:x src2:x len:4 clob:1
pmulw_high_un: dest:x src1:x src2:x len:4 clob:1
pmulw_high: dest:x src1:x src2:x len:4 clob:1
pshrw: dest:x src1:x len:5 clob:1
pshrw_reg: dest:x src1:x src2:x len:4 clob:1
psarw: dest:x src1:x len:5 clob:1
psarw_reg: dest:x src1:x src2:x len:4 clob:1
pshlw: dest:x src1:x len:5 clob:1
pshlw_reg: dest:x src1:x src2:x len:4 clob:1
pshrd: dest:x src1:x len:5 clob:1
pshrd_reg: dest:x src1:x src2:x len:4 clob:1
psard: dest:x src1:x len:5 clob:1
psard_reg: dest:x src1:x src2:x len:4 clob:1
pshld: dest:x src1:x len:5 clob:1
pshld_reg: dest:x src1:x src2:x len:4 clob:1
pshrq: dest:x src1:x len:5 clob:1
pshrq_reg: dest:x src1:x src2:x len:4 clob:1
pshlq: dest:x src1:x len:5 clob:1
pshlq_reg: dest:x src1:x src2:x len:4 clob:1
cvtdq2pd: dest:x src1:x len:4 clob:1
cvtdq2ps: dest:x src1:x len:3 clob:1
cvtpd2dq: dest:x src1:x len:4 clob:1
cvtpd2ps: dest:x src1:x len:4 clob:1
cvtps2dq: dest:x src1:x len:4 clob:1
cvtps2pd: dest:x src1:x len:3 clob:1
cvttpd2dq: dest:x src1:x len:4 clob:1
cvttps2dq: dest:x src1:x len:4 clob:1
xmove: dest:x src1:x len:4
xzero: dest:x len:4
xones: dest:x len:4
iconv_to_x: dest:x src1:i len:4
extract_i4: dest:i src1:x len:4
extract_i2: dest:i src1:x len:10
extract_i1: dest:i src1:x len:10
extract_r8: dest:f src1:x len:8
insert_i2: dest:x src1:x src2:i len:5 clob:1
extractx_u2: dest:i src1:x len:5
insertx_u1_slow: dest:x src1:i src2:i len:16 clob:x
insertx_i4_slow: dest:x src1:x src2:i len:13 clob:x
insertx_r4_slow: dest:x src1:x src2:f len:24 clob:1
insertx_r8_slow: dest:x src1:x src2:f len:24 clob:1
loadx_membase: dest:x src1:b len:7
storex_membase: dest:b src1:x len:7
storex_membase_reg: dest:b src1:x len:7
loadx_aligned_membase: dest:x src1:b len:7
storex_aligned_membase_reg: dest:b src1:x len:7
storex_nta_membase_reg: dest:b src1:x len:7
fconv_to_r8_x: dest:x src1:f len:14
xconv_r8_to_i4: dest:y src1:x len:7
prefetch_membase: src1:b len:4
expand_i2: dest:x src1:i len:15
expand_i4: dest:x src1:i len:9
expand_r4: dest:x src1:f len:20
expand_r8: dest:x src1:f len:20
liverange_start: len:0
liverange_end: len:0
gc_liveness_def: len:0
gc_liveness_use: len:0
gc_spill_slot_liveness_def: len:0
gc_param_slot_liveness_def: len:0
get_sp: dest:i len:6
set_sp: src1:i len:6
fill_prof_call_ctx: src1:i len:128
get_last_error: dest:i len:32
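# Illustrative note (editor's addition, not part of the original table): each
# entry describes one IR opcode for the register allocator and code emitter.
# For example, the entry
#
#   pshrw: dest:x src1:x len:5 clob:1
#
# says OP_PSHRW produces an xmm destination from an xmm source and needs at
# most 5 bytes of machine code; assuming the usual meaning of clob:1 in these
# cpu description files, it also clobbers its first source register, i.e. the
# two-address x86 form where the allocator must assign dreg the same register
# as sreg1.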
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/method-to-ir.c
/** * \file * Convert CIL to the JIT internal representation * * Author: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * * (C) 2002 Ximian, Inc. * Copyright 2003-2010 Novell, Inc (http://www.novell.com) * Copyright 2011 Xamarin, Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include <config.h> #include <glib.h> #include <mono/utils/mono-compiler.h> #include "mini.h" #ifndef DISABLE_JIT #include <signal.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <math.h> #include <string.h> #include <ctype.h> #ifdef HAVE_SYS_TIME_H #include <sys/time.h> #endif #ifdef HAVE_ALLOCA_H #include <alloca.h> #endif #include <mono/utils/memcheck.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/assembly.h> #include <mono/metadata/assembly-internals.h> #include <mono/metadata/attrdefs.h> #include <mono/metadata/loader.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/class.h> #include <mono/metadata/class-abi-details.h> #include <mono/metadata/object.h> #include <mono/metadata/exception.h> #include <mono/metadata/exception-internals.h> #include <mono/metadata/opcodes.h> #include <mono/metadata/mono-endian.h> #include <mono/metadata/tokentype.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/marshal.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/debug-internals.h> #include <mono/metadata/gc-internals.h> #include <mono/metadata/threads-types.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/profiler.h> #include <mono/metadata/monitor.h> #include <mono/utils/mono-memory-model.h> #include <mono/utils/mono-error-internals.h> #include <mono/metadata/mono-basic-block.h> #include <mono/metadata/reflection-internals.h> #include <mono/utils/mono-threads-coop.h> #include <mono/utils/mono-utils-debug.h> #include <mono/utils/mono-logger-internals.h> #include <mono/metadata/verify-internals.h> #include <mono/metadata/icall-decl.h> #include "mono/metadata/icall-signatures.h" #include "trace.h" #include "ir-emit.h" #include "jit-icalls.h" #include <mono/jit/jit.h> #include "seq-points.h" #include "aot-compiler.h" #include "mini-llvm.h" #include "mini-runtime.h" #include "llvmonly-runtime.h" #include "mono/utils/mono-tls-inline.h" #define BRANCH_COST 10 #define CALL_COST 10 /* Used for the JIT */ #define INLINE_LENGTH_LIMIT 20 /* * The aot and jit inline limits should be different, * since aot sees the whole program so we can let opt inline methods for us, * while the jit only sees one method, so we have to inline things ourselves. */ /* Used by LLVM AOT */ #define LLVM_AOT_INLINE_LENGTH_LIMIT 30 /* Used to LLVM JIT */ #define LLVM_JIT_INLINE_LENGTH_LIMIT 100 static const gboolean debug_tailcall = FALSE; // logging static const gboolean debug_tailcall_try_all = FALSE; // consider any call followed by ret gboolean mono_tailcall_print_enabled (void) { return debug_tailcall || MONO_TRACE_IS_TRACED (G_LOG_LEVEL_DEBUG, MONO_TRACE_TAILCALL); } void mono_tailcall_print (const char *format, ...) 
{ if (!mono_tailcall_print_enabled ()) return; va_list args; va_start (args, format); g_printv (format, args); va_end (args); } /* These have 'cfg' as an implicit argument */ #define INLINE_FAILURE(msg) do { \ if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \ inline_failure (cfg, msg); \ goto exception_exit; \ } \ } while (0) #define CHECK_CFG_EXCEPTION do {\ if (cfg->exception_type != MONO_EXCEPTION_NONE) \ goto exception_exit; \ } while (0) #define FIELD_ACCESS_FAILURE(method, field) do { \ field_access_failure ((cfg), (method), (field)); \ goto exception_exit; \ } while (0) #define GENERIC_SHARING_FAILURE(opcode) do { \ if (cfg->gshared) { \ gshared_failure (cfg, opcode, __FILE__, __LINE__); \ goto exception_exit; \ } \ } while (0) #define GSHAREDVT_FAILURE(opcode) do { \ if (cfg->gsharedvt) { \ gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \ goto exception_exit; \ } \ } while (0) #define OUT_OF_MEMORY_FAILURE do { \ mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \ mono_error_set_out_of_memory (cfg->error, ""); \ goto exception_exit; \ } while (0) #define DISABLE_AOT(cfg) do { \ if ((cfg)->verbose_level >= 2) \ printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \ (cfg)->disable_aot = TRUE; \ } while (0) #define LOAD_ERROR do { \ break_on_unverified (); \ mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \ goto exception_exit; \ } while (0) #define TYPE_LOAD_ERROR(klass) do { \ cfg->exception_ptr = klass; \ LOAD_ERROR; \ } while (0) #define CHECK_CFG_ERROR do {\ if (!is_ok (cfg->error)) { \ mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \ goto mono_error_exit; \ } \ } while (0) int mono_op_to_op_imm (int opcode); int mono_op_to_op_imm_noemul (int opcode); static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always, gboolean *is_empty); static MonoInst* convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins); /* helper methods signatures */ /* type loading helpers */ static GENERATE_GET_CLASS_WITH_CACHE (iequatable, "System", "IEquatable`1") static GENERATE_GET_CLASS_WITH_CACHE (geqcomparer, "System.Collections.Generic", "GenericEqualityComparer`1"); /* * Instruction metadata */ #ifdef MINI_OP #undef MINI_OP #endif #ifdef MINI_OP3 #undef MINI_OP3 #endif #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ', #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3, #define NONE ' ' #define IREG 'i' #define FREG 'f' #define VREG 'v' #define XREG 'x' #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == TARGET_SIZEOF_VOID_P #define LREG IREG #else #define LREG 'l' #endif /* keep in sync with the enum in mini.h */ const char mini_ins_info[] = { #include "mini-ops.h" }; #undef MINI_OP #undef MINI_OP3 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)), #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))), /* * This should contain the index of the last sreg + 1. This is not the same * as the number of sregs for opcodes like IA64_CMP_EQ_IMM. 
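 *
 * Illustrative note (editor's addition, not in the original source): the
 * MINI_OP/MINI_OP3 redefinitions above derive each count from the operand
 * columns of mini-ops.h, so an opcode declared with two source registers
 * contributes a 2 here; e.g., assuming the usual mini-ops.h entry
 *
 *   MINI_OP(OP_IADD, "int_add", IREG, IREG, IREG)
 *
 * src2 != NONE, so the corresponding slot in mini_ins_sreg_counts is 2.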
*/ const gint8 mini_ins_sreg_counts[] = { #include "mini-ops.h" }; #undef MINI_OP #undef MINI_OP3 guint32 mono_alloc_ireg (MonoCompile *cfg) { return alloc_ireg (cfg); } guint32 mono_alloc_lreg (MonoCompile *cfg) { return alloc_lreg (cfg); } guint32 mono_alloc_freg (MonoCompile *cfg) { return alloc_freg (cfg); } guint32 mono_alloc_preg (MonoCompile *cfg) { return alloc_preg (cfg); } guint32 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type) { return alloc_dreg (cfg, stack_type); } /* * mono_alloc_ireg_ref: * * Allocate an IREG, and mark it as holding a GC ref. */ guint32 mono_alloc_ireg_ref (MonoCompile *cfg) { return alloc_ireg_ref (cfg); } /* * mono_alloc_ireg_mp: * * Allocate an IREG, and mark it as holding a managed pointer. */ guint32 mono_alloc_ireg_mp (MonoCompile *cfg) { return alloc_ireg_mp (cfg); } /* * mono_alloc_ireg_copy: * * Allocate an IREG with the same GC type as VREG. */ guint32 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg) { if (vreg_is_ref (cfg, vreg)) return alloc_ireg_ref (cfg); else if (vreg_is_mp (cfg, vreg)) return alloc_ireg_mp (cfg); else return alloc_ireg (cfg); } guint mono_type_to_regmove (MonoCompile *cfg, MonoType *type) { if (m_type_is_byref (type)) return OP_MOVE; type = mini_get_underlying_type (type); handle_enum: switch (type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_MOVE; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_MOVE; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_MOVE; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: return OP_MOVE; case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: return OP_MOVE; case MONO_TYPE_I8: case MONO_TYPE_U8: #if SIZEOF_REGISTER == 8 return OP_MOVE; #else return OP_LMOVE; #endif case MONO_TYPE_R4: return cfg->r4fp ? 
OP_RMOVE : OP_FMOVE; case MONO_TYPE_R8: return OP_FMOVE; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (type->data.klass)) { type = mono_class_enum_basetype_internal (type->data.klass); goto handle_enum; } if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type))) return OP_XMOVE; return OP_VMOVE; case MONO_TYPE_TYPEDBYREF: return OP_VMOVE; case MONO_TYPE_GENERICINST: if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type))) return OP_XMOVE; type = m_class_get_byval_arg (type->data.generic_class->container_class); goto handle_enum; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: g_assert (cfg->gshared); if (mini_type_var_is_vt (type)) return OP_VMOVE; else return mono_type_to_regmove (cfg, mini_get_underlying_type (type)); default: g_error ("unknown type 0x%02x in type_to_regstore", type->type); } return -1; } void mono_print_bb (MonoBasicBlock *bb, const char *msg) { int i; MonoInst *tree; GString *str = g_string_new (""); g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num); for (i = 0; i < bb->in_count; ++i) g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn); g_string_append_printf (str, ", OUT: "); for (i = 0; i < bb->out_count; ++i) g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn); g_string_append_printf (str, " ]\n"); g_print ("%s", str->str); g_string_free (str, TRUE); for (tree = bb->code; tree; tree = tree->next) mono_print_ins_index (-1, tree); } static MONO_NEVER_INLINE gboolean break_on_unverified (void) { if (mini_debug_options.break_on_unverified) { G_BREAKPOINT (); return TRUE; } return FALSE; } static void clear_cfg_error (MonoCompile *cfg) { mono_error_cleanup (cfg->error); error_init (cfg->error); } static MONO_NEVER_INLINE void field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field) { char *method_fname = mono_method_full_name (method, TRUE); char *field_fname = mono_field_full_name (field); mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); mono_error_set_generic_error (cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); g_free (method_fname); g_free (field_fname); } static MONO_NEVER_INLINE void inline_failure (MonoCompile *cfg, const char *msg) { if (cfg->verbose_level >= 2) printf ("inline failed: %s\n", msg); mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED); } static MONO_NEVER_INLINE void gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line) { if (cfg->verbose_level > 2) printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name (opcode), line); mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); } static MONO_NEVER_INLINE void gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line) { cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line); if (cfg->verbose_level >= 2) printf ("%s\n", cfg->exception_message); mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); } void mini_set_inline_failure (MonoCompile *cfg, const 
char *msg) { if (cfg->verbose_level >= 2) printf ("inline failed: %s\n", msg); mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED); } /* * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e. * foo<T> (int i) { ldarg.0; box T; } */ #define UNVERIFIED do { \ if (cfg->gsharedvt) { \ if (cfg->verbose_level > 2) \ printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \ mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \ goto exception_exit; \ } \ break_on_unverified (); \ goto unverified; \ } while (0) #define GET_BBLOCK(cfg,tblock,ip) do { \ (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \ if (!(tblock)) { \ if ((ip) >= end || (ip) < header->code) UNVERIFIED; \ NEW_BBLOCK (cfg, (tblock)); \ (tblock)->cil_code = (ip); \ ADD_BBLOCK (cfg, (tblock)); \ } \ } while (0) /* Emit conversions so both operands of a binary opcode are of the same type */ static void add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref) { MonoInst *arg1 = *arg1_ref; MonoInst *arg2 = *arg2_ref; if (cfg->r4fp && ((arg1->type == STACK_R4 && arg2->type == STACK_R8) || (arg1->type == STACK_R8 && arg2->type == STACK_R4))) { MonoInst *conv; /* Mixing r4/r8 is allowed by the spec */ if (arg1->type == STACK_R4) { int dreg = alloc_freg (cfg); EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg); conv->type = STACK_R8; ins->sreg1 = dreg; *arg1_ref = conv; } if (arg2->type == STACK_R4) { int dreg = alloc_freg (cfg); EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg); conv->type = STACK_R8; ins->sreg2 = dreg; *arg2_ref = conv; } } #if SIZEOF_REGISTER == 8 /* FIXME: Need to add many more cases */ if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { MonoInst *widen; int dr = alloc_preg (cfg); EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); (ins)->sreg2 = widen->dreg; } #endif } #define ADD_UNOP(op) do { \ MONO_INST_NEW (cfg, ins, (op)); \ sp--; \ ins->sreg1 = sp [0]->dreg; \ type_from_op (cfg, ins, sp [0], NULL); \ CHECK_TYPE (ins); \ (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \ MONO_ADD_INS ((cfg)->cbb, (ins)); \ *sp++ = mono_decompose_opcode (cfg, ins); \ } while (0) #define ADD_BINCOND(next_block) do { \ MonoInst *cmp; \ sp -= 2; \ MONO_INST_NEW(cfg, cmp, OP_COMPARE); \ cmp->sreg1 = sp [0]->dreg; \ cmp->sreg2 = sp [1]->dreg; \ add_widen_op (cfg, cmp, &sp [0], &sp [1]); \ type_from_op (cfg, cmp, sp [0], sp [1]); \ CHECK_TYPE (cmp); \ type_from_op (cfg, ins, sp [0], sp [1]); \ ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \ GET_BBLOCK (cfg, tblock, target); \ link_bblock (cfg, cfg->cbb, tblock); \ ins->inst_true_bb = tblock; \ if ((next_block)) { \ link_bblock (cfg, cfg->cbb, (next_block)); \ ins->inst_false_bb = (next_block); \ start_new_bblock = 1; \ } else { \ GET_BBLOCK (cfg, tblock, next_ip); \ link_bblock (cfg, cfg->cbb, tblock); \ ins->inst_false_bb = tblock; \ start_new_bblock = 2; \ } \ if (sp != stack_start) { \ handle_stack_args (cfg, stack_start, sp - stack_start); \ CHECK_UNVERIFIABLE (cfg); \ } \ MONO_ADD_INS (cfg->cbb, cmp); \ MONO_ADD_INS (cfg->cbb, ins); \ } while (0) /* * * link_bblock: Links two basic blocks * * links two basic blocks in the control flow graph, the 'from' * argument is the starting block and the 'to' argument is the block * the control flow ends to after 'from'. 
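 *
 * Illustrative note (editor's addition, not in the original source): after
 * link_bblock (cfg, bb1, bb2), bb2 appears exactly once in bb1->out_bb and
 * bb1 exactly once in bb2->in_bb; a second call with the same pair is a
 * no-op, since both loops in the body look for an existing edge before
 * growing the mempool-allocated arrays.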
*/ static void link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to) { MonoBasicBlock **newa; int i, found; #if 0 if (from->cil_code) { if (to->cil_code) printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code); else printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code); } else { if (to->cil_code) printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code); else printf ("edge from entry to exit\n"); } #endif found = FALSE; for (i = 0; i < from->out_count; ++i) { if (to == from->out_bb [i]) { found = TRUE; break; } } if (!found) { newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1)); for (i = 0; i < from->out_count; ++i) { newa [i] = from->out_bb [i]; } newa [i] = to; from->out_count++; from->out_bb = newa; } found = FALSE; for (i = 0; i < to->in_count; ++i) { if (from == to->in_bb [i]) { found = TRUE; break; } } if (!found) { newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1)); for (i = 0; i < to->in_count; ++i) { newa [i] = to->in_bb [i]; } newa [i] = from; to->in_count++; to->in_bb = newa; } } void mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to) { link_bblock (cfg, from, to); } static void mono_create_spvar_for_region (MonoCompile *cfg, int region); static void mark_bb_in_region (MonoCompile *cfg, guint region, uint32_t start, uint32_t end) { MonoBasicBlock *bb = cfg->cil_offset_to_bb [start]; //start must exist in cil_offset_to_bb as those are il offsets used by EH which should have GET_BBLOCK early. g_assert (bb); if (cfg->verbose_level > 1) g_print ("FIRST BB for %d is BB_%d\n", start, bb->block_num); for (; bb && bb->real_offset < end; bb = bb->next_bb) { //no one claimed this bb, take it. 
if (bb->region == -1) { bb->region = region; continue; } //current region is an early handler, bail if ((bb->region & (0xf << 4)) != MONO_REGION_TRY) { continue; } //current region is a try, only overwrite if new region is a handler if ((region & (0xf << 4)) != MONO_REGION_TRY) { bb->region = region; } } if (cfg->spvars) mono_create_spvar_for_region (cfg, region); } static void compute_bb_regions (MonoCompile *cfg) { MonoBasicBlock *bb; MonoMethodHeader *header = cfg->header; int i; for (bb = cfg->bb_entry; bb; bb = bb->next_bb) bb->region = -1; for (i = 0; i < header->num_clauses; ++i) { MonoExceptionClause *clause = &header->clauses [i]; if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) mark_bb_in_region (cfg, ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags, clause->data.filter_offset, clause->handler_offset); guint handler_region; if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY) handler_region = ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags; else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) handler_region = ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags; else handler_region = ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags; mark_bb_in_region (cfg, handler_region, clause->handler_offset, clause->handler_offset + clause->handler_len); mark_bb_in_region (cfg, ((i + 1) << 8) | clause->flags, clause->try_offset, clause->try_offset + clause->try_len); } if (cfg->verbose_level > 2) { MonoBasicBlock *bb; for (bb = cfg->bb_entry; bb; bb = bb->next_bb) g_print ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region); } } static gboolean ip_in_finally_clause (MonoCompile *cfg, int offset) { MonoMethodHeader *header = cfg->header; MonoExceptionClause *clause; int i; for (i = 0; i < header->num_clauses; ++i) { clause = &header->clauses [i]; if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT) continue; if (MONO_OFFSET_IN_HANDLER (clause, offset)) return TRUE; } return FALSE; } /* Find clauses between ip and target, from inner to outer */ static GList* mono_find_leave_clauses (MonoCompile *cfg, guchar *ip, guchar *target) { MonoMethodHeader *header = cfg->header; MonoExceptionClause *clause; int i; GList *res = NULL; for (i = 0; i < header->num_clauses; ++i) { clause = &header->clauses [i]; if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) && (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) { MonoLeaveClause *leave = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoLeaveClause)); leave->index = i; leave->clause = clause; res = g_list_append_mempool (cfg->mempool, res, leave); } } return res; } static void mono_create_spvar_for_region (MonoCompile *cfg, int region) { MonoInst *var; var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region)); if (var) return; var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); /* prevent it from being register allocated */ var->flags |= MONO_INST_VOLATILE; g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var); } MonoInst * mono_find_exvar_for_offset (MonoCompile *cfg, int offset) { return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset)); } static MonoInst* mono_create_exvar_for_offset (MonoCompile *cfg, int offset) { MonoInst *var; var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset)); if (var) return var; var = mono_compile_create_var (cfg, mono_get_object_type (), OP_LOCAL); /* prevent it from being register allocated */ var->flags |= MONO_INST_VOLATILE; 
g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var); return var; } /* * Returns the type used in the eval stack when @type is loaded. * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases. */ void mini_type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst) { MonoClass *klass; type = mini_get_underlying_type (type); inst->klass = klass = mono_class_from_mono_type_internal (type); if (m_type_is_byref (type)) { inst->type = STACK_MP; return; } handle_enum: switch (type->type) { case MONO_TYPE_VOID: inst->type = STACK_INV; return; case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: inst->type = STACK_I4; return; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: inst->type = STACK_PTR; return; case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: inst->type = STACK_OBJ; return; case MONO_TYPE_I8: case MONO_TYPE_U8: inst->type = STACK_I8; return; case MONO_TYPE_R4: inst->type = cfg->r4_stack_type; break; case MONO_TYPE_R8: inst->type = STACK_R8; return; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (type->data.klass)) { type = mono_class_enum_basetype_internal (type->data.klass); goto handle_enum; } else { inst->klass = klass; inst->type = STACK_VTYPE; return; } case MONO_TYPE_TYPEDBYREF: inst->klass = mono_defaults.typed_reference_class; inst->type = STACK_VTYPE; return; case MONO_TYPE_GENERICINST: type = m_class_get_byval_arg (type->data.generic_class->container_class); goto handle_enum; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: g_assert (cfg->gshared); if (mini_is_gsharedvt_type (type)) { g_assert (cfg->gsharedvt); inst->type = STACK_VTYPE; } else { mini_type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst); } return; default: g_error ("unknown type 0x%02x in eval stack type", type->type); } } /* * The following tables are used to quickly validate the IL code in type_from_op (). */ #define IF_P8(v) (SIZEOF_VOID_P == 8 ? 
v : STACK_INV) #define IF_P8_I8 IF_P8(STACK_I8) #define IF_P8_PTR IF_P8(STACK_PTR) static const char bin_num_table [STACK_MAX] [STACK_MAX] = { {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_I4, IF_P8_I8, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV}, {STACK_INV, IF_P8_I8, STACK_I8, IF_P8_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_PTR, IF_P8_PTR, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8}, {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4} }; static const char neg_table [] = { STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4 }; /* reduce the size of this table */ static const char bin_int_table [STACK_MAX] [STACK_MAX] = { {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_I4, IF_P8_I8, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, IF_P8_I8, STACK_I8, IF_P8_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_PTR, IF_P8_PTR, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV} }; #define P1 (SIZEOF_VOID_P == 8) static const char bin_comp_table [STACK_MAX] [STACK_MAX] = { /* Inv i L p F & O vt r4 */ {0}, {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */ {0, 0, 1,P1, 0, 0, 0, 0}, /* L, int64 */ {0, 1,P1, 1, 0, 2, 4, 0}, /* p, ptr */ {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */ {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */ {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */ {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */ {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */ }; #undef P1 /* reduce the size of this table */ static const char shift_table [STACK_MAX] [STACK_MAX] = { {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV} }; /* * Tables to map from the non-specific opcode to the matching * type-specific opcode. 
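 *
 * Illustrative note (editor's addition, not in the original source): the
 * tables below hold opcode deltas indexed by eval-stack type. In
 * type_from_op (), a CEE_ADD whose operands are both STACK_I4 is rewritten as
 *
 *   ins->type    = bin_num_table [STACK_I4] [STACK_I4];   (STACK_I4)
 *   ins->opcode += binops_op_map [STACK_I4];              (OP_IADD - CEE_ADD)
 *
 * so the generic CEE_ADD becomes the type-specific OP_IADD.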
*/ /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */ static const guint16 binops_op_map [STACK_MAX] = { 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD }; /* handles from CEE_NEG to CEE_CONV_U8 */ static const guint16 unops_op_map [STACK_MAX] = { 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG }; /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */ static const guint16 ovfops_op_map [STACK_MAX] = { 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2 }; /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */ static const guint16 ovf2ops_op_map [STACK_MAX] = { 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN }; /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */ static const guint16 ovf3ops_op_map [STACK_MAX] = { 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1 }; /* handles from CEE_BEQ to CEE_BLT_UN */ static const guint16 beqops_op_map [STACK_MAX] = { 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ }; /* handles from CEE_CEQ to CEE_CLT_UN */ static const guint16 ceqops_op_map [STACK_MAX] = { 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ }; /* * Sets ins->type (the type on the eval stack) according to the * type of the opcode and the arguments to it. * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV. * * FIXME: this function sets ins->type unconditionally in some cases, but * it should set it to invalid for some types (a conv.x on an object) */ static void type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2) { switch (ins->opcode) { /* binops */ case MONO_CEE_ADD: case MONO_CEE_SUB: case MONO_CEE_MUL: case MONO_CEE_DIV: case MONO_CEE_REM: /* FIXME: check unverifiable args for STACK_MP */ ins->type = bin_num_table [src1->type] [src2->type]; ins->opcode += binops_op_map [ins->type]; break; case MONO_CEE_DIV_UN: case MONO_CEE_REM_UN: case MONO_CEE_AND: case MONO_CEE_OR: case MONO_CEE_XOR: ins->type = bin_int_table [src1->type] [src2->type]; ins->opcode += binops_op_map [ins->type]; break; case MONO_CEE_SHL: case MONO_CEE_SHR: case MONO_CEE_SHR_UN: ins->type = shift_table [src1->type] [src2->type]; ins->opcode += binops_op_map [ins->type]; break; case OP_COMPARE: case OP_LCOMPARE: case OP_ICOMPARE: ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV; if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP)))) ins->opcode = OP_LCOMPARE; else if (src1->type == STACK_R4) ins->opcode = OP_RCOMPARE; else if (src1->type == STACK_R8) ins->opcode = OP_FCOMPARE; else ins->opcode = OP_ICOMPARE; break; case OP_ICOMPARE_IMM: ins->type = bin_comp_table [src1->type] [src1->type] ? 
STACK_I4 : STACK_INV; if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP)))) ins->opcode = OP_LCOMPARE_IMM; break; case MONO_CEE_BEQ: case MONO_CEE_BGE: case MONO_CEE_BGT: case MONO_CEE_BLE: case MONO_CEE_BLT: case MONO_CEE_BNE_UN: case MONO_CEE_BGE_UN: case MONO_CEE_BGT_UN: case MONO_CEE_BLE_UN: case MONO_CEE_BLT_UN: ins->opcode += beqops_op_map [src1->type]; break; case OP_CEQ: ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV; ins->opcode += ceqops_op_map [src1->type]; break; case OP_CGT: case OP_CGT_UN: case OP_CLT: case OP_CLT_UN: ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV; ins->opcode += ceqops_op_map [src1->type]; break; /* unops */ case MONO_CEE_NEG: ins->type = neg_table [src1->type]; ins->opcode += unops_op_map [ins->type]; break; case MONO_CEE_NOT: if (src1->type >= STACK_I4 && src1->type <= STACK_PTR) ins->type = src1->type; else ins->type = STACK_INV; ins->opcode += unops_op_map [ins->type]; break; case MONO_CEE_CONV_I1: case MONO_CEE_CONV_I2: case MONO_CEE_CONV_I4: case MONO_CEE_CONV_U4: ins->type = STACK_I4; ins->opcode += unops_op_map [src1->type]; break; case MONO_CEE_CONV_R_UN: ins->type = STACK_R8; switch (src1->type) { case STACK_I4: case STACK_PTR: ins->opcode = OP_ICONV_TO_R_UN; break; case STACK_I8: ins->opcode = OP_LCONV_TO_R_UN; break; case STACK_R4: ins->opcode = OP_RCONV_TO_R8; break; case STACK_R8: ins->opcode = OP_FMOVE; break; } break; case MONO_CEE_CONV_OVF_I1: case MONO_CEE_CONV_OVF_U1: case MONO_CEE_CONV_OVF_I2: case MONO_CEE_CONV_OVF_U2: case MONO_CEE_CONV_OVF_I4: case MONO_CEE_CONV_OVF_U4: ins->type = STACK_I4; ins->opcode += ovf3ops_op_map [src1->type]; break; case MONO_CEE_CONV_OVF_I_UN: case MONO_CEE_CONV_OVF_U_UN: ins->type = STACK_PTR; ins->opcode += ovf2ops_op_map [src1->type]; break; case MONO_CEE_CONV_OVF_I1_UN: case MONO_CEE_CONV_OVF_I2_UN: case MONO_CEE_CONV_OVF_I4_UN: case MONO_CEE_CONV_OVF_U1_UN: case MONO_CEE_CONV_OVF_U2_UN: case MONO_CEE_CONV_OVF_U4_UN: ins->type = STACK_I4; ins->opcode += ovf2ops_op_map [src1->type]; break; case MONO_CEE_CONV_U: ins->type = STACK_PTR; switch (src1->type) { case STACK_I4: ins->opcode = OP_ICONV_TO_U; break; case STACK_PTR: case STACK_MP: case STACK_OBJ: #if TARGET_SIZEOF_VOID_P == 8 ins->opcode = OP_LCONV_TO_U; #else ins->opcode = OP_MOVE; #endif break; case STACK_I8: ins->opcode = OP_LCONV_TO_U; break; case STACK_R8: if (TARGET_SIZEOF_VOID_P == 8) ins->opcode = OP_FCONV_TO_U8; else ins->opcode = OP_FCONV_TO_U4; break; case STACK_R4: if (TARGET_SIZEOF_VOID_P == 8) ins->opcode = OP_RCONV_TO_U8; else ins->opcode = OP_RCONV_TO_U4; break; } break; case MONO_CEE_CONV_I8: case MONO_CEE_CONV_U8: ins->type = STACK_I8; ins->opcode += unops_op_map [src1->type]; break; case MONO_CEE_CONV_OVF_I8: case MONO_CEE_CONV_OVF_U8: ins->type = STACK_I8; ins->opcode += ovf3ops_op_map [src1->type]; break; case MONO_CEE_CONV_OVF_U8_UN: case MONO_CEE_CONV_OVF_I8_UN: ins->type = STACK_I8; ins->opcode += ovf2ops_op_map [src1->type]; break; case MONO_CEE_CONV_R4: ins->type = cfg->r4_stack_type; ins->opcode += unops_op_map [src1->type]; break; case MONO_CEE_CONV_R8: ins->type = STACK_R8; ins->opcode += unops_op_map [src1->type]; break; case OP_CKFINITE: ins->type = STACK_R8; break; case MONO_CEE_CONV_U2: case MONO_CEE_CONV_U1: ins->type = STACK_I4; ins->opcode += ovfops_op_map [src1->type]; break; case MONO_CEE_CONV_I: case MONO_CEE_CONV_OVF_I: case MONO_CEE_CONV_OVF_U: ins->type 
= STACK_PTR; ins->opcode += ovfops_op_map [src1->type]; break; case MONO_CEE_ADD_OVF: case MONO_CEE_ADD_OVF_UN: case MONO_CEE_MUL_OVF: case MONO_CEE_MUL_OVF_UN: case MONO_CEE_SUB_OVF: case MONO_CEE_SUB_OVF_UN: ins->type = bin_num_table [src1->type] [src2->type]; ins->opcode += ovfops_op_map [src1->type]; if (ins->type == STACK_R8) ins->type = STACK_INV; break; case OP_LOAD_MEMBASE: ins->type = STACK_PTR; break; case OP_LOADI1_MEMBASE: case OP_LOADU1_MEMBASE: case OP_LOADI2_MEMBASE: case OP_LOADU2_MEMBASE: case OP_LOADI4_MEMBASE: case OP_LOADU4_MEMBASE: ins->type = STACK_PTR; break; case OP_LOADI8_MEMBASE: ins->type = STACK_I8; break; case OP_LOADR4_MEMBASE: ins->type = cfg->r4_stack_type; break; case OP_LOADR8_MEMBASE: ins->type = STACK_R8; break; default: g_error ("opcode 0x%04x not handled in type from op", ins->opcode); break; } if (ins->type == STACK_MP) { if (src1->type == STACK_MP) ins->klass = src1->klass; else ins->klass = mono_defaults.object_class; } } void mini_type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2) { type_from_op (cfg, ins, src1, src2); } static MonoClass* ldind_to_type (int op) { switch (op) { case MONO_CEE_LDIND_I1: return mono_defaults.sbyte_class; case MONO_CEE_LDIND_U1: return mono_defaults.byte_class; case MONO_CEE_LDIND_I2: return mono_defaults.int16_class; case MONO_CEE_LDIND_U2: return mono_defaults.uint16_class; case MONO_CEE_LDIND_I4: return mono_defaults.int32_class; case MONO_CEE_LDIND_U4: return mono_defaults.uint32_class; case MONO_CEE_LDIND_I8: return mono_defaults.int64_class; case MONO_CEE_LDIND_I: return mono_defaults.int_class; case MONO_CEE_LDIND_R4: return mono_defaults.single_class; case MONO_CEE_LDIND_R8: return mono_defaults.double_class; case MONO_CEE_LDIND_REF:return mono_defaults.object_class; //FIXME we should try to return a more specific type default: g_error ("Unknown ldind type %d", op); } } static MonoClass* stind_to_type (int op) { switch (op) { case MONO_CEE_STIND_I1: return mono_defaults.sbyte_class; case MONO_CEE_STIND_I2: return mono_defaults.int16_class; case MONO_CEE_STIND_I4: return mono_defaults.int32_class; case MONO_CEE_STIND_I8: return mono_defaults.int64_class; case MONO_CEE_STIND_I: return mono_defaults.int_class; case MONO_CEE_STIND_R4: return mono_defaults.single_class; case MONO_CEE_STIND_R8: return mono_defaults.double_class; case MONO_CEE_STIND_REF: return mono_defaults.object_class; default: g_error ("Unknown stind type %d", op); } } #if 0 static const char param_table [STACK_MAX] [STACK_MAX] = { {0}, }; static int check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig) { int i; if (sig->hasthis) { switch (args->type) { case STACK_I4: case STACK_I8: case STACK_R8: case STACK_VTYPE: case STACK_INV: return 0; } args++; } for (i = 0; i < sig->param_count; ++i) { switch (args [i].type) { case STACK_INV: return 0; case STACK_MP: if (m_type_is_byref (!sig->params [i])) return 0; continue; case STACK_OBJ: if (m_type_is_byref (sig->params [i])) return 0; switch (m_type_is_byref (sig->params [i])) { case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: break; default: return 0; } continue; case STACK_R8: if (m_type_is_byref (sig->params [i])) return 0; if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8) return 0; continue; case STACK_PTR: case STACK_I4: case STACK_I8: case STACK_VTYPE: break; } /*if (!param_table [args [i].type] [sig->params [i]->type]) return 0;*/ } 
return 1; } #endif /* * The got_var contains the address of the Global Offset Table when AOT * compiling. */ MonoInst * mono_get_got_var (MonoCompile *cfg) { if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only) return NULL; if (!cfg->got_var) { cfg->got_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); } return cfg->got_var; } static void mono_create_rgctx_var (MonoCompile *cfg) { if (!cfg->rgctx_var) { cfg->rgctx_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); /* force the var to be stack allocated */ if (!cfg->llvm_only) cfg->rgctx_var->flags |= MONO_INST_VOLATILE; } } static MonoInst * mono_get_mrgctx_var (MonoCompile *cfg) { g_assert (cfg->gshared); mono_create_rgctx_var (cfg); return cfg->rgctx_var; } static MonoInst * mono_get_vtable_var (MonoCompile *cfg) { g_assert (cfg->gshared); /* The mrgctx and the vtable are stored in the same var */ mono_create_rgctx_var (cfg); return cfg->rgctx_var; } static MonoType* type_from_stack_type (MonoInst *ins) { switch (ins->type) { case STACK_I4: return mono_get_int32_type (); case STACK_I8: return m_class_get_byval_arg (mono_defaults.int64_class); case STACK_PTR: return mono_get_int_type (); case STACK_R4: return m_class_get_byval_arg (mono_defaults.single_class); case STACK_R8: return m_class_get_byval_arg (mono_defaults.double_class); case STACK_MP: return m_class_get_this_arg (ins->klass); case STACK_OBJ: return mono_get_object_type (); case STACK_VTYPE: return m_class_get_byval_arg (ins->klass); default: g_error ("stack type %d to monotype not handled\n", ins->type); } return NULL; } MonoStackType mini_type_to_stack_type (MonoCompile *cfg, MonoType *t) { t = mini_type_get_underlying_type (t); switch (t->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: return STACK_I4; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: return STACK_PTR; case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: return STACK_OBJ; case MONO_TYPE_I8: case MONO_TYPE_U8: return STACK_I8; case MONO_TYPE_R4: return (MonoStackType)cfg->r4_stack_type; case MONO_TYPE_R8: return STACK_R8; case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: return STACK_VTYPE; case MONO_TYPE_GENERICINST: if (mono_type_generic_inst_is_valuetype (t)) return STACK_VTYPE; else return STACK_OBJ; break; default: g_assert_not_reached (); } return (MonoStackType)-1; } static MonoClass* array_access_to_klass (int opcode) { switch (opcode) { case MONO_CEE_LDELEM_U1: return mono_defaults.byte_class; case MONO_CEE_LDELEM_U2: return mono_defaults.uint16_class; case MONO_CEE_LDELEM_I: case MONO_CEE_STELEM_I: return mono_defaults.int_class; case MONO_CEE_LDELEM_I1: case MONO_CEE_STELEM_I1: return mono_defaults.sbyte_class; case MONO_CEE_LDELEM_I2: case MONO_CEE_STELEM_I2: return mono_defaults.int16_class; case MONO_CEE_LDELEM_I4: case MONO_CEE_STELEM_I4: return mono_defaults.int32_class; case MONO_CEE_LDELEM_U4: return mono_defaults.uint32_class; case MONO_CEE_LDELEM_I8: case MONO_CEE_STELEM_I8: return mono_defaults.int64_class; case MONO_CEE_LDELEM_R4: case MONO_CEE_STELEM_R4: return mono_defaults.single_class; case MONO_CEE_LDELEM_R8: case MONO_CEE_STELEM_R8: return mono_defaults.double_class; case MONO_CEE_LDELEM_REF: case MONO_CEE_STELEM_REF: return mono_defaults.object_class; default: g_assert_not_reached (); } return NULL; } /* * We try to share variables when possible */ static 
MonoInst * mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins) { MonoInst *res; int pos, vnum; MonoType *type; type = type_from_stack_type (ins); /* inlining can result in deeper stacks */ if (cfg->inline_depth || slot >= cfg->header->max_stack) return mono_compile_create_var (cfg, type, OP_LOCAL); pos = ins->type - 1 + slot * STACK_MAX; switch (ins->type) { case STACK_I4: case STACK_I8: case STACK_R8: case STACK_PTR: case STACK_MP: case STACK_OBJ: if ((vnum = cfg->intvars [pos])) return cfg->varinfo [vnum]; res = mono_compile_create_var (cfg, type, OP_LOCAL); cfg->intvars [pos] = res->inst_c0; break; default: res = mono_compile_create_var (cfg, type, OP_LOCAL); } return res; } static void mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key) { /* * Don't use this if a generic_context is set, since that means AOT can't * look up the method using just the image+token. * table == 0 means this is a reference made from a wrapper. */ if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) { MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken)); jump_info_token->image = image; jump_info_token->token = token; g_hash_table_insert (cfg->token_info_hash, key, jump_info_token); } } /* * This function is called to handle items that are left on the evaluation stack * at basic block boundaries. What happens is that we save the values to local variables * and we reload them later when first entering the target basic block (with the * handle_loaded_temps () function). * A single joint point will use the same variables (stored in the array bb->out_stack or * bb->in_stack, if the basic block is before or after the joint point). * * This function needs to be called _before_ emitting the last instruction of * the bb (i.e. before emitting a branch). * If the stack merge fails at a join point, cfg->unverifiable is set. */ static void handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count) { int i, bindex; MonoBasicBlock *bb = cfg->cbb; MonoBasicBlock *outb; MonoInst *inst, **locals; gboolean found; if (!count) return; if (cfg->verbose_level > 3) printf ("%d item(s) on exit from B%d\n", count, bb->block_num); if (!bb->out_scount) { bb->out_scount = count; //printf ("bblock %d has out:", bb->block_num); found = FALSE; for (i = 0; i < bb->out_count; ++i) { outb = bb->out_bb [i]; /* exception handlers are linked, but they should not be considered for stack args */ if (outb->flags & BB_EXCEPTION_HANDLER) continue; //printf (" %d", outb->block_num); if (outb->in_stack) { found = TRUE; bb->out_stack = outb->in_stack; break; } } //printf ("\n"); if (!found) { bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count); for (i = 0; i < count; ++i) { /* * try to reuse temps already allocated for this purpouse, if they occupy the same * stack slot and if they are of the same type. * This won't cause conflicts since if 'local' is used to * store one of the values in the in_stack of a bblock, then * the same variable will be used for the same outgoing stack * slot as well. * This doesn't work when inlining methods, since the bblocks * in the inlined methods do not inherit their in_stack from * the bblock they are inlined to. See bug #58863 for an * example. 
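 *
 * Illustrative note (editor's addition, not in the original source): if two
 * predecessor bblocks each leave a single int32 on the eval stack, both end
 * up storing slot 0 through the same variable returned by
 * mono_compile_get_interface_var (), so the join block reloads one shared
 * local rather than one temp per predecessor.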
*/ bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]); } } } for (i = 0; i < bb->out_count; ++i) { outb = bb->out_bb [i]; /* exception handlers are linked, but they should not be considered for stack args */ if (outb->flags & BB_EXCEPTION_HANDLER) continue; if (outb->in_scount) { if (outb->in_scount != bb->out_scount) { cfg->unverifiable = TRUE; return; } continue; /* check they are the same locals */ } outb->in_scount = count; outb->in_stack = bb->out_stack; } locals = bb->out_stack; cfg->cbb = bb; for (i = 0; i < count; ++i) { sp [i] = convert_value (cfg, locals [i]->inst_vtype, sp [i]); EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]); inst->cil_code = sp [i]->cil_code; sp [i] = locals [i]; if (cfg->verbose_level > 3) printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0); } /* * It is possible that the out bblocks already have in_stack assigned, and * the in_stacks differ. In this case, we will store to all the different * in_stacks. */ found = TRUE; bindex = 0; while (found) { /* Find a bblock which has a different in_stack */ found = FALSE; while (bindex < bb->out_count) { outb = bb->out_bb [bindex]; /* exception handlers are linked, but they should not be considered for stack args */ if (outb->flags & BB_EXCEPTION_HANDLER) { bindex++; continue; } if (outb->in_stack != locals) { for (i = 0; i < count; ++i) { sp [i] = convert_value (cfg, outb->in_stack [i]->inst_vtype, sp [i]); EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]); inst->cil_code = sp [i]->cil_code; sp [i] = locals [i]; if (cfg->verbose_level > 3) printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0); } locals = outb->in_stack; found = TRUE; break; } bindex ++; } } } MonoInst* mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data) { MonoInst *ins; if (cfg->compile_aot) { MONO_DISABLE_WARNING (4306) // 'type cast': conversion from 'MonoJumpInfoType' to 'MonoInst *' of greater size EMIT_NEW_AOTCONST (cfg, ins, patch_type, data); MONO_RESTORE_WARNING } else { MonoJumpInfo ji; gpointer target; ERROR_DECL (error); ji.type = patch_type; ji.data.target = data; target = mono_resolve_patch_target_ext (cfg->mem_manager, NULL, NULL, &ji, FALSE, error); mono_error_assert_ok (error); EMIT_NEW_PCONST (cfg, ins, target); } return ins; } static MonoInst* mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key) { int tls_offset = mono_tls_get_tls_offset (key); if (cfg->compile_aot) return NULL; if (tls_offset != -1 && mono_arch_have_fast_tls ()) { MonoInst *ins; MONO_INST_NEW (cfg, ins, OP_TLS_GET); ins->dreg = mono_alloc_preg (cfg); ins->inst_offset = tls_offset; return ins; } return NULL; } static MonoInst* mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key) { MonoInst *fast_tls = NULL; if (!mini_debug_options.use_fallback_tls) fast_tls = mono_create_fast_tls_getter (cfg, key); if (fast_tls) { MONO_ADD_INS (cfg->cbb, fast_tls); return fast_tls; } const MonoJitICallId jit_icall_id = mono_get_tls_key_to_jit_icall_id (key); if (cfg->compile_aot && !cfg->llvm_only) { MonoInst *addr; /* * tls getters are critical pieces of code and we don't want to resolve them * through the standard plt/tramp mechanism since we might expose ourselves * to crashes and infinite recursions. * Therefore the NOCALL part of MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, FALSE in is_plt_patch. 
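 *
 * Illustrative note (editor's addition, not in the original source): e.g. the
 * TLS_KEY_LMF_ADDR getter under non-LLVM AOT becomes an indirect call through
 * the NOCALL constant emitted below instead of a PLT call, so resolving the
 * getter can never recurse into trampoline code that might itself need the
 * TLS value.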
*/ EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, GUINT_TO_POINTER (jit_icall_id)); return mini_emit_calli (cfg, mono_icall_sig_ptr, NULL, addr, NULL, NULL); } else { return mono_emit_jit_icall_id (cfg, jit_icall_id, NULL); } } /* * emit_push_lmf: * * Emit IR to push the current LMF onto the LMF stack. */ static void emit_push_lmf (MonoCompile *cfg) { /* * Emit IR to push the LMF: * lmf_addr = <lmf_addr from tls> * lmf->lmf_addr = lmf_addr * lmf->prev_lmf = *lmf_addr * *lmf_addr = lmf */ MonoInst *ins, *lmf_ins; if (!cfg->lmf_ir) return; int lmf_reg, prev_lmf_reg; /* * Store lmf_addr in a variable, so it can be allocated to a global register. */ if (!cfg->lmf_addr_var) cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); if (!cfg->lmf_var) { MonoInst *lmf_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); lmf_var->flags |= MONO_INST_VOLATILE; lmf_var->flags |= MONO_INST_LMF; cfg->lmf_var = lmf_var; } lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR); g_assert (lmf_ins); lmf_ins->dreg = cfg->lmf_addr_var->dreg; EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL); lmf_reg = ins->dreg; prev_lmf_reg = alloc_preg (cfg); /* Save previous_lmf */ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0); if (cfg->deopt) /* Mark this as an LMFExt */ EMIT_NEW_BIALU_IMM (cfg, ins, OP_POR_IMM, prev_lmf_reg, prev_lmf_reg, 2); EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg); /* Set new lmf */ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg); } /* * emit_pop_lmf: * * Emit IR to pop the current LMF from the LMF stack. */ static void emit_pop_lmf (MonoCompile *cfg) { int lmf_reg, lmf_addr_reg; MonoInst *ins; if (!cfg->lmf_ir) return; EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL); lmf_reg = ins->dreg; int prev_lmf_reg; /* * Emit IR to pop the LMF: * *(lmf->lmf_addr) = lmf->prev_lmf */ /* This could be called before emit_push_lmf () */ if (!cfg->lmf_addr_var) cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); lmf_addr_reg = cfg->lmf_addr_var->dreg; prev_lmf_reg = alloc_preg (cfg); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf)); if (cfg->deopt) /* Clear out the bit set by push_lmf () to mark this as LMFExt */ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PXOR_IMM, prev_lmf_reg, prev_lmf_reg, 2); EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg); } /* * target_type_is_incompatible: * @cfg: MonoCompile context * * Check that the item @arg on the evaluation stack can be stored * in the target type (can be a local, or field, etc). * The cfg arg can be used to check if we need verification or just * validity checks. * * Returns: non-0 value if arg can't be stored on a target. */ static int target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg) { MonoType *simple_type; MonoClass *klass; if (m_type_is_byref (target)) { /* FIXME: check that the pointed to types match */ if (arg->type == STACK_MP) { /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. 
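 *
 * Illustrative note (editor's addition, not in the original source): with
 * this lowering, a byref to an enum whose underlying type is int32 is
 * accepted where int32& is expected, since both sides lower to the same
 * class; the size check that follows additionally lets a byref to a larger
 * primitive (e.g. an int32& source) stand in where a byref to a smaller one
 * (e.g. an int16& target) is expected.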
*/ MonoClass *target_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (mono_class_from_mono_type_internal (target)))); MonoClass *source_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass))); /* if the target is native int& or X* or same type */ if (target->type == MONO_TYPE_I || target->type == MONO_TYPE_PTR || target_class_lowered == source_class_lowered) return 0; /* Both are primitive type byrefs and the source points to a larger type that the destination */ if (MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (target_class_lowered)) && MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (source_class_lowered)) && mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered)) return 0; return 1; } if (arg->type == STACK_PTR) return 0; return 1; } simple_type = mini_get_underlying_type (target); switch (simple_type->type) { case MONO_TYPE_VOID: return 1; case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: if (arg->type != STACK_I4 && arg->type != STACK_PTR) return 1; return 0; case MONO_TYPE_PTR: /* STACK_MP is needed when setting pinned locals */ if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP) #if SIZEOF_VOID_P == 8 if (arg->type != STACK_I8) #endif return 1; return 0; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_FNPTR: /* * Some opcodes like ldloca returns 'transient pointers' which can be stored in * in native int. (#688008). */ if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP) return 1; return 0; case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: if (arg->type != STACK_OBJ) return 1; /* FIXME: check type compatibility */ return 0; case MONO_TYPE_I8: case MONO_TYPE_U8: if (arg->type != STACK_I8) #if SIZEOF_VOID_P == 8 if (arg->type != STACK_PTR) #endif return 1; return 0; case MONO_TYPE_R4: if (arg->type != cfg->r4_stack_type) return 1; return 0; case MONO_TYPE_R8: if (arg->type != STACK_R8) return 1; return 0; case MONO_TYPE_VALUETYPE: if (arg->type != STACK_VTYPE) return 1; klass = mono_class_from_mono_type_internal (simple_type); if (klass != arg->klass) return 1; return 0; case MONO_TYPE_TYPEDBYREF: if (arg->type != STACK_VTYPE) return 1; klass = mono_class_from_mono_type_internal (simple_type); if (klass != arg->klass) return 1; return 0; case MONO_TYPE_GENERICINST: if (mono_type_generic_inst_is_valuetype (simple_type)) { MonoClass *target_class; if (arg->type != STACK_VTYPE) return 1; klass = mono_class_from_mono_type_internal (simple_type); target_class = mono_class_from_mono_type_internal (target); /* The second cases is needed when doing partial sharing */ if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass)))) return 1; return 0; } else { if (arg->type != STACK_OBJ) return 1; /* FIXME: check type compatibility */ return 0; } case MONO_TYPE_VAR: case MONO_TYPE_MVAR: g_assert (cfg->gshared); if (mini_type_var_is_vt (simple_type)) { if (arg->type != STACK_VTYPE) return 1; } else { if (arg->type != STACK_OBJ) return 1; } return 0; default: g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type); } return 1; } /* * convert_value: * * Emit some implicit conversions which are not part of the .net spec, 
but are allowed by MS.NET. */ static MonoInst* convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins) { if (!cfg->r4fp) return ins; type = mini_get_underlying_type (type); switch (type->type) { case MONO_TYPE_R4: if (ins->type == STACK_R8) { int dreg = alloc_freg (cfg); MonoInst *conv; EMIT_NEW_UNALU (cfg, conv, OP_FCONV_TO_R4, dreg, ins->dreg); conv->type = STACK_R4; return conv; } break; case MONO_TYPE_R8: if (ins->type == STACK_R4) { int dreg = alloc_freg (cfg); MonoInst *conv; EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, ins->dreg); conv->type = STACK_R8; return conv; } break; default: break; } return ins; } /* * Prepare arguments for passing to a function call. * Return a non-zero value if the arguments can't be passed to the given * signature. * The type checks are not yet complete and some conversions may need * casts on 32 or 64 bit architectures. * * FIXME: implement this using target_type_is_incompatible () */ static gboolean check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args) { MonoType *simple_type; int i; if (sig->hasthis) { if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR) return TRUE; args++; } for (i = 0; i < sig->param_count; ++i) { if (m_type_is_byref (sig->params [i])) { if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR) return TRUE; continue; } simple_type = mini_get_underlying_type (sig->params [i]); handle_enum: switch (simple_type->type) { case MONO_TYPE_VOID: return TRUE; case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR) return TRUE; continue; case MONO_TYPE_I: case MONO_TYPE_U: if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ) return TRUE; continue; case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: if (args [i]->type != STACK_I4 && !(SIZEOF_VOID_P == 8 && args [i]->type == STACK_I8) && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ) return TRUE; continue; case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: if (args [i]->type != STACK_OBJ) return TRUE; continue; case MONO_TYPE_I8: case MONO_TYPE_U8: if (args [i]->type != STACK_I8 && !(SIZEOF_VOID_P == 8 && (args [i]->type == STACK_I4 || args [i]->type == STACK_PTR))) return TRUE; continue; case MONO_TYPE_R4: if (args [i]->type != cfg->r4_stack_type) return TRUE; continue; case MONO_TYPE_R8: if (args [i]->type != STACK_R8) return TRUE; continue; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (simple_type->data.klass)) { simple_type = mono_class_enum_basetype_internal (simple_type->data.klass); goto handle_enum; } if (args [i]->type != STACK_VTYPE) return TRUE; continue; case MONO_TYPE_TYPEDBYREF: if (args [i]->type != STACK_VTYPE) return TRUE; continue; case MONO_TYPE_GENERICINST: simple_type = m_class_get_byval_arg (simple_type->data.generic_class->container_class); goto handle_enum; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: /* gsharedvt */ if (args [i]->type != STACK_VTYPE) return TRUE; continue; default: g_error ("unknown type 0x%02x in check_call_signature", simple_type->type); } } return FALSE; } MonoJumpInfo * mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target) { MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo)); ji->ip.i = ip; ji->type = type; 
int
mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
{
	if (cfg->gshared)
		return mono_class_check_context_used (klass);
	else
		return 0;
}

int
mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
{
	if (cfg->gshared)
		return mono_method_check_context_used (method);
	else
		return 0;
}

/*
 * check_method_sharing:
 *
 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
 */
static void
check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
{
	gboolean pass_vtable = FALSE;
	gboolean pass_mrgctx = FALSE;

	if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cmethod->klass)) &&
			(mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
		gboolean sharable = FALSE;

		if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
			sharable = TRUE;

		/*
		 * Pass vtable iff target method might
		 * be shared, which means that sharing
		 * is enabled for its class and its
		 * context is sharable (and it's not a
		 * generic method).
		 */
		if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
			pass_vtable = TRUE;
	}

	if (mini_method_needs_mrgctx (cmethod)) {
		if (mini_method_is_default_method (cmethod))
			pass_vtable = FALSE;
		else
			g_assert (!pass_vtable);

		if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
			pass_mrgctx = TRUE;
		} else {
			if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature_internal (cmethod)))
				pass_mrgctx = TRUE;
		}
	}

	if (out_pass_vtable)
		*out_pass_vtable = pass_vtable;
	if (out_pass_mrgctx)
		*out_pass_mrgctx = pass_mrgctx;
}

static gboolean
direct_icalls_enabled (MonoCompile *cfg, MonoMethod *method)
{
	if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
		return FALSE;
	if (method && cfg->compile_aot && mono_aot_direct_icalls_enabled_for_method (cfg, method))
		return TRUE;
	/* LLVM on amd64 can't handle calls to non-32 bit addresses */
#ifdef TARGET_AMD64
	if (cfg->compile_llvm && !cfg->llvm_only)
		return FALSE;
#endif
	return FALSE;
}

MonoInst*
mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
{
	/*
	 * Call the jit icall without a wrapper if possible.
	 * The wrapper is needed to be able to do stack walks for asynchronously suspended
	 * threads when debugging.
	 */
	if (direct_icalls_enabled (cfg, NULL)) {
		int costs;

		if (!info->wrapper_method) {
			info->wrapper_method = mono_marshal_get_icall_wrapper (info, TRUE);
			mono_memory_barrier ();
		}

		/*
		 * Inline the wrapper method, which is basically a call to the C icall, and
		 * an exception check.
		 */
		costs = inline_method (cfg, info->wrapper_method, NULL, args, NULL, il_offset, TRUE, NULL);
		g_assert (costs > 0);
		g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));

		return args [0];
	}
	return mono_emit_jit_icall_id (cfg, mono_jit_icall_info_id (info), args);
}

static MonoInst*
mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
{
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		if ((fsig->pinvoke || LLVM_ENABLED) && !m_type_is_byref (fsig->ret)) {
			int widen_op = -1;

			/*
			 * Native code might return non register sized integers
			 * without initializing the upper bits.
*/ switch (mono_type_to_load_membase (cfg, fsig->ret)) { case OP_LOADI1_MEMBASE: widen_op = OP_ICONV_TO_I1; break; case OP_LOADU1_MEMBASE: widen_op = OP_ICONV_TO_U1; break; case OP_LOADI2_MEMBASE: widen_op = OP_ICONV_TO_I2; break; case OP_LOADU2_MEMBASE: widen_op = OP_ICONV_TO_U2; break; default: break; } if (widen_op != -1) { int dreg = alloc_preg (cfg); MonoInst *widen; EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg); widen->type = ins->type; ins = widen; } } } return ins; } static MonoInst* emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type); static void emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee) { MonoInst *args [2]; args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD); args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD); mono_emit_jit_icall (cfg, mono_throw_method_access, args); } static void emit_bad_image_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee) { mono_emit_jit_icall (cfg, mono_throw_bad_image, NULL); } static void emit_not_supported_failure (MonoCompile *cfg) { mono_emit_jit_icall (cfg, mono_throw_not_supported, NULL); } static void emit_invalid_program_with_msg (MonoCompile *cfg, MonoError *error_msg, MonoMethod *caller, MonoMethod *callee) { g_assert (!is_ok (error_msg)); char *str = mono_mem_manager_strdup (cfg->mem_manager, mono_error_get_message (error_msg)); MonoInst *iargs[1]; if (cfg->compile_aot) EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str); else EMIT_NEW_PCONST (cfg, iargs [0], str); mono_emit_jit_icall (cfg, mono_throw_invalid_program, iargs); } // FIXME Consolidate the multiple functions named get_method_nofail. static MonoMethod* get_method_nofail (MonoClass *klass, const char *method_name, int num_params, int flags) { MonoMethod *method; ERROR_DECL (error); method = mono_class_get_method_from_name_checked (klass, method_name, num_params, flags, error); mono_error_assert_ok (error); g_assertf (method, "Could not lookup method %s in %s", method_name, m_class_get_name (klass)); return method; } MonoMethod* mini_get_memcpy_method (void) { static MonoMethod *memcpy_method = NULL; if (!memcpy_method) { memcpy_method = get_method_nofail (mono_defaults.string_class, "memcpy", 3, 0); if (!memcpy_method) g_error ("Old corlib found. Install a new one"); } return memcpy_method; } MonoInst* mini_emit_storing_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value) { MonoInst *store; /* * Add a release memory barrier so the object contents are flushed * to memory before storing the reference into another object. 
*/ if (!mini_debug_options.weak_memory_model) mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); EMIT_NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, ptr->dreg, 0, value->dreg); mini_emit_write_barrier (cfg, ptr, value); return store; } void mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value) { int card_table_shift_bits; target_mgreg_t card_table_mask; guint8 *card_table; MonoInst *dummy_use; int nursery_shift_bits; size_t nursery_size; if (!cfg->gen_write_barriers) return; //method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]) card_table = mono_gc_get_target_card_table (&card_table_shift_bits, &card_table_mask); mono_gc_get_nursery (&nursery_shift_bits, &nursery_size); if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) { MonoInst *wbarrier; MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER); wbarrier->sreg1 = ptr->dreg; wbarrier->sreg2 = value->dreg; MONO_ADD_INS (cfg->cbb, wbarrier); } else if (card_table) { int offset_reg = alloc_preg (cfg); int card_reg; MonoInst *ins; /* * We emit a fast light weight write barrier. This always marks cards as in the concurrent * collector case, so, for the serial collector, it might slightly slow down nursery * collections. We also expect that the host system and the target system have the same card * table configuration, which is the case if they have the same pointer size. */ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits); if (card_table_mask) MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask); /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support * IMM's larger than 32bits. */ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL); card_reg = ins->dreg; MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg); MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1); } else { MonoMethod *write_barrier = mono_gc_get_write_barrier (); mono_emit_method_call (cfg, write_barrier, &ptr, NULL); } EMIT_NEW_DUMMY_USE (cfg, dummy_use, value); } MonoMethod* mini_get_memset_method (void) { static MonoMethod *memset_method = NULL; if (!memset_method) { memset_method = get_method_nofail (mono_defaults.string_class, "memset", 3, 0); if (!memset_method) g_error ("Old corlib found. 
Install a new one"); } return memset_method; } void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass) { MonoInst *iargs [3]; int n; guint32 align; MonoMethod *memset_method; MonoInst *size_ins = NULL; MonoInst *bzero_ins = NULL; static MonoMethod *bzero_method; /* FIXME: Optimize this for the case when dest is an LDADDR */ mono_class_init_internal (klass); if (mini_is_gsharedvt_klass (klass)) { size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE); bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO); if (!bzero_method) bzero_method = get_method_nofail (mono_defaults.string_class, "bzero_aligned_1", 2, 0); g_assert (bzero_method); iargs [0] = dest; iargs [1] = size_ins; mini_emit_calli (cfg, mono_method_signature_internal (bzero_method), iargs, bzero_ins, NULL, NULL); return; } klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (klass))); n = mono_class_value_size (klass, &align); if (n <= TARGET_SIZEOF_VOID_P * 8) { mini_emit_memset (cfg, dest->dreg, 0, n, 0, align); } else { memset_method = mini_get_memset_method (); iargs [0] = dest; EMIT_NEW_ICONST (cfg, iargs [1], 0); EMIT_NEW_ICONST (cfg, iargs [2], n); mono_emit_method_call (cfg, memset_method, iargs, NULL); } } static gboolean context_used_is_mrgctx (MonoCompile *cfg, int context_used) { /* gshared dim methods use an mrgctx */ if (mini_method_is_default_method (cfg->method)) return context_used != 0; return context_used & MONO_GENERIC_CONTEXT_USED_METHOD; } /* * emit_get_rgctx: * * Emit IR to return either the vtable or the mrgctx. */ static MonoInst* emit_get_rgctx (MonoCompile *cfg, int context_used) { MonoMethod *method = cfg->method; g_assert (cfg->gshared); /* Data whose context contains method type vars is stored in the mrgctx */ if (context_used_is_mrgctx (cfg, context_used)) { MonoInst *mrgctx_loc, *mrgctx_var; g_assert (cfg->rgctx_access == MONO_RGCTX_ACCESS_MRGCTX); if (!mini_method_is_default_method (method)) g_assert (method->is_inflated && mono_method_get_context (method)->method_inst); if (cfg->llvm_only) { mrgctx_var = mono_get_mrgctx_var (cfg); } else { /* Volatile */ mrgctx_loc = mono_get_mrgctx_var (cfg); g_assert (mrgctx_loc->flags & MONO_INST_VOLATILE); EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0); } return mrgctx_var; } /* * The rest of the entries are stored in vtable->runtime_generic_context so * have to return a vtable. 
*/ if (cfg->rgctx_access == MONO_RGCTX_ACCESS_MRGCTX) { MonoInst *mrgctx_loc, *mrgctx_var, *vtable_var; int vtable_reg; /* We are passed an mrgctx, return mrgctx->class_vtable */ if (cfg->llvm_only) { mrgctx_var = mono_get_mrgctx_var (cfg); } else { mrgctx_loc = mono_get_mrgctx_var (cfg); g_assert (mrgctx_loc->flags & MONO_INST_VOLATILE); EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0); } vtable_reg = alloc_preg (cfg); EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable)); vtable_var->type = STACK_PTR; return vtable_var; } else if (cfg->rgctx_access == MONO_RGCTX_ACCESS_VTABLE) { MonoInst *vtable_loc, *vtable_var; /* We are passed a vtable, return it */ if (cfg->llvm_only) { vtable_var = mono_get_vtable_var (cfg); } else { vtable_loc = mono_get_vtable_var (cfg); g_assert (vtable_loc->flags & MONO_INST_VOLATILE); EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0); } vtable_var->type = STACK_PTR; return vtable_var; } else { MonoInst *ins, *this_ins; int vtable_reg; /* We are passed a this pointer, return this->vtable */ EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, mono_get_object_type ()); vtable_reg = alloc_preg (cfg); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); return ins; } } static MonoJumpInfoRgctxEntry * mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type) { MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry)); if (in_mrgctx) res->d.method = method; else res->d.klass = method->klass; res->in_mrgctx = in_mrgctx; res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo)); res->data->type = patch_type; res->data->data.target = patch_data; res->info_type = info_type; return res; } static MonoInst* emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type); static MonoInst* emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry) { MonoInst *call; MonoInst *slot_ins; EMIT_NEW_AOTCONST (cfg, slot_ins, MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry); // Can't add basic blocks during interp entry mode if (cfg->disable_inline_rgctx_fetch || cfg->interp_entry_only) { MonoInst *args [2] = { rgctx, slot_ins }; if (entry->in_mrgctx) call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args); else call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args); return call; } MonoBasicBlock *slowpath_bb, *end_bb; MonoInst *ins, *res; int rgctx_reg, res_reg; /* * rgctx = vtable->runtime_generic_context; * if (rgctx) { * val = rgctx [slot + 1]; * if (val) * return val; * } * <slowpath> */ NEW_BBLOCK (cfg, end_bb); NEW_BBLOCK (cfg, slowpath_bb); if (entry->in_mrgctx) { rgctx_reg = rgctx->dreg; } else { rgctx_reg = alloc_preg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context)); // FIXME: Avoid this check by allocating the table when the vtable is created etc. 
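		/*
		 * Rough summary of the fast path built below: only slots that fit
		 * in the first chunk of the rgctx table (size queried via
		 * mono_class_rgctx_get_array_size (0, ...)) are handled inline; a
		 * NULL table, an out-of-range slot or a NULL entry all branch to
		 * the out-of-line mono_fill_class_rgctx/mono_fill_method_rgctx
		 * helpers.
		 */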
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb); } int table_size = mono_class_rgctx_get_array_size (0, entry->in_mrgctx); if (entry->in_mrgctx) table_size -= MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / TARGET_SIZEOF_VOID_P; MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_ins->dreg, table_size - 1); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBGE, slowpath_bb); int shifted_slot_reg = alloc_ireg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISHL_IMM, shifted_slot_reg, slot_ins->dreg, TARGET_SIZEOF_VOID_P == 8 ? 3 : 2); int addr_reg = alloc_preg (cfg); EMIT_NEW_UNALU (cfg, ins, OP_MOVE, addr_reg, rgctx_reg); EMIT_NEW_BIALU (cfg, ins, OP_PADD, addr_reg, addr_reg, shifted_slot_reg); int val_reg = alloc_preg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, addr_reg, TARGET_SIZEOF_VOID_P + (entry->in_mrgctx ? MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT : 0)); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb); res_reg = alloc_preg (cfg); EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, val_reg); res = ins; MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); MONO_START_BB (cfg, slowpath_bb); slowpath_bb->out_of_line = TRUE; MonoInst *args[2] = { rgctx, slot_ins }; if (entry->in_mrgctx) call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args); else call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args); EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, call->dreg); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); MONO_START_BB (cfg, end_bb); return res; } /* * emit_rgctx_fetch: * * Emit IR to load the value of the rgctx entry ENTRY from the rgctx. */ static MonoInst* emit_rgctx_fetch (MonoCompile *cfg, int context_used, MonoJumpInfoRgctxEntry *entry) { MonoInst *rgctx = emit_get_rgctx (cfg, context_used); if (cfg->llvm_only) return emit_rgctx_fetch_inline (cfg, rgctx, entry); else return mini_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, mono_icall_sig_ptr_ptr, &rgctx); } /* * mini_emit_get_rgctx_klass: * * Emit IR to load the property RGCTX_TYPE of KLASS. If context_used is 0, emit * normal constants, else emit a load from the rgctx. 
 */
MonoInst*
mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type)
{
	if (!context_used) {
		MonoInst *ins;

		switch (rgctx_type) {
		case MONO_RGCTX_INFO_KLASS:
			EMIT_NEW_CLASSCONST (cfg, ins, klass);
			return ins;
		case MONO_RGCTX_INFO_VTABLE: {
			MonoVTable *vtable = mono_class_vtable_checked (klass, cfg->error);
			CHECK_CFG_ERROR;
			EMIT_NEW_VTABLECONST (cfg, ins, vtable);
			return ins;
		}
		default:
			g_assert_not_reached ();
		}
	}

	// It's cheaper to load these from the gsharedvt info struct
	if (cfg->llvm_only && cfg->gsharedvt)
		return mini_emit_get_gsharedvt_info_klass (cfg, klass, rgctx_type);

	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_CLASS, klass, rgctx_type);
	return emit_rgctx_fetch (cfg, context_used, entry);

mono_error_exit:
	return NULL;
}

static MonoInst*
emit_get_rgctx_sig (MonoCompile *cfg, int context_used, MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
{
	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
	return emit_rgctx_fetch (cfg, context_used, entry);
}

static MonoInst*
emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used, MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
{
	MonoJumpInfoGSharedVtCall *call_info;
	MonoJumpInfoRgctxEntry *entry;

	call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
	call_info->sig = sig;
	call_info->method = cmethod;

	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
	return emit_rgctx_fetch (cfg, context_used, entry);
}

/*
 * emit_get_rgctx_virt_method:
 *
 * Return data for method VIRT_METHOD for a receiver of type KLASS.
 */
static MonoInst*
emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used, MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
{
	MonoJumpInfoVirtMethod *info;
	MonoJumpInfoRgctxEntry *entry;

	if (context_used == -1)
		context_used = mono_class_check_context_used (klass) | mono_method_check_context_used (virt_method);

	info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
	info->klass = klass;
	info->method = virt_method;

	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
	return emit_rgctx_fetch (cfg, context_used, entry);
}

static MonoInst*
emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
{
	MonoJumpInfoRgctxEntry *entry;

	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
	return emit_rgctx_fetch (cfg, context_used, entry);
}
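/*
 * Illustrative use of these fetch helpers (hypothetical caller):
 *
 *   MonoInst *m_ins = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
 *
 * With context_used == 0 this folds to a constant; otherwise it becomes
 * an rgctx fetch (inlined fast path on llvm-only, an abs call elsewhere).
 */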
/*
 * emit_get_rgctx_method:
 *
 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
 * normal constants, else emit a load from the rgctx.
 */
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
{
	if (context_used == -1)
		context_used = mono_method_check_context_used (cmethod);

	if (!context_used) {
		MonoInst *ins;

		switch (rgctx_type) {
		case MONO_RGCTX_INFO_METHOD:
			EMIT_NEW_METHODCONST (cfg, ins, cmethod);
			return ins;
		case MONO_RGCTX_INFO_METHOD_RGCTX:
			EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
			return ins;
		case MONO_RGCTX_INFO_METHOD_FTNDESC:
			EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHOD_FTNDESC, cmethod);
			return ins;
		case MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY:
			EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_LLVMONLY_INTERP_ENTRY, cmethod);
			return ins;
		default:
			g_assert_not_reached ();
		}
	} else {
		// It's cheaper to load these from the gsharedvt info struct
		if (cfg->llvm_only && cfg->gsharedvt)
			return emit_get_gsharedvt_info (cfg, cmethod, rgctx_type);

		MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
		return emit_rgctx_fetch (cfg, context_used, entry);
	}
}

static MonoInst*
emit_get_rgctx_field (MonoCompile *cfg, int context_used, MonoClassField *field, MonoRgctxInfoType rgctx_type)
{
	// It's cheaper to load these from the gsharedvt info struct
	if (cfg->llvm_only && cfg->gsharedvt)
		return emit_get_gsharedvt_info (cfg, field, rgctx_type);

	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_FIELD, field, rgctx_type);
	return emit_rgctx_fetch (cfg, context_used, entry);
}

MonoInst*
mini_emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
{
	return emit_get_rgctx_method (cfg, context_used, cmethod, rgctx_type);
}

static int
get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
{
	MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
	MonoRuntimeGenericContextInfoTemplate *template_;
	int i, idx;

	g_assert (info);

	for (i = 0; i < info->num_entries; ++i) {
		MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];

		if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
			return i;
	}

	if (info->num_entries == info->count_entries) {
		MonoRuntimeGenericContextInfoTemplate *new_entries;
		int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;

		new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);

		memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
		info->entries = new_entries;
		info->count_entries = new_count_entries;
	}

	idx = info->num_entries;
	template_ = &info->entries [idx];
	template_->info_type = rgctx_type;
	template_->data = data;

	info->num_entries ++;

	return idx;
}

/*
 * emit_get_gsharedvt_info:
 *
 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
*/ static MonoInst* emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type) { MonoInst *ins; int idx, dreg; idx = get_gsharedvt_info_slot (cfg, data, rgctx_type); /* Load info->entries [idx] */ dreg = alloc_preg (cfg); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P)); return ins; } MonoInst* mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type) { return emit_get_gsharedvt_info (cfg, m_class_get_byval_arg (klass), rgctx_type); } /* * On return the caller must check @klass for load errors. */ static void emit_class_init (MonoCompile *cfg, MonoClass *klass) { MonoInst *vtable_arg; int context_used; context_used = mini_class_check_context_used (cfg, klass); if (context_used) { vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VTABLE); } else { MonoVTable *vtable = mono_class_vtable_checked (klass, cfg->error); if (!is_ok (cfg->error)) { mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); return; } EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable); } if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) { MonoInst *ins; /* * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode, * so this doesn't have to clobber any regs and it doesn't break basic blocks. */ MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT); ins->sreg1 = vtable_arg->dreg; MONO_ADD_INS (cfg->cbb, ins); } else { int inited_reg; MonoBasicBlock *inited_bb; inited_reg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized)); NEW_BBLOCK (cfg, inited_bb); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb); cfg->cbb->out_of_line = TRUE; mono_emit_jit_icall (cfg, mono_generic_class_init, &vtable_arg); MONO_START_BB (cfg, inited_bb); } } static void emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack) { MonoInst *ins; if (cfg->gen_seq_points && cfg->method == method) { NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc); if (nonempty_stack) ins->flags |= MONO_INST_NONEMPTY_STACK; MONO_ADD_INS (cfg->cbb, ins); cfg->last_seq_point = ins; } } void mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check) { if (mini_debug_options.better_cast_details) { int vtable_reg = alloc_preg (cfg); int klass_reg = alloc_preg (cfg); MonoBasicBlock *is_null_bb = NULL; MonoInst *tls_get; if (null_check) { NEW_BBLOCK (cfg, is_null_bb); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb); } tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS); if (!tls_get) { fprintf (stderr, "error: --debug=casts not supported on this platform.\n."); exit (1); } MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable)); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass)); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg); MonoInst *class_ins = mini_emit_get_rgctx_klass (cfg, mini_class_check_context_used (cfg, klass), klass, MONO_RGCTX_INFO_KLASS); MONO_EMIT_NEW_STORE_MEMBASE (cfg, 
OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), class_ins->dreg); if (null_check) MONO_START_BB (cfg, is_null_bb); } } void mini_reset_cast_details (MonoCompile *cfg) { /* Reset the variables holding the cast details */ if (mini_debug_options.better_cast_details) { MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS); /* It is enough to reset the from field */ MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0); } } /* * On return the caller must check @array_class for load errors */ static void mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class) { int vtable_reg = alloc_preg (cfg); int context_used; context_used = mini_class_check_context_used (cfg, array_class); mini_save_cast_details (cfg, array_class, obj->dreg, FALSE); MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); if (context_used) { MonoInst *vtable_ins; vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE); MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg); } else { if (cfg->compile_aot) { int vt_reg; MonoVTable *vtable; if (!(vtable = mono_class_vtable_checked (array_class, cfg->error))) { mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); return; } vt_reg = alloc_preg (cfg); MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable); MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg); } else { MonoVTable *vtable; if (!(vtable = mono_class_vtable_checked (array_class, cfg->error))) { mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); return; } MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, (gssize)vtable); } } MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException"); mini_reset_cast_details (cfg); } /** * Handles unbox of a Nullable<T>. If context_used is non zero, then shared * generic code is generated. */ static MonoInst* handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used) { MonoMethod* method; if (m_class_is_enumtype (mono_class_get_nullable_param_internal (klass))) method = get_method_nofail (klass, "UnboxExact", 1, 0); else method = get_method_nofail (klass, "Unbox", 1, 0); g_assert (method); if (context_used) { MonoInst *rgctx, *addr; /* FIXME: What if the class is shared? We might not have to get the address of the method from the RGCTX. 
*/ if (cfg->llvm_only) { addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_FTNDESC); cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature_internal (method)); return mini_emit_llvmonly_calli (cfg, mono_method_signature_internal (method), &val, addr); } else { addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_GENERIC_METHOD_CODE); rgctx = emit_get_rgctx (cfg, context_used); return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx); } } else { gboolean pass_vtable, pass_mrgctx; MonoInst *rgctx_arg = NULL; check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx); g_assert (!pass_mrgctx); if (pass_vtable) { MonoVTable *vtable = mono_class_vtable_checked (method->klass, cfg->error); mono_error_assert_ok (cfg->error); EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable); } return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg); } } MonoInst* mini_handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst *val, int context_used) { MonoInst *add; int obj_reg; int vtable_reg = alloc_dreg (cfg ,STACK_PTR); int klass_reg = alloc_dreg (cfg ,STACK_PTR); int eclass_reg = alloc_dreg (cfg ,STACK_PTR); int rank_reg = alloc_dreg (cfg ,STACK_I4); obj_reg = val->dreg; MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable)); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank)); /* FIXME: generics */ g_assert (m_class_get_rank (klass) == 0); // Check rank == 0 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0); MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException"); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass)); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, m_class_offsetof_element_class ()); if (context_used) { MonoInst *element_class; /* This assertion is from the unboxcast insn */ g_assert (m_class_get_rank (klass) == 0); element_class = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_ELEMENT_KLASS); MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg); MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException"); } else { mini_save_cast_details (cfg, m_class_get_element_class (klass), obj_reg, FALSE); mini_emit_class_check (cfg, eclass_reg, m_class_get_element_class (klass)); mini_reset_cast_details (cfg); } NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, MONO_ABI_SIZEOF (MonoObject)); MONO_ADD_INS (cfg->cbb, add); add->type = STACK_MP; add->klass = klass; return add; } static MonoInst* handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj) { MonoInst *addr, *klass_inst, *is_ref, *args[16]; MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb; MonoInst *ins; int dreg, addr_reg; klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS); /* obj */ args [0] = obj; /* klass */ args [1] = klass_inst; /* CASTCLASS */ obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args); NEW_BBLOCK (cfg, is_ref_bb); NEW_BBLOCK (cfg, is_nullable_bb); NEW_BBLOCK (cfg, end_bb); is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, 
		is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);

	/* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
	addr_reg = alloc_dreg (cfg, STACK_MP);

	/* Non-ref case */
	/* UNBOX */
	NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, MONO_ABI_SIZEOF (MonoObject));
	MONO_ADD_INS (cfg->cbb, addr);

	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	/* Ref case */
	MONO_START_BB (cfg, is_ref_bb);

	/* Save the ref to a temporary */
	dreg = alloc_ireg (cfg);
	EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, m_class_get_byval_arg (klass));
	addr->dreg = addr_reg;
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	/* Nullable case */
	MONO_START_BB (cfg, is_nullable_bb);

	{
		MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
		MonoInst *unbox_call;
		MonoMethodSignature *unbox_sig;

		unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
		unbox_sig->ret = m_class_get_byval_arg (klass);
		unbox_sig->param_count = 1;
		unbox_sig->params [0] = mono_get_object_type ();

		if (cfg->llvm_only)
			unbox_call = mini_emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
		else
			unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);

		EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, m_class_get_byval_arg (klass));
		addr->dreg = addr_reg;
	}

	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	/* End */
	MONO_START_BB (cfg, end_bb);

	/* LDOBJ */
	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr_reg, 0);

	return ins;
}
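/*
 * Shape of the unbox expansion above (summary): based on the class's
 * MONO_RGCTX_INFO_CLASS_BOX_TYPE, addr_reg ends up pointing either just
 * past the MonoObject header (vtype case), at a temporary holding the
 * reference (ref case), or at the value returned by the class-specific
 * Nullable unbox helper; the trailing LDOBJ then loads through addr_reg.
 */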
/*
 * Returns NULL and sets the cfg exception on error.
 */
static MonoInst*
handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
{
	MonoInst *iargs [2];
	MonoJitICallId alloc_ftn;

	if (mono_class_get_flags (klass) & TYPE_ATTRIBUTE_ABSTRACT) {
		char* full_name = mono_type_get_full_name (klass);
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
		mono_error_set_member_access (cfg->error, "Cannot create an abstract class: %s", full_name);
		g_free (full_name);
		return NULL;
	}

	if (context_used) {
		gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);

		iargs [0] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VTABLE);
		alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new_specific;

		if (managed_alloc) {
			if (known_instance_size) {
				int size = mono_class_instance_size (klass);
				if (size < MONO_ABI_SIZEOF (MonoObject))
					g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));

				EMIT_NEW_ICONST (cfg, iargs [1], size);
			}
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}

		return mono_emit_jit_icall_id (cfg, alloc_ftn, iargs);
	}

	if (cfg->compile_aot && cfg->cbb->out_of_line && m_class_get_type_token (klass) && m_class_get_image (klass) == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
		/* This happens often in argument checking code, eg. throw new FooException... */
		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (m_class_get_type_token (klass)));
		alloc_ftn = MONO_JIT_ICALL_mono_helper_newobj_mscorlib;
	} else {
		MonoVTable *vtable = mono_class_vtable_checked (klass, cfg->error);

		if (!is_ok (cfg->error)) {
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
			return NULL;
		}

		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);

		if (managed_alloc) {
			int size = mono_class_instance_size (klass);
			if (size < MONO_ABI_SIZEOF (MonoObject))
				g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));

			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			EMIT_NEW_ICONST (cfg, iargs [1], size);

			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}
		alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new_specific;
		EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
	}

	return mono_emit_jit_icall_id (cfg, alloc_ftn, iargs);
}

/*
 * Returns NULL and sets the cfg exception on error.
 */
MonoInst*
mini_emit_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
{
	MonoInst *alloc, *ins;

	if (G_UNLIKELY (m_class_is_byreflike (klass))) {
		mono_error_set_bad_image (cfg->error, m_class_get_image (cfg->method->klass), "Cannot box IsByRefLike type '%s.%s'", m_class_get_name_space (klass), m_class_get_name (klass));
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
		return NULL;
	}

	if (mono_class_is_nullable (klass)) {
		MonoMethod* method = get_method_nofail (klass, "Box", 1, 0);

		if (context_used) {
			if (cfg->llvm_only) {
				MonoMethodSignature *sig = mono_method_signature_internal (method);
				MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_FTNDESC);

				cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, sig);
				return mini_emit_llvmonly_calli (cfg, sig, &val, addr);
			} else {
				/* FIXME: What if the class is shared? We might not have to get the method address from the RGCTX.
*/ MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_GENERIC_METHOD_CODE); MonoInst *rgctx = emit_get_rgctx (cfg, context_used); return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx); } } else { gboolean pass_vtable, pass_mrgctx; MonoInst *rgctx_arg = NULL; check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx); g_assert (!pass_mrgctx); if (pass_vtable) { MonoVTable *vtable = mono_class_vtable_checked (method->klass, cfg->error); mono_error_assert_ok (cfg->error); EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable); } return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg); } } if (mini_is_gsharedvt_klass (klass)) { MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb; MonoInst *res, *is_ref, *src_var, *addr; int dreg; dreg = alloc_ireg (cfg); NEW_BBLOCK (cfg, is_ref_bb); NEW_BBLOCK (cfg, is_nullable_bb); NEW_BBLOCK (cfg, end_bb); is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb); /* Non-ref case */ alloc = handle_alloc (cfg, klass, TRUE, context_used); if (!alloc) return NULL; EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg); ins->opcode = OP_STOREV_MEMBASE; EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg); res->type = STACK_OBJ; res->klass = klass; MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); /* Ref case */ MONO_START_BB (cfg, is_ref_bb); /* val is a vtype, so has to load the value manually */ src_var = get_vreg_to_inst (cfg, val->dreg); if (!src_var) src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, val->dreg); EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); /* Nullable case */ MONO_START_BB (cfg, is_nullable_bb); { MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_BOX); MonoInst *box_call; MonoMethodSignature *box_sig; /* * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot * construct that method at JIT time, so have to do things by hand. 
			 */
			box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
			box_sig->ret = mono_get_object_type ();
			box_sig->param_count = 1;
			box_sig->params [0] = m_class_get_byval_arg (klass);

			if (cfg->llvm_only)
				box_call = mini_emit_llvmonly_calli (cfg, box_sig, &val, addr);
			else
				box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
			EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
			res->type = STACK_OBJ;
			res->klass = klass;
		}

		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, end_bb);

		return res;
	}

	alloc = handle_alloc (cfg, klass, TRUE, context_used);
	if (!alloc)
		return NULL;

	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg);
	return alloc;
}

static gboolean
method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
{
	if (cmethod->klass == mono_defaults.systemtype_class) {
		if (!strcmp (cmethod->name, "GetType"))
			return TRUE;
	}

	/*
	 * In corelib code, methods which need to do a stack walk declare a StackCrawlMark local and pass it as an
	 * argument until it reaches an icall. It's hard to detect which methods do that especially with
	 * StackCrawlMark.LookForMyCallersCaller, so for now, just hardcode the classes which contain the public
	 * methods whose caller is needed.
	 */
	if (mono_is_corlib_image (m_class_get_image (cmethod->klass))) {
		const char *cname = m_class_get_name (cmethod->klass);
		if (!strcmp (cname, "Assembly") ||
			!strcmp (cname, "AssemblyLoadContext") ||
			(!strcmp (cname, "Activator"))) {
			if (!strcmp (cmethod->name, "op_Equality"))
				return FALSE;
			return TRUE;
		}
	}
	return FALSE;
}

G_GNUC_UNUSED MonoInst*
mini_handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, int enum_val_reg, MonoInst *enum_flag)
{
	MonoType *enum_type = mono_type_get_underlying_type (m_class_get_byval_arg (klass));
	guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
	gboolean is_i4;

	switch (enum_type->type) {
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
#if SIZEOF_REGISTER == 8
	case MONO_TYPE_I:
	case MONO_TYPE_U:
#endif
		is_i4 = FALSE;
		break;
	default:
		is_i4 = TRUE;
		break;
	}

	{
		MonoInst *load = NULL, *and_, *cmp, *ceq;
		int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
		int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
		int dest_reg = alloc_ireg (cfg);

		if (enum_this) {
			EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
		} else {
			g_assert (enum_val_reg != -1);
			enum_reg = enum_val_reg;
		}
		EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
		EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
		EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);

		ceq->type = STACK_I4;

		if (!is_i4) {
			load = load ? mono_decompose_opcode (cfg, load) : NULL;
			and_ = mono_decompose_opcode (cfg, and_);
			cmp = mono_decompose_opcode (cfg, cmp);
			ceq = mono_decompose_opcode (cfg, ceq);
		}

		return ceq;
	}
}
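/*
 * Note on the expansion above: HasFlag is lowered to the usual
 * ((value & flag) == flag) pattern, using I4 or I8 ALU opcodes depending
 * on the underlying enum type; the 64-bit forms are decomposed on the
 * spot via mono_decompose_opcode ().
 */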
static void
emit_set_deopt_il_offset (MonoCompile *cfg, int offset)
{
	MonoInst *ins;

	if (!(cfg->deopt && cfg->method == cfg->current_method))
		return;

	EMIT_NEW_VARLOADA (cfg, ins, cfg->il_state_var, NULL);
	MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, ins->dreg, MONO_STRUCT_OFFSET (MonoMethodILState, il_offset), offset);
}

static MonoInst*
emit_get_rgctx_dele_tramp (MonoCompile *cfg, int context_used, MonoClass *klass, MonoMethod *virt_method, gboolean _virtual, MonoRgctxInfoType rgctx_type)
{
	MonoDelegateClassMethodPair *info;
	MonoJumpInfoRgctxEntry *entry;

	info = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
	info->klass = klass;
	info->method = virt_method;
	info->is_virtual = _virtual;

	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, info, rgctx_type);
	return emit_rgctx_fetch (cfg, context_used, entry);
}

/*
 * Returns NULL and sets the cfg exception on error.
 */
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int target_method_context_used, int invoke_context_used, gboolean virtual_)
{
	MonoInst *ptr;
	int dreg;
	gpointer trampoline;
	MonoInst *obj, *tramp_ins;
	guint8 **code_slot;

	if (virtual_ && !cfg->llvm_only) {
		MonoMethod *invoke = mono_get_delegate_invoke_internal (klass);
		g_assert (invoke);

		//FIXME verify & fix any issue with removing invoke_context_used restriction
		if (invoke_context_used || !mono_get_delegate_virtual_invoke_impl (mono_method_signature_internal (invoke), target_method_context_used ? NULL : method))
			return NULL;
	}
	obj = handle_alloc (cfg, klass, FALSE, invoke_context_used);
	if (!obj)
		return NULL;

	/* Inline the contents of mono_delegate_ctor */

	/* Set target field */
	/* Optimize away setting of NULL target */
	if (!MONO_INS_IS_PCONST_NULL (target)) {
		if (!(method->flags & METHOD_ATTRIBUTE_STATIC)) {
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target->dreg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
		}
		if (!mini_debug_options.weak_memory_model)
			mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
		if (cfg->gen_write_barriers) {
			dreg = alloc_preg (cfg);
			EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
			mini_emit_write_barrier (cfg, ptr, target);
		}
	}

	/* Set method field */
	if (!(target_method_context_used || invoke_context_used) && !cfg->llvm_only) {
		//If compiling with gsharing enabled, it's faster to load the method from the delegate trampoline info than to use a rgctx slot
		MonoInst *method_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
	}

	if (cfg->llvm_only) {
		if (virtual_) {
			MonoInst *args [ ] = {
				obj,
				target,
				emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD)
			};
			mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate_virtual, args);
			return obj;
		}
	}

	/*
	 * To avoid looking up the compiled code belonging to the target method
	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
	 * store it, and we fill it after the method has been compiled.
	 */
	if (!method->dynamic && !cfg->llvm_only) {
		MonoInst *code_slot_ins;

		if (target_method_context_used) {
			code_slot_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
		} else {
			MonoJitMemoryManager *jit_mm = (MonoJitMemoryManager*)cfg->jit_mm;

			jit_mm_lock (jit_mm);
			if (!jit_mm->method_code_hash)
				jit_mm->method_code_hash = g_hash_table_new (NULL, NULL);
			code_slot = (guint8 **)g_hash_table_lookup (jit_mm->method_code_hash, method);
			if (!code_slot) {
				code_slot = (guint8 **)mono_mem_manager_alloc0 (jit_mm->mem_manager, sizeof (gpointer));
				g_hash_table_insert (jit_mm->method_code_hash, method, code_slot);
			}
			jit_mm_unlock (jit_mm);

			code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
		}
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
	}

	if (target_method_context_used || invoke_context_used) {
		tramp_ins = emit_get_rgctx_dele_tramp (cfg, target_method_context_used | invoke_context_used, klass, method, virtual_, MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO);

		//This is emitted as a constant store for the non-shared case.
		//We copy from the delegate trampoline info as it's faster than a rgctx fetch
		dreg = alloc_preg (cfg);
		if (!cfg->llvm_only) {
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method));
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), dreg);
		}
	} else if (cfg->compile_aot) {
		MonoDelegateClassMethodPair *del_tramp;

		del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
		del_tramp->klass = klass;
		del_tramp->method = method;
		del_tramp->is_virtual = virtual_;
		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
	} else {
		if (virtual_)
			trampoline = mono_create_delegate_virtual_trampoline (klass, method);
		else
			trampoline = mono_create_delegate_trampoline_info (klass, method);
		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
	}

	if (cfg->llvm_only) {
		MonoInst *args [ ] = { obj, tramp_ins };
		mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate, args);
		return obj;
	}

	/* Set invoke_impl field */
	if (virtual_) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
	} else {
		dreg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);

		dreg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
	}

	dreg = alloc_preg (cfg);
	MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);

	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */

	return obj;
}
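/*
 * Summary of the inlined ctor above: it fills in MonoDelegate.target,
 * .method, .method_code, .invoke_impl, .method_ptr and .method_is_virtual
 * directly, mirroring mono_delegate_ctor (); the remaining checks are
 * deferred to the delegate trampoline, as noted in the code.
 */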
/*
 * handle_constrained_gsharedvt_call:
 *
 * Handle constrained calls where the receiver is a gsharedvt type.
 * Return the instruction representing the call. Set the cfg exception on failure.
 */
static MonoInst*
handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class, gboolean *ref_emit_widen)
{
	MonoInst *ins = NULL;
	gboolean emit_widen = *ref_emit_widen;
	gboolean supported;

	/*
	 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
	 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
	 * pack the arguments into an array, and do the rest of the work in an icall.
*/ supported = ((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!m_class_is_valuetype (cmethod->klass) && m_class_get_image (cmethod->klass) != mono_defaults.corlib)); if (supported) supported = (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)); if (supported) { if (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1)) { supported = TRUE; } else { supported = TRUE; for (int i = 0; i < fsig->param_count; ++i) { if (!(m_type_is_byref (fsig->params [i]) || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_IS_REFERENCE (fsig->params [i]) || MONO_TYPE_ISSTRUCT (fsig->params [i]) || mini_is_gsharedvt_type (fsig->params [i]))) supported = FALSE; } } } if (supported) { MonoInst *args [5]; /* * This case handles calls to * - object:ToString()/Equals()/GetHashCode(), * - System.IComparable<T>:CompareTo() * - System.IEquatable<T>:Equals () * plus some simple interface calls enough to support AsyncTaskMethodBuilder. */ if (fsig->hasthis) args [0] = sp [0]; else EMIT_NEW_PCONST (cfg, args [0], NULL); args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD); args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS); /* !fsig->hasthis is for the wrapper for the Object.GetType () icall or static virtual methods */ if ((fsig->hasthis || m_method_is_static (cmethod)) && fsig->param_count) { /* Call mono_gsharedvt_constrained_call (gpointer mp, MonoMethod *cmethod, MonoClass *klass, gboolean *deref_args, gpointer *args) */ gboolean has_gsharedvt = FALSE; for (int i = 0; i < fsig->param_count; ++i) { if (mini_is_gsharedvt_type (fsig->params [i])) has_gsharedvt = TRUE; } /* Pass an array of bools which signal whenever the corresponding argument is a gsharedvt ref type */ if (has_gsharedvt) { MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM); ins->dreg = alloc_preg (cfg); ins->inst_imm = fsig->param_count; MONO_ADD_INS (cfg->cbb, ins); args [3] = ins; } else { EMIT_NEW_PCONST (cfg, args [3], 0); } /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */ MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM); ins->dreg = alloc_preg (cfg); ins->inst_imm = fsig->param_count * sizeof (target_mgreg_t); MONO_ADD_INS (cfg->cbb, ins); args [4] = ins; for (int i = 0; i < fsig->param_count; ++i) { int addr_reg; if (mini_is_gsharedvt_type (fsig->params [i])) { MonoInst *is_deref; int deref_arg_reg; ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (fsig->params [i]), MONO_RGCTX_INFO_CLASS_BOX_TYPE); deref_arg_reg = alloc_preg (cfg); /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */ EMIT_NEW_BIALU_IMM (cfg, is_deref, OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, i, is_deref->dreg); } else if (has_gsharedvt) { MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, args [3]->dreg, i, 0); } MonoInst *arg = sp [i + fsig->hasthis]; if (mini_is_gsharedvt_type (fsig->params [i]) || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_ISSTRUCT (fsig->params [i])) { EMIT_NEW_VARLOADA_VREG (cfg, ins, arg->dreg, fsig->params [i]); addr_reg = ins->dreg; EMIT_NEW_STORE_MEMBASE (cfg, ins, 
OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), addr_reg); } else { EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), arg->dreg); } } } else { EMIT_NEW_ICONST (cfg, args [3], 0); EMIT_NEW_ICONST (cfg, args [4], 0); } ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args); emit_widen = FALSE; if (mini_is_gsharedvt_type (fsig->ret)) { ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type_internal (fsig->ret), ins); } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret))) { MonoInst *add; /* Unbox */ NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, MONO_ABI_SIZEOF (MonoObject)); MONO_ADD_INS (cfg->cbb, add); /* Load value */ NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0); MONO_ADD_INS (cfg->cbb, ins); /* ins represents the call result */ } } else { GSHAREDVT_FAILURE (CEE_CALLVIRT); } *ref_emit_widen = emit_widen; return ins; exception_exit: return NULL; } static void mono_emit_load_got_addr (MonoCompile *cfg) { MonoInst *getaddr, *dummy_use; if (!cfg->got_var || cfg->got_var_allocated) return; MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR); getaddr->cil_code = cfg->header->code; getaddr->dreg = cfg->got_var->dreg; /* Add it to the start of the first bblock */ if (cfg->bb_entry->code) { getaddr->next = cfg->bb_entry->code; cfg->bb_entry->code = getaddr; } else MONO_ADD_INS (cfg->bb_entry, getaddr); cfg->got_var_allocated = TRUE; /* * Add a dummy use to keep the got_var alive, since real uses might * only be generated by the back ends. * Add it to end_bblock, so the variable's lifetime covers the whole * method. * It would be better to make the usage of the got var explicit in all * cases when the backend needs it (i.e. calls, throw etc.), so this * wouldn't be needed. */ NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var); MONO_ADD_INS (cfg->bb_exit, dummy_use); } static MonoMethod* get_constrained_method (MonoCompile *cfg, MonoImage *image, guint32 token, MonoMethod *cil_method, MonoClass *constrained_class, MonoGenericContext *generic_context) { MonoMethod *cmethod = cil_method; gboolean constrained_is_generic_param = m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR || m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR; if (cfg->current_method->wrapper_type != MONO_WRAPPER_NONE) { if (cfg->verbose_level > 2) printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class)); if (!(constrained_is_generic_param && cfg->gshared)) { cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, cfg->error); CHECK_CFG_ERROR; } } else { if (cfg->verbose_level > 2) printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class)); if (constrained_is_generic_param && cfg->gshared) { /* * This is needed since get_method_constrained can't find * the method in klass representing a type var. * The type var is guaranteed to be a reference type in this * case. 
*/ if (!mini_is_gsharedvt_klass (constrained_class)) g_assert (!m_class_is_valuetype (cmethod->klass)); } else { cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, cfg->error); CHECK_CFG_ERROR; } } return cmethod; mono_error_exit: return NULL; } static gboolean method_does_not_return (MonoMethod *method) { // FIXME: Under netcore, these are decorated with the [DoesNotReturn] attribute return m_class_get_image (method->klass) == mono_defaults.corlib && !strcmp (m_class_get_name (method->klass), "ThrowHelper") && strstr (method->name, "Throw") == method->name && !method->is_inflated; } static int inline_limit, llvm_jit_inline_limit, llvm_aot_inline_limit; static gboolean inline_limit_inited; static gboolean mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method) { MonoMethodHeaderSummary header; MonoVTable *vtable; int limit; #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK MonoMethodSignature *sig = mono_method_signature_internal (method); int i; #endif if (cfg->disable_inline) return FALSE; if (cfg->gsharedvt) return FALSE; if (cfg->inline_depth > 10) return FALSE; if (!mono_method_get_header_summary (method, &header)) return FALSE; /*runtime, icall and pinvoke are checked by summary call*/ if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) || (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) || header.has_clauses) return FALSE; if (method->flags & METHOD_ATTRIBUTE_REQSECOBJ) /* Used to mark methods containing StackCrawlMark locals */ return FALSE; /* also consider num_locals? */ /* Do the size check early to avoid creating vtables */ if (!inline_limit_inited) { char *inlinelimit; if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) { inline_limit = atoi (inlinelimit); llvm_jit_inline_limit = inline_limit; llvm_aot_inline_limit = inline_limit; g_free (inlinelimit); } else { inline_limit = INLINE_LENGTH_LIMIT; llvm_jit_inline_limit = LLVM_JIT_INLINE_LENGTH_LIMIT; llvm_aot_inline_limit = LLVM_AOT_INLINE_LENGTH_LIMIT; } inline_limit_inited = TRUE; } if (COMPILE_LLVM (cfg)) { if (cfg->compile_aot) limit = llvm_aot_inline_limit; else limit = llvm_jit_inline_limit; } else { limit = inline_limit; } if (header.code_size >= limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING)) return FALSE; /* * if we can initialize the class of the method right away, we do, * otherwise we don't allow inlining if the class needs initialization, * since it would mean inserting a call to mono_runtime_class_init() * inside the inlined code */ if (cfg->gshared && m_class_has_cctor (method->klass) && mini_class_check_context_used (cfg, method->klass)) return FALSE; { /* The AggressiveInlining hint is a good excuse to force that cctor to run. 
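* For example, a callee marked with [MethodImpl (MethodImplOptions.AggressiveInlining)] whose declaring class has a static constructor reaches the branch below, where the cctor can be executed at JIT time through mono_runtime_class_init_full ().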
*/ if ((cfg->opt & MONO_OPT_AGGRESSIVE_INLINING) || method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) { if (m_class_has_cctor (method->klass)) { ERROR_DECL (error); vtable = mono_class_vtable_checked (method->klass, error); if (!is_ok (error)) { mono_error_cleanup (error); return FALSE; } if (!cfg->compile_aot) { if (!mono_runtime_class_init_full (vtable, error)) { mono_error_cleanup (error); return FALSE; } } } } else if (mono_class_is_before_field_init (method->klass)) { if (cfg->run_cctors && m_class_has_cctor (method->klass)) { ERROR_DECL (error); /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */ if (!m_class_get_runtime_vtable (method->klass)) /* No vtable created yet */ return FALSE; vtable = mono_class_vtable_checked (method->klass, error); if (!is_ok (error)) { mono_error_cleanup (error); return FALSE; } /* This makes it so that inlining cannot trigger */ /* .cctors: too many apps depend on them */ /* running with a specific order... */ if (! vtable->initialized) return FALSE; if (!mono_runtime_class_init_full (vtable, error)) { mono_error_cleanup (error); return FALSE; } } } else if (mono_class_needs_cctor_run (method->klass, NULL)) { ERROR_DECL (error); if (!m_class_get_runtime_vtable (method->klass)) /* No vtable created yet */ return FALSE; vtable = mono_class_vtable_checked (method->klass, error); if (!is_ok (error)) { mono_error_cleanup (error); return FALSE; } if (!vtable->initialized) return FALSE; } } #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK if (mono_arch_is_soft_float ()) { /* FIXME: */ if (sig->ret && sig->ret->type == MONO_TYPE_R4) return FALSE; for (i = 0; i < sig->param_count; ++i) if (!m_type_is_byref (sig->params [i]) && sig->params [i]->type == MONO_TYPE_R4) return FALSE; } #endif if (g_list_find (cfg->dont_inline, method)) return FALSE; if (mono_profiler_get_call_instrumentation_flags (method)) return FALSE; if (mono_profiler_coverage_instrumentation_enabled (method)) return FALSE; if (method_does_not_return (method)) return FALSE; return TRUE; } static gboolean mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable) { if (!cfg->compile_aot) { g_assert (vtable); if (vtable->initialized) return FALSE; } if (mono_class_is_before_field_init (klass)) { if (cfg->method == method) return FALSE; } if (!mono_class_needs_cctor_run (klass, method)) return FALSE; if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass)) /* The initialization is already done before the method is called */ return FALSE; return TRUE; } int mini_emit_sext_index_reg (MonoCompile *cfg, MonoInst *index) { int index_reg = index->dreg; int index2_reg; #if SIZEOF_REGISTER == 8 /* The array reg is 64 bits but the index reg is only 32 */ if (COMPILE_LLVM (cfg)) { /* * abcrem can't handle the OP_SEXT_I4, so add this after abcrem, * during OP_BOUNDS_CHECK decomposition, and in the implementation * of OP_X86_LEA for llvm. 
*/ index2_reg = index_reg; } else { index2_reg = alloc_preg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg); } #else if (index->type == STACK_I8) { index2_reg = alloc_preg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg); } else { index2_reg = index_reg; } #endif return index2_reg; } MonoInst* mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck, gboolean bounded) { MonoInst *ins; guint32 size; int mult_reg, add_reg, array_reg, index2_reg, bounds_reg, lower_bound_reg, realidx2_reg; int context_used; if (mini_is_gsharedvt_variable_klass (klass)) { size = -1; } else { mono_class_init_internal (klass); size = mono_class_array_element_size (klass); } mult_reg = alloc_preg (cfg); array_reg = arr->dreg; realidx2_reg = index2_reg = mini_emit_sext_index_reg (cfg, index); if (bounded) { bounds_reg = alloc_preg (cfg); lower_bound_reg = alloc_preg (cfg); realidx2_reg = alloc_preg (cfg); MonoBasicBlock *is_null_bb = NULL; NEW_BBLOCK (cfg, is_null_bb); // gint32 lower_bound = 0; // if (arr->bounds) // lower_bound = arr->bounds.lower_bound; // realidx2 = index2 - lower_bound; MONO_EMIT_NEW_PCONST (cfg, lower_bound_reg, NULL); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds)); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, lower_bound_reg, bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound)); MONO_START_BB (cfg, is_null_bb); MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2_reg, lower_bound_reg); } if (bcheck) MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, realidx2_reg); #if defined(TARGET_X86) || defined(TARGET_AMD64) if (size == 1 || size == 2 || size == 4 || size == 8) { static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 }; EMIT_NEW_X86_LEA (cfg, ins, array_reg, realidx2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector)); ins->klass = klass; ins->type = STACK_MP; return ins; } #endif add_reg = alloc_ireg_mp (cfg); if (size == -1) { MonoInst *rgctx_ins; /* gsharedvt */ g_assert (cfg->gshared); context_used = mini_class_check_context_used (cfg, klass); g_assert (context_used); rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE); MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, realidx2_reg, rgctx_ins->dreg); } else { MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, realidx2_reg, size); } MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg); NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector)); ins->klass = klass; ins->type = STACK_MP; MONO_ADD_INS (cfg->cbb, ins); return ins; } static MonoInst* mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2) { int bounds_reg = alloc_preg (cfg); int add_reg = alloc_ireg_mp (cfg); int mult_reg = alloc_preg (cfg); int mult2_reg = alloc_preg (cfg); int low1_reg = alloc_preg (cfg); int low2_reg = alloc_preg (cfg); int high1_reg = alloc_preg (cfg); int high2_reg = alloc_preg (cfg); int realidx1_reg = alloc_preg (cfg); int realidx2_reg = alloc_preg (cfg); int sum_reg = alloc_preg (cfg); int index1, index2; MonoInst *ins; guint32 size; mono_class_init_internal (klass); size = mono_class_array_element_size (klass); index1 = index_ins1->dreg; index2 = index_ins2->dreg;
#if SIZEOF_REGISTER == 8 /* The array reg is 64 bits but the index reg is only 32 */ if (COMPILE_LLVM (cfg)) { /* Not needed */ } else { int tmpreg = alloc_preg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1); index1 = tmpreg; tmpreg = alloc_preg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2); index2 = tmpreg; } #else // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ? #endif /* range checking */ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds)); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg, bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound)); MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg, bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length)); MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg); MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg, bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound)); MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg, bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length)); MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg); MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg); MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size); MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg); NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector)); ins->type = STACK_MP; ins->klass = klass; MONO_ADD_INS (cfg->cbb, ins); return ins; } static MonoInst* mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, guchar *ip, gboolean is_set) { int rank; MonoInst *addr; MonoMethod *addr_method; int element_size; MonoClass *eclass = m_class_get_element_class (cmethod->klass); gboolean bounded = m_class_get_byval_arg (cmethod->klass) ? m_class_get_byval_arg (cmethod->klass)->type == MONO_TYPE_ARRAY : FALSE; rank = mono_method_signature_internal (cmethod)->param_count - (is_set? 
1: 0); if (rank == 1) return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE, bounded); /* emit_ldelema_2 depends on OP_LMUL */ if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) { return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]); } if (mini_is_gsharedvt_variable_klass (eclass)) element_size = 0; else element_size = mono_class_array_element_size (eclass); addr_method = mono_marshal_get_array_address (rank, element_size); addr = mono_emit_method_call (cfg, addr_method, sp, NULL); return addr; } static gboolean mini_class_is_reference (MonoClass *klass) { return mini_type_is_reference (m_class_get_byval_arg (klass)); } MonoInst* mini_emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks) { if (safety_checks && mini_class_is_reference (klass) && !(MONO_INS_IS_PCONST_NULL (sp [2]))) { MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class); MonoMethod *helper; MonoInst *iargs [3]; if (sp [0]->type != STACK_OBJ) return NULL; if (sp [2]->type != STACK_OBJ) return NULL; iargs [2] = sp [2]; iargs [1] = sp [1]; iargs [0] = sp [0]; MonoClass *array_class = sp [0]->klass; if (array_class && m_class_get_rank (array_class) == 1) { MonoClass *eclass = m_class_get_element_class (array_class); if (m_class_is_sealed (eclass)) { helper = mono_marshal_get_virtual_stelemref (array_class); /* Make a non-virtual call if possible */ return mono_emit_method_call (cfg, helper, iargs, NULL); } } helper = mono_marshal_get_virtual_stelemref (obj_array); if (!helper->slot) mono_class_setup_vtable (obj_array); g_assert (helper->slot); return mono_emit_method_call (cfg, helper, iargs, sp [0]); } else { MonoInst *ins; if (mini_is_gsharedvt_variable_klass (klass)) { MonoInst *addr; // FIXME-VT: OP_ICONST optimization addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE); EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg); ins->opcode = OP_STOREV_MEMBASE; } else if (sp [1]->opcode == OP_ICONST) { int array_reg = sp [0]->dreg; int index_reg = sp [1]->dreg; int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector); if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg) && sp [1]->inst_c0 < 0) MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg); if (safety_checks) MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg); EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset, sp [2]->dreg); } else { MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks, FALSE); if (!mini_debug_options.weak_memory_model && mini_class_is_reference (klass)) mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg); if (mini_class_is_reference (klass)) mini_emit_write_barrier (cfg, addr, sp [2]); } return ins; } } MonoInst* mini_emit_memory_barrier (MonoCompile *cfg, int kind) { MonoInst *ins = NULL; MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER); MONO_ADD_INS (cfg->cbb, ins); ins->backend.memory_barrier_kind = kind; return ins; } /* * This entry point could be used later for arbitrary method * redirection. 
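* Currently the only redirection is String.FastAllocateString, which is routed to the GC's managed allocator when one is available (see below).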
*/ inline static MonoInst* mini_redirect_call (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins) { if (method->klass == mono_defaults.string_class) { /* managed string allocation support */ if (strcmp (method->name, "FastAllocateString") == 0) { MonoInst *iargs [2]; MonoVTable *vtable = mono_class_vtable_checked (method->klass, cfg->error); MonoMethod *managed_alloc = NULL; mono_error_assert_ok (cfg->error); /*Should not fail since it is System.String*/ #ifndef MONO_CROSS_COMPILE managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE); #endif if (!managed_alloc) return NULL; EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable); iargs [1] = args [0]; return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins); } } return NULL; } static void mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp) { MonoInst *store, *temp; int i; for (i = 0; i < sig->param_count + sig->hasthis; ++i) { MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis]; /* * FIXME: We should use *args++ = sp [0], but that would mean the arg * would be different than the MonoInst's used to represent arguments, and * the ldelema implementation can't deal with that. * Solution: When ldelema is used on an inline argument, create a var for * it, emit ldelema on that var, and emit the saving code below in * inline_method () if needed. */ temp = mono_compile_create_var (cfg, argtype, OP_LOCAL); cfg->args [i] = temp; /* This uses cfg->args [i] which is set by the preceding line */ EMIT_NEW_ARGSTORE (cfg, store, i, *sp); store->cil_code = sp [0]->cil_code; sp++; } } #define MONO_INLINE_CALLED_LIMITED_METHODS 1 #define MONO_INLINE_CALLER_LIMITED_METHODS 1 #if (MONO_INLINE_CALLED_LIMITED_METHODS) static gboolean check_inline_called_method_name_limit (MonoMethod *called_method) { int strncmp_result; static const char *limit = NULL; if (limit == NULL) { const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT"); if (limit_string != NULL) limit = limit_string; else limit = ""; } if (limit [0] != '\0') { char *called_method_name = mono_method_full_name (called_method, TRUE); strncmp_result = strncmp (called_method_name, limit, strlen (limit)); g_free (called_method_name); //return (strncmp_result <= 0); return (strncmp_result == 0); } else { return TRUE; } } #endif #if (MONO_INLINE_CALLER_LIMITED_METHODS) static gboolean check_inline_caller_method_name_limit (MonoMethod *caller_method) { int strncmp_result; static const char *limit = NULL; if (limit == NULL) { const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT"); if (limit_string != NULL) { limit = limit_string; } else { limit = ""; } } if (limit [0] != '\0') { char *caller_method_name = mono_method_full_name (caller_method, TRUE); strncmp_result = strncmp (caller_method_name, limit, strlen (limit)); g_free (caller_method_name); //return (strncmp_result <= 0); return (strncmp_result == 0); } else { return TRUE; } } #endif void mini_emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype) { static double r8_0 = 0.0; static float r4_0 = 0.0; MonoInst *ins; int t; rtype = mini_get_underlying_type (rtype); t = rtype->type; if (m_type_is_byref (rtype)) { MONO_EMIT_NEW_PCONST (cfg, dreg, NULL); } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) { MONO_EMIT_NEW_ICONST (cfg, dreg, 0); } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) { MONO_EMIT_NEW_I8CONST (cfg, dreg, 0); } else if (cfg->r4fp && t 
== MONO_TYPE_R4) { MONO_INST_NEW (cfg, ins, OP_R4CONST); ins->type = STACK_R4; ins->inst_p0 = (void*)&r4_0; ins->dreg = dreg; MONO_ADD_INS (cfg->cbb, ins); } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) { MONO_INST_NEW (cfg, ins, OP_R8CONST); ins->type = STACK_R8; ins->inst_p0 = (void*)&r8_0; ins->dreg = dreg; MONO_ADD_INS (cfg->cbb, ins); } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) || ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) { MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype)); } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) { MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype)); } else { MONO_EMIT_NEW_PCONST (cfg, dreg, NULL); } } static void emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype) { int t; rtype = mini_get_underlying_type (rtype); t = rtype->type; if (m_type_is_byref (rtype)) { MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST); } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) { MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST); } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) { MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST); } else if (cfg->r4fp && t == MONO_TYPE_R4) { MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST); } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) { MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST); } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) || ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) { MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO); } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) { MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO); } else { mini_emit_init_rvar (cfg, dreg, rtype); } } /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */ static void emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init) { MonoInst *var = cfg->locals [local]; if (COMPILE_SOFT_FLOAT (cfg)) { MonoInst *store; int reg = alloc_dreg (cfg, (MonoStackType)var->type); mini_emit_init_rvar (cfg, reg, type); EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins); } else { if (init) mini_emit_init_rvar (cfg, var->dreg, type); else emit_dummy_init_rvar (cfg, var->dreg, type); } } int mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always) { return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always, NULL); } /* * inline_method: * * Return the cost of inlining CMETHOD, or zero if it should not be inlined. 
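* FSIG may be NULL, in which case it is looked up from CMETHOD. If IS_EMPTY is non-NULL, it is set to TRUE when the callee body consists of a single CEE_RET.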
*/ static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always, gboolean *is_empty) { ERROR_DECL (error); MonoInst *ins, *rvar = NULL; MonoMethodHeader *cheader; MonoBasicBlock *ebblock, *sbblock; int i, costs; MonoInst **prev_locals, **prev_args; MonoType **prev_arg_types; guint prev_real_offset; GHashTable *prev_cbb_hash; MonoBasicBlock **prev_cil_offset_to_bb; MonoBasicBlock *prev_cbb; const guchar *prev_ip; guchar *prev_cil_start; guint32 prev_cil_offset_to_bb_len; MonoMethod *prev_current_method; MonoGenericContext *prev_generic_context; gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE; g_assert (cfg->exception_type == MONO_EXCEPTION_NONE); #if (MONO_INLINE_CALLED_LIMITED_METHODS) if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod)) return 0; #endif #if (MONO_INLINE_CALLER_LIMITED_METHODS) if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method)) return 0; #endif if (!fsig) fsig = mono_method_signature_internal (cmethod); if (cfg->verbose_level > 2) printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE)); if (!cmethod->inline_info) { cfg->stat_inlineable_methods++; cmethod->inline_info = 1; } if (is_empty) *is_empty = FALSE; /* allocate local variables */ cheader = mono_method_get_header_checked (cmethod, error); if (!cheader) { if (inline_always) { mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); mono_error_move (cfg->error, error); } else { mono_error_cleanup (error); } return 0; } if (is_empty && cheader->code_size == 1 && cheader->code [0] == CEE_RET) *is_empty = TRUE; /* allocate space to store the return value */ if (!MONO_TYPE_IS_VOID (fsig->ret)) { rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL); } prev_locals = cfg->locals; cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*)); for (i = 0; i < cheader->num_locals; ++i) cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL); /* allocate start and end blocks */ /* This is needed so if the inline is aborted, we can clean up */ NEW_BBLOCK (cfg, sbblock); sbblock->real_offset = real_offset; NEW_BBLOCK (cfg, ebblock); ebblock->block_num = cfg->num_bblocks++; ebblock->real_offset = real_offset; prev_args = cfg->args; prev_arg_types = cfg->arg_types; prev_ret_var_set = cfg->ret_var_set; prev_real_offset = cfg->real_offset; prev_cbb_hash = cfg->cbb_hash; prev_cil_offset_to_bb = cfg->cil_offset_to_bb; prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len; prev_cil_start = cfg->cil_start; prev_ip = cfg->ip; prev_cbb = cfg->cbb; prev_current_method = cfg->current_method; prev_generic_context = cfg->generic_context; prev_disable_inline = cfg->disable_inline; cfg->ret_var_set = FALSE; cfg->inline_depth ++; if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) virtual_ = TRUE; costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_); ret_var_set = cfg->ret_var_set; cfg->real_offset = prev_real_offset; cfg->cbb_hash = prev_cbb_hash; cfg->cil_offset_to_bb = prev_cil_offset_to_bb; cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len; cfg->cil_start = prev_cil_start; cfg->ip = prev_ip; cfg->locals = prev_locals; cfg->args = prev_args; cfg->arg_types = prev_arg_types; cfg->current_method = prev_current_method; cfg->generic_context = 
prev_generic_context; cfg->ret_var_set = prev_ret_var_set; cfg->disable_inline = prev_disable_inline; cfg->inline_depth --; if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) { if (cfg->verbose_level > 2) printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE)); mono_error_assert_ok (cfg->error); cfg->stat_inlined_methods++; /* always add some code to avoid block split failures */ MONO_INST_NEW (cfg, ins, OP_NOP); MONO_ADD_INS (prev_cbb, ins); prev_cbb->next_bb = sbblock; link_bblock (cfg, prev_cbb, sbblock); /* * Get rid of the begin and end bblocks if possible to aid local * optimizations. */ if (prev_cbb->out_count == 1) mono_merge_basic_blocks (cfg, prev_cbb, sbblock); if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock)) mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]); if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) { MonoBasicBlock *prev = ebblock->in_bb [0]; if (prev->next_bb == ebblock) { mono_merge_basic_blocks (cfg, prev, ebblock); cfg->cbb = prev; if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) { mono_merge_basic_blocks (cfg, prev_cbb, prev); cfg->cbb = prev_cbb; } } else { /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */ cfg->cbb = ebblock; } } else { /* * It's possible that the rvar is set in some prev bblock, but not in others. * (#1835). */ if (rvar) { MonoBasicBlock *bb; for (i = 0; i < ebblock->in_count; ++i) { bb = ebblock->in_bb [i]; if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) { cfg->cbb = bb; mini_emit_init_rvar (cfg, rvar->dreg, fsig->ret); } } } cfg->cbb = ebblock; } if (rvar) { /* * If the inlined method contains only a throw, then the ret var is not * set, so set it to a dummy value. */ if (!ret_var_set) mini_emit_init_rvar (cfg, rvar->dreg, fsig->ret); EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0); *sp++ = ins; } cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader); return costs + 1; } else { if (cfg->verbose_level > 2) { const char *msg = mono_error_get_message (cfg->error); printf ("INLINE ABORTED %s (cost %d) %s\n", mono_method_full_name (cmethod, TRUE), costs, msg ? msg : ""); } cfg->exception_type = MONO_EXCEPTION_NONE; clear_cfg_error (cfg); /* This gets rid of the newly added bblocks */ cfg->cbb = prev_cbb; } cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader); return 0; } /* * Some of these comments may well be out-of-date. * Design decisions: we do a single pass over the IL code (and we do bblock * splitting/merging in the few cases when it's required: a back jump to an IL * address that was not already seen as bblock starting point). * Code is validated as we go (full verification is still better left to metadata/verify.c). * Complex operations are decomposed into simpler ones right away. We need to let the * arch-specific code peek and poke inside this process somehow (except when the * optimizations can take advantage of the full semantic info of coarse opcodes). * All the opcodes of the form opcode.s are 'normalized' to opcode. * MonoInst->opcode initially is the IL opcode or some simplification of that * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific * opcode with value bigger than OP_LAST. 
* At this point the IR can be handed over to an interpreter, a dumb code generator * or to the optimizing code generator that will translate it to SSA form. * * Profiling directed optimizations. * We may compile by default with few or no optimizations and instrument the code * or the user may indicate what methods to optimize the most either in a config file * or through repeated runs where the compiler applies offline the optimizations to * each method and then decides if it was worth it. */ #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED #define CHECK_STACK_OVF() if (((sp - stack_start) + 1) > header->max_stack) UNVERIFIED #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED #define CHECK_OPSIZE(size) if ((size) < 1 || ip + (size) > end) UNVERIFIED #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass)) /* offset from br.s -> br like opcodes */ #define BIG_BRANCH_OFFSET 13 static gboolean ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip) { MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start]; return b == NULL || b == bb; } static int get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, guchar *start, guchar *end, guchar **pos) { guchar *ip = start; guchar *target; int i; guint cli_addr; MonoBasicBlock *bblock; const MonoOpcode *opcode; while (ip < end) { cli_addr = ip - start; i = mono_opcode_value ((const guint8 **)&ip, end); if (i < 0) UNVERIFIED; opcode = &mono_opcodes [i]; switch (opcode->argument) { case MonoInlineNone: ip++; break; case MonoInlineString: case MonoInlineType: case MonoInlineField: case MonoInlineMethod: case MonoInlineTok: case MonoInlineSig: case MonoShortInlineR: case MonoInlineI: ip += 5; break; case MonoInlineVar: ip += 3; break; case MonoShortInlineVar: case MonoShortInlineI: ip += 2; break; case MonoShortInlineBrTarget: target = start + cli_addr + 2 + (signed char)ip [1]; GET_BBLOCK (cfg, bblock, target); ip += 2; if (ip < end) GET_BBLOCK (cfg, bblock, ip); break; case MonoInlineBrTarget: target = start + cli_addr + 5 + (gint32)read32 (ip + 1); GET_BBLOCK (cfg, bblock, target); ip += 5; if (ip < end) GET_BBLOCK (cfg, bblock, ip); break; case MonoInlineSwitch: { guint32 n = read32 (ip + 1); guint32 j; ip += 5; cli_addr += 5 + 4 * n; target = start + cli_addr; GET_BBLOCK (cfg, bblock, target); for (j = 0; j < n; ++j) { target = start + cli_addr + (gint32)read32 (ip); GET_BBLOCK (cfg, bblock, target); ip += 4; } break; } case MonoInlineR: case MonoInlineI8: ip += 9; break; default: g_assert_not_reached (); } if (i == CEE_THROW) { guchar *bb_start = ip - 1; /* Find the start of the bblock containing the throw */ bblock = NULL; while ((bb_start >= start) && !bblock) { bblock = cfg->cil_offset_to_bb [(bb_start) - start]; bb_start --; } if (bblock) bblock->out_of_line = 1; } } return 0; unverified: exception_exit: *pos = ip; return 1; } static MonoMethod * mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error) { MonoMethod *method; error_init (error); if (m->wrapper_type != MONO_WRAPPER_NONE) { method = (MonoMethod *)mono_method_get_wrapper_data (m, token); if (context) { method = mono_class_inflate_generic_method_checked (method, context, error); } } 
else { method = mono_get_method_checked (m_class_get_image (m->klass), token, klass, context, error); } return method; } static MonoMethod * mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context) { ERROR_DECL (error); MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? cfg->error : error); if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (m_class_get_byval_arg (method->klass))) { mono_error_set_bad_image (cfg->error, m_class_get_image (cfg->method->klass), "Method with open type while not compiling gshared"); method = NULL; } if (!method && !cfg) mono_error_cleanup (error); /* FIXME don't swallow the error */ return method; } static MonoMethodSignature* mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error) { MonoMethodSignature *fsig; error_init (error); if (method->wrapper_type != MONO_WRAPPER_NONE) { fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token); } else { fsig = mono_metadata_parse_signature_checked (m_class_get_image (method->klass), token, error); return_val_if_nok (error, NULL); } if (context) { fsig = mono_inflate_generic_signature(fsig, context, error); } return fsig; } /* * Return the original method if a wrapper is specified. We can only access * the custom attributes from the original method. */ static MonoMethod* get_original_method (MonoMethod *method) { if (method->wrapper_type == MONO_WRAPPER_NONE) return method; /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */ if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) return NULL; /* in other cases we need to find the original method */ return mono_marshal_method_from_wrapper (method); } static guchar* il_read_op (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op) // If ip is desired_il_op, return the next ip, else NULL. { if (G_LIKELY (ip < end) && G_UNLIKELY (*ip == first_byte)) { MonoOpcodeEnum il_op = MonoOpcodeEnum_Invalid; // mono_opcode_value_and_size updates ip, but not in the expected way. const guchar *temp_ip = ip; const int size = mono_opcode_value_and_size (&temp_ip, end, &il_op); return (G_LIKELY (size > 0) && G_UNLIKELY (il_op == desired_il_op)) ? (ip + size) : NULL; } return NULL; } static guchar* il_read_op_and_token (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, guint32 *token) { ip = il_read_op (ip, end, first_byte, desired_il_op); if (ip) *token = read32 (ip - 4); // could be +1 or +2 from start return ip; } static guchar* il_read_branch_and_target (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, int size, guchar **target) { ip = il_read_op (ip, end, first_byte, desired_il_op); if (ip) { gint32 delta = 0; switch (size) { case 1: delta = (signed char)ip [-1]; break; case 4: delta = (gint32)read32 (ip - 4); break; } // FIXME verify it is within the function and start of an instruction. 
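/* ip already points past the branch instruction here, so the target is simply ip + delta, with delta read above as a 1- or 4-byte signed immediate. */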
*target = ip + delta; return ip; } return NULL; } #define il_read_brtrue(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE, MONO_CEE_BRTRUE, 4, target)) #define il_read_brtrue_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE_S, MONO_CEE_BRTRUE_S, 1, target)) #define il_read_brfalse(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE, MONO_CEE_BRFALSE, 4, target)) #define il_read_brfalse_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE_S, MONO_CEE_BRFALSE_S, 1, target)) #define il_read_dup(ip, end) (il_read_op (ip, end, CEE_DUP, MONO_CEE_DUP)) #define il_read_newobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_NEW_OBJ, MONO_CEE_NEWOBJ, token)) #define il_read_ldtoken(ip, end, token) (il_read_op_and_token (ip, end, CEE_LDTOKEN, MONO_CEE_LDTOKEN, token)) #define il_read_call(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALL, MONO_CEE_CALL, token)) #define il_read_callvirt(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALLVIRT, MONO_CEE_CALLVIRT, token)) #define il_read_initobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_INITOBJ, token)) #define il_read_constrained(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_CONSTRAINED_, token)) #define il_read_unbox_any(ip, end, token) (il_read_op_and_token (ip, end, CEE_UNBOX_ANY, MONO_CEE_UNBOX_ANY, token)) /* * Check that the IL instructions at ip are the array initialization * sequence and return the pointer to the data and the size. */ static const char* initialize_array_data (MonoCompile *cfg, MonoMethod *method, gboolean aot, guchar *ip, guchar *end, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token, MonoOpcodeEnum *il_op, guchar **next_ip) { /* * newarr[System.Int32] * dup * ldtoken field valuetype ... * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle) */ guint32 token; guint32 field_token; if ((ip = il_read_dup (ip, end)) && ip_in_bb (cfg, cfg->cbb, ip) && (ip = il_read_ldtoken (ip, end, &field_token)) && IS_FIELD_DEF (field_token) && ip_in_bb (cfg, cfg->cbb, ip) && (ip = il_read_call (ip, end, &token))) { ERROR_DECL (error); guint32 rva; const char *data_ptr; int size = 0; MonoMethod *cmethod; MonoClass *dummy_class; MonoClassField *field = mono_field_from_token_checked (m_class_get_image (method->klass), field_token, &dummy_class, NULL, error); int dummy_align; if (!field) { mono_error_cleanup (error); /* FIXME don't swallow the error */ return NULL; } *out_field_token = field_token; cmethod = mini_get_method (NULL, method, token, NULL, NULL); if (!cmethod) return NULL; if (strcmp (cmethod->name, "InitializeArray") || strcmp (m_class_get_name (cmethod->klass), "RuntimeHelpers") || m_class_get_image (cmethod->klass) != mono_defaults.corlib) return NULL; switch (mini_get_underlying_type (m_class_get_byval_arg (klass))->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: size = 1; break; /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? 
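* (i.e. the optimization below is only performed for multi-byte element types on little-endian targets; elsewhere the InitializeArray call is left as a normal runtime call.)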
*/ #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN case MONO_TYPE_I2: case MONO_TYPE_U2: size = 2; break; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_R4: size = 4; break; case MONO_TYPE_R8: case MONO_TYPE_I8: case MONO_TYPE_U8: size = 8; break; #endif default: return NULL; } size *= len; if (size > mono_type_size (field->type, &dummy_align)) return NULL; *out_size = size; /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/ MonoImage *method_klass_image = m_class_get_image (method->klass); if (!image_is_dynamic (method_klass_image)) { guint32 field_index = mono_metadata_token_index (field_token); mono_metadata_field_info (method_klass_image, field_index - 1, NULL, &rva, NULL); data_ptr = mono_image_rva_map (method_klass_image, rva); /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/ /* for aot code we do the lookup on load */ if (aot && data_ptr) data_ptr = (const char *)GUINT_TO_POINTER (rva); } else { /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */ g_assert (!aot); data_ptr = mono_field_get_data (field); } if (!data_ptr) return NULL; *il_op = MONO_CEE_CALL; *next_ip = ip; return data_ptr; } return NULL; } static void set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, guchar *ip) { ERROR_DECL (error); char *method_fname = mono_method_full_name (method, TRUE); char *method_code; MonoMethodHeader *header = mono_method_get_header_checked (method, error); if (!header) { method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (error)); mono_error_cleanup (error); } else if (header->code_size == 0) method_code = g_strdup ("method body is empty."); else method_code = mono_disasm_code_one (NULL, method, ip, NULL); mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code)); g_free (method_fname); g_free (method_code); cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header); } guint32 mono_type_to_stloc_coerce (MonoType *type) { if (m_type_is_byref (type)) return 0; type = mini_get_underlying_type (type); handle_enum: switch (type->type) { case MONO_TYPE_I1: return OP_ICONV_TO_I1; case MONO_TYPE_U1: return OP_ICONV_TO_U1; case MONO_TYPE_I2: return OP_ICONV_TO_I2; case MONO_TYPE_U2: return OP_ICONV_TO_U2; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: case MONO_TYPE_I8: case MONO_TYPE_U8: case MONO_TYPE_R4: case MONO_TYPE_R8: case MONO_TYPE_TYPEDBYREF: case MONO_TYPE_GENERICINST: return 0; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (type->data.klass)) { type = mono_class_enum_basetype_internal (type->data.klass); goto handle_enum; } return 0; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: //TODO I believe we don't need to handle gsharedvt as there won't be match and, for example, u1 is not covariant to u32 return 0; default: g_error ("unknown type 0x%02x in mono_type_to_stloc_coerce", type->type); } return -1; } static void emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n) { MonoInst *ins; guint32 coerce_op = mono_type_to_stloc_coerce (header->locals [n]); if (coerce_op) { if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) { if (cfg->verbose_level > 2) printf ("Found existing coercion is enough for stloc\n");
} else { MONO_INST_NEW (cfg, ins, coerce_op); ins->dreg = alloc_ireg (cfg); ins->sreg1 = sp [0]->dreg; ins->type = STACK_I4; ins->klass = mono_class_from_mono_type_internal (header->locals [n]); MONO_ADD_INS (cfg->cbb, ins); *sp = mono_decompose_opcode (cfg, ins); } } guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]); if (!cfg->deopt && (opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) { /* Optimize reg-reg moves away */ /* * Can't optimize other opcodes, since sp[0] might point to * the last ins of a decomposed opcode. */ sp [0]->dreg = (cfg)->locals [n]->dreg; } else { EMIT_NEW_LOCSTORE (cfg, ins, n, *sp); } } static void emit_starg_ir (MonoCompile *cfg, MonoInst **sp, int n) { MonoInst *ins; guint32 coerce_op = mono_type_to_stloc_coerce (cfg->arg_types [n]); if (coerce_op) { if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) { if (cfg->verbose_level > 2) printf ("Found existing coercion is enough for starg\n"); } else { MONO_INST_NEW (cfg, ins, coerce_op); ins->dreg = alloc_ireg (cfg); ins->sreg1 = sp [0]->dreg; ins->type = STACK_I4; ins->klass = mono_class_from_mono_type_internal (cfg->arg_types [n]); MONO_ADD_INS (cfg->cbb, ins); *sp = mono_decompose_opcode (cfg, ins); } } EMIT_NEW_ARGSTORE (cfg, ins, n, *sp); } /* * ldloca inhibits many optimizations so try to get rid of it in common * cases. */ static guchar * emit_optimized_ldloca_ir (MonoCompile *cfg, guchar *ip, guchar *end, int local) { guint32 token; MonoClass *klass; MonoType *type; guchar *start = ip; if ((ip = il_read_initobj (ip, end, &token)) && ip_in_bb (cfg, cfg->cbb, start + 1)) { /* From the INITOBJ case */ klass = mini_get_class (cfg->current_method, token, cfg->generic_context); CHECK_TYPELOAD (klass); type = mini_get_underlying_type (m_class_get_byval_arg (klass)); emit_init_local (cfg, local, type, TRUE); return ip; } exception_exit: return NULL; } static MonoInst* handle_call_res_devirt (MonoCompile *cfg, MonoMethod *cmethod, MonoInst *call_res) { /* * Devirt EqualityComparer.Default.Equals () calls for some types. * The corefx code expects these calls to be devirtualized. * This depends on the implementation of EqualityComparer.Default, which is * in mcs/class/referencesource/mscorlib/system/collections/generic/equalitycomparer.cs */ if (m_class_get_image (cmethod->klass) == mono_defaults.corlib && !strcmp (m_class_get_name (cmethod->klass), "EqualityComparer`1") && !strcmp (cmethod->name, "get_Default")) { MonoType *param_type = mono_class_get_generic_class (cmethod->klass)->context.class_inst->type_argv [0]; MonoClass *inst; MonoGenericContext ctx; ERROR_DECL (error); memset (&ctx, 0, sizeof (ctx)); MonoType *args [ ] = { param_type }; ctx.class_inst = mono_metadata_get_generic_inst (1, args); inst = mono_class_inflate_generic_class_checked (mono_class_get_iequatable_class (), &ctx, error); mono_error_assert_ok (error); /* EqualityComparer<T>.Default returns specific types depending on T */ // FIXME: Add more
/* 1. 
Implements IEquatable<T> */ /* * Can't use this for string/byte as it might use a different comparer: * * // Specialize type byte for performance reasons * if (t == typeof(byte)) { * return (EqualityComparer<T>)(object)(new ByteEqualityComparer()); * } * #if MOBILE * // Breaks .net serialization compatibility * if (t == typeof (string)) * return (EqualityComparer<T>)(object)new InternalStringComparer (); * #endif */ if (mono_class_is_assignable_from_internal (inst, mono_class_from_mono_type_internal (param_type)) && param_type->type != MONO_TYPE_U1 && param_type->type != MONO_TYPE_STRING) { MonoInst *typed_objref; MonoClass *gcomparer_inst; memset (&ctx, 0, sizeof (ctx)); args [0] = param_type; ctx.class_inst = mono_metadata_get_generic_inst (1, args); MonoClass *gcomparer = mono_class_get_geqcomparer_class (); g_assert (gcomparer); gcomparer_inst = mono_class_inflate_generic_class_checked (gcomparer, &ctx, error); if (is_ok (error)) { MONO_INST_NEW (cfg, typed_objref, OP_TYPED_OBJREF); typed_objref->type = STACK_OBJ; typed_objref->dreg = alloc_ireg_ref (cfg); typed_objref->sreg1 = call_res->dreg; typed_objref->klass = gcomparer_inst; MONO_ADD_INS (cfg->cbb, typed_objref); call_res = typed_objref; /* Force decompose */ cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE; cfg->cbb->needs_decompose = TRUE; } } } return call_res; } static gboolean is_exception_class (MonoClass *klass) { if (G_LIKELY (m_class_get_supertypes (klass))) return mono_class_has_parent_fast (klass, mono_defaults.exception_class); while (klass) { if (klass == mono_defaults.exception_class) return TRUE; klass = m_class_get_parent (klass); } return FALSE; } /* * is_jit_optimizer_disabled: * * Determine whether M's assembly has a DebuggableAttribute with the * IsJITOptimizerDisabled flag set. */ static gboolean is_jit_optimizer_disabled (MonoMethod *m) { MonoAssembly *ass = m_class_get_image (m->klass)->assembly; g_assert (ass); if (ass->jit_optimizer_disabled_inited) return ass->jit_optimizer_disabled; return mono_assembly_is_jit_optimizer_disabled (ass); } gboolean mono_is_supported_tailcall_helper (gboolean value, const char *svalue) { if (!value) mono_tailcall_print ("%s %s\n", __func__, svalue); return value; } static gboolean mono_is_not_supported_tailcall_helper (gboolean value, const char *svalue, MonoMethod *method, MonoMethod *cmethod) { // Return value, printing if it inhibits tailcall. if (value && mono_tailcall_print_enabled ()) { const char *lparen = strchr (svalue, ' ') ? "(" : ""; const char *rparen = *lparen ? ")" : ""; mono_tailcall_print ("%s %s -> %s %s%s%s:%d\n", __func__, method->name, cmethod->name, lparen, svalue, rparen, value); } return value; } #define IS_NOT_SUPPORTED_TAILCALL(x) (mono_is_not_supported_tailcall_helper((x), #x, method, cmethod)) static gboolean is_supported_tailcall (MonoCompile *cfg, const guint8 *ip, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, gboolean virtual_, gboolean extra_arg, gboolean *ptailcall_calli) { // Some checks apply to "regular", some to "calli", some to both. // To ease burden on caller, always compute regular and calli. 
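// The return value reports whether a regular tailcall is supported; the calli form is reported separately through *ptailcall_calli at exit.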
gboolean tailcall = TRUE; gboolean tailcall_calli = TRUE; if (IS_NOT_SUPPORTED_TAILCALL (virtual_ && !cfg->backend->have_op_tailcall_membase)) tailcall = FALSE; if (IS_NOT_SUPPORTED_TAILCALL (!cfg->backend->have_op_tailcall_reg)) tailcall_calli = FALSE; if (!tailcall && !tailcall_calli) goto exit; // FIXME in calli, there is no type for the this parameter, // so we assume it might be valuetype; in the future we should issue a range // check to rule out pointing to the frame (for other reference parameters also) if ( IS_NOT_SUPPORTED_TAILCALL (cmethod && fsig->hasthis && m_class_is_valuetype (cmethod->klass)) // This might point to the current method's stack. Emit range check? || IS_NOT_SUPPORTED_TAILCALL (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) || IS_NOT_SUPPORTED_TAILCALL (fsig->pinvoke) // i.e. if !cmethod (calli) || IS_NOT_SUPPORTED_TAILCALL (cfg->method->save_lmf) || IS_NOT_SUPPORTED_TAILCALL (!cmethod && fsig->hasthis) // FIXME could be valuetype to current frame; range check || IS_NOT_SUPPORTED_TAILCALL (cmethod && cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD) // http://www.mono-project.com/docs/advanced/runtime/docs/generic-sharing/ // // 1. Non-generic non-static methods of reference types have access to the // RGCTX via the "this" argument (this->vtable->rgctx). // 2. a Non-generic static methods of reference types and b. non-generic methods // of value types need to be passed a pointer to the caller's class's VTable in the MONO_ARCH_RGCTX_REG register. // 3. Generic methods need to be passed a pointer to the MRGCTX in the MONO_ARCH_RGCTX_REG register // // That is what vtable_arg is here (always?). // // Passing vtable_arg uses (requires?) a volatile non-parameter register, // such as AMD64 rax, r10, r11, or the return register on many architectures. // ARM32 does not always clearly have such a register. ARM32's return register // is a parameter register. // iPhone could use r9 except on old systems. iPhone/ARM32 is not particularly // important. Linux/arm32 is less clear. // ARM32's scratch r12 might work but only with much collateral change. // // Imagine F1 calls F2, and F2 tailcalls F3. // F2 and F3 are managed. F1 is native. // Without a tailcall, F2 can save and restore everything needed for F1. // However if the extra parameter were in a non-volatile, such as ARM32 V5/R8, // F3 cannot easily restore it for F1 in the current scheme, where the extra // parameter is not merely an extra parameter, but is passed "outside of the ABI". // // If all native to managed transitions are intercepted and wrapped (w/o tailcall), // then they can preserve this register and the rest of the managed callgraph // treat it as volatile. // // Interface method dispatch has the same problem (imt_arg). || IS_NOT_SUPPORTED_TAILCALL (extra_arg && !cfg->backend->have_volatile_non_param_register) || IS_NOT_SUPPORTED_TAILCALL (cfg->gsharedvt) ) { tailcall_calli = FALSE; tailcall = FALSE; goto exit; } for (int i = 0; i < fsig->param_count; ++i) { if (IS_NOT_SUPPORTED_TAILCALL (m_type_is_byref (fsig->params [i]) || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)) { tailcall_calli = FALSE; tailcall = FALSE; // These can point to the current method's stack. Emit range check? goto exit; } } MonoMethodSignature *caller_signature; MonoMethodSignature *callee_signature; caller_signature = mono_method_signature_internal (method); callee_signature = cmethod ? 
mono_method_signature_internal (cmethod) : fsig; g_assert (caller_signature); g_assert (callee_signature); // Require an exact match on return type due to various conversions in emit_move_return_value that would be skipped. // The main troublesome conversions are double <=> float. // CoreCLR allows some conversions here, such as integer truncation. // As well I <=> I[48] and U <=> U[48] would be ok, for matching size. if (IS_NOT_SUPPORTED_TAILCALL (mini_get_underlying_type (caller_signature->ret)->type != mini_get_underlying_type (callee_signature->ret)->type) || IS_NOT_SUPPORTED_TAILCALL (!mono_arch_tailcall_supported (cfg, caller_signature, callee_signature, virtual_))) { tailcall_calli = FALSE; tailcall = FALSE; goto exit; } /* Debugging support */ #if 0 if (!mono_debug_count ()) { tailcall_calli = FALSE; tailcall = FALSE; goto exit; } #endif // See check_sp in mini_emit_calli_full. if (tailcall_calli && IS_NOT_SUPPORTED_TAILCALL (mini_should_check_stack_pointer (cfg))) tailcall_calli = FALSE; exit: mono_tailcall_print ("tail.%s %s -> %s tailcall:%d tailcall_calli:%d gshared:%d extra_arg:%d virtual_:%d\n", mono_opcode_name (*ip), method->name, cmethod ? cmethod->name : "calli", tailcall, tailcall_calli, cfg->gshared, extra_arg, virtual_); *ptailcall_calli = tailcall_calli; return tailcall; } /* * is_addressable_valuetype_load * * Returns true if a previous load can be done without doing an extra copy, given the new instruction ip and the type of the object being loaded ldtype */ static gboolean is_addressable_valuetype_load (MonoCompile* cfg, guint8* ip, MonoType* ldtype) { /* Avoid loading a struct just to load one of its fields */ gboolean is_load_instruction = (*ip == CEE_LDFLD); gboolean is_in_previous_bb = ip_in_bb(cfg, cfg->cbb, ip); gboolean is_struct = MONO_TYPE_ISSTRUCT(ldtype); return is_load_instruction && is_in_previous_bb && is_struct; } /* * handle_ctor_call: * * Handle calls made to ctors from NEWOBJ opcodes. 
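* On failure, the error/exception state is recorded on CFG and the function simply returns (see the labels at the end).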
*/ static void handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp, guint8 *ip, int *inline_costs) { MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins; if (cmethod && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) { g_assert (MONO_TYPE_IS_VOID (fsig->ret)); CHECK_CFG_EXCEPTION; return; } if (mono_class_generic_sharing_enabled (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE)) { MonoRgctxAccess access = mini_get_rgctx_access_for_method (cmethod); if (access == MONO_RGCTX_ACCESS_MRGCTX) { mono_class_vtable_checked (cmethod->klass, cfg->error); CHECK_CFG_ERROR; CHECK_TYPELOAD (cmethod->klass); vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX); } else if (access == MONO_RGCTX_ACCESS_VTABLE) { vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE); CHECK_CFG_ERROR; CHECK_TYPELOAD (cmethod->klass); } else { g_assert (access == MONO_RGCTX_ACCESS_THIS); } } /* Avoid virtual calls to ctors if possible */ if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg && mono_method_check_inlining (cfg, cmethod) && !mono_class_is_subclass_of_internal (cmethod->klass, mono_defaults.exception_class, FALSE)) { int costs; if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, NULL))) { cfg->real_offset += 5; *inline_costs += costs - 5; } else { INLINE_FAILURE ("inline failure"); // FIXME-VT: Clean this up if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) GSHAREDVT_FAILURE(*ip); mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL); } } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) { MonoInst *addr; addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE); if (cfg->llvm_only) { // FIXME: Avoid initializing vtable_arg mini_emit_llvmonly_calli (cfg, fsig, sp, addr); } else { mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg); } } else if (context_used && ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) || !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) { MonoInst *cmethod_addr; /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */ if (cfg->llvm_only) { MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_FTNDESC); mini_emit_llvmonly_calli (cfg, fsig, sp, addr); } else { cmethod_addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE); mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg); } } else { INLINE_FAILURE ("ctor call"); ins = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, vtable_arg); } exception_exit: mono_error_exit: return; } typedef struct { MonoMethod *method; gboolean inst_tailcall; } HandleCallData; /* * handle_constrained_call: * * Handle constrained calls. Return a MonoInst* representing the call or NULL. * May overwrite sp [0] and modify the ref_... parameters. 
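* Returns NULL when the caller should emit the call itself, using the possibly
* updated *ref_cmethod and *ref_virtual values.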
*/ static MonoInst* handle_constrained_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoClass *constrained_class, MonoInst **sp, HandleCallData *cdata, MonoMethod **ref_cmethod, gboolean *ref_virtual, gboolean *ref_emit_widen) { MonoInst *ins, *addr; MonoMethod *method = cdata->method; gboolean constrained_partial_call = FALSE; gboolean constrained_is_generic_param = m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR || m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR; MonoType *gshared_constraint = NULL; if (constrained_is_generic_param && cfg->gshared) { if (!mini_is_gsharedvt_klass (constrained_class)) { g_assert (!m_class_is_valuetype (cmethod->klass)); if (!mini_type_is_reference (m_class_get_byval_arg (constrained_class))) constrained_partial_call = TRUE; MonoType *t = m_class_get_byval_arg (constrained_class); MonoGenericParam *gparam = t->data.generic_param; gshared_constraint = gparam->gshared_constraint; } } if (mini_is_gsharedvt_klass (constrained_class)) { if ((cmethod->klass != mono_defaults.object_class) && m_class_is_valuetype (constrained_class) && m_class_is_valuetype (cmethod->klass)) { /* The 'Own method' case below */ } else if (m_class_get_image (cmethod->klass) != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !m_class_is_valuetype (cmethod->klass)) { /* 'The type parameter is instantiated as a reference type' case below. */ } else { ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, ref_emit_widen); CHECK_CFG_EXCEPTION; g_assert (ins); if (cdata->inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall constrained_class %s -> %s\n", method->name, cmethod->name); return ins; } } if (m_method_is_static (cmethod)) { /* Call to an abstract static method, handled normally */ return NULL; } else if (constrained_partial_call) { gboolean need_box = TRUE; /* * The receiver is a valuetype, but the exact type is not known at compile time. This means the * called method is not known at compile time either. The called method could end up being * one of the methods on the parent classes (object/valuetype/enum), in which case we need * to box the receiver. * A simple solution would be to box always and make a normal virtual call, but that would * be bad performance wise. */ if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass) && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT)) { /* * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary. */ /* If the method is not abstract, it's a default interface method, and we need to box */ need_box = FALSE; } if (gshared_constraint && MONO_TYPE_IS_PRIMITIVE (gshared_constraint) && cmethod->klass == mono_defaults.object_class && !strcmp (cmethod->name, "GetHashCode")) { /* * The receiver is constrained to a primitive type or an enum with the same basetype. * Enum.GetHashCode () returns the hash code of the underlying type (see comments in Enum.cs), * so the constrained call can be replaced with a normal call to the basetype GetHashCode () * method. 
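* (Illustrative example: for a T constrained to an int-based enum, the
* 'constrained. T callvirt GetHashCode' sequence becomes a direct call to
* Int32:GetHashCode ().)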
*/ MonoClass *gshared_constraint_class = mono_class_from_mono_type_internal (gshared_constraint); cmethod = get_method_nofail (gshared_constraint_class, cmethod->name, 0, 0); g_assert (cmethod); *ref_cmethod = cmethod; *ref_virtual = FALSE; if (cfg->verbose_level) printf (" -> %s\n", mono_method_get_full_name (cmethod)); return NULL; } if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class)) { /* The called method is not virtual, i.e. Object:GetType (); the receiver is a vtype, so we have to box */ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0); ins->klass = constrained_class; sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class)); CHECK_CFG_EXCEPTION; } else if (need_box) { MonoInst *box_type; MonoBasicBlock *is_ref_bb, *end_bb; MonoInst *nonbox_call, *addr; /* * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call * if needed. * FIXME: It is possible to inline the called method in a lot of cases, e.g. for T_INT, * the no-box case goes to a method in Int32, while the box case goes to a method in Enum. */ addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE); NEW_BBLOCK (cfg, is_ref_bb); NEW_BBLOCK (cfg, end_bb); box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb); /* Non-ref case */ if (cfg->llvm_only) /* addr is an ftndesc in this case */ nonbox_call = mini_emit_llvmonly_calli (cfg, fsig, sp, addr); else nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); /* Ref case */ MONO_START_BB (cfg, is_ref_bb); EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0); ins->klass = constrained_class; sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class)); CHECK_CFG_EXCEPTION; if (cfg->llvm_only) ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr); else ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); MONO_START_BB (cfg, end_bb); cfg->cbb = end_bb; nonbox_call->dreg = ins->dreg; if (cdata->inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall constrained_partial_need_box %s -> %s\n", method->name, cmethod->name); return ins; } else { g_assert (mono_class_is_interface (cmethod->klass)); addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE); if (cfg->llvm_only) ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr); else ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL); if (cdata->inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall constrained_partial %s -> %s\n", method->name, cmethod->name); return ins; } } else if (!m_class_is_valuetype (constrained_class)) { int dreg = alloc_ireg_ref (cfg); /* * The type parameter is instantiated as a reference * type.
We have a managed pointer on the stack, so * we need to dereference it here. */ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0); ins->type = STACK_OBJ; sp [0] = ins; } else if (cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class) { /* * The type parameter is instantiated as a valuetype, * but that type doesn't override the method we're * calling, so we need to box `this'. */ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0); ins->klass = constrained_class; sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class)); CHECK_CFG_EXCEPTION; } else { if (cmethod->klass != constrained_class) { /* Enums/default interface methods */ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0); ins->klass = constrained_class; sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class)); CHECK_CFG_EXCEPTION; } *ref_virtual = FALSE; } exception_exit: return NULL; } static void emit_setret (MonoCompile *cfg, MonoInst *val) { MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret); MonoInst *ins; if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) { MonoInst *ret_addr; if (!cfg->vret_addr) { EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val); } else { EMIT_NEW_RETLOADA (cfg, ret_addr); MonoClass *ret_class = mono_class_from_mono_type_internal (ret_type); if (MONO_CLASS_IS_SIMD (cfg, ret_class)) EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREX_MEMBASE, ret_addr->dreg, 0, val->dreg); else EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg); ins->klass = ret_class; } } else { #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK if (COMPILE_SOFT_FLOAT (cfg) && !m_type_is_byref (ret_type) && ret_type->type == MONO_TYPE_R4) { MonoInst *conv; MonoInst *iargs [ ] = { val }; conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs); mono_arch_emit_setret (cfg, cfg->method, conv); } else { mono_arch_emit_setret (cfg, cfg->method, val); } #else mono_arch_emit_setret (cfg, cfg->method, val); #endif } } /* * Emit a call to enter the interpreter for methods with filter clauses. */ static void emit_llvmonly_interp_entry (MonoCompile *cfg, MonoMethodHeader *header) { MonoInst *ins; MonoInst **iargs; MonoMethodSignature *sig = mono_method_signature_internal (cfg->method); MonoInst *ftndesc; cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, sig); /* * Emit a call to the interp entry function. We emit it here instead of the llvm backend since * calling conventions etc. are easier to handle here. The LLVM backend will only emit the * entry/exit bblocks. */ g_assert (cfg->cbb == cfg->bb_init); if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (sig)) { /* * Would have to generate a gsharedvt out wrapper which calls the interp entry wrapper, but * the gsharedvt out wrapper might not exist if the caller is also a gsharedvt method since * the concrete signature of the call might not exist in the program. * So transition directly to the interpreter without the wrappers. 
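* The gsharedvt interp entry receives the interp method, the address of a slot for the return value, and an array holding the addresses of the incoming arguments.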
*/ MonoInst *args_ins; MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM); ins->dreg = alloc_preg (cfg); ins->inst_imm = sig->param_count * sizeof (target_mgreg_t); MONO_ADD_INS (cfg->cbb, ins); args_ins = ins; for (int i = 0; i < sig->hasthis + sig->param_count; ++i) { MonoInst *arg_addr_ins; EMIT_NEW_VARLOADA ((cfg), arg_addr_ins, cfg->args [i], cfg->arg_types [i]); EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args_ins->dreg, i * sizeof (target_mgreg_t), arg_addr_ins->dreg); } MonoInst *ret_var = NULL; MonoInst *ret_arg_ins; if (!MONO_TYPE_IS_VOID (sig->ret)) { ret_var = mono_compile_create_var (cfg, sig->ret, OP_LOCAL); EMIT_NEW_VARLOADA (cfg, ret_arg_ins, ret_var, sig->ret); } else { EMIT_NEW_PCONST (cfg, ret_arg_ins, NULL); } iargs = g_newa (MonoInst*, 3); iargs [0] = emit_get_rgctx_method (cfg, -1, cfg->method, MONO_RGCTX_INFO_INTERP_METHOD); iargs [1] = ret_arg_ins; iargs [2] = args_ins; mono_emit_jit_icall_id (cfg, MONO_JIT_ICALL_mini_llvmonly_interp_entry_gsharedvt, iargs); if (!MONO_TYPE_IS_VOID (sig->ret)) EMIT_NEW_VARLOAD (cfg, ins, ret_var, sig->ret); else ins = NULL; } else { /* Obtain the interp entry function */ ftndesc = emit_get_rgctx_method (cfg, -1, cfg->method, MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY); /* Call it */ iargs = g_newa (MonoInst*, sig->param_count + 1); for (int i = 0; i < sig->param_count + sig->hasthis; ++i) EMIT_NEW_ARGLOAD (cfg, iargs [i], i); ins = mini_emit_llvmonly_calli (cfg, sig, iargs, ftndesc); } /* Do a normal return */ if (cfg->ret) { emit_setret (cfg, ins); /* * Since only bb_entry/bb_exit is emitted if interp_entry_only is set, * it's possible that the return value becomes an OP_PHI node whose inputs * are not emitted. Make it volatile to prevent that. */ cfg->ret->flags |= MONO_INST_VOLATILE; } MONO_INST_NEW (cfg, ins, OP_BR); ins->inst_target_bb = cfg->bb_exit; MONO_ADD_INS (cfg->cbb, ins); link_bblock (cfg, cfg->cbb, cfg->bb_exit); } typedef union _MonoOpcodeParameter { gint32 i32; gint64 i64; float f; double d; guchar *branch_target; } MonoOpcodeParameter; typedef struct _MonoOpcodeInfo { guint constant : 4; // private gint pops : 3; // public -1 means variable gint pushes : 3; // public -1 means variable } MonoOpcodeInfo; static const MonoOpcodeInfo* mono_opcode_decode (guchar *ip, guint op_size, MonoOpcodeEnum il_op, MonoOpcodeParameter *parameter) { #define Push0 (0) #define Pop0 (0) #define Push1 (1) #define Pop1 (1) #define PushI (1) #define PopI (1) #define PushI8 (1) #define PopI8 (1) #define PushRef (1) #define PopRef (1) #define PushR4 (1) #define PopR4 (1) #define PushR8 (1) #define PopR8 (1) #define VarPush (-1) #define VarPop (-1) static const MonoOpcodeInfo mono_opcode_info [ ] = { #define OPDEF(name, str, pops, pushes, param, param_constant, a, b, c, flow) {param_constant + 1, pops, pushes }, #include "mono/cil/opcode.def" #undef OPDEF }; #undef Push0 #undef Pop0 #undef Push1 #undef Pop1 #undef PushI #undef PopI #undef PushI8 #undef PopI8 #undef PushRef #undef PopRef #undef PushR4 #undef PopR4 #undef PushR8 #undef PopR8 #undef VarPush #undef VarPop gint32 delta; guchar *next_ip = ip + op_size; const MonoOpcodeInfo *info = &mono_opcode_info [il_op]; switch (mono_opcodes [il_op].argument) { case MonoInlineNone: parameter->i32 = (int)info->constant - 1; break; case MonoInlineString: case MonoInlineType: case MonoInlineField: case MonoInlineMethod: case MonoInlineTok: case MonoInlineSig: case MonoShortInlineR: case MonoInlineI: parameter->i32 = read32 (next_ip - 4); // FIXME check token type?
break; case MonoShortInlineI: parameter->i32 = (signed char)next_ip [-1]; break; case MonoInlineVar: parameter->i32 = read16 (next_ip - 2); break; case MonoShortInlineVar: parameter->i32 = next_ip [-1]; break; case MonoInlineR: case MonoInlineI8: parameter->i64 = read64 (next_ip - 8); break; case MonoShortInlineBrTarget: delta = (signed char)next_ip [-1]; goto branch_target; case MonoInlineBrTarget: delta = (gint32)read32 (next_ip - 4); branch_target: parameter->branch_target = delta + next_ip; break; case MonoInlineSwitch: // complicated break; default: g_error ("%s %d %d\n", __func__, il_op, mono_opcodes [il_op].argument); } return info; } /* * mono_method_to_ir: * * Translate the .NET IL into linear IR. * * @start_bblock: if not NULL, the starting basic block, used during inlining. * @end_bblock: if not NULL, the ending basic block, used during inlining. * @return_var: if not NULL, the place where the return value is stored, used during inlining. * @inline_args: if not NULL, contains the arguments to the inline call * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise. * @is_virtual_call: whether this method is being called as a result of a call to callvirt * * This method is used to turn ECMA IL into Mono's internal Linear IR * representation. It is used both for entire methods and for * inlining existing methods. In the former case, the @start_bblock, * @end_bblock, @return_var, @inline_args are all set to NULL, and the * inline_offset is set to zero. * * Returns: the inline cost, or -1 if there was an error processing this method. */ int mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock, MonoInst *return_var, MonoInst **inline_args, guint inline_offset, gboolean is_virtual_call) { ERROR_DECL (error); // Buffer to hold parameters to mono_new_array, instead of varargs.
MonoInst *array_new_localalloc_ins = NULL; MonoInst *ins, **sp, **stack_start; MonoBasicBlock *tblock = NULL; MonoBasicBlock *init_localsbb = NULL, *init_localsbb2 = NULL; MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL; MonoMethod *method_definition; MonoInst **arg_array; MonoMethodHeader *header; MonoImage *image; guint32 token, ins_flag; MonoClass *klass; MonoClass *constrained_class = NULL; gboolean save_last_error = FALSE; guchar *ip, *end, *target, *err_pos; MonoMethodSignature *sig; MonoGenericContext *generic_context = NULL; MonoGenericContainer *generic_container = NULL; MonoType **param_types; int i, n, start_new_bblock, dreg; int num_calls = 0, inline_costs = 0; guint num_args; GSList *class_inits = NULL; gboolean dont_verify, dont_verify_stloc, readonly = FALSE; int context_used; gboolean init_locals, seq_points, skip_dead_blocks; gboolean sym_seq_points = FALSE; MonoDebugMethodInfo *minfo; MonoBitSet *seq_point_locs = NULL; MonoBitSet *seq_point_set_locs = NULL; const char *ovf_exc = NULL; gboolean emitted_funccall_seq_point = FALSE; gboolean detached_before_ret = FALSE; gboolean ins_has_side_effect; if (!cfg->disable_inline) cfg->disable_inline = (method->iflags & METHOD_IMPL_ATTRIBUTE_NOOPTIMIZATION) || is_jit_optimizer_disabled (method); cfg->current_method = method; image = m_class_get_image (method->klass); /* serialization and xdomain stuff may need access to private fields and methods */ dont_verify = FALSE; dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */ dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP; dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE; /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */ dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_OTHER; dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED; dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF; header = mono_method_get_header_checked (method, cfg->error); if (!header) { mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); goto exception_exit; } else { cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header); } generic_container = mono_method_get_generic_container (method); sig = mono_method_signature_internal (method); num_args = sig->hasthis + sig->param_count; ip = (guchar*)header->code; cfg->cil_start = ip; end = ip + header->code_size; cfg->stat_cil_code_size += header->code_size; seq_points = cfg->gen_seq_points && cfg->method == method; if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) { /* We could hit a seq point before attaching to the JIT (#8338) */ seq_points = FALSE; } if (method->wrapper_type == MONO_WRAPPER_OTHER) { WrapperInfo *info = mono_marshal_get_wrapper_info (method); if (info->subtype == WRAPPER_SUBTYPE_INTERP_IN) { /* We could hit a seq point before attaching to the JIT (#8338) */ seq_points = FALSE; } } if (cfg->prof_coverage) { if (cfg->compile_aot) g_error ("Coverage profiling is not supported with AOT."); INLINE_FAILURE ("coverage profiling"); cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size); } if ((cfg->gen_sdb_seq_points && cfg->method == method) || cfg->prof_coverage) { minfo = mono_debug_lookup_method (method); if (minfo) { MonoSymSeqPoint *sps; int i, n_il_offsets; mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets); 
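/* Record which IL offsets have sequence points in the symbol file; seq points are then only generated at those offsets. */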
seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0); seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0); sym_seq_points = TRUE; for (i = 0; i < n_il_offsets; ++i) { if (sps [i].il_offset < header->code_size) mono_bitset_set_fast (seq_point_locs, sps [i].il_offset); } g_free (sps); MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method); if (asyncMethod) { for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++) { mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]); mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]); } mono_debug_free_method_async_debug_info (asyncMethod); } } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (m_class_get_image (method->klass))) { /* Methods without line number info like auto-generated property accessors */ seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0); seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0); sym_seq_points = TRUE; } } /* * Methods without init_locals set could cause asserts in various passes * (#497220). To work around this, we emit dummy initialization opcodes * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported * on some platforms. */ if (cfg->opt & MONO_OPT_UNSAFE) init_locals = header->init_locals; else init_locals = TRUE; method_definition = method; while (method_definition->is_inflated) { MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition; method_definition = imethod->declaring; } if (sig->is_inflated) generic_context = mono_method_get_context (method); else if (generic_container) generic_context = &generic_container->context; cfg->generic_context = generic_context; if (!cfg->gshared) g_assert (!sig->has_type_parameters); if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) { g_assert (method->is_inflated); g_assert (mono_method_get_context (method)->method_inst); } if (method->is_inflated && mono_method_get_context (method)->method_inst) g_assert (sig->generic_param_count); if (cfg->method == method) { cfg->real_offset = 0; } else { cfg->real_offset = inline_offset; } cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size); cfg->cil_offset_to_bb_len = header->code_size; if (cfg->verbose_level > 2) printf ("method to IR %s\n", mono_method_full_name (method, TRUE)); param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args); if (sig->hasthis) param_types [0] = m_class_is_valuetype (method->klass) ? 
m_class_get_this_arg (method->klass) : m_class_get_byval_arg (method->klass); for (n = 0; n < sig->param_count; ++n) param_types [n + sig->hasthis] = sig->params [n]; cfg->arg_types = param_types; cfg->dont_inline = g_list_prepend (cfg->dont_inline, method); if (cfg->method == method) { /* ENTRY BLOCK */ NEW_BBLOCK (cfg, start_bblock); cfg->bb_entry = start_bblock; start_bblock->cil_code = NULL; start_bblock->cil_length = 0; /* EXIT BLOCK */ NEW_BBLOCK (cfg, end_bblock); cfg->bb_exit = end_bblock; end_bblock->cil_code = NULL; end_bblock->cil_length = 0; end_bblock->flags |= BB_INDIRECT_JUMP_TARGET; g_assert (cfg->num_bblocks == 2); arg_array = cfg->args; if (header->num_clauses) { cfg->spvars = g_hash_table_new (NULL, NULL); cfg->exvars = g_hash_table_new (NULL, NULL); } cfg->clause_is_dead = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * header->num_clauses); /* handle exception clauses */ for (i = 0; i < header->num_clauses; ++i) { MonoBasicBlock *try_bb; MonoExceptionClause *clause = &header->clauses [i]; GET_BBLOCK (cfg, try_bb, ip + clause->try_offset); try_bb->real_offset = clause->try_offset; try_bb->try_start = TRUE; GET_BBLOCK (cfg, tblock, ip + clause->handler_offset); tblock->real_offset = clause->handler_offset; tblock->flags |= BB_EXCEPTION_HANDLER; if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY) mono_create_exvar_for_offset (cfg, clause->handler_offset); /* * Linking the try block with the EH block hinders inlining as we won't be able to * merge the bblocks from inlining and produce an artificial hole for no good reason. */ if (COMPILE_LLVM (cfg)) link_bblock (cfg, try_bb, tblock); if (*(ip + clause->handler_offset) == CEE_POP) tblock->flags |= BB_EXCEPTION_DEAD_OBJ; if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY || clause->flags == MONO_EXCEPTION_CLAUSE_FILTER || clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) { MONO_INST_NEW (cfg, ins, OP_START_HANDLER); MONO_ADD_INS (tblock, ins); if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) { /* finally clauses already have a seq point */ /* seq points for filter clauses are emitted below */ NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE); MONO_ADD_INS (tblock, ins); } /* todo: is a fault block unsafe to optimize? 
*/ if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) tblock->flags |= BB_EXCEPTION_UNSAFE; } /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len); while (p < end) { printf ("%s", mono_disasm_code_one (NULL, method, p, &p)); }*/ /* catch and filter blocks get the exception object on the stack */ if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE || clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) { /* mostly like handle_stack_args (), but just sets the input args */ /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */ tblock->in_scount = 1; tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*)); tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset); cfg->cbb = tblock; #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */ if (!cfg->compile_llvm) { MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ); ins->dreg = tblock->in_stack [0]->dreg; MONO_ADD_INS (tblock, ins); } #else MonoInst *dummy_use; /* * Add a dummy use for the exvar so its liveness info will be * correct. */ EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]); #endif if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) { NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE); MONO_ADD_INS (tblock, ins); } if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) { GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset); tblock->flags |= BB_EXCEPTION_HANDLER; tblock->real_offset = clause->data.filter_offset; tblock->in_scount = 1; tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*)); /* The filter block shares the exvar with the handler block */ tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset); MONO_INST_NEW (cfg, ins, OP_START_HANDLER); MONO_ADD_INS (tblock, ins); } } if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER && clause->data.catch_class && cfg->gshared && mono_class_check_context_used (clause->data.catch_class)) { /* * In shared generic code with catch * clauses containing type variables * the exception handling code has to * be able to get to the rgctx. * Therefore we have to make sure that * the vtable/mrgctx argument (for * static or generic methods) or the * "this" argument (for non-static * methods) is live. */ if ((method->flags & METHOD_ATTRIBUTE_STATIC) || mini_method_get_context (method)->method_inst || m_class_is_valuetype (method->klass)) { mono_get_vtable_var (cfg); } else { MonoInst *dummy_use; EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]); } } } } else { arg_array = g_newa (MonoInst*, num_args); cfg->cbb = start_bblock; cfg->args = arg_array; mono_save_args (cfg, sig, inline_args); } if (cfg->method == method && cfg->self_init && cfg->compile_aot && !COMPILE_LLVM (cfg)) { MonoMethod *wrapper; MonoInst *args [2]; int idx; /* * Emit code to initialize this method by calling the init wrapper emitted by LLVM. * This is not efficient right now, but it's only used for the methods which fail * LLVM compilation.
* FIXME: Optimize this */ g_assert (!cfg->gshared); wrapper = mono_marshal_get_aot_init_wrapper (AOT_INIT_METHOD); /* Emit this into the entry bb so it comes before the GC safe point which depends on an inited GOT */ cfg->cbb = cfg->bb_entry; idx = mono_aot_get_method_index (cfg->method); EMIT_NEW_ICONST (cfg, args [0], idx); /* Dummy */ EMIT_NEW_ICONST (cfg, args [1], 0); mono_emit_method_call (cfg, wrapper, args, NULL); } if (cfg->llvm_only && cfg->interp && cfg->method == method && !cfg->deopt) { if (header->num_clauses) { for (int i = 0; i < header->num_clauses; ++i) { MonoExceptionClause *clause = &header->clauses [i]; /* Finally clauses are checked after the remove_finally pass */ if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) cfg->interp_entry_only = TRUE; } } } /* we use a separate basic block for the initialization code */ NEW_BBLOCK (cfg, init_localsbb); if (cfg->method == method) cfg->bb_init = init_localsbb; init_localsbb->real_offset = cfg->real_offset; start_bblock->next_bb = init_localsbb; link_bblock (cfg, start_bblock, init_localsbb); init_localsbb2 = init_localsbb; cfg->cbb = init_localsbb; if (cfg->gsharedvt && cfg->method == method) { MonoGSharedVtMethodInfo *info; MonoInst *var, *locals_var; int dreg; info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo)); info->method = cfg->method; info->count_entries = 16; info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries); cfg->gsharedvt_info = info; var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); /* prevent it from being register allocated */ //var->flags |= MONO_INST_VOLATILE; cfg->gsharedvt_info_var = var; ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info); MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg); /* Allocate locals */ locals_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); /* prevent it from being register allocated */ //locals_var->flags |= MONO_INST_VOLATILE; cfg->gsharedvt_locals_var = locals_var; dreg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size)); MONO_INST_NEW (cfg, ins, OP_LOCALLOC); ins->dreg = locals_var->dreg; ins->sreg1 = dreg; MONO_ADD_INS (cfg->cbb, ins); cfg->gsharedvt_locals_var_ins = ins; cfg->flags |= MONO_CFG_HAS_ALLOCA; /* if (init_locals) ins->flags |= MONO_INST_INIT; */ if (cfg->llvm_only) { init_localsbb = cfg->cbb; init_localsbb2 = cfg->cbb; } } if (cfg->deopt) { /* * Push an LMFExt frame which points to a MonoMethodILState structure. 
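* The saved IL state allows the runtime to resume this frame in the interpreter if the method is deoptimized.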
*/ emit_push_lmf (cfg); /* The type doesn't matter, the llvm backend will use the correct type */ MonoInst *il_state_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); il_state_var->flags |= MONO_INST_VOLATILE; cfg->il_state_var = il_state_var; EMIT_NEW_VARLOADA (cfg, ins, cfg->il_state_var, NULL); int il_state_addr_reg = ins->dreg; /* il_state->method = method */ MonoInst *method_ins = emit_get_rgctx_method (cfg, -1, cfg->method, MONO_RGCTX_INFO_METHOD); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, il_state_addr_reg, MONO_STRUCT_OFFSET (MonoMethodILState, method), method_ins->dreg); EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL); int lmf_reg = ins->dreg; /* lmf->kind = MONO_LMFEXT_IL_STATE */ MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, lmf_reg, MONO_STRUCT_OFFSET (MonoLMFExt, kind), MONO_LMFEXT_IL_STATE); /* lmf->il_state = il_state */ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMFExt, il_state), il_state_addr_reg); /* emit_get_rgctx_method () might create new bblocks */ if (cfg->llvm_only) { init_localsbb = cfg->cbb; init_localsbb2 = cfg->cbb; } } if (cfg->llvm_only && cfg->interp && cfg->method == method) { if (cfg->interp_entry_only) emit_llvmonly_interp_entry (cfg, header); } /* FIRST CODE BLOCK */ NEW_BBLOCK (cfg, tblock); tblock->cil_code = ip; cfg->cbb = tblock; cfg->ip = ip; init_localsbb->next_bb = cfg->cbb; link_bblock (cfg, init_localsbb, cfg->cbb); ADD_BBLOCK (cfg, tblock); CHECK_CFG_EXCEPTION; if (header->code_size == 0) UNVERIFIED; if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) { ip = err_pos; UNVERIFIED; } if (cfg->method == method) { int breakpoint_id = mono_debugger_method_has_breakpoint (method); if (breakpoint_id) { MONO_INST_NEW (cfg, ins, OP_BREAK); MONO_ADD_INS (cfg->cbb, ins); } mono_debug_init_method (cfg, cfg->cbb, breakpoint_id); } for (n = 0; n < header->num_locals; ++n) { if (header->locals [n]->type == MONO_TYPE_VOID && !m_type_is_byref (header->locals [n])) UNVERIFIED; } class_inits = NULL; /* We force the vtable variable here for all shared methods for the possibility that they might show up in a stack trace where their exact instantiation is needed. */ if (cfg->gshared && method == cfg->method) { if ((method->flags & METHOD_ATTRIBUTE_STATIC) || mini_method_get_context (method)->method_inst || m_class_is_valuetype (method->klass)) { mono_get_vtable_var (cfg); } else { /* FIXME: Is there a better way to do this? We need the variable live for the duration of the whole method. 
*/ cfg->args [0]->flags |= MONO_INST_VOLATILE; } } /* add a check for this != NULL to inlined methods */ if (is_virtual_call) { MonoInst *arg_ins; // // This is just a hack to avoid checks in empty methods which could get inlined // into finally clauses preventing the removal of empty finally clauses, since all // variables in finally clauses are marked volatile so the check can't be removed // if (!(cfg->llvm_only && m_class_is_valuetype (method->klass) && header->code_size == 1 && header->code [0] == CEE_RET)) { NEW_ARGLOAD (cfg, arg_ins, 0); MONO_ADD_INS (cfg->cbb, arg_ins); MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg); } } skip_dead_blocks = !dont_verify; if (skip_dead_blocks) { original_bb = bb = mono_basic_block_split (method, cfg->error, header); CHECK_CFG_ERROR; g_assert (bb); } /* we use a spare stack slot in SWITCH and NEWOBJ and others */ stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1)); ins_flag = 0; start_new_bblock = 0; MonoOpcodeEnum il_op; il_op = MonoOpcodeEnum_Invalid; emit_set_deopt_il_offset (cfg, ip - cfg->cil_start); for (guchar *next_ip = ip; ip < end; ip = next_ip) { MonoOpcodeEnum previous_il_op = il_op; const guchar *tmp_ip = ip; const int op_size = mono_opcode_value_and_size (&tmp_ip, end, &il_op); CHECK_OPSIZE (op_size); next_ip += op_size; if (cfg->method == method) cfg->real_offset = ip - header->code; else cfg->real_offset = inline_offset; cfg->ip = ip; context_used = 0; if (start_new_bblock) { cfg->cbb->cil_length = ip - cfg->cbb->cil_code; if (start_new_bblock == 2) { g_assert (ip == tblock->cil_code); } else { GET_BBLOCK (cfg, tblock, ip); } cfg->cbb->next_bb = tblock; cfg->cbb = tblock; start_new_bblock = 0; for (i = 0; i < cfg->cbb->in_scount; ++i) { if (cfg->verbose_level > 3) printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0); EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0); *sp++ = ins; } if (class_inits) g_slist_free (class_inits); class_inits = NULL; emit_set_deopt_il_offset (cfg, ip - cfg->cil_start); } else { if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) { link_bblock (cfg, cfg->cbb, tblock); if (sp != stack_start) { handle_stack_args (cfg, stack_start, sp - stack_start); sp = stack_start; CHECK_UNVERIFIABLE (cfg); } cfg->cbb->next_bb = tblock; cfg->cbb = tblock; for (i = 0; i < cfg->cbb->in_scount; ++i) { if (cfg->verbose_level > 3) printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0); EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0); *sp++ = ins; } g_slist_free (class_inits); class_inits = NULL; emit_set_deopt_il_offset (cfg, ip - cfg->cil_start); } } /* * Methods with AggressiveInline flag could be inlined even if the class has a cctor. * This might create a branch so emit it in the first code bblock instead of into initlocals_bb. 
*/ if (ip - header->code == 0 && cfg->method != method && cfg->compile_aot && (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && mono_class_needs_cctor_run (method->klass, method)) { emit_class_init (cfg, method->klass); } if (skip_dead_blocks) { int ip_offset = ip - header->code; if (ip_offset == bb->end) bb = bb->next; if (bb->dead) { g_assert (op_size > 0); /* The BB formation pass must catch all bad ops */ if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset); if (ip_offset + op_size == bb->end) { MONO_INST_NEW (cfg, ins, OP_NOP); MONO_ADD_INS (cfg->cbb, ins); start_new_bblock = 1; } continue; } } /* * Sequence points are points where the debugger can place a breakpoint. * Currently, we generate these automatically at points where the IL * stack is empty. */ if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) { /* * Make methods interruptible at the beginning, and at the targets of * backward branches. * Also do this at the start of every bblock in methods with clauses, * to be able to handle instructions with imprecise control flow like * throw/endfinally. * Backward branches are handled at the end of method-to-ir (). */ gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses); gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code); /* Avoid sequence points on empty IL like .volatile */ // FIXME: Enable this //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) { NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc); if ((sp != stack_start) && !sym_seq_point) ins->flags |= MONO_INST_NONEMPTY_STACK; MONO_ADD_INS (cfg->cbb, ins); if (sym_seq_points) mono_bitset_set_fast (seq_point_set_locs, ip - header->code); if (cfg->prof_coverage) { guint32 cil_offset = ip - header->code; gpointer counter = &cfg->coverage_info->data [cil_offset].count; cfg->coverage_info->data [cil_offset].cil_code = ip; if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) { MonoInst *one_ins, *load_ins; EMIT_NEW_PCONST (cfg, load_ins, counter); EMIT_NEW_ICONST (cfg, one_ins, 1); MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4); ins->dreg = mono_alloc_ireg (cfg); ins->inst_basereg = load_ins->dreg; ins->inst_offset = 0; ins->sreg2 = one_ins->dreg; ins->type = STACK_I4; MONO_ADD_INS (cfg->cbb, ins); } else { EMIT_NEW_PCONST (cfg, ins, counter); MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1); } } } cfg->cbb->real_offset = cfg->real_offset; if (cfg->verbose_level > 3) printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL)); /* * This is used to compute BB_HAS_SIDE_EFFECTS, which is used for the elimination of * the finally clauses generated by foreach statements, so only IL opcodes which occur * in such clauses need to set this. */ ins_has_side_effect = TRUE; // Variables shared by CEE_CALLI, CEE_CALL, CEE_CALLVIRT and CEE_JMP. // Initialize to either what they all need or zero. gboolean emit_widen = TRUE; gboolean tailcall = FALSE; gboolean common_call = FALSE; MonoInst *keep_this_alive = NULL; MonoMethod *cmethod = NULL; MonoMethodSignature *fsig = NULL; // These are used only in CALL/CALLVIRT but must be initialized also for CALLI, // since it jumps into CALL/CALLVIRT.
gboolean need_seq_point = FALSE; gboolean push_res = TRUE; gboolean skip_ret = FALSE; gboolean tailcall_remove_ret = FALSE; // FIXME split 500 lines load/store field into separate file/function. MonoOpcodeParameter parameter; const MonoOpcodeInfo* info = mono_opcode_decode (ip, op_size, il_op, &parameter); g_assert (info); n = parameter.i32; token = parameter.i32; target = parameter.branch_target; // Check stack size for push/pop except variable cases -- -1 like call/ret/newobj. const int pushes = info->pushes; const int pops = info->pops; if (pushes >= 0 && pops >= 0) { g_assert (pushes - pops <= 1); if (pushes - pops == 1) CHECK_STACK_OVF (); } if (pops >= 0) CHECK_STACK (pops); switch (il_op) { case MONO_CEE_NOP: if (seq_points && !sym_seq_points && sp != stack_start) { /* * The C# compiler uses these nops to notify the JIT that it should * insert seq points. */ NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE); MONO_ADD_INS (cfg->cbb, ins); } if (cfg->keep_cil_nops) MONO_INST_NEW (cfg, ins, OP_HARD_NOP); else MONO_INST_NEW (cfg, ins, OP_NOP); MONO_ADD_INS (cfg->cbb, ins); emitted_funccall_seq_point = FALSE; ins_has_side_effect = FALSE; break; case MONO_CEE_BREAK: if (mini_should_insert_breakpoint (cfg->method)) { ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL); } else { MONO_INST_NEW (cfg, ins, OP_NOP); MONO_ADD_INS (cfg->cbb, ins); } break; case MONO_CEE_LDARG_0: case MONO_CEE_LDARG_1: case MONO_CEE_LDARG_2: case MONO_CEE_LDARG_3: case MONO_CEE_LDARG_S: case MONO_CEE_LDARG: CHECK_ARG (n); if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, cfg->arg_types[n])) { EMIT_NEW_ARGLOADA (cfg, ins, n); } else { EMIT_NEW_ARGLOAD (cfg, ins, n); } *sp++ = ins; break; case MONO_CEE_LDLOC_0: case MONO_CEE_LDLOC_1: case MONO_CEE_LDLOC_2: case MONO_CEE_LDLOC_3: case MONO_CEE_LDLOC_S: case MONO_CEE_LDLOC: CHECK_LOCAL (n); if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, header->locals[n])) { EMIT_NEW_LOCLOADA (cfg, ins, n); } else { EMIT_NEW_LOCLOAD (cfg, ins, n); } *sp++ = ins; break; case MONO_CEE_STLOC_0: case MONO_CEE_STLOC_1: case MONO_CEE_STLOC_2: case MONO_CEE_STLOC_3: case MONO_CEE_STLOC_S: case MONO_CEE_STLOC: CHECK_LOCAL (n); --sp; *sp = convert_value (cfg, header->locals [n], *sp); if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp)) UNVERIFIED; emit_stloc_ir (cfg, sp, header, n); inline_costs += 1; break; case MONO_CEE_LDARGA_S: case MONO_CEE_LDARGA: CHECK_ARG (n); NEW_ARGLOADA (cfg, ins, n); MONO_ADD_INS (cfg->cbb, ins); *sp++ = ins; break; case MONO_CEE_STARG_S: case MONO_CEE_STARG: --sp; CHECK_ARG (n); *sp = convert_value (cfg, param_types [n], *sp); if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp)) UNVERIFIED; emit_starg_ir (cfg, sp, n); break; case MONO_CEE_LDLOCA: case MONO_CEE_LDLOCA_S: { guchar *tmp_ip; CHECK_LOCAL (n); if ((tmp_ip = emit_optimized_ldloca_ir (cfg, next_ip, end, n))) { next_ip = tmp_ip; il_op = MONO_CEE_INITOBJ; inline_costs += 1; break; } ins_has_side_effect = FALSE; EMIT_NEW_LOCLOADA (cfg, ins, n); *sp++ = ins; break; } case MONO_CEE_LDNULL: EMIT_NEW_PCONST (cfg, ins, NULL); ins->type = STACK_OBJ; *sp++ = ins; break; case MONO_CEE_LDC_I4_M1: case MONO_CEE_LDC_I4_0: case MONO_CEE_LDC_I4_1: case MONO_CEE_LDC_I4_2: case MONO_CEE_LDC_I4_3: case MONO_CEE_LDC_I4_4: case MONO_CEE_LDC_I4_5: case MONO_CEE_LDC_I4_6: case MONO_CEE_LDC_I4_7: case MONO_CEE_LDC_I4_8: case MONO_CEE_LDC_I4_S: case MONO_CEE_LDC_I4: EMIT_NEW_ICONST (cfg, ins, n); *sp++ = ins; 
break; case MONO_CEE_LDC_I8: MONO_INST_NEW (cfg, ins, OP_I8CONST); ins->type = STACK_I8; ins->dreg = alloc_dreg (cfg, STACK_I8); ins->inst_l = parameter.i64; MONO_ADD_INS (cfg->cbb, ins); *sp++ = ins; break; case MONO_CEE_LDC_R4: { float *f; gboolean use_aotconst = FALSE; #ifdef TARGET_POWERPC /* FIXME: Clean this up */ if (cfg->compile_aot) use_aotconst = TRUE; #endif /* FIXME: we should really allocate this only late in the compilation process */ f = (float *)mono_mem_manager_alloc (cfg->mem_manager, sizeof (float)); if (use_aotconst) { MonoInst *cons; int dreg; EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f); dreg = alloc_freg (cfg); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0); ins->type = cfg->r4_stack_type; } else { MONO_INST_NEW (cfg, ins, OP_R4CONST); ins->type = cfg->r4_stack_type; ins->dreg = alloc_dreg (cfg, STACK_R8); ins->inst_p0 = f; MONO_ADD_INS (cfg->cbb, ins); } *f = parameter.f; *sp++ = ins; break; } case MONO_CEE_LDC_R8: { double *d; gboolean use_aotconst = FALSE; #ifdef TARGET_POWERPC /* FIXME: Clean this up */ if (cfg->compile_aot) use_aotconst = TRUE; #endif /* FIXME: we should really allocate this only late in the compilation process */ d = (double *)mono_mem_manager_alloc (cfg->mem_manager, sizeof (double)); if (use_aotconst) { MonoInst *cons; int dreg; EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d); dreg = alloc_freg (cfg); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0); ins->type = STACK_R8; } else { MONO_INST_NEW (cfg, ins, OP_R8CONST); ins->type = STACK_R8; ins->dreg = alloc_dreg (cfg, STACK_R8); ins->inst_p0 = d; MONO_ADD_INS (cfg->cbb, ins); } *d = parameter.d; *sp++ = ins; break; } case MONO_CEE_DUP: { MonoInst *temp, *store; MonoClass *klass; sp--; ins = *sp; klass = ins->klass; temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL); EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins); EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0); ins->klass = klass; *sp++ = ins; EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0); ins->klass = klass; *sp++ = ins; inline_costs += 2; break; } case MONO_CEE_POP: --sp; #ifdef TARGET_X86 if (sp [0]->type == STACK_R8) /* we need to pop the value from the x86 FP stack */ MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg); #endif break; case MONO_CEE_JMP: { MonoCallInst *call; int i, n; INLINE_FAILURE ("jmp"); GSHAREDVT_FAILURE (il_op); if (stack_start != sp) UNVERIFIED; /* FIXME: check the signature matches */ cmethod = mini_get_method (cfg, method, token, NULL, generic_context); CHECK_CFG_ERROR; if (cfg->gshared && mono_method_check_context_used (cmethod)) GENERIC_SHARING_FAILURE (CEE_JMP); mini_profiler_emit_tail_call (cfg, cmethod); fsig = mono_method_signature_internal (cmethod); n = fsig->param_count + fsig->hasthis; if (cfg->llvm_only) { MonoInst **args; args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n); for (i = 0; i < n; ++i) EMIT_NEW_ARGLOAD (cfg, args [i], i); ins = mini_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL); /* * The code in mono-basic-block.c treats the rest of the code as dead, but we * have to emit a normal return since llvm expects it. 
*/ if (cfg->ret) emit_setret (cfg, ins); MONO_INST_NEW (cfg, ins, OP_BR); ins->inst_target_bb = end_bblock; MONO_ADD_INS (cfg->cbb, ins); link_bblock (cfg, cfg->cbb, end_bblock); break; } else { /* Handle tailcalls similarly to calls */ DISABLE_AOT (cfg); mini_emit_tailcall_parameters (cfg, fsig); MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL); call->method = cmethod; // FIXME Other initialization of the tailcall field occurs after // it is used. So this is the only "real" use and needs more attention. call->tailcall = TRUE; call->signature = fsig; call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n); call->inst.inst_p0 = cmethod; for (i = 0; i < n; ++i) EMIT_NEW_ARGLOAD (cfg, call->args [i], i); if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret))) call->vret_var = cfg->vret_addr; mono_arch_emit_call (cfg, call); cfg->param_area = MAX(cfg->param_area, call->stack_usage); MONO_ADD_INS (cfg->cbb, (MonoInst*)call); } start_new_bblock = 1; break; } case MONO_CEE_CALLI: { // FIXME tail.calli is problematic because the this pointer's type // is not in the signature, and we cannot check for a byref valuetype. MonoInst *addr; MonoInst *callee = NULL; // Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT. common_call = TRUE; // i.e. skip_ret/push_res/seq_point logic cmethod = NULL; gboolean const inst_tailcall = G_UNLIKELY (debug_tailcall_try_all ? (next_ip < end && next_ip [0] == CEE_RET) : ((ins_flag & MONO_INST_TAILCALL) != 0)); ins = NULL; //GSHAREDVT_FAILURE (il_op); CHECK_STACK (1); --sp; addr = *sp; g_assert (addr); fsig = mini_get_signature (method, token, generic_context, cfg->error); CHECK_CFG_ERROR; if (method->dynamic && fsig->pinvoke) { MonoInst *args [3]; /* * This is a call through a function pointer using a pinvoke * signature. Have to create a wrapper and call that instead. * FIXME: This is very slow; we need to create a wrapper at JIT time * instead, based on the signature. */ EMIT_NEW_IMAGECONST (cfg, args [0], ((MonoDynamicMethod*)method)->assembly->image); EMIT_NEW_PCONST (cfg, args [1], fsig); args [2] = addr; // FIXME tailcall?
addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args); } if (!method->dynamic && fsig->pinvoke && !method->wrapper_type) { /* MONO_WRAPPER_DYNAMIC_METHOD dynamic method handled above in the method->dynamic case; for other wrapper types assume the code knows what it's doing and added its own GC transitions */ gboolean skip_gc_trans = fsig->suppress_gc_transition; if (!skip_gc_trans) { #if 0 fprintf (stderr, "generating wrapper for calli in method %s with wrapper type %s\n", method->name, mono_wrapper_type_to_str (method->wrapper_type)); #endif /* Call the wrapper that will do the GC transition instead */ MonoMethod *wrapper = mono_marshal_get_native_func_wrapper_indirect (method->klass, fsig, cfg->compile_aot); fsig = mono_method_signature_internal (wrapper); n = fsig->param_count - 1; /* wrapper has extra fnptr param */ CHECK_STACK (n); /* move the args to allow room for 'this' in the first position */ while (n--) { --sp; sp [1] = sp [0]; } sp[0] = addr; /* n+1 args, first arg is the address of the indirect method to call */ g_assert (!fsig->hasthis && !fsig->pinvoke); ins = mono_emit_method_call (cfg, wrapper, /*args*/sp, NULL); goto calli_end; } } n = fsig->param_count + fsig->hasthis; CHECK_STACK (n); //g_assert (!virtual_ || fsig->hasthis); sp -= n; if (!(cfg->method->wrapper_type && cfg->method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD) && check_call_signature (cfg, fsig, sp)) { if (break_on_unverified ()) check_call_signature (cfg, fsig, sp); // Again, step through it. UNVERIFIED; } inline_costs += CALL_COST * MIN(10, num_calls++); /* * Making generic calls out of gsharedvt methods. * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid * patching gshared method addresses into a gsharedvt method. */ if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) { /* * We pass the address to the gsharedvt trampoline in the rgctx reg */ callee = addr; g_assert (addr); // Doubles as boolean after tailcall check. } inst_tailcall && is_supported_tailcall (cfg, ip, method, NULL, fsig, FALSE/*virtual irrelevant*/, addr != NULL, &tailcall); if (save_last_error) mono_emit_jit_icall (cfg, mono_marshal_clear_last_error, NULL); if (callee) { if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE) /* Not tested */ GSHAREDVT_FAILURE (il_op); if (cfg->llvm_only) // FIXME: GSHAREDVT_FAILURE (il_op); addr = emit_get_rgctx_sig (cfg, context_used, fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI); ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, callee, tailcall); goto calli_end; } /* Prevent inlining of methods with indirect calls */ INLINE_FAILURE ("indirect call"); if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) { MonoJumpInfoType info_type; gpointer info_data; /* * Instead of emitting an indirect call, emit a direct call * with the contents of the aotconst as the patch info.
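* This lets the backend emit a direct branch instead of a GOT load followed by an indirect call.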
*/ if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) { info_type = (MonoJumpInfoType)addr->inst_c1; info_data = addr->inst_p0; } else { info_type = (MonoJumpInfoType)addr->inst_right->inst_c1; info_data = addr->inst_right->inst_left; } if (info_type == MONO_PATCH_INFO_ICALL_ADDR) { // non-JIT icall, mostly builtin, but also user-extensible tailcall = FALSE; ins = (MonoInst*)mini_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp); NULLIFY_INS (addr); goto calli_end; } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR || info_type == MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR) { tailcall = FALSE; ins = (MonoInst*)mini_emit_abs_call (cfg, info_type, info_data, fsig, sp); NULLIFY_INS (addr); goto calli_end; } } if (cfg->llvm_only && !(cfg->method->wrapper_type && cfg->method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)) ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr); else ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, NULL, tailcall); goto calli_end; } case MONO_CEE_CALL: case MONO_CEE_CALLVIRT: { MonoInst *addr; addr = NULL; int array_rank; array_rank = 0; gboolean virtual_; virtual_ = il_op == MONO_CEE_CALLVIRT; gboolean pass_imt_from_rgctx; pass_imt_from_rgctx = FALSE; MonoInst *imt_arg; imt_arg = NULL; gboolean pass_vtable; pass_vtable = FALSE; gboolean pass_mrgctx; pass_mrgctx = FALSE; MonoInst *vtable_arg; vtable_arg = NULL; gboolean check_this; check_this = FALSE; gboolean delegate_invoke; delegate_invoke = FALSE; gboolean direct_icall; direct_icall = FALSE; gboolean tailcall_calli; tailcall_calli = FALSE; gboolean noreturn; noreturn = FALSE; gboolean gshared_static_virtual; gshared_static_virtual = FALSE; #ifdef TARGET_WASM gboolean needs_stack_walk; needs_stack_walk = FALSE; #endif // Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT. common_call = FALSE; // variables to help in assertions gboolean called_is_supported_tailcall; called_is_supported_tailcall = FALSE; MonoMethod *tailcall_method; tailcall_method = NULL; MonoMethod *tailcall_cmethod; tailcall_cmethod = NULL; MonoMethodSignature *tailcall_fsig; tailcall_fsig = NULL; gboolean tailcall_virtual; tailcall_virtual = FALSE; gboolean tailcall_extra_arg; tailcall_extra_arg = FALSE; gboolean inst_tailcall; inst_tailcall = G_UNLIKELY (debug_tailcall_try_all ? 
(next_ip < end && next_ip [0] == CEE_RET) : ((ins_flag & MONO_INST_TAILCALL) != 0)); ins = NULL; /* Used to pass arguments to called functions */ HandleCallData cdata; memset (&cdata, 0, sizeof (HandleCallData)); cmethod = mini_get_method (cfg, method, token, NULL, generic_context); CHECK_CFG_ERROR; if (cfg->verbose_level > 3) printf ("cmethod = %s\n", mono_method_get_full_name (cmethod)); MonoMethod *cil_method; cil_method = cmethod; if (constrained_class) { if (m_method_is_static (cil_method) && mini_class_check_context_used (cfg, constrained_class)) { /* get_constrained_method () doesn't work on the gparams used by generic sharing */ // FIXME: Other configurations //if (!cfg->gsharedvt) // GENERIC_SHARING_FAILURE (CEE_CALL); gshared_static_virtual = TRUE; } else { cmethod = get_constrained_method (cfg, image, token, cil_method, constrained_class, generic_context); CHECK_CFG_ERROR; if (m_class_is_enumtype (constrained_class) && !strcmp (cmethod->name, "GetHashCode")) { /* Use the corresponding method from the base type to avoid boxing */ MonoType *base_type = mono_class_enum_basetype_internal (constrained_class); g_assert (base_type); constrained_class = mono_class_from_mono_type_internal (base_type); cmethod = get_method_nofail (constrained_class, cmethod->name, 0, 0); g_assert (cmethod); } } } if (!dont_verify && !cfg->skip_visibility) { MonoMethod *target_method = cil_method; if (method->is_inflated) { MonoGenericContainer *container = mono_method_get_generic_container(method_definition); MonoGenericContext *context = (container != NULL ? &container->context : NULL); target_method = mini_get_method_allow_open (method, token, NULL, context, cfg->error); CHECK_CFG_ERROR; } if (!mono_method_can_access_method (method_definition, target_method) && !mono_method_can_access_method (method, cil_method)) emit_method_access_failure (cfg, method, cil_method); } if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) { if (cfg->interp && !cfg->interp_entry_only) { /* Use the interpreter instead */ cfg->exception_message = g_strdup ("stack walk"); cfg->disable_llvm = TRUE; } #ifdef TARGET_WASM else { needs_stack_walk = TRUE; } #endif } if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT) && !gshared_static_virtual) { if (!mono_class_is_interface (method->klass)) emit_bad_image_failure (cfg, method, cil_method); else virtual_ = TRUE; } if (!m_class_is_inited (cmethod->klass)) if (!mono_class_init_internal (cmethod->klass)) TYPE_LOAD_ERROR (cmethod->klass); fsig = mono_method_signature_internal (cmethod); if (!fsig) LOAD_ERROR; if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL && mini_class_is_system_array (cmethod->klass)) { array_rank = m_class_get_rank (cmethod->klass); } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && direct_icalls_enabled (cfg, cmethod)) { direct_icall = TRUE; } else if (fsig->pinvoke) { if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) { /* * Avoid calling mono_marshal_get_native_wrapper () too early, it might call managed * callbacks on netcore. 
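* Instead, duplicate the signature below and clear the pinvoke flag so the call is emitted like a normal managed call.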
*/ fsig = mono_metadata_signature_dup_mempool (cfg->mempool, fsig); fsig->pinvoke = FALSE; } else { MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot); fsig = mono_method_signature_internal (wrapper); } } else if (constrained_class) { } else { fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, cfg->error); CHECK_CFG_ERROR; } if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated)) cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig); /* See code below */ if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) { MonoBasicBlock *tbb; GET_BBLOCK (cfg, tbb, next_ip); if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) { /* * We want to extend the try block to cover the call, but we can't do it if the * call is made directly since it's followed by an exception check. */ direct_icall = FALSE; } } mono_save_token_info (cfg, image, token, cil_method); if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code))) need_seq_point = TRUE; /* Don't support calls made using type arguments for now */ /* if (cfg->gsharedvt) { if (mini_is_gsharedvt_signature (fsig)) GSHAREDVT_FAILURE (il_op); } */ if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) g_assert_not_reached (); n = fsig->param_count + fsig->hasthis; if (!cfg->gshared && mono_class_is_gtd (cmethod->klass)) UNVERIFIED; if (!cfg->gshared) g_assert (!mono_method_check_context_used (cmethod)); CHECK_STACK (n); //g_assert (!virtual_ || fsig->hasthis); sp -= n; if (virtual_ && cmethod && sp [0] && sp [0]->opcode == OP_TYPED_OBJREF) { ERROR_DECL (error); MonoMethod *new_cmethod = mono_class_get_virtual_method (sp [0]->klass, cmethod, error); if (is_ok (error)) { cmethod = new_cmethod; virtual_ = FALSE; } else { mono_error_cleanup (error); } } if (cmethod && method_does_not_return (cmethod)) { cfg->cbb->out_of_line = TRUE; noreturn = TRUE; } cdata.method = method; cdata.inst_tailcall = inst_tailcall; /* * We have the `constrained.' prefix opcode. */ if (constrained_class) { ins = handle_constrained_call (cfg, cmethod, fsig, constrained_class, sp, &cdata, &cmethod, &virtual_, &emit_widen); CHECK_CFG_EXCEPTION; if (!gshared_static_virtual) constrained_class = NULL; if (ins) goto call_end; } for (int i = 0; i < fsig->param_count; ++i) sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]); if (check_call_signature (cfg, fsig, sp)) { if (break_on_unverified ()) check_call_signature (cfg, fsig, sp); // Again, step through it. UNVERIFIED; } if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke")) delegate_invoke = TRUE; /* * Implement a workaround for the inherent races involved in locking: * Monitor.Enter () * try { * } finally { * Monitor.Exit () * } * If a thread abort happens between the call to Monitor.Enter () and the start of the * try block, the Exit () won't be executed, see: * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx * To work around this, we extend such try blocks to include the last x bytes * of the Monitor.Enter () call.
*/ if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) { MonoBasicBlock *tbb; GET_BBLOCK (cfg, tbb, next_ip); /* * Only extend try blocks with a finally, to avoid catching exceptions thrown * from Monitor.Enter like ArgumentNullException. */ if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) { /* Mark this bblock as needing to be extended */ tbb->extend_try_block = TRUE; } } /* Conversion to a JIT intrinsic */ gboolean ins_type_initialized; if ((ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp, &ins_type_initialized))) { if (!MONO_TYPE_IS_VOID (fsig->ret)) { if (!ins_type_initialized) mini_type_to_eval_stack_type ((cfg), fsig->ret, ins); emit_widen = FALSE; } // FIXME This is only missed if in fact the intrinsic involves a call. if (inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall intrins %s -> %s\n", method->name, cmethod->name); goto call_end; } CHECK_CFG_ERROR; /* * If the callee is a shared method, then its static cctor * might not get called after the call was patched. */ if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) { emit_class_init (cfg, cmethod->klass); CHECK_TYPELOAD (cmethod->klass); } /* Inlining */ if ((cfg->opt & MONO_OPT_INLINE) && !inst_tailcall && (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) && mono_method_check_inlining (cfg, cmethod)) { int costs; gboolean always = FALSE; gboolean is_empty = FALSE; if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) { /* Prevent inlining of methods that call wrappers */ INLINE_FAILURE ("wrapper call"); // FIXME? Does this write to cmethod impact tailcall_supported? Probably not. // Neither pinvoke nor icall is likely to be tailcalled. cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE); always = TRUE; } costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &is_empty); if (costs) { cfg->real_offset += 5; if (!MONO_TYPE_IS_VOID (fsig->ret)) /* *sp is already set by inline_method */ ins = *sp; inline_costs += costs; // FIXME This is missed if the inlinee contains tail calls that // would work, but not once inlined into the caller. // This matchingness could be a factor in inlining. // i.e. Do not inline if it hurts tailcall, do inline // if it helps and/or is neutral, and helps performance // using usual heuristics. // Note that inlining will expose multiple tailcall opportunities // so the tradeoff is not obvious. If we can tailcall anything // like desktop, then this factor mostly falls away, except // that inlining can affect tailcall performance due to // signature match/mismatch.
if (inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall inline %s -> %s\n", method->name, cmethod->name); if (is_empty) ins_has_side_effect = FALSE; goto call_end; } } check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx); if (cfg->gshared) { MonoGenericContext *cmethod_context = mono_method_get_context (cmethod); context_used = mini_method_check_context_used (cfg, cmethod); if (!context_used && gshared_static_virtual) context_used = mini_class_check_context_used (cfg, constrained_class); if (context_used && mono_class_is_interface (cmethod->klass) && !m_method_is_static (cmethod)) { /* Generic method interface calls are resolved via a helper function and don't need an imt. */ if (!cmethod_context || !cmethod_context->method_inst) pass_imt_from_rgctx = TRUE; } /* * If a shared method calls another * shared method then the caller must * have a generic sharing context * because the magic trampoline * requires it. FIXME: We shouldn't * have to force the vtable/mrgctx * variable here. Instead there * should be a flag in the cfg to * request a generic sharing context. */ if (context_used && ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cfg->method->klass))) mono_get_vtable_var (cfg); } if (pass_vtable) { if (context_used) { vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE); } else { MonoVTable *vtable = mono_class_vtable_checked (cmethod->klass, cfg->error); CHECK_CFG_ERROR; CHECK_TYPELOAD (cmethod->klass); EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable); } } if (pass_mrgctx) { g_assert (!vtable_arg); if (!cfg->compile_aot) { /* * emit_get_rgctx_method () calls mono_class_vtable () so check * for type load errors before. */ mono_class_setup_vtable (cmethod->klass); CHECK_TYPELOAD (cmethod->klass); } vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX); if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod))) { if (virtual_) check_this = TRUE; virtual_ = FALSE; } } if (pass_imt_from_rgctx) { g_assert (!pass_vtable); imt_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD); g_assert (imt_arg); } if (check_this) MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg); /* Calling virtual generic methods */ // These temporaries help detangle "pure" computation of // inputs to is_supported_tailcall from side effects, so that // is_supported_tailcall can be computed just once. gboolean virtual_generic; virtual_generic = FALSE; gboolean virtual_generic_imt; virtual_generic_imt = FALSE; if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_METHOD_IS_FINAL (cmethod) && fsig->generic_param_count && !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) && !cfg->llvm_only) { g_assert (fsig->is_inflated); virtual_generic = TRUE; /* Prevent inlining of methods that contain indirect calls */ INLINE_FAILURE ("virtual generic call"); if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) GSHAREDVT_FAILURE (il_op); if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) { virtual_generic_imt = TRUE; g_assert (!imt_arg); if (!context_used) g_assert (cmethod->is_inflated); imt_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD); g_assert (imt_arg); virtual_ = TRUE; vtable_arg = NULL; } } // Capture some intent before computing tailcall. 
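// (E.g. whether an imt or vtable argument will be materialized further down
// changes the call's signature for tailcall purposes, so that decision is
// recorded here and is_supported_tailcall sees it exactly once.)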
gboolean make_generic_call_out_of_gsharedvt_method; gboolean will_have_imt_arg; make_generic_call_out_of_gsharedvt_method = FALSE; will_have_imt_arg = FALSE; /* * Making generic calls out of gsharedvt methods. * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid * patching gshared method addresses into a gsharedvt method. */ if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) && !(m_class_get_rank (cmethod->klass) && m_class_get_byval_arg (cmethod->klass)->type != MONO_TYPE_SZARRAY) && (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) { make_generic_call_out_of_gsharedvt_method = TRUE; if (virtual_) { if (fsig->generic_param_count) { will_have_imt_arg = TRUE; } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) { will_have_imt_arg = TRUE; } } } /* Tail prefix / tailcall optimization */ /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests. Inlining and stack traces are not guaranteed however. */ /* FIXME: runtime generic context pointer for jumps? */ /* FIXME: handle this for generic sharing eventually */ // tailcall means "the backend can and will handle it". // inst_tailcall means the tail. prefix is present. tailcall_extra_arg = vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass); tailcall = inst_tailcall && is_supported_tailcall (cfg, ip, method, cmethod, fsig, virtual_, tailcall_extra_arg, &tailcall_calli); // Writes to imt_arg, vtable_arg, virtual_, cmethod, must not occur from here (inputs to is_supported_tailcall). // Capture values to later assert they don't change. called_is_supported_tailcall = TRUE; tailcall_method = method; tailcall_cmethod = cmethod; tailcall_fsig = fsig; tailcall_virtual = virtual_; if (virtual_generic) { if (virtual_generic_imt) { if (tailcall) { /* Prevent inlining of methods with tailcalls (the call stack would be altered) */ INLINE_FAILURE ("tailcall"); } common_call = TRUE; goto call_end; } MonoInst *this_temp, *this_arg_temp, *store; MonoInst *iargs [4]; this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL); NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]); MONO_ADD_INS (cfg->cbb, store); /* FIXME: This should be a managed pointer */ this_arg_temp = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0); iargs [1] = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD); EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0); addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs); EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0); ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL); if (inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall virtual generic %s -> %s\n", method->name, cmethod->name); goto call_end; } CHECK_CFG_ERROR; /* Tail recursion elimination */ if (((cfg->opt & MONO_OPT_TAILCALL) || inst_tailcall) && il_op == MONO_CEE_CALL && cmethod == method && next_ip < end && next_ip [0] == CEE_RET && !vtable_arg) { gboolean has_vtargs = FALSE; int i; /* Prevent inlining of methods with tailcalls (the call stack would be altered) */ INLINE_FAILURE ("tailcall"); /* keep it simple */ for (i = fsig->param_count - 1; !has_vtargs && i >= 0; i--) has_vtargs = MONO_TYPE_ISSTRUCT (mono_method_signature_internal (cmethod)->params [i]); if (!has_vtargs) { if (need_seq_point) { 
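/*
 * Note: the recursive call is about to be rewritten into argument stores
 * plus a branch back to the start bblock (see below), so this explicit
 * sequence point is presumably what keeps "step over" on the original
 * call site behaving normally once no real call instruction remains.
 */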
emit_seq_point (cfg, method, ip, FALSE, TRUE); need_seq_point = FALSE; } for (i = 0; i < n; ++i) EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]); mini_profiler_emit_tail_call (cfg, cmethod); MONO_INST_NEW (cfg, ins, OP_BR); MONO_ADD_INS (cfg->cbb, ins); tblock = start_bblock->out_bb [0]; link_bblock (cfg, cfg->cbb, tblock); ins->inst_target_bb = tblock; start_new_bblock = 1; /* skip the CEE_RET, too */ if (ip_in_bb (cfg, cfg->cbb, next_ip)) skip_ret = TRUE; push_res = FALSE; need_seq_point = FALSE; goto call_end; } } inline_costs += CALL_COST * MIN(10, num_calls++); /* * Synchronized wrappers. * It's hard to determine where to replace a method with its synchronized * wrapper without causing an infinite recursion. The current solution is * to add the synchronized wrapper in the trampolines, and to * change the called method to a dummy wrapper, and resolve that wrapper * to the real method in mono_jit_compile_method (). */ if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) { MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method); if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig)) { // FIXME? Does this write to cmethod impact tailcall_supported? Probably not. cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod); } } /* * Making generic calls out of gsharedvt methods. * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid * patching gshared method addresses into a gsharedvt method. */ if (make_generic_call_out_of_gsharedvt_method) { if (virtual_) { //if (mono_class_is_interface (cmethod->klass)) //GSHAREDVT_FAILURE (il_op); // disable for possible remoting calls if (fsig->hasthis && method->klass == mono_defaults.object_class) GSHAREDVT_FAILURE (il_op); if (fsig->generic_param_count) { /* virtual generic call */ g_assert (!imt_arg); g_assert (will_have_imt_arg); /* Same as the virtual generic case above */ imt_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD); g_assert (imt_arg); } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) { /* This can happen when we call a fully instantiated iface method */ g_assert (will_have_imt_arg); imt_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD); g_assert (imt_arg); } /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */ vtable_arg = NULL; } if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke"))) keep_this_alive = sp [0]; MonoRgctxInfoType info_type; if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT; else info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE; addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type); if (cfg->llvm_only) { // FIXME: Avoid initializing vtable_arg ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr); if (inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall llvmonly gsharedvt %s -> %s\n", method->name, cmethod->name); } else { tailcall = tailcall_calli; ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall); tailcall_remove_ret |= tailcall; } goto call_end; } /* Generic sharing */ /* * Calls to generic methods from shared code cannot go through the trampoline infrastructure * in some cases, because the called method might end up being different on every
call. * Load the called method address from the rgctx and do an indirect call in these cases. * Use this if the callee is gsharedvt sharable too, since * at runtime we might find an instantiation so the call cannot * be patched (the 'no_patch' code path in mini-trampolines.c). */ gboolean gshared_indirect; gshared_indirect = context_used && !imt_arg && !array_rank && !delegate_invoke; if (gshared_indirect) gshared_indirect = (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) || !mono_class_generic_sharing_enabled (cmethod->klass) || gshared_static_virtual); if (gshared_indirect) gshared_indirect = (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)); if (gshared_indirect) { INLINE_FAILURE ("gshared"); g_assert (cfg->gshared && cmethod); g_assert (!addr); if (fsig->hasthis) MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg); if (cfg->llvm_only) { if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) { /* Handled in handle_constrained_gsharedvt_call () */ g_assert (!gshared_static_virtual); addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER); } else { if (gshared_static_virtual) addr = emit_get_rgctx_virt_method (cfg, -1, constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE); else addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_FTNDESC); } // FIXME: Avoid initializing imt_arg/vtable_arg ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr); if (inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall context_used_llvmonly %s -> %s\n", method->name, cmethod->name); } else { if (gshared_static_virtual) { /* * cmethod is a static interface method, the actual called method at runtime * needs to be computed using constrained_class and cmethod. */ addr = emit_get_rgctx_virt_method (cfg, -1, constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE); } else { addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE); } if (inst_tailcall) mono_tailcall_print ("%s tailcall_calli#2 %s -> %s\n", tailcall_calli ? 
"making" : "missed", method->name, cmethod->name); tailcall = tailcall_calli; ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall); tailcall_remove_ret |= tailcall; } goto call_end; } /* Direct calls to icalls */ if (direct_icall) { MonoMethod *wrapper; int costs; /* Inline the wrapper */ wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot); costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE, NULL); g_assert (costs > 0); cfg->real_offset += 5; if (!MONO_TYPE_IS_VOID (fsig->ret)) /* *sp is already set by inline_method */ ins = *sp; inline_costs += costs; if (inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall direct_icall %s -> %s\n", method->name, cmethod->name); goto call_end; } /* Array methods */ if (array_rank) { MonoInst *addr; if (strcmp (cmethod->name, "Set") == 0) { /* array Set */ MonoInst *val = sp [fsig->param_count]; if (val->type == STACK_OBJ) { MonoInst *iargs [ ] = { sp [0], val }; mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs); } addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE); if (!mini_debug_options.weak_memory_model && val->type == STACK_OBJ) mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg); if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val)) mini_emit_write_barrier (cfg, addr, val); if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass)) GSHAREDVT_FAILURE (il_op); } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */ addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE); EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0); } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */ if (!m_class_is_valuetype (m_class_get_element_class (cmethod->klass)) && !readonly) mini_emit_check_array_type (cfg, sp [0], cmethod->klass); CHECK_TYPELOAD (cmethod->klass); readonly = FALSE; addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE); ins = addr; } else { g_assert_not_reached (); } emit_widen = FALSE; if (inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall array_rank %s -> %s\n", method->name, cmethod->name); goto call_end; } ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL); if (ins) { if (inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall redirect %s -> %s\n", method->name, cmethod->name); goto call_end; } /* Tail prefix / tailcall optimization */ if (tailcall) { /* Prevent inlining of methods with tailcalls (the call stack would be altered) */ INLINE_FAILURE ("tailcall"); } /* * Virtual calls in llvm-only mode. 
*/ if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) { ins = mini_emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp); goto call_end; } /* Common call */ if (!(cfg->opt & MONO_OPT_AGGRESSIVE_INLINING) && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !method_does_not_return (cmethod)) INLINE_FAILURE ("call"); common_call = TRUE; #ifdef TARGET_WASM /* Push an LMF so these frames can be enumerated during stack walks by mono_arch_unwind_frame () */ if (needs_stack_walk && !cfg->deopt) { MonoInst *method_ins; int lmf_reg; emit_push_lmf (cfg); EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL); lmf_reg = ins->dreg; /* The lmf->method field will be used to look up the MonoJitInfo for this method */ method_ins = emit_get_rgctx_method (cfg, mono_method_check_context_used (cfg->method), cfg->method, MONO_RGCTX_INFO_METHOD); EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, method), method_ins->dreg); } #endif call_end: // Check that the decision to tailcall would not have changed. g_assert (!called_is_supported_tailcall || tailcall_method == method); // FIXME? cmethod does change, weaken the assert if we weren't tailcalling anyway. // If this still fails, restructure the code, or call tailcall_supported again and assert no change. g_assert (!called_is_supported_tailcall || !tailcall || tailcall_cmethod == cmethod); g_assert (!called_is_supported_tailcall || tailcall_fsig == fsig); g_assert (!called_is_supported_tailcall || tailcall_virtual == virtual_); g_assert (!called_is_supported_tailcall || tailcall_extra_arg == (vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass))); if (common_call) // FIXME goto call_end && !common_call often skips tailcall processing. ins = mini_emit_method_call_full (cfg, cmethod, fsig, tailcall, sp, virtual_ ? sp [0] : NULL, imt_arg, vtable_arg); /* * Handle devirt of some A.B.C calls by replacing the result of A.B with a OP_TYPED_OBJREF instruction, so the .C * call can be devirtualized above. */ if (cmethod) ins = handle_call_res_devirt (cfg, cmethod, ins); #ifdef TARGET_WASM if (common_call && needs_stack_walk && !cfg->deopt) /* If an exception is thrown, the LMF is popped by a call to mini_llvmonly_pop_lmf () */ emit_pop_lmf (cfg); #endif if (noreturn) { MONO_INST_NEW (cfg, ins, OP_NOT_REACHED); MONO_ADD_INS (cfg->cbb, ins); } calli_end: if ((tailcall_remove_ret || (common_call && tailcall)) && !cfg->llvm_only) { link_bblock (cfg, cfg->cbb, end_bblock); start_new_bblock = 1; // FIXME: Eliminate unreachable epilogs /* * OP_TAILCALL has no return value, so skip the CEE_RET if it is * only reachable from this call. */ GET_BBLOCK (cfg, tblock, next_ip); if (tblock == cfg->cbb || tblock->in_count == 0) skip_ret = TRUE; push_res = FALSE; need_seq_point = FALSE; } if (ins_flag & MONO_INST_TAILCALL) mini_test_tailcall (cfg, tailcall); /* End of call, INS should contain the result of the call, if any */ if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) { g_assert (ins); if (emit_widen) *sp++ = mono_emit_widen_call_res (cfg, ins, fsig); else *sp++ = ins; } if (save_last_error) { save_last_error = FALSE; #ifdef TARGET_WIN32 // Making icalls etc could clobber the value so emit inline code // to read last error on Windows. 
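// Sketch of the sequence emitted below:
//   guint32 err = GetLastError ();               // OP_GET_LAST_ERROR
//   mono_marshal_set_last_error_windows (err);   // stash it for later retrieval
//                                                // (e.g. Marshal.GetLastWin32Error)
// Capturing the error into a vreg first is the point: any intervening helper
// call could overwrite the thread's last-error value.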
MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR); ins->dreg = alloc_dreg (cfg, STACK_I4); ins->type = STACK_I4; MONO_ADD_INS (cfg->cbb, ins); mono_emit_jit_icall (cfg, mono_marshal_set_last_error_windows, &ins); #else mono_emit_jit_icall (cfg, mono_marshal_set_last_error, NULL); #endif } if (keep_this_alive) { MonoInst *dummy_use; /* See mini_emit_method_call_full () */ EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive); } if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) { /* * Clang can convert these calls to tailcalls which screw up the stack * walk. This happens even when the -fno-optimize-sibling-calls * option is passed to clang. * Work around this by emitting a dummy call. */ mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL); } CHECK_CFG_EXCEPTION; if (skip_ret) { // FIXME When not followed by CEE_RET, correct behavior is to raise an exception. g_assert (next_ip [0] == CEE_RET); next_ip += 1; il_op = MonoOpcodeEnum_Invalid; // Call or ret? Unclear. } ins_flag = 0; constrained_class = NULL; if (need_seq_point) { // check if this is a nested call and remove the non_empty_stack of the last call, only for non-native methods if (!(method->flags & METHOD_IMPL_ATTRIBUTE_NATIVE)) { if (emitted_funccall_seq_point) { if (cfg->last_seq_point) cfg->last_seq_point->flags |= MONO_INST_NESTED_CALL; } else emitted_funccall_seq_point = TRUE; } emit_seq_point (cfg, method, next_ip, FALSE, TRUE); } break; } case MONO_CEE_RET: if (!detached_before_ret) mini_profiler_emit_leave (cfg, sig->ret->type != MONO_TYPE_VOID ? sp [-1] : NULL); g_assert (!method_does_not_return (method)); if (cfg->method != method) { /* return from inlined method */ /* * If in_count == 0, that means the ret is unreachable due to * being preceded by a throw. In that case, inline_method () will * handle setting the return value * (test case: test_0_inline_throw ()). */ if (return_var && cfg->cbb->in_count) { MonoType *ret_type = mono_method_signature_internal (method)->ret; MonoInst *store; CHECK_STACK (1); --sp; *sp = convert_value (cfg, ret_type, *sp); if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp)) UNVERIFIED; //g_assert (returnvar != -1); EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp); cfg->ret_var_set = TRUE; } } else { if (cfg->lmf_var && cfg->cbb->in_count && (!cfg->llvm_only || cfg->deopt)) emit_pop_lmf (cfg); if (cfg->ret) { MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (method)->ret); if (seq_points && !sym_seq_points) { /* * Place a seq point here too even though the IL stack is not * empty, so a step over on * call <FOO> * ret * will work correctly.
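 * (Sequence points are normally only emitted at empty-stack IL offsets;
 * this is a deliberate exception to that rule.)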
*/ NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE); MONO_ADD_INS (cfg->cbb, ins); } g_assert (!return_var); CHECK_STACK (1); --sp; *sp = convert_value (cfg, ret_type, *sp); if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp)) UNVERIFIED; emit_setret (cfg, *sp); } } if (sp != stack_start) UNVERIFIED; MONO_INST_NEW (cfg, ins, OP_BR); ins->inst_target_bb = end_bblock; MONO_ADD_INS (cfg->cbb, ins); link_bblock (cfg, cfg->cbb, end_bblock); start_new_bblock = 1; break; case MONO_CEE_BR_S: MONO_INST_NEW (cfg, ins, OP_BR); GET_BBLOCK (cfg, tblock, target); link_bblock (cfg, cfg->cbb, tblock); ins->inst_target_bb = tblock; if (sp != stack_start) { handle_stack_args (cfg, stack_start, sp - stack_start); sp = stack_start; CHECK_UNVERIFIABLE (cfg); } MONO_ADD_INS (cfg->cbb, ins); start_new_bblock = 1; inline_costs += BRANCH_COST; break; case MONO_CEE_BEQ_S: case MONO_CEE_BGE_S: case MONO_CEE_BGT_S: case MONO_CEE_BLE_S: case MONO_CEE_BLT_S: case MONO_CEE_BNE_UN_S: case MONO_CEE_BGE_UN_S: case MONO_CEE_BGT_UN_S: case MONO_CEE_BLE_UN_S: case MONO_CEE_BLT_UN_S: MONO_INST_NEW (cfg, ins, il_op + BIG_BRANCH_OFFSET); ADD_BINCOND (NULL); sp = stack_start; inline_costs += BRANCH_COST; break; case MONO_CEE_BR: MONO_INST_NEW (cfg, ins, OP_BR); GET_BBLOCK (cfg, tblock, target); link_bblock (cfg, cfg->cbb, tblock); ins->inst_target_bb = tblock; if (sp != stack_start) { handle_stack_args (cfg, stack_start, sp - stack_start); sp = stack_start; CHECK_UNVERIFIABLE (cfg); } MONO_ADD_INS (cfg->cbb, ins); start_new_bblock = 1; inline_costs += BRANCH_COST; break; case MONO_CEE_BRFALSE_S: case MONO_CEE_BRTRUE_S: case MONO_CEE_BRFALSE: case MONO_CEE_BRTRUE: { MonoInst *cmp; gboolean is_true = il_op == MONO_CEE_BRTRUE_S || il_op == MONO_CEE_BRTRUE; if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8) UNVERIFIED; sp--; GET_BBLOCK (cfg, tblock, target); link_bblock (cfg, cfg->cbb, tblock); GET_BBLOCK (cfg, tblock, next_ip); link_bblock (cfg, cfg->cbb, tblock); if (sp != stack_start) { handle_stack_args (cfg, stack_start, sp - stack_start); CHECK_UNVERIFIABLE (cfg); } MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM); cmp->sreg1 = sp [0]->dreg; type_from_op (cfg, cmp, sp [0], NULL); CHECK_TYPE (cmp); #if SIZEOF_REGISTER == 4 if (cmp->opcode == OP_LCOMPARE_IMM) { /* Convert it to OP_LCOMPARE */ MONO_INST_NEW (cfg, ins, OP_I8CONST); ins->type = STACK_I8; ins->dreg = alloc_dreg (cfg, STACK_I8); ins->inst_l = 0; MONO_ADD_INS (cfg->cbb, ins); cmp->opcode = OP_LCOMPARE; cmp->sreg2 = ins->dreg; } #endif MONO_ADD_INS (cfg->cbb, cmp); MONO_INST_NEW (cfg, ins, is_true ? 
CEE_BNE_UN : CEE_BEQ); type_from_op (cfg, ins, sp [0], NULL); MONO_ADD_INS (cfg->cbb, ins); ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2); GET_BBLOCK (cfg, tblock, target); ins->inst_true_bb = tblock; GET_BBLOCK (cfg, tblock, next_ip); ins->inst_false_bb = tblock; start_new_bblock = 2; sp = stack_start; inline_costs += BRANCH_COST; break; } case MONO_CEE_BEQ: case MONO_CEE_BGE: case MONO_CEE_BGT: case MONO_CEE_BLE: case MONO_CEE_BLT: case MONO_CEE_BNE_UN: case MONO_CEE_BGE_UN: case MONO_CEE_BGT_UN: case MONO_CEE_BLE_UN: case MONO_CEE_BLT_UN: MONO_INST_NEW (cfg, ins, il_op); ADD_BINCOND (NULL); sp = stack_start; inline_costs += BRANCH_COST; break; case MONO_CEE_SWITCH: { MonoInst *src1; MonoBasicBlock **targets; MonoBasicBlock *default_bblock; MonoJumpInfoBBTable *table; int offset_reg = alloc_preg (cfg); int target_reg = alloc_preg (cfg); int table_reg = alloc_preg (cfg); int sum_reg = alloc_preg (cfg); gboolean use_op_switch; n = read32 (ip + 1); --sp; src1 = sp [0]; if ((src1->type != STACK_I4) && (src1->type != STACK_PTR)) UNVERIFIED; ip += 5; GET_BBLOCK (cfg, default_bblock, next_ip); default_bblock->flags |= BB_INDIRECT_JUMP_TARGET; targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n); for (i = 0; i < n; ++i) { GET_BBLOCK (cfg, tblock, next_ip + (gint32)read32 (ip)); targets [i] = tblock; targets [i]->flags |= BB_INDIRECT_JUMP_TARGET; ip += 4; } if (sp != stack_start) { /* * Link the current bb with the targets as well, so handle_stack_args * will set their in_stack correctly. */ link_bblock (cfg, cfg->cbb, default_bblock); for (i = 0; i < n; ++i) link_bblock (cfg, cfg->cbb, targets [i]); handle_stack_args (cfg, stack_start, sp - stack_start); sp = stack_start; CHECK_UNVERIFIABLE (cfg); /* Undo the links */ mono_unlink_bblock (cfg, cfg->cbb, default_bblock); for (i = 0; i < n; ++i) mono_unlink_bblock (cfg, cfg->cbb, targets [i]); } MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock); for (i = 0; i < n; ++i) link_bblock (cfg, cfg->cbb, targets [i]); table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable)); table->table = targets; table->table_size = n; use_op_switch = FALSE; #ifdef TARGET_ARM /* ARM implements SWITCH statements differently */ /* FIXME: Make it use the generic implementation */ if (!cfg->compile_aot) use_op_switch = TRUE; #endif if (COMPILE_LLVM (cfg)) use_op_switch = TRUE; cfg->cbb->has_jump_table = 1; if (use_op_switch) { MONO_INST_NEW (cfg, ins, OP_SWITCH); ins->sreg1 = src1->dreg; ins->inst_p0 = table; ins->inst_many_bb = targets; ins->klass = (MonoClass *)GUINT_TO_POINTER (n); MONO_ADD_INS (cfg->cbb, ins); } else { if (TARGET_SIZEOF_VOID_P == 8) MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3); else MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2); #if SIZEOF_REGISTER == 8 /* The upper word might not be zero, and we add it to a 64 bit address later */ MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg); #endif if (cfg->compile_aot) { MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH); } else { MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE); ins->inst_c1 = MONO_PATCH_INFO_SWITCH; ins->inst_p0 = table; ins->dreg = table_reg; MONO_ADD_INS (cfg->cbb, ins); } /* FIXME: Use load_memindex */ MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, 
sum_reg, 0); MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg); } start_new_bblock = 1; inline_costs += BRANCH_COST * 2; break; } case MONO_CEE_LDIND_I1: case MONO_CEE_LDIND_U1: case MONO_CEE_LDIND_I2: case MONO_CEE_LDIND_U2: case MONO_CEE_LDIND_I4: case MONO_CEE_LDIND_U4: case MONO_CEE_LDIND_I8: case MONO_CEE_LDIND_I: case MONO_CEE_LDIND_R4: case MONO_CEE_LDIND_R8: case MONO_CEE_LDIND_REF: --sp; if (!(ins_flag & MONO_INST_NONULLCHECK)) MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, FALSE); ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (ldind_to_type (il_op)), sp [0], 0, ins_flag); *sp++ = ins; ins_flag = 0; break; case MONO_CEE_STIND_REF: case MONO_CEE_STIND_I1: case MONO_CEE_STIND_I2: case MONO_CEE_STIND_I4: case MONO_CEE_STIND_I8: case MONO_CEE_STIND_R4: case MONO_CEE_STIND_R8: case MONO_CEE_STIND_I: { sp -= 2; if (il_op == MONO_CEE_STIND_REF && sp [1]->type != STACK_OBJ) { /* stind.ref must only be used with object references. */ UNVERIFIED; } if (il_op == MONO_CEE_STIND_R4 && sp [1]->type == STACK_R8) sp [1] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.single_class), sp [1]); mini_emit_memory_store (cfg, m_class_get_byval_arg (stind_to_type (il_op)), sp [0], sp [1], ins_flag); ins_flag = 0; inline_costs += 1; break; } case MONO_CEE_MUL: MONO_INST_NEW (cfg, ins, il_op); sp -= 2; ins->sreg1 = sp [0]->dreg; ins->sreg2 = sp [1]->dreg; type_from_op (cfg, ins, sp [0], sp [1]); CHECK_TYPE (ins); ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); /* Use the immediate opcodes if possible */ int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode); if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->inst_c0)) { if (imm_opcode != -1) { ins->opcode = imm_opcode; ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0); ins->sreg2 = -1; NULLIFY_INS (sp [1]); } } MONO_ADD_INS ((cfg)->cbb, (ins)); *sp++ = mono_decompose_opcode (cfg, ins); break; case MONO_CEE_ADD: case MONO_CEE_SUB: case MONO_CEE_DIV: case MONO_CEE_DIV_UN: case MONO_CEE_REM: case MONO_CEE_REM_UN: case MONO_CEE_AND: case MONO_CEE_OR: case MONO_CEE_XOR: case MONO_CEE_SHL: case MONO_CEE_SHR: case MONO_CEE_SHR_UN: { MONO_INST_NEW (cfg, ins, il_op); sp -= 2; ins->sreg1 = sp [0]->dreg; ins->sreg2 = sp [1]->dreg; type_from_op (cfg, ins, sp [0], sp [1]); CHECK_TYPE (ins); add_widen_op (cfg, ins, &sp [0], &sp [1]); ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); /* Use the immediate opcodes if possible */ int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode); if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->opcode == OP_ICONST ? 
sp [1]->inst_c0 : sp [1]->inst_l)) { if (imm_opcode != -1) { ins->opcode = imm_opcode; if (sp [1]->opcode == OP_I8CONST) { #if SIZEOF_REGISTER == 8 ins->inst_imm = sp [1]->inst_l; #else ins->inst_l = sp [1]->inst_l; #endif } else { ins->inst_imm = (gssize)(sp [1]->inst_c0); } ins->sreg2 = -1; /* Might be followed by an instruction added by add_widen_op */ if (sp [1]->next == NULL) NULLIFY_INS (sp [1]); } } MONO_ADD_INS ((cfg)->cbb, (ins)); *sp++ = mono_decompose_opcode (cfg, ins); break; } case MONO_CEE_NEG: case MONO_CEE_NOT: case MONO_CEE_CONV_I1: case MONO_CEE_CONV_I2: case MONO_CEE_CONV_I4: case MONO_CEE_CONV_R4: case MONO_CEE_CONV_R8: case MONO_CEE_CONV_U4: case MONO_CEE_CONV_I8: case MONO_CEE_CONV_U8: case MONO_CEE_CONV_OVF_I8: case MONO_CEE_CONV_OVF_U8: case MONO_CEE_CONV_R_UN: /* Special case this earlier so we have long constants in the IR */ if ((il_op == MONO_CEE_CONV_I8 || il_op == MONO_CEE_CONV_U8) && (sp [-1]->opcode == OP_ICONST)) { int data = sp [-1]->inst_c0; sp [-1]->opcode = OP_I8CONST; sp [-1]->type = STACK_I8; #if SIZEOF_REGISTER == 8 if (il_op == MONO_CEE_CONV_U8) sp [-1]->inst_c0 = (guint32)data; else sp [-1]->inst_c0 = data; #else if (il_op == MONO_CEE_CONV_U8) sp [-1]->inst_l = (guint32)data; else sp [-1]->inst_l = data; #endif sp [-1]->dreg = alloc_dreg (cfg, STACK_I8); } else { ADD_UNOP (il_op); } break; case MONO_CEE_CONV_OVF_I4: case MONO_CEE_CONV_OVF_I1: case MONO_CEE_CONV_OVF_I2: case MONO_CEE_CONV_OVF_I: case MONO_CEE_CONV_OVF_I1_UN: case MONO_CEE_CONV_OVF_I2_UN: case MONO_CEE_CONV_OVF_I4_UN: case MONO_CEE_CONV_OVF_I8_UN: case MONO_CEE_CONV_OVF_I_UN: if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) { /* floats are always signed, _UN has no effect */ ADD_UNOP (CEE_CONV_OVF_I8); if (il_op == MONO_CEE_CONV_OVF_I1_UN) ADD_UNOP (MONO_CEE_CONV_OVF_I1); else if (il_op == MONO_CEE_CONV_OVF_I2_UN) ADD_UNOP (MONO_CEE_CONV_OVF_I2); else if (il_op == MONO_CEE_CONV_OVF_I4_UN) ADD_UNOP (MONO_CEE_CONV_OVF_I4); else if (il_op == MONO_CEE_CONV_OVF_I8_UN) ; else ADD_UNOP (il_op); } else { ADD_UNOP (il_op); } break; case MONO_CEE_CONV_OVF_U1: case MONO_CEE_CONV_OVF_U2: case MONO_CEE_CONV_OVF_U4: case MONO_CEE_CONV_OVF_U: case MONO_CEE_CONV_OVF_U1_UN: case MONO_CEE_CONV_OVF_U2_UN: case MONO_CEE_CONV_OVF_U4_UN: case MONO_CEE_CONV_OVF_U8_UN: case MONO_CEE_CONV_OVF_U_UN: if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) { /* floats are always signed, _UN has no effect */ ADD_UNOP (CEE_CONV_OVF_U8); ADD_UNOP (il_op); } else { ADD_UNOP (il_op); } break; case MONO_CEE_CONV_U2: case MONO_CEE_CONV_U1: case MONO_CEE_CONV_I: case MONO_CEE_CONV_U: ADD_UNOP (il_op); CHECK_CFG_EXCEPTION; break; case MONO_CEE_ADD_OVF: case MONO_CEE_ADD_OVF_UN: case MONO_CEE_MUL_OVF: case MONO_CEE_MUL_OVF_UN: case MONO_CEE_SUB_OVF: case MONO_CEE_SUB_OVF_UN: MONO_INST_NEW (cfg, ins, il_op); sp -= 2; ins->sreg1 = sp [0]->dreg; ins->sreg2 = sp [1]->dreg; type_from_op (cfg, ins, sp [0], sp [1]); CHECK_TYPE (ins); if (ovf_exc) ins->inst_exc_name = ovf_exc; else ins->inst_exc_name = "OverflowException"; /* Have to insert a widening op */ add_widen_op (cfg, ins, &sp [0], &sp [1]); ins->dreg = alloc_dreg (cfg, (MonoStackType)(ins)->type); MONO_ADD_INS ((cfg)->cbb, ins); /* The opcode might be emulated, so need to special case this */ if (ovf_exc && mono_find_jit_opcode_emulation (ins->opcode)) { switch (ins->opcode) { case OP_IMUL_OVF_UN: /* This opcode is just a placeholder, it will be emulated also */ ins->opcode = OP_IMUL_OVF_UN_OOM; break; case OP_LMUL_OVF_UN: /* This opcode is just a 
placeholder, it will be emulated also */ ins->opcode = OP_LMUL_OVF_UN_OOM; break; default: g_assert_not_reached (); } } ovf_exc = NULL; *sp++ = mono_decompose_opcode (cfg, ins); break; case MONO_CEE_CPOBJ: GSHAREDVT_FAILURE (il_op); GSHAREDVT_FAILURE (*ip); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); sp -= 2; mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag); ins_flag = 0; break; case MONO_CEE_LDOBJ: { int loc_index = -1; int stloc_len = 0; --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); /* Optimize the common ldobj+stloc combination */ if (next_ip < end) { switch (next_ip [0]) { case MONO_CEE_STLOC_S: CHECK_OPSIZE (7); loc_index = next_ip [1]; stloc_len = 2; break; case MONO_CEE_STLOC_0: case MONO_CEE_STLOC_1: case MONO_CEE_STLOC_2: case MONO_CEE_STLOC_3: loc_index = next_ip [0] - CEE_STLOC_0; stloc_len = 1; break; default: break; } } if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, next_ip)) { CHECK_LOCAL (loc_index); EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), sp [0]->dreg, 0); ins->dreg = cfg->locals [loc_index]->dreg; ins->flags |= ins_flag; il_op = (MonoOpcodeEnum)next_ip [0]; next_ip += stloc_len; if (ins_flag & MONO_INST_VOLATILE) { /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ); } ins_flag = 0; break; } /* Optimize the ldobj+stobj combination */ if (next_ip + 4 < end && next_ip [0] == CEE_STOBJ && ip_in_bb (cfg, cfg->cbb, next_ip) && read32 (next_ip + 1) == token) { CHECK_STACK (1); sp --; mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag); il_op = (MonoOpcodeEnum)next_ip [0]; next_ip += 5; ins_flag = 0; break; } ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (klass), sp [0], 0, ins_flag); *sp++ = ins; ins_flag = 0; inline_costs += 1; break; } case MONO_CEE_LDSTR: if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) { EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n)); ins->type = STACK_OBJ; *sp = ins; } else if (method->wrapper_type != MONO_WRAPPER_NONE) { MonoInst *iargs [1]; char *str = (char *)mono_method_get_wrapper_data (method, n); if (cfg->compile_aot) EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str); else EMIT_NEW_PCONST (cfg, iargs [0], str); *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper_internal, iargs); } else { { if (cfg->cbb->out_of_line) { MonoInst *iargs [2]; if (image == mono_defaults.corlib) { /* * Avoid relocations in AOT and save some space by using a * version of helper_ldstr specialized to mscorlib. 
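 * (The generic helper takes the MonoImage plus the token index, while the
 * mscorlib-specialized variant below needs only the token index, so the
 * image constant and its relocation drop out of the AOT image.)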
*/ EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n)); *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs); } else { /* Avoid creating the string object */ EMIT_NEW_IMAGECONST (cfg, iargs [0], image); EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n)); *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs); } } else if (cfg->compile_aot) { NEW_LDSTRCONST (cfg, ins, image, n); *sp = ins; MONO_ADD_INS (cfg->cbb, ins); } else { NEW_PCONST (cfg, ins, NULL); ins->type = STACK_OBJ; ins->inst_p0 = mono_ldstr_checked (image, mono_metadata_token_index (n), cfg->error); CHECK_CFG_ERROR; if (!ins->inst_p0) OUT_OF_MEMORY_FAILURE; *sp = ins; MONO_ADD_INS (cfg->cbb, ins); } } } sp++; break; case MONO_CEE_NEWOBJ: { MonoInst *iargs [2]; MonoMethodSignature *fsig; MonoInst this_ins; MonoInst *alloc; MonoInst *vtable_arg = NULL; cmethod = mini_get_method (cfg, method, token, NULL, generic_context); CHECK_CFG_ERROR; fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, cfg->error); CHECK_CFG_ERROR; mono_save_token_info (cfg, image, token, cmethod); if (!mono_class_init_internal (cmethod->klass)) TYPE_LOAD_ERROR (cmethod->klass); context_used = mini_method_check_context_used (cfg, cmethod); if (!dont_verify && !cfg->skip_visibility) { MonoMethod *cil_method = cmethod; MonoMethod *target_method = cil_method; if (method->is_inflated) { MonoGenericContainer *container = mono_method_get_generic_container(method_definition); MonoGenericContext *context = (container != NULL ? &container->context : NULL); target_method = mini_get_method_allow_open (method, token, NULL, context, cfg->error); CHECK_CFG_ERROR; } if (!mono_method_can_access_method (method_definition, target_method) && !mono_method_can_access_method (method, cil_method)) emit_method_access_failure (cfg, method, cil_method); } if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) { emit_class_init (cfg, cmethod->klass); CHECK_TYPELOAD (cmethod->klass); } /* if (cfg->gsharedvt) { if (mini_is_gsharedvt_variable_signature (sig)) GSHAREDVT_FAILURE (il_op); } */ n = fsig->param_count; CHECK_STACK (n); /* * Generate smaller code for the common newobj <exception> instruction in * argument checking code. 
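 * The pattern being recognized is, roughly, argument-validation code like
 *
 *     if (arg == null)
 *         throw new ArgumentNullException (nameof (arg));
 *
 * i.e. an out-of-line newobj of a corlib exception with at most two string
 * arguments, which is collapsed into a single icall below.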
*/ if (cfg->cbb->out_of_line && m_class_get_image (cmethod->klass) == mono_defaults.corlib && is_exception_class (cmethod->klass) && n <= 2 && ((n < 1) || (!m_type_is_byref (fsig->params [0]) && fsig->params [0]->type == MONO_TYPE_STRING)) && ((n < 2) || (!m_type_is_byref (fsig->params [1]) && fsig->params [1]->type == MONO_TYPE_STRING))) { MonoInst *iargs [3]; sp -= n; EMIT_NEW_ICONST (cfg, iargs [0], m_class_get_type_token (cmethod->klass)); switch (n) { case 0: *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs); break; case 1: iargs [1] = sp [0]; *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs); break; case 2: iargs [1] = sp [0]; iargs [2] = sp [1]; *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs); break; default: g_assert_not_reached (); } inline_costs += 5; break; } /* move the args to allow room for 'this' in the first position */ while (n--) { --sp; sp [1] = sp [0]; } for (int i = 0; i < fsig->param_count; ++i) sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]); /* check_call_signature () requires sp[0] to be set */ this_ins.type = STACK_OBJ; sp [0] = &this_ins; if (check_call_signature (cfg, fsig, sp)) UNVERIFIED; iargs [0] = NULL; if (mini_class_is_system_array (cmethod->klass)) { *sp = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD); MonoJitICallId function = MONO_JIT_ICALL_ZeroIsReserved; int rank = m_class_get_rank (cmethod->klass); int n = fsig->param_count; /* Optimize the common cases, use ctor using length for each rank (no lbound). */ if (n == rank) { switch (n) { case 1: function = MONO_JIT_ICALL_mono_array_new_1; break; case 2: function = MONO_JIT_ICALL_mono_array_new_2; break; case 3: function = MONO_JIT_ICALL_mono_array_new_3; break; case 4: function = MONO_JIT_ICALL_mono_array_new_4; break; default: break; } } /* Regular case, rank > 4 or length, lbound specified per rank. */ if (function == MONO_JIT_ICALL_ZeroIsReserved) { // FIXME Maximum value of param_count? Realistically 64. Fits in imm? if (!array_new_localalloc_ins) { MONO_INST_NEW (cfg, array_new_localalloc_ins, OP_LOCALLOC_IMM); array_new_localalloc_ins->dreg = alloc_preg (cfg); cfg->flags |= MONO_CFG_HAS_ALLOCA; MONO_ADD_INS (init_localsbb, array_new_localalloc_ins); } array_new_localalloc_ins->inst_imm = MAX (array_new_localalloc_ins->inst_imm, n * sizeof (target_mgreg_t)); int dreg = array_new_localalloc_ins->dreg; if (2 * rank == n) { /* [lbound, length, lbound, length, ...] * mono_array_new_n_icall expects a non-interleaved list of * lbounds and lengths, so deinterleave here. */ for (int l = 0; l < 2; ++l) { int src = l; int dst = l * rank; for (int r = 0; r < rank; ++r, src += 2, ++dst) { NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, dreg, dst * sizeof (target_mgreg_t), sp [src + 1]->dreg); MONO_ADD_INS (cfg->cbb, ins); } } } else { /* [length, length, length, ...] */ for (int i = 0; i < n; ++i) { NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, dreg, i * sizeof (target_mgreg_t), sp [i + 1]->dreg); MONO_ADD_INS (cfg->cbb, ins); } } EMIT_NEW_ICONST (cfg, ins, n); sp [1] = ins; EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), dreg); ins->type = STACK_PTR; sp [2] = ins; // FIXME Adjust sp by n - 3? Attempts failed.
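/*
 * At this point the localloc'ed buffer holds the bounds in the layout
 * mono_array_new_n_icall expects: e.g. for rank 2 with explicit bounds,
 * [lb0, lb1, len0, len1] rather than the interleaved
 * [lb0, len0, lb1, len1] order the IL stack supplied (see the
 * deinterleave loop above).
 */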
function = MONO_JIT_ICALL_mono_array_new_n_icall; } alloc = mono_emit_jit_icall_id (cfg, function, sp); } else if (cmethod->string_ctor) { g_assert (!context_used); g_assert (!vtable_arg); /* we simply pass a null pointer */ EMIT_NEW_PCONST (cfg, *sp, NULL); /* now call the string ctor */ alloc = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL); } else { if (m_class_is_valuetype (cmethod->klass)) { iargs [0] = mono_compile_create_var (cfg, m_class_get_byval_arg (cmethod->klass), OP_LOCAL); mini_emit_init_rvar (cfg, iargs [0]->dreg, m_class_get_byval_arg (cmethod->klass)); EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0); alloc = NULL; /* * The code generated by mini_emit_virtual_call () expects * iargs [0] to be a boxed instance, but luckily the vcall * will be transformed into a normal call there. */ } else if (context_used) { alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used); *sp = alloc; } else { MonoVTable *vtable = NULL; if (!cfg->compile_aot) vtable = mono_class_vtable_checked (cmethod->klass, cfg->error); CHECK_CFG_ERROR; CHECK_TYPELOAD (cmethod->klass); /* * TypeInitializationExceptions thrown from the mono_runtime_class_init * call in mono_jit_runtime_invoke () can abort the finalizer thread. * As a workaround, we call class cctors before allocating objects. */ if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) { emit_class_init (cfg, cmethod->klass); if (cfg->verbose_level > 2) printf ("class %s.%s needs init call for ctor\n", m_class_get_name_space (cmethod->klass), m_class_get_name (cmethod->klass)); class_inits = g_slist_prepend (class_inits, cmethod->klass); } alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0); *sp = alloc; } CHECK_CFG_EXCEPTION; /*for handle_alloc*/ if (alloc) MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg); /* Now call the actual ctor */ int ctor_inline_costs = 0; handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &ctor_inline_costs); // don't contribute to inline_const if ctor has [MethodImpl(MethodImplOptions.AggressiveInlining)] if (!COMPILE_LLVM(cfg) || !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING)) inline_costs += ctor_inline_costs; CHECK_CFG_EXCEPTION; } if (alloc == NULL) { /* Valuetype */ EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0); mini_type_to_eval_stack_type (cfg, m_class_get_byval_arg (ins->klass), ins); *sp++= ins; } else { *sp++ = alloc; } inline_costs += 5; if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code))) emit_seq_point (cfg, method, next_ip, FALSE, TRUE); break; } case MONO_CEE_CASTCLASS: case MONO_CEE_ISINST: { --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); if (sp [0]->type != STACK_OBJ) UNVERIFIED; MONO_INST_NEW (cfg, ins, (il_op == MONO_CEE_ISINST) ? 
OP_ISINST : OP_CASTCLASS); ins->dreg = alloc_preg (cfg); ins->sreg1 = (*sp)->dreg; ins->klass = klass; ins->type = STACK_OBJ; MONO_ADD_INS (cfg->cbb, ins); CHECK_CFG_EXCEPTION; *sp++ = ins; cfg->flags |= MONO_CFG_HAS_TYPE_CHECK; break; } case MONO_CEE_UNBOX_ANY: { MonoInst *res, *addr; --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); mono_save_token_info (cfg, image, token, klass); context_used = mini_class_check_context_used (cfg, klass); if (mini_is_gsharedvt_klass (klass)) { res = handle_unbox_gsharedvt (cfg, klass, *sp); inline_costs += 2; } else if (mini_class_is_reference (klass)) { if (MONO_INS_IS_PCONST_NULL (*sp)) { EMIT_NEW_PCONST (cfg, res, NULL); res->type = STACK_OBJ; } else { MONO_INST_NEW (cfg, res, OP_CASTCLASS); res->dreg = alloc_preg (cfg); res->sreg1 = (*sp)->dreg; res->klass = klass; res->type = STACK_OBJ; MONO_ADD_INS (cfg->cbb, res); cfg->flags |= MONO_CFG_HAS_TYPE_CHECK; } } else if (mono_class_is_nullable (klass)) { res = handle_unbox_nullable (cfg, *sp, klass, context_used); } else { addr = mini_handle_unbox (cfg, klass, *sp, context_used); /* LDOBJ */ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0); res = ins; inline_costs += 2; } *sp ++ = res; break; } case MONO_CEE_BOX: { MonoInst *val; MonoClass *enum_class; MonoMethod *has_flag; MonoMethodSignature *has_flag_sig; --sp; val = *sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); mono_save_token_info (cfg, image, token, klass); context_used = mini_class_check_context_used (cfg, klass); if (mini_class_is_reference (klass)) { *sp++ = val; break; } val = convert_value (cfg, m_class_get_byval_arg (klass), val); if (klass == mono_defaults.void_class) UNVERIFIED; if (target_type_is_incompatible (cfg, m_class_get_byval_arg (klass), val)) UNVERIFIED; /* frequent check in generic code: box (struct), brtrue */ /* * Look for: * * <push int/long ptr> * <push int/long> * box MyFlags * constrained. MyFlags * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum) * * If we find this sequence and the operand types on box and constrained * are equal, we can emit a specialized instruction sequence instead of * the very slow HasFlag () call. * This code sequence is generated by older mcs/csc, the newer one is handled in * emit_inst_for_method (). */ guint32 constrained_token; guint32 callvirt_token; if ((cfg->opt & MONO_OPT_INTRINS) && // FIXME ip_in_bb as we go? next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) && (ip = il_read_constrained (next_ip, end, &constrained_token)) && ip_in_bb (cfg, cfg->cbb, ip) && (ip = il_read_callvirt (ip, end, &callvirt_token)) && ip_in_bb (cfg, cfg->cbb, ip) && m_class_is_enumtype (klass) && (enum_class = mini_get_class (method, constrained_token, generic_context)) && (has_flag = mini_get_method (cfg, method, callvirt_token, NULL, generic_context)) && has_flag->klass == mono_defaults.enum_class && !strcmp (has_flag->name, "HasFlag") && (has_flag_sig = mono_method_signature_internal (has_flag)) && has_flag_sig->hasthis && has_flag_sig->param_count == 1) { CHECK_TYPELOAD (enum_class); if (enum_class == klass) { MonoInst *enum_this, *enum_flag; next_ip = ip; il_op = MONO_CEE_CALLVIRT; --sp; enum_this = sp [0]; enum_flag = sp [1]; *sp++ = mini_handle_enum_has_flag (cfg, klass, enum_this, -1, enum_flag); break; } } guint32 unbox_any_token; /* * Common in generic code: * box T1, unbox.any T2.
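 * e.g. shared generic code of the shape (T2)(object)val; when T1 == T2 the
 * box/unbox pair cancels out and the value is pushed back unchanged.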
*/ if ((cfg->opt & MONO_OPT_INTRINS) && next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) && (ip = il_read_unbox_any (next_ip, end, &unbox_any_token))) { MonoClass *unbox_klass = mini_get_class (method, unbox_any_token, generic_context); CHECK_TYPELOAD (unbox_klass); if (klass == unbox_klass) { next_ip = ip; *sp++ = val; break; } } // Optimize // // box // call object::GetType() // guint32 gettype_token; if ((ip = il_read_call(next_ip, end, &gettype_token)) && ip_in_bb (cfg, cfg->cbb, ip)) { MonoMethod* gettype_method = mini_get_method (cfg, method, gettype_token, NULL, generic_context); if (!strcmp (gettype_method->name, "GetType") && gettype_method->klass == mono_defaults.object_class) { mono_class_init_internal(klass); if (mono_class_get_checked (m_class_get_image (klass), m_class_get_type_token (klass), error) == klass) { if (cfg->compile_aot) { EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, m_class_get_image (klass), m_class_get_type_token (klass), generic_context); } else { MonoType *klass_type = m_class_get_byval_arg (klass); MonoReflectionType* reflection_type = mono_type_get_object_checked (klass_type, cfg->error); EMIT_NEW_PCONST (cfg, ins, reflection_type); } ins->type = STACK_OBJ; ins->klass = mono_defaults.systemtype_class; *sp++ = ins; next_ip = ip; break; } } } // Optimize // // box // ldnull // ceq (or cgt.un) // // to just // // ldc.i4.0 (or 1) guchar* ldnull_ip; if ((ldnull_ip = il_read_op (next_ip, end, CEE_LDNULL, MONO_CEE_LDNULL)) && ip_in_bb (cfg, cfg->cbb, ldnull_ip)) { gboolean is_eq = FALSE, is_neq = FALSE; if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CEQ))) is_eq = TRUE; else if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CGT_UN))) is_neq = TRUE; if ((is_eq || is_neq) && ip_in_bb (cfg, cfg->cbb, ip) && !mono_class_is_nullable (klass) && !mini_is_gsharedvt_klass (klass)) { next_ip = ip; il_op = (MonoOpcodeEnum) (is_eq ? CEE_LDC_I4_0 : CEE_LDC_I4_1); EMIT_NEW_ICONST (cfg, ins, is_eq ? 0 : 1); ins->type = STACK_I4; *sp++ = ins; break; } } guint32 isinst_tk = 0; if ((ip = il_read_op_and_token (next_ip, end, CEE_ISINST, MONO_CEE_ISINST, &isinst_tk)) && ip_in_bb (cfg, cfg->cbb, ip)) { MonoClass *isinst_class = mini_get_class (method, isinst_tk, generic_context); if (!mono_class_is_nullable (klass) && !mono_class_is_nullable (isinst_class) && !mini_is_gsharedvt_variable_klass (klass) && !mini_is_gsharedvt_variable_klass (isinst_class) && !mono_class_is_open_constructed_type (m_class_get_byval_arg (klass)) && !mono_class_is_open_constructed_type (m_class_get_byval_arg (isinst_class))) { // Optimize // // box // isinst [Type] // brfalse/brtrue // // to // // ldc.i4.0 (or 1) // brfalse/brtrue // guchar* br_ip = NULL; if ((br_ip = il_read_brtrue (ip, end, &target)) || (br_ip = il_read_brtrue_s (ip, end, &target)) || (br_ip = il_read_brfalse (ip, end, &target)) || (br_ip = il_read_brfalse_s (ip, end, &target))) { gboolean isinst = mono_class_is_assignable_from_internal (isinst_class, klass); next_ip = ip; il_op = (MonoOpcodeEnum) (isinst ? CEE_LDC_I4_1 : CEE_LDC_I4_0); EMIT_NEW_ICONST (cfg, ins, isinst ? 
1 : 0); ins->type = STACK_I4; *sp++ = ins; break; } // Optimize // // box // isinst [Type] // ldnull // ceq/cgt.un // // to // // ldc.i4.0 (or 1) // guchar* ldnull_ip = NULL; if ((ldnull_ip = il_read_op (ip, end, CEE_LDNULL, MONO_CEE_LDNULL)) && ip_in_bb (cfg, cfg->cbb, ldnull_ip)) { gboolean is_eq = FALSE, is_neq = FALSE; if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CEQ))) is_eq = TRUE; else if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CGT_UN))) is_neq = TRUE; if ((is_eq || is_neq) && ip_in_bb (cfg, cfg->cbb, ip) && !mono_class_is_nullable (klass) && !mini_is_gsharedvt_klass (klass)) { gboolean isinst = mono_class_is_assignable_from_internal (isinst_class, klass); next_ip = ip; if (is_eq) isinst = !isinst; il_op = (MonoOpcodeEnum) (isinst ? CEE_LDC_I4_1 : CEE_LDC_I4_0); EMIT_NEW_ICONST (cfg, ins, isinst ? 1 : 0); ins->type = STACK_I4; *sp++ = ins; break; } } // Optimize // // box // isinst [Type] // unbox.any // // to // // nop // guchar* unbox_ip = NULL; guint32 unbox_token = 0; if ((unbox_ip = il_read_unbox_any (ip, end, &unbox_token)) && ip_in_bb (cfg, cfg->cbb, unbox_ip)) { MonoClass *unbox_klass = mini_get_class (method, unbox_token, generic_context); CHECK_TYPELOAD (unbox_klass); if (!mono_class_is_nullable (unbox_klass) && !mini_is_gsharedvt_klass (unbox_klass) && klass == isinst_class && klass == unbox_klass) { *sp++ = val; next_ip = unbox_ip; break; } } } } gboolean is_true; // FIXME: LLVM can't handle the inconsistent bb linking if (!mono_class_is_nullable (klass) && !mini_is_gsharedvt_klass (klass) && next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) && ( (is_true = !!(ip = il_read_brtrue (next_ip, end, &target))) || (is_true = !!(ip = il_read_brtrue_s (next_ip, end, &target))) || (ip = il_read_brfalse (next_ip, end, &target)) || (ip = il_read_brfalse_s (next_ip, end, &target)))) { int dreg; MonoBasicBlock *true_bb, *false_bb; il_op = (MonoOpcodeEnum)next_ip [0]; next_ip = ip; if (cfg->verbose_level > 3) { printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL)); printf ("<box+brtrue opt>\n"); } /* * We need to link both bblocks, since it is needed for handling stack * arguments correctly (See test_0_box_brtrue_opt_regress_81102). * Branching to only one of them would lead to inconsistencies, so * generate an ICONST+BRTRUE, the branch opts will get rid of them. */ GET_BBLOCK (cfg, true_bb, target); GET_BBLOCK (cfg, false_bb, next_ip); mono_link_bblock (cfg, cfg->cbb, true_bb); mono_link_bblock (cfg, cfg->cbb, false_bb); if (sp != stack_start) { handle_stack_args (cfg, stack_start, sp - stack_start); sp = stack_start; CHECK_UNVERIFIABLE (cfg); } if (COMPILE_LLVM (cfg)) { dreg = alloc_ireg (cfg); MONO_EMIT_NEW_ICONST (cfg, dreg, 0); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1); MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb); } else { /* The JIT can't eliminate the iconst+compare */ MONO_INST_NEW (cfg, ins, OP_BR); ins->inst_target_bb = is_true ? 
true_bb : false_bb; MONO_ADD_INS (cfg->cbb, ins); } start_new_bblock = 1; break; } if (m_class_is_enumtype (klass) && !mini_is_gsharedvt_klass (klass) && !(val->type == STACK_I8 && TARGET_SIZEOF_VOID_P == 4)) { /* Can't do this with 64 bit enums on 32 bit since the vtype decomp pass is run after the long decomp pass */ if (val->opcode == OP_ICONST) { MONO_INST_NEW (cfg, ins, OP_BOX_ICONST); ins->type = STACK_OBJ; ins->klass = klass; ins->inst_c0 = val->inst_c0; ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type); } else { MONO_INST_NEW (cfg, ins, OP_BOX); ins->type = STACK_OBJ; ins->klass = klass; ins->sreg1 = val->dreg; ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type); } MONO_ADD_INS (cfg->cbb, ins); *sp++ = ins; } else { *sp++ = mini_emit_box (cfg, val, klass, context_used); } CHECK_CFG_EXCEPTION; inline_costs += 1; break; } case MONO_CEE_UNBOX: { --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); mono_save_token_info (cfg, image, token, klass); context_used = mini_class_check_context_used (cfg, klass); if (mono_class_is_nullable (klass)) { MonoInst *val; val = handle_unbox_nullable (cfg, *sp, klass, context_used); EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), m_class_get_byval_arg (val->klass)); *sp++= ins; } else { ins = mini_handle_unbox (cfg, klass, *sp, context_used); *sp++ = ins; } inline_costs += 2; break; } case MONO_CEE_LDFLD: case MONO_CEE_LDFLDA: case MONO_CEE_STFLD: case MONO_CEE_LDSFLD: case MONO_CEE_LDSFLDA: case MONO_CEE_STSFLD: { MonoClassField *field; guint foffset; gboolean is_instance; gpointer addr = NULL; gboolean is_special_static; MonoType *ftype; MonoInst *store_val = NULL; MonoInst *thread_ins; is_instance = (il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDFLDA || il_op == MONO_CEE_STFLD); if (is_instance) { if (il_op == MONO_CEE_STFLD) { sp -= 2; store_val = sp [1]; } else { --sp; } if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8) UNVERIFIED; if (il_op != MONO_CEE_LDFLD && sp [0]->type == STACK_VTYPE) UNVERIFIED; } else { if (il_op == MONO_CEE_STSFLD) { sp--; store_val = sp [0]; } } if (method->wrapper_type != MONO_WRAPPER_NONE) { field = (MonoClassField *)mono_method_get_wrapper_data (method, token); klass = m_field_get_parent (field); } else { klass = NULL; field = mono_field_from_token_checked (image, token, &klass, generic_context, cfg->error); if (!field) CHECK_TYPELOAD (klass); CHECK_CFG_ERROR; } if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field)) FIELD_ACCESS_FAILURE (method, field); mono_class_init_internal (klass); mono_class_setup_fields (klass); ftype = mono_field_get_type_internal (field); /* * LDFLD etc. is usable on static fields as well, so convert those cases to * the static case. */ if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) { switch (il_op) { case MONO_CEE_LDFLD: il_op = MONO_CEE_LDSFLD; break; case MONO_CEE_STFLD: il_op = MONO_CEE_STSFLD; break; case MONO_CEE_LDFLDA: il_op = MONO_CEE_LDSFLDA; break; default: g_assert_not_reached (); } is_instance = FALSE; } context_used = mini_class_check_context_used (cfg, klass); if (il_op == MONO_CEE_LDSFLD) { ins = mini_emit_inst_for_field_load (cfg, field); if (ins) { *sp++ = ins; goto field_access_end; } } /* INSTANCE CASE */ if (is_instance) g_assert (field->offset); foffset = m_class_is_valuetype (klass) ?
field->offset - MONO_ABI_SIZEOF (MonoObject): field->offset; if (il_op == MONO_CEE_STFLD) { sp [1] = convert_value (cfg, field->type, sp [1]); if (target_type_is_incompatible (cfg, field->type, sp [1])) UNVERIFIED; { MonoInst *store; MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ()); if (ins_flag & MONO_INST_VOLATILE) { /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); } if (mini_is_gsharedvt_klass (klass)) { MonoInst *offset_ins; context_used = mini_class_check_context_used (cfg, klass); offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET); /* The value is offset by 1 */ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1); dreg = alloc_ireg_mp (cfg); EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg); if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) { store = mini_emit_storing_write_barrier (cfg, ins, sp [1]); } else { /* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg); } } else { if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) { /* insert call to write barrier */ MonoInst *ptr; int dreg; dreg = alloc_ireg_mp (cfg); EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset); store = mini_emit_storing_write_barrier (cfg, ptr, sp [1]); } else { EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg); } } if (sp [0]->opcode != OP_LDADDR) store->flags |= MONO_INST_FAULT; store->flags |= ins_flag; } goto field_access_end; } if (is_instance) { if (sp [0]->type == STACK_VTYPE) { MonoInst *var; /* Have to compute the address of the variable */ var = get_vreg_to_inst (cfg, sp [0]->dreg); if (!var) var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, sp [0]->dreg); else g_assert (var->klass == klass); EMIT_NEW_VARLOADA (cfg, ins, var, m_class_get_byval_arg (var->klass)); sp [0] = ins; } if (il_op == MONO_CEE_LDFLDA) { if (sp [0]->type == STACK_OBJ) { MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0); MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException"); } dreg = alloc_ireg_mp (cfg); if (mini_is_gsharedvt_klass (klass)) { MonoInst *offset_ins; offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET); /* The value is offset by 1 */ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1); EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg); } else { EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset); } ins->klass = mono_class_from_mono_type_internal (field->type); ins->type = STACK_MP; *sp++ = ins; } else { MonoInst *load; MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ()); #ifdef MONO_ARCH_SIMD_INTRINSICS if (sp [0]->opcode == OP_LDADDR && m_class_is_simd_type (klass) && cfg->opt & MONO_OPT_SIMD) { ins = mono_emit_simd_field_load (cfg, field, sp [0]); if (ins) { *sp++ = ins; goto field_access_end; } } #endif MonoInst *field_add_inst = sp [0]; if (mini_is_gsharedvt_klass (klass)) { MonoInst *offset_ins; offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET); /* The value is offset by 1 */ EMIT_NEW_BIALU_IMM (cfg, 
ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1); EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg); foffset = 0; } load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag); if (sp [0]->opcode != OP_LDADDR) load->flags |= MONO_INST_FAULT; *sp++ = load; } } if (is_instance) goto field_access_end; /* STATIC CASE */ context_used = mini_class_check_context_used (cfg, klass); if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) { mono_error_set_field_missing (cfg->error, m_field_get_parent (field), field->name, NULL, "Using static instructions with literal field"); CHECK_CFG_ERROR; } /* The special_static_fields field is init'd in mono_class_vtable, so it needs * to be called here. */ if (!context_used) { mono_class_vtable_checked (klass, cfg->error); CHECK_CFG_ERROR; CHECK_TYPELOAD (klass); } addr = mono_special_static_field_get_offset (field, cfg->error); CHECK_CFG_ERROR; CHECK_TYPELOAD (klass); is_special_static = mono_class_field_is_special_static (field); if (is_special_static && ((gsize)addr & 0x80000000) == 0) thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD); else thread_ins = NULL; /* Generate IR to compute the field address */ if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))) { /* * Fast access to TLS data * Inline version of get_thread_static_data () in * threads.c. */ guint32 offset; int idx, static_data_reg, array_reg, dreg; static_data_reg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data)); if (cfg->compile_aot || context_used) { int offset_reg, offset2_reg, idx_reg; /* For TLS variables, this will return the TLS offset */ if (context_used) { MonoInst *addr_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET); /* The value is offset by 1 */ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, addr_ins->dreg, addr_ins->dreg, 1); } else { EMIT_NEW_SFLDACONST (cfg, ins, field); } offset_reg = ins->dreg; MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff); idx_reg = alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, TARGET_SIZEOF_VOID_P == 8 ? 
3 : 2); MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg); array_reg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0); offset2_reg = alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff); dreg = alloc_ireg (cfg); EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg); } else { offset = (gsize)addr & 0x7fffffff; idx = offset & 0x3f; array_reg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * TARGET_SIZEOF_VOID_P); dreg = alloc_ireg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff)); } } else if ((cfg->compile_aot && is_special_static) || (context_used && is_special_static)) { MonoInst *iargs [1]; g_assert (m_field_get_parent (field)); if (context_used) { iargs [0] = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_CLASS_FIELD); } else { EMIT_NEW_FIELDCONST (cfg, iargs [0], field); } ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs); } else if (context_used) { MonoInst *static_data; /* g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n", method->klass->name_space, method->klass->name, method->name, depth, field->offset); */ if (mono_class_needs_cctor_run (klass, method)) emit_class_init (cfg, klass); /* * The pointer we're computing here is * * super_info.static_data + field->offset */ static_data = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_STATIC_DATA); if (mini_is_gsharedvt_klass (klass)) { MonoInst *offset_ins; offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET); /* The value is offset by 1 */ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1); dreg = alloc_ireg_mp (cfg); EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg); } else if (field->offset == 0) { ins = static_data; } else { int addr_reg = mono_alloc_preg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset); } } else if (cfg->compile_aot && addr) { MonoInst *iargs [1]; g_assert (m_field_get_parent (field)); EMIT_NEW_FIELDCONST (cfg, iargs [0], field); ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs); } else { MonoVTable *vtable = NULL; if (!cfg->compile_aot) vtable = mono_class_vtable_checked (klass, cfg->error); CHECK_CFG_ERROR; CHECK_TYPELOAD (klass); if (!addr) { if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) { if (!(g_slist_find (class_inits, klass))) { emit_class_init (cfg, klass); if (cfg->verbose_level > 2) printf ("class %s.%s needs init call for %s\n", m_class_get_name_space (klass), m_class_get_name (klass), mono_field_get_name (field)); class_inits = g_slist_prepend (class_inits, klass); } } else { if (cfg->run_cctors) { /* This makes so that inline cannot trigger */ /* .cctors: too many apps depend on them */ /* running with a specific order... 
 */
			g_assert (vtable);
			if (!vtable->initialized && m_class_has_cctor (vtable->klass))
				INLINE_FAILURE ("class init");
			if (!mono_runtime_class_init_full (vtable, cfg->error)) {
				mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
				goto exception_exit;
			}
		}
	}
	if (cfg->compile_aot)
		EMIT_NEW_SFLDACONST (cfg, ins, field);
	else {
		g_assert (vtable);
		addr = mono_static_field_get_addr (vtable, field);
		g_assert (addr);
		EMIT_NEW_PCONST (cfg, ins, addr);
	}
} else {
	MonoInst *iargs [1];

	EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
	ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
}
}

/* Generate IR to do the actual load/store operation */

if ((il_op == MONO_CEE_STFLD || il_op == MONO_CEE_STSFLD)) {
	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
	} else if (!mini_debug_options.weak_memory_model && mini_type_is_reference (ftype)) {
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
	}
}

if (il_op == MONO_CEE_LDSFLDA) {
	ins->klass = mono_class_from_mono_type_internal (ftype);
	ins->type = STACK_PTR;
	*sp++ = ins;
} else if (il_op == MONO_CEE_STSFLD) {
	MonoInst *store;

	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
	store->flags |= ins_flag;
} else {
	gboolean is_const = FALSE;
	MonoVTable *vtable = NULL;
	gpointer addr = NULL;

	if (!context_used) {
		vtable = mono_class_vtable_checked (klass, cfg->error);
		CHECK_CFG_ERROR;
		CHECK_TYPELOAD (klass);
	}
	if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) &&
			(((addr = mono_aot_readonly_field_override (field)) != NULL) || (!context_used && !cfg->compile_aot && vtable->initialized))) {
		int ro_type = ftype->type;
		if (!addr)
			addr = mono_static_field_get_addr (vtable, field);
		if (ro_type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (ftype->data.klass)) {
			ro_type = mono_class_enum_basetype_internal (ftype->data.klass)->type;
		}

		GSHAREDVT_FAILURE (il_op);

		/* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
		is_const = TRUE;
		switch (ro_type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_U1:
			EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
			sp++;
			break;
		case MONO_TYPE_I1:
			EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
			sp++;
			break;
		case MONO_TYPE_CHAR:
		case MONO_TYPE_U2:
			EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
			sp++;
			break;
		case MONO_TYPE_I2:
			EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
			sp++;
			break;
		case MONO_TYPE_I4:
			EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
			sp++;
			break;
		case MONO_TYPE_U4:
			EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
			sp++;
			break;
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
			EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
			mini_type_to_eval_stack_type ((cfg), field->type, *sp);
			sp++;
			break;
		case MONO_TYPE_STRING:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			if (!mono_gc_is_moving ()) {
				EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
				mini_type_to_eval_stack_type ((cfg), field->type, *sp);
				sp++;
			} else {
				is_const = FALSE;
			}
			break;
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
			sp++;
			break;
		case MONO_TYPE_R4:
		case MONO_TYPE_R8:
		case MONO_TYPE_VALUETYPE:
		default:
			is_const = FALSE;
			break;
		}
	}

	if (!is_const) {
		MonoInst *load;

		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
		load->flags |= ins_flag;
		*sp++ = load;
	}
}

field_access_end:
if ((il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDSFLD) &&
(ins_flag & MONO_INST_VOLATILE)) { /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ); } ins_flag = 0; break; } case MONO_CEE_STOBJ: sp -= 2; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); /* FIXME: should check item at sp [1] is compatible with the type of the store. */ mini_emit_memory_store (cfg, m_class_get_byval_arg (klass), sp [0], sp [1], ins_flag); ins_flag = 0; inline_costs += 1; break; /* * Array opcodes */ case MONO_CEE_NEWARR: { MonoInst *len_ins; const char *data_ptr; int data_size = 0; guint32 field_token; --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); if (m_class_get_byval_arg (klass)->type == MONO_TYPE_VOID) UNVERIFIED; context_used = mini_class_check_context_used (cfg, klass); #ifndef TARGET_S390X if (sp [0]->type == STACK_I8 && TARGET_SIZEOF_VOID_P == 4) { MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4); ins->sreg1 = sp [0]->dreg; ins->type = STACK_I4; ins->dreg = alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, ins); *sp = mono_decompose_opcode (cfg, ins); } #else /* The array allocator expects a 64-bit input, and we cannot rely on the high bits of a 32-bit result, so we have to extend. */ if (sp [0]->type == STACK_I4 && TARGET_SIZEOF_VOID_P == 8) { MONO_INST_NEW (cfg, ins, OP_ICONV_TO_I8); ins->sreg1 = sp [0]->dreg; ins->type = STACK_I8; ins->dreg = alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, ins); *sp = mono_decompose_opcode (cfg, ins); } #endif if (context_used) { MonoInst *args [3]; MonoClass *array_class = mono_class_create_array (klass, 1); MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class); /* FIXME: Use OP_NEWARR and decompose later to help abcrem */ /* vtable */ args [0] = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE); /* array len */ args [1] = sp [0]; if (managed_alloc) ins = mono_emit_method_call (cfg, managed_alloc, args, NULL); else ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args); } else { /* Decompose later since it is needed by abcrem */ MonoClass *array_type = mono_class_create_array (klass, 1); mono_class_vtable_checked (array_type, cfg->error); CHECK_CFG_ERROR; CHECK_TYPELOAD (array_type); MONO_INST_NEW (cfg, ins, OP_NEWARR); ins->dreg = alloc_ireg_ref (cfg); ins->sreg1 = sp [0]->dreg; ins->inst_newa_class = klass; ins->type = STACK_OBJ; ins->klass = array_type; MONO_ADD_INS (cfg->cbb, ins); cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE; cfg->cbb->needs_decompose = TRUE; /* Needed so mono_emit_load_get_addr () gets called */ mono_get_got_var (cfg); } len_ins = sp [0]; ip += 5; *sp++ = ins; inline_costs += 1; /* * we inline/optimize the initialization sequence if possible. 
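 * (initialize_array_data () below recognizes the ldtoken + RuntimeHelpers::InitializeArray
 * sequence that compilers emit for constant array initializers)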
* we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing * for small sizes open code the memcpy * ensure the rva field is big enough */ if ((cfg->opt & MONO_OPT_INTRINS) && next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (cfg, method, cfg->compile_aot, next_ip, end, klass, len_ins->inst_c0, &data_size, &field_token, &il_op, &next_ip))) { MonoMethod *memcpy_method = mini_get_memcpy_method (); MonoInst *iargs [3]; int add_reg = alloc_ireg_mp (cfg); EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector)); if (cfg->compile_aot) { EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, m_class_get_image (method->klass), GPOINTER_TO_UINT(field_token), STACK_PTR, NULL); } else { EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr); } EMIT_NEW_ICONST (cfg, iargs [2], data_size); mono_emit_method_call (cfg, memcpy_method, iargs, NULL); } break; } case MONO_CEE_LDLEN: --sp; if (sp [0]->type != STACK_OBJ) UNVERIFIED; MONO_INST_NEW (cfg, ins, OP_LDLEN); ins->dreg = alloc_preg (cfg); ins->sreg1 = sp [0]->dreg; ins->inst_imm = MONO_STRUCT_OFFSET (MonoArray, max_length); ins->type = STACK_I4; /* This flag will be inherited by the decomposition */ ins->flags |= MONO_INST_FAULT | MONO_INST_INVARIANT_LOAD; MONO_ADD_INS (cfg->cbb, ins); cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE; cfg->cbb->needs_decompose = TRUE; MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, sp [0]->dreg); *sp++ = ins; break; case MONO_CEE_LDELEMA: sp -= 2; if (sp [0]->type != STACK_OBJ) UNVERIFIED; cfg->flags |= MONO_CFG_HAS_LDELEMA; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); /* we need to make sure that this array is exactly the type it needs * to be for correctness. 
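 * (per ECMA-335, ldelema on a covariant array reference must throw
 * ArrayTypeMismatchException unless the element type matches exactly)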
the wrappers are lax with their usage * so we need to ignore them here */ if (!m_class_is_valuetype (klass) && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) { MonoClass *array_class = mono_class_create_array (klass, 1); mini_emit_check_array_type (cfg, sp [0], array_class); CHECK_TYPELOAD (array_class); } readonly = FALSE; ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE); *sp++ = ins; break; case MONO_CEE_LDELEM: case MONO_CEE_LDELEM_I1: case MONO_CEE_LDELEM_U1: case MONO_CEE_LDELEM_I2: case MONO_CEE_LDELEM_U2: case MONO_CEE_LDELEM_I4: case MONO_CEE_LDELEM_U4: case MONO_CEE_LDELEM_I8: case MONO_CEE_LDELEM_I: case MONO_CEE_LDELEM_R4: case MONO_CEE_LDELEM_R8: case MONO_CEE_LDELEM_REF: { MonoInst *addr; sp -= 2; if (il_op == MONO_CEE_LDELEM) { klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); mono_class_init_internal (klass); } else klass = array_access_to_klass (il_op); if (sp [0]->type != STACK_OBJ) UNVERIFIED; cfg->flags |= MONO_CFG_HAS_LDELEMA; if (mini_is_gsharedvt_variable_klass (klass)) { // FIXME-VT: OP_ICONST optimization addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE); EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0); ins->opcode = OP_LOADV_MEMBASE; } else if (sp [1]->opcode == OP_ICONST) { int array_reg = sp [0]->dreg; int index_reg = sp [1]->dreg; int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector); if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg)) MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg); MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg); EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset); } else { addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE); EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0); } *sp++ = ins; break; } case MONO_CEE_STELEM_I: case MONO_CEE_STELEM_I1: case MONO_CEE_STELEM_I2: case MONO_CEE_STELEM_I4: case MONO_CEE_STELEM_I8: case MONO_CEE_STELEM_R4: case MONO_CEE_STELEM_R8: case MONO_CEE_STELEM_REF: case MONO_CEE_STELEM: { sp -= 3; cfg->flags |= MONO_CFG_HAS_LDELEMA; if (il_op == MONO_CEE_STELEM) { klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); mono_class_init_internal (klass); } else klass = array_access_to_klass (il_op); if (sp [0]->type != STACK_OBJ) UNVERIFIED; sp [2] = convert_value (cfg, m_class_get_byval_arg (klass), sp [2]); mini_emit_array_store (cfg, klass, sp, TRUE); inline_costs += 1; break; } case MONO_CEE_CKFINITE: { --sp; if (cfg->llvm_only) { MonoInst *iargs [1]; iargs [0] = sp [0]; *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs); } else { sp [0] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.double_class), sp [0]); MONO_INST_NEW (cfg, ins, OP_CKFINITE); ins->sreg1 = sp [0]->dreg; ins->dreg = alloc_freg (cfg); ins->type = STACK_R8; MONO_ADD_INS (cfg->cbb, ins); *sp++ = mono_decompose_opcode (cfg, ins); } break; } case MONO_CEE_REFANYVAL: { MonoInst *src_var, *src; int klass_reg = alloc_preg (cfg); int dreg = alloc_preg (cfg); GSHAREDVT_FAILURE (il_op); MONO_INST_NEW (cfg, ins, il_op); --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); context_used = mini_class_check_context_used (cfg, klass); // FIXME: src_var = get_vreg_to_inst (cfg, sp [0]->dreg); if (!src_var) src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg 
(mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg); EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass)); if (context_used) { MonoInst *klass_ins; klass_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS); // FIXME: MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg); MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException"); } else { mini_emit_class_check (cfg, klass_reg, klass); } EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value)); ins->type = STACK_MP; ins->klass = klass; *sp++ = ins; break; } case MONO_CEE_MKREFANY: { MonoInst *loc, *addr; GSHAREDVT_FAILURE (il_op); MONO_INST_NEW (cfg, ins, il_op); --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); context_used = mini_class_check_context_used (cfg, klass); loc = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL); EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0); MonoInst *const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS); int type_reg = alloc_preg (cfg); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, m_class_offsetof_byval_arg ()); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg); EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0); ins->type = STACK_VTYPE; ins->klass = mono_defaults.typed_reference_class; *sp++ = ins; break; } case MONO_CEE_LDTOKEN: { gpointer handle; MonoClass *handle_class; if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) { handle = mono_method_get_wrapper_data (method, n); handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1); if (handle_class == mono_defaults.typehandle_class) handle = m_class_get_byval_arg ((MonoClass*)handle); } else { handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, cfg->error); CHECK_CFG_ERROR; } if (!handle) LOAD_ERROR; mono_class_init_internal (handle_class); if (cfg->gshared) { if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF || mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) { /* This case handles ldtoken of an open type, like for typeof(Gen<>). 
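 * An open type has no runtime generic context, so no rgctx lookup is needed.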
*/ context_used = 0; } else if (handle_class == mono_defaults.typehandle_class) { context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type_internal ((MonoType *)handle)); } else if (handle_class == mono_defaults.fieldhandle_class) context_used = mini_class_check_context_used (cfg, m_field_get_parent (((MonoClassField*)handle))); else if (handle_class == mono_defaults.methodhandle_class) context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle); else g_assert_not_reached (); } { if ((next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && ((next_ip [0] == CEE_CALL) || (next_ip [0] == CEE_CALLVIRT)) && (cmethod = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context)) && (cmethod->klass == mono_defaults.systemtype_class) && (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) { MonoClass *tclass = mono_class_from_mono_type_internal ((MonoType *)handle); mono_class_init_internal (tclass); // Optimize to true/false if next instruction is `call instance bool Type::get_IsValueType()` guchar *is_vt_ip; guint32 is_vt_token; if ((is_vt_ip = il_read_call (next_ip + 5, end, &is_vt_token)) && ip_in_bb (cfg, cfg->cbb, is_vt_ip)) { MonoMethod *is_vt_method = mini_get_method (cfg, method, is_vt_token, NULL, generic_context); if (is_vt_method->klass == mono_defaults.systemtype_class && !mini_is_gsharedvt_variable_klass (tclass) && !mono_class_is_open_constructed_type (m_class_get_byval_arg (tclass)) && !strcmp ("get_IsValueType", is_vt_method->name)) { next_ip = is_vt_ip; EMIT_NEW_ICONST (cfg, ins, m_class_is_valuetype (tclass) ? 1 : 0); ins->type = STACK_I4; *sp++ = ins; break; } } if (context_used) { MONO_INST_NEW (cfg, ins, OP_RTTYPE); ins->dreg = alloc_ireg_ref (cfg); ins->inst_p0 = tclass; ins->type = STACK_OBJ; MONO_ADD_INS (cfg->cbb, ins); cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE; cfg->cbb->needs_decompose = TRUE; } else if (cfg->compile_aot) { if (method->wrapper_type) { error_init (error); //got to do it since there are multiple conditionals below if (mono_class_get_checked (m_class_get_image (tclass), m_class_get_type_token (tclass), error) == tclass && !generic_context) { /* Special case for static synchronized wrappers */ EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, m_class_get_image (tclass), m_class_get_type_token (tclass), generic_context); } else { mono_error_cleanup (error); /* FIXME don't swallow the error */ /* FIXME: n is not a normal token */ DISABLE_AOT (cfg); EMIT_NEW_PCONST (cfg, ins, NULL); } } else { EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context); } } else { MonoReflectionType *rt = mono_type_get_object_checked ((MonoType *)handle, cfg->error); CHECK_CFG_ERROR; EMIT_NEW_PCONST (cfg, ins, rt); } ins->type = STACK_OBJ; ins->klass = mono_defaults.runtimetype_class; il_op = (MonoOpcodeEnum)next_ip [0]; next_ip += 5; } else { MonoInst *addr, *vtvar; vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (handle_class), OP_LOCAL); if (context_used) { if (handle_class == mono_defaults.typehandle_class) { ins = mini_emit_get_rgctx_klass (cfg, context_used, mono_class_from_mono_type_internal ((MonoType *)handle), MONO_RGCTX_INFO_TYPE); } else if (handle_class == mono_defaults.methodhandle_class) { ins = emit_get_rgctx_method (cfg, context_used, (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD); } else if (handle_class == mono_defaults.fieldhandle_class) { ins = emit_get_rgctx_field (cfg, context_used, (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD); } else { g_assert_not_reached (); } } else if 
(cfg->compile_aot) {
					EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
				} else {
					EMIT_NEW_PCONST (cfg, ins, handle);
				}
				EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
				EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
			}
		}

		*sp++ = ins;
		break;
	}
	case MONO_CEE_THROW:
		if (sp [-1]->type != STACK_OBJ)
			UNVERIFIED;
		MONO_INST_NEW (cfg, ins, OP_THROW);
		--sp;
		ins->sreg1 = sp [0]->dreg;
		cfg->cbb->out_of_line = TRUE;
		MONO_ADD_INS (cfg->cbb, ins);
		MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
		MONO_ADD_INS (cfg->cbb, ins);
		sp = stack_start;
		link_bblock (cfg, cfg->cbb, end_bblock);
		start_new_bblock = 1;
		/* This can complicate code generation for llvm since the return value might not be defined */
		if (COMPILE_LLVM (cfg))
			INLINE_FAILURE ("throw");
		break;
	case MONO_CEE_ENDFINALLY:
		if (!ip_in_finally_clause (cfg, ip - header->code))
			UNVERIFIED;
		/* mono_save_seq_point_info () depends on this */
		if (sp != stack_start)
			emit_seq_point (cfg, method, ip, FALSE, FALSE);
		MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
		MONO_ADD_INS (cfg->cbb, ins);
		start_new_bblock = 1;
		ins_has_side_effect = FALSE;

		/*
		 * Control will leave the method so empty the stack, otherwise
		 * the next basic block will start with a nonempty stack.
		 */
		while (sp != stack_start) {
			sp--;
		}
		break;
	case MONO_CEE_LEAVE:
	case MONO_CEE_LEAVE_S: {
		GList *handlers;

		/* empty the stack */
		g_assert (sp >= stack_start);
		sp = stack_start;

		/*
		 * If this leave statement is in a catch block, check for a
		 * pending exception, and rethrow it if necessary.
		 * We avoid doing this in runtime invoke wrappers, since those are called
		 * by native code which expects the wrapper to catch all exceptions.
		 */
		for (i = 0; i < header->num_clauses; ++i) {
			MonoExceptionClause *clause = &header->clauses [i];

			/*
			 * Use <= in the final comparison to handle clauses with multiple
			 * leave statements, like in bug #78024.
			 * The ordering of the exception clauses guarantees that we find the
			 * innermost clause.
			 */
			if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((il_op == MONO_CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
				MonoInst *exc_ins;
				MonoBasicBlock *dont_throw;

				/*
				  MonoInst *load;
				  NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
				*/

				exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);

				NEW_BBLOCK (cfg, dont_throw);

				/*
				 * Currently, we always rethrow the abort exception, despite the
				 * fact that this is not correct. See thread6.cs for an example.
				 * But propagating the abort exception is more important than
				 * getting the semantics right.
				 */
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
				MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);

				MONO_START_BB (cfg, dont_throw);
			}
		}

#ifdef ENABLE_LLVM
		cfg->cbb->try_end = (intptr_t)(ip - header->code);
#endif

		if ((handlers = mono_find_leave_clauses (cfg, ip, target))) {
			GList *tmp;

			/*
			 * For each finally clause that we exit we need to invoke the finally block.
			 * After each invocation we need to add try holes for all the clauses that
			 * we already exited.
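			 * (the try holes exclude the OP_CALL_HANDLER sequence from the coverage of the
			 * clauses we have already left, see cfg->cbb->clause_holes below)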
*/ for (tmp = handlers; tmp; tmp = tmp->next) { MonoLeaveClause *leave = (MonoLeaveClause *) tmp->data; MonoExceptionClause *clause = leave->clause; if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) continue; MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset); MonoBasicBlock *dont_throw; /* * Emit instrumentation code before linking the basic blocks below as this * will alter cfg->cbb. */ mini_profiler_emit_call_finally (cfg, header, ip, leave->index, clause); tblock = cfg->cil_offset_to_bb [clause->handler_offset]; g_assert (tblock); link_bblock (cfg, cfg->cbb, tblock); MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0); MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER); ins->inst_target_bb = tblock; ins->inst_eh_blocks = tmp; MONO_ADD_INS (cfg->cbb, ins); cfg->cbb->has_call_handler = 1; /* Throw exception if exvar is set */ /* FIXME Do we need this for calls from catch/filter ? */ NEW_BBLOCK (cfg, dont_throw); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw); mono_emit_jit_icall (cfg, ves_icall_thread_finish_async_abort, NULL); cfg->cbb->clause_holes = tmp; MONO_START_BB (cfg, dont_throw); cfg->cbb->clause_holes = tmp; if (COMPILE_LLVM (cfg)) { MonoBasicBlock *target_bb; /* * Link the finally bblock with the target, since it will * conceptually branch there. */ GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1); GET_BBLOCK (cfg, target_bb, target); link_bblock (cfg, tblock, target_bb); } } } MONO_INST_NEW (cfg, ins, OP_BR); MONO_ADD_INS (cfg->cbb, ins); GET_BBLOCK (cfg, tblock, target); link_bblock (cfg, cfg->cbb, tblock); ins->inst_target_bb = tblock; start_new_bblock = 1; break; } /* * Mono specific opcodes */ case MONO_CEE_MONO_ICALL: { g_assert (method->wrapper_type != MONO_WRAPPER_NONE); const MonoJitICallId jit_icall_id = (MonoJitICallId)token; MonoJitICallInfo * const info = mono_find_jit_icall_info (jit_icall_id); CHECK_STACK (info->sig->param_count); sp -= info->sig->param_count; if (token == MONO_JIT_ICALL_mono_threads_attach_coop) { MonoInst *addr; MonoBasicBlock *next_bb; if (cfg->compile_aot) { /* * This is called on unattached threads, so it cannot go through the trampoline * infrastructure. Use an indirect call through a got slot initialized at load time * instead. */ EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, GUINT_TO_POINTER (jit_icall_id)); ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL); } else { ins = mono_emit_jit_icall_id (cfg, jit_icall_id, sp); } /* * Parts of the initlocals code needs to come after this, since it might call methods like memset. * Also profiling needs to be after attach. 
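 * init_localsbb2 records the first bblock after the attach call for that purpose.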
 */
				init_localsbb2 = cfg->cbb;
				NEW_BBLOCK (cfg, next_bb);
				MONO_START_BB (cfg, next_bb);
			} else {
				if (token == MONO_JIT_ICALL_mono_threads_detach_coop) {
					/* can't emit profiling code after a detach, so emit it now */
					mini_profiler_emit_leave (cfg, NULL);
					detached_before_ret = TRUE;
				}
				ins = mono_emit_jit_icall_id (cfg, jit_icall_id, sp);
			}

			if (!MONO_TYPE_IS_VOID (info->sig->ret))
				*sp++ = ins;

			inline_costs += CALL_COST * MIN(10, num_calls++);
			break;
		}

		MonoJumpInfoType ldptr_type;

		case MONO_CEE_MONO_LDPTR_CARD_TABLE:
			ldptr_type = MONO_PATCH_INFO_GC_CARD_TABLE_ADDR;
			goto mono_ldptr;
		case MONO_CEE_MONO_LDPTR_NURSERY_START:
			ldptr_type = MONO_PATCH_INFO_GC_NURSERY_START;
			goto mono_ldptr;
		case MONO_CEE_MONO_LDPTR_NURSERY_BITS:
			ldptr_type = MONO_PATCH_INFO_GC_NURSERY_BITS;
			goto mono_ldptr;
		case MONO_CEE_MONO_LDPTR_INT_REQ_FLAG:
			ldptr_type = MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG;
			goto mono_ldptr;
		case MONO_CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT:
			ldptr_type = MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT;
		mono_ldptr:
			g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
			ins = mini_emit_runtime_constant (cfg, ldptr_type, NULL);
			*sp++ = ins;
			inline_costs += CALL_COST * MIN(10, num_calls++);
			break;

		case MONO_CEE_MONO_LDPTR: {
			gpointer ptr;

			g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
			ptr = mono_method_get_wrapper_data (method, token);
			EMIT_NEW_PCONST (cfg, ins, ptr);
			*sp++ = ins;
			inline_costs += CALL_COST * MIN(10, num_calls++);
			/* Can't embed random pointers into AOT code */
			DISABLE_AOT (cfg);
			break;
		}
		case MONO_CEE_MONO_JIT_ICALL_ADDR:
			g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
			EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, GUINT_TO_POINTER (token));
			*sp++ = ins;
			inline_costs += CALL_COST * MIN(10, num_calls++);
			break;
		case MONO_CEE_MONO_ICALL_ADDR: {
			MonoMethod *cmethod;
			gpointer ptr;

			g_assert (method->wrapper_type != MONO_WRAPPER_NONE);

			cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);

			if (cfg->compile_aot) {
				if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
					/*
					 * This is generated by emit_native_wrapper () to resolve the pinvoke address
					 * before the call, it's not needed when using direct pinvoke.
					 * This is not an optimization, but it's used to avoid looking up pinvokes
					 * on platforms which don't support dlopen ().
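					 * The NULL placeholder emitted below is harmless: the ip [6] == CEE_POP check
					 * above guarantees the value is immediately discarded.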
*/ EMIT_NEW_PCONST (cfg, ins, NULL); } else { EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod); } } else { ptr = mono_lookup_internal_call (cmethod); g_assert (ptr); EMIT_NEW_PCONST (cfg, ins, ptr); } *sp++ = ins; break; } case MONO_CEE_MONO_VTADDR: { g_assert (method->wrapper_type != MONO_WRAPPER_NONE); MonoInst *src_var, *src; --sp; // FIXME: src_var = get_vreg_to_inst (cfg, sp [0]->dreg); EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype); *sp++ = src; break; } case MONO_CEE_MONO_NEWOBJ: { g_assert (method->wrapper_type != MONO_WRAPPER_NONE); MonoInst *iargs [2]; klass = (MonoClass *)mono_method_get_wrapper_data (method, token); mono_class_init_internal (klass); NEW_CLASSCONST (cfg, iargs [0], klass); MONO_ADD_INS (cfg->cbb, iargs [0]); *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs); inline_costs += CALL_COST * MIN(10, num_calls++); break; } case MONO_CEE_MONO_OBJADDR: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); --sp; MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = alloc_ireg_mp (cfg); ins->sreg1 = sp [0]->dreg; ins->type = STACK_MP; MONO_ADD_INS (cfg->cbb, ins); *sp++ = ins; break; case MONO_CEE_MONO_LDNATIVEOBJ: /* * Similar to LDOBJ, but instead load the unmanaged * representation of the vtype to the stack. */ g_assert (method->wrapper_type != MONO_WRAPPER_NONE); --sp; klass = (MonoClass *)mono_method_get_wrapper_data (method, token); g_assert (m_class_is_valuetype (klass)); mono_class_init_internal (klass); { MonoInst *src, *dest, *temp; src = sp [0]; temp = mono_compile_create_var (cfg, m_class_get_byval_arg (klass), OP_LOCAL); temp->backend.is_pinvoke = 1; EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0); mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0); EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0); dest->type = STACK_VTYPE; dest->klass = klass; *sp ++ = dest; } break; case MONO_CEE_MONO_RETOBJ: { /* * Same as RET, but return the native representation of a vtype * to the caller. 
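 * Only valid in marshalling wrappers, hence the pinvoke signature assert below.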
*/ g_assert (method->wrapper_type != MONO_WRAPPER_NONE); g_assert (cfg->ret); g_assert (mono_method_signature_internal (method)->pinvoke); --sp; klass = (MonoClass *)mono_method_get_wrapper_data (method, token); if (!cfg->vret_addr) { g_assert (cfg->ret_var_is_local); EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype); } else { EMIT_NEW_RETLOADA (cfg, ins); } mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0); if (sp != stack_start) UNVERIFIED; if (!detached_before_ret) mini_profiler_emit_leave (cfg, sp [0]); MONO_INST_NEW (cfg, ins, OP_BR); ins->inst_target_bb = end_bblock; MONO_ADD_INS (cfg->cbb, ins); link_bblock (cfg, cfg->cbb, end_bblock); start_new_bblock = 1; break; } case MONO_CEE_MONO_SAVE_LMF: case MONO_CEE_MONO_RESTORE_LMF: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); break; case MONO_CEE_MONO_CLASSCONST: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token)); *sp++ = ins; inline_costs += CALL_COST * MIN(10, num_calls++); break; case MONO_CEE_MONO_METHODCONST: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); EMIT_NEW_METHODCONST (cfg, ins, mono_method_get_wrapper_data (method, token)); *sp++ = ins; break; case MONO_CEE_MONO_PINVOKE_ADDR_CACHE: { g_assert (method->wrapper_type != MONO_WRAPPER_NONE); MonoMethod *pinvoke_method = (MonoMethod*)mono_method_get_wrapper_data (method, token); /* This is a memory slot used by the wrapper */ if (cfg->compile_aot) { EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHOD_PINVOKE_ADDR_CACHE, pinvoke_method); } else { gpointer addr = mono_mem_manager_alloc0 (cfg->mem_manager, sizeof (gpointer)); EMIT_NEW_PCONST (cfg, ins, addr); } *sp++ = ins; break; } case MONO_CEE_MONO_NOT_TAKEN: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); cfg->cbb->out_of_line = TRUE; break; case MONO_CEE_MONO_TLS: { MonoTlsKey key; g_assert (method->wrapper_type != MONO_WRAPPER_NONE); key = (MonoTlsKey)n; g_assert (key < TLS_KEY_NUM); ins = mono_create_tls_get (cfg, key); g_assert (ins); ins->type = STACK_PTR; *sp++ = ins; break; } case MONO_CEE_MONO_DYN_CALL: { MonoCallInst *call; /* It would be easier to call a trampoline, but that would put an * extra frame on the stack, confusing exception handling. So * implement it inline using an opcode for now. 
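 * The two stack operands pushed by the wrapper are consumed directly by OP_DYN_CALL.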
 */
			g_assert (method->wrapper_type != MONO_WRAPPER_NONE);

			if (!cfg->dyn_call_var) {
				cfg->dyn_call_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
				/* prevent it from being register allocated */
				cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
			}

			/* Has to use a call inst since local regalloc expects it */
			MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
			ins = (MonoInst*)call;
			sp -= 2;
			ins->sreg1 = sp [0]->dreg;
			ins->sreg2 = sp [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

			cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
			/* OP_DYN_CALL might need to allocate a dynamically sized param area */
			cfg->flags |= MONO_CFG_HAS_ALLOCA;

			inline_costs += CALL_COST * MIN(10, num_calls++);
			break;
		}
		case MONO_CEE_MONO_MEMORY_BARRIER: {
			g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
			mini_emit_memory_barrier (cfg, (int)n);
			break;
		}
		case MONO_CEE_MONO_ATOMIC_STORE_I4: {
			g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
			g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));

			sp -= 2;

			MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
			ins->dreg = sp [0]->dreg;
			ins->sreg1 = sp [1]->dreg;
			ins->backend.memory_barrier_kind = (int)n;
			MONO_ADD_INS (cfg->cbb, ins);
			break;
		}
		case MONO_CEE_MONO_LD_DELEGATE_METHOD_PTR: {
			CHECK_STACK (1);
			--sp;

			dreg = alloc_preg (cfg);
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
			*sp++ = ins;
			break;
		}
		case MONO_CEE_MONO_CALLI_EXTRA_ARG: {
			MonoInst *addr;
			MonoMethodSignature *fsig;
			MonoInst *arg;

			/*
			 * This is the same as CEE_CALLI, but passes an additional argument
			 * to the called method in llvmonly mode.
			 * This is only used by delegate invoke wrappers to call the
			 * actual delegate method.
			 */
			g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);

			ins = NULL;

			cmethod = NULL;
			CHECK_STACK (1);
			--sp;
			addr = *sp;
			fsig = mini_get_signature (method, token, generic_context, cfg->error);
			CHECK_CFG_ERROR;

			if (cfg->llvm_only)
				cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);

			n = fsig->param_count + fsig->hasthis + 1;

			CHECK_STACK (n);

			sp -= n;
			arg = sp [n - 1];

			if (cfg->llvm_only) {
				/*
				 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
				 * cconv. This is set by mono_init_delegate ().
				 */
				if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
					MonoInst *callee = addr;
					MonoInst *call, *localloc_ins;
					MonoBasicBlock *is_gsharedvt_bb, *end_bb;
					int low_bit_reg = alloc_preg (cfg);

					NEW_BBLOCK (cfg, is_gsharedvt_bb);
					NEW_BBLOCK (cfg, end_bb);

					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);

					/* Normal case: callee uses a normal cconv, have to add an out wrapper */
					addr = emit_get_rgctx_sig (cfg, context_used, fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
					/*
					 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
					 */
*/ MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM); ins->dreg = alloc_preg (cfg); ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P; MONO_ADD_INS (cfg->cbb, ins); localloc_ins = ins; cfg->flags |= MONO_CFG_HAS_ALLOCA; MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg); call = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */ MONO_START_BB (cfg, is_gsharedvt_bb); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1); ins = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee); ins->dreg = call->dreg; MONO_START_BB (cfg, end_bb); } else { /* Caller uses a normal calling conv */ MonoInst *callee = addr; MonoInst *call, *localloc_ins; MonoBasicBlock *is_gsharedvt_bb, *end_bb; int low_bit_reg = alloc_preg (cfg); NEW_BBLOCK (cfg, is_gsharedvt_bb); NEW_BBLOCK (cfg, end_bb); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb); /* Normal case: callee uses a normal cconv, no conversion is needed */ call = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */ MONO_START_BB (cfg, is_gsharedvt_bb); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1); NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig); MONO_ADD_INS (cfg->cbb, addr); /* * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg. */ MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM); ins->dreg = alloc_preg (cfg); ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P; MONO_ADD_INS (cfg->cbb, ins); localloc_ins = ins; cfg->flags |= MONO_CFG_HAS_ALLOCA; MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg); ins = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr); ins->dreg = call->dreg; MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); MONO_START_BB (cfg, end_bb); } } else { /* Same as CEE_CALLI */ if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) { /* * We pass the address to the gsharedvt trampoline in the rgctx reg */ MonoInst *callee = addr; addr = emit_get_rgctx_sig (cfg, context_used, fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI); ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee); } else { ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL); } } if (!MONO_TYPE_IS_VOID (fsig->ret)) *sp++ = mono_emit_widen_call_res (cfg, ins, fsig); CHECK_CFG_EXCEPTION; ins_flag = 0; constrained_class = NULL; break; } case MONO_CEE_MONO_LDDOMAIN: { MonoDomain *domain = mono_get_root_domain (); g_assert (method->wrapper_type != MONO_WRAPPER_NONE); EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : domain); *sp++ = ins; break; } case MONO_CEE_MONO_SAVE_LAST_ERROR: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); // Just an IL prefix, setting this flag, picked up by call instructions. 
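// Emitted by pinvoke wrappers for [DllImport] methods marked with SetLastError=true.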
save_last_error = TRUE; break; case MONO_CEE_MONO_GET_RGCTX_ARG: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); mono_create_rgctx_var (cfg); MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = alloc_dreg (cfg, STACK_PTR); ins->sreg1 = cfg->rgctx_var->dreg; ins->type = STACK_PTR; MONO_ADD_INS (cfg->cbb, ins); *sp++ = ins; break; case MONO_CEE_MONO_GET_SP: { /* Used by COOP only, so this is good enough */ MonoInst *var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); EMIT_NEW_VARLOADA (cfg, ins, var, NULL); *sp++ = ins; break; } case MONO_CEE_MONO_REMAP_OVF_EXC: /* Remap the exception thrown by the next _OVF opcode */ g_assert (method->wrapper_type != MONO_WRAPPER_NONE); ovf_exc = (const char*)mono_method_get_wrapper_data (method, token); break; case MONO_CEE_ARGLIST: { /* somewhat similar to LDTOKEN */ MonoInst *addr, *vtvar; vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.argumenthandle_class), OP_LOCAL); EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0); EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg); EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0); ins->type = STACK_VTYPE; ins->klass = mono_defaults.argumenthandle_class; *sp++ = ins; break; } case MONO_CEE_CEQ: case MONO_CEE_CGT: case MONO_CEE_CGT_UN: case MONO_CEE_CLT: case MONO_CEE_CLT_UN: { MonoInst *cmp, *arg1, *arg2; sp -= 2; arg1 = sp [0]; arg2 = sp [1]; /* * The following transforms: * CEE_CEQ into OP_CEQ * CEE_CGT into OP_CGT * CEE_CGT_UN into OP_CGT_UN * CEE_CLT into OP_CLT * CEE_CLT_UN into OP_CLT_UN */ MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]); MONO_INST_NEW (cfg, ins, cmp->opcode); cmp->sreg1 = arg1->dreg; cmp->sreg2 = arg2->dreg; type_from_op (cfg, cmp, arg1, arg2); CHECK_TYPE (cmp); add_widen_op (cfg, cmp, &arg1, &arg2); if ((arg1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP)))) cmp->opcode = OP_LCOMPARE; else if (arg1->type == STACK_R4) cmp->opcode = OP_RCOMPARE; else if (arg1->type == STACK_R8) cmp->opcode = OP_FCOMPARE; else cmp->opcode = OP_ICOMPARE; MONO_ADD_INS (cfg->cbb, cmp); ins->type = STACK_I4; ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type); type_from_op (cfg, ins, arg1, arg2); if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) { /* * The backends expect the fceq opcodes to do the * comparison too. 
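			 * so move the operands over to the setcc instruction and nullify the compare.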
*/ ins->sreg1 = cmp->sreg1; ins->sreg2 = cmp->sreg2; NULLIFY_INS (cmp); } MONO_ADD_INS (cfg->cbb, ins); *sp++ = ins; break; } case MONO_CEE_LDFTN: { MonoInst *argconst; MonoMethod *cil_method; cmethod = mini_get_method (cfg, method, n, NULL, generic_context); CHECK_CFG_ERROR; if (constrained_class) { if (m_method_is_static (cmethod) && mini_class_check_context_used (cfg, constrained_class)) // FIXME: GENERIC_SHARING_FAILURE (CEE_LDFTN); cmethod = get_constrained_method (cfg, image, n, cmethod, constrained_class, generic_context); constrained_class = NULL; CHECK_CFG_ERROR; } mono_class_init_internal (cmethod->klass); mono_save_token_info (cfg, image, n, cmethod); context_used = mini_method_check_context_used (cfg, cmethod); cil_method = cmethod; if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod)) emit_method_access_failure (cfg, method, cil_method); const gboolean has_unmanaged_callers_only = cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_method_has_unmanaged_callers_only_attribute (cmethod); /* * Optimize the common case of ldftn+delegate creation */ if ((sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) { MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context); if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) { MonoInst *target_ins, *handle_ins; MonoMethod *invoke; int invoke_context_used; if (G_UNLIKELY (has_unmanaged_callers_only)) { mono_error_set_not_supported (cfg->error, "Cannot create delegate from method with UnmanagedCallersOnlyAttribute"); CHECK_CFG_ERROR; } invoke = mono_get_delegate_invoke_internal (ctor_method->klass); if (!invoke || !mono_method_signature_internal (invoke)) LOAD_ERROR; invoke_context_used = mini_method_check_context_used (cfg, invoke); target_ins = sp [-1]; if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) { /*BAD IMPL: We must not add a null check for virtual invoke delegates.*/ if (mono_method_signature_internal (invoke)->param_count == mono_method_signature_internal (cmethod)->param_count) { MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0); MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException"); } } if ((invoke_context_used == 0 || !cfg->gsharedvt) || cfg->llvm_only) { if (cfg->verbose_level > 3) g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL)); if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, FALSE))) { sp --; *sp = handle_ins; CHECK_CFG_EXCEPTION; sp ++; next_ip += 5; il_op = MONO_CEE_NEWOBJ; break; } else { CHECK_CFG_ERROR; } } } } /* UnmanagedCallersOnlyAttribute means ldftn should return a method callable from native */ if (G_UNLIKELY (has_unmanaged_callers_only)) { if (G_UNLIKELY (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) { // Follow CoreCLR, disallow [UnmanagedCallersOnly] and [DllImport] to be used // together emit_not_supported_failure (cfg); EMIT_NEW_PCONST (cfg, ins, NULL); *sp++ = ins; inline_costs += CALL_COST * MIN(10, num_calls++); break; } MonoClass *delegate_klass = NULL; MonoGCHandle target_handle = 0; ERROR_DECL (wrapper_error); MonoMethod *wrapped_cmethod; wrapped_cmethod = mono_marshal_get_managed_wrapper (cmethod, delegate_klass, target_handle, wrapper_error); if (!is_ok (wrapper_error)) { /* if we couldn't create a wrapper because cmethod 
isn't supposed to have an UnmanagedCallersOnly attribute, follow CoreCLR behavior and throw when the method with the ldftn is executing, not when it is being compiled. */ emit_invalid_program_with_msg (cfg, wrapper_error, method, cmethod); mono_error_cleanup (wrapper_error); EMIT_NEW_PCONST (cfg, ins, NULL); *sp++ = ins; inline_costs += CALL_COST * MIN(10, num_calls++); break; } else { cmethod = wrapped_cmethod; } } argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD); ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst); *sp++ = ins; inline_costs += CALL_COST * MIN(10, num_calls++); break; } case MONO_CEE_LDVIRTFTN: { MonoInst *args [2]; cmethod = mini_get_method (cfg, method, n, NULL, generic_context); CHECK_CFG_ERROR; mono_class_init_internal (cmethod->klass); context_used = mini_method_check_context_used (cfg, cmethod); /* * Optimize the common case of ldvirtftn+delegate creation */ if (previous_il_op == MONO_CEE_DUP && (sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) { MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context); if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) { MonoInst *target_ins, *handle_ins; MonoMethod *invoke; int invoke_context_used; const gboolean is_virtual = (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) != 0; invoke = mono_get_delegate_invoke_internal (ctor_method->klass); if (!invoke || !mono_method_signature_internal (invoke)) LOAD_ERROR; invoke_context_used = mini_method_check_context_used (cfg, invoke); target_ins = sp [-1]; if (invoke_context_used == 0 || !cfg->gsharedvt || cfg->llvm_only) { if (cfg->verbose_level > 3) g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL)); if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, is_virtual))) { sp -= 2; *sp = handle_ins; CHECK_CFG_EXCEPTION; next_ip += 5; previous_il_op = MONO_CEE_NEWOBJ; sp ++; break; } else { CHECK_CFG_ERROR; } } } } --sp; args [0] = *sp; args [1] = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD); if (context_used) *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args); else *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args); inline_costs += CALL_COST * MIN(10, num_calls++); break; } case MONO_CEE_LOCALLOC: { MonoBasicBlock *non_zero_bb, *end_bb; int alloc_ptr = alloc_preg (cfg); --sp; if (sp != stack_start) UNVERIFIED; if (cfg->method != method) /* * Inlining this into a loop in a parent could lead to * stack overflows which is different behavior than the * non-inlined case, thus disable inlining in this case. 
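				 * (memory allocated by localloc is only reclaimed when the frame returns, so an
				 * inlined localloc in a loop would keep growing the caller's frame)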
*/ INLINE_FAILURE("localloc"); NEW_BBLOCK (cfg, non_zero_bb); NEW_BBLOCK (cfg, end_bb); /* if size != zero */ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb); //size is zero, so result is NULL MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); MONO_START_BB (cfg, non_zero_bb); MONO_INST_NEW (cfg, ins, OP_LOCALLOC); ins->dreg = alloc_ptr; ins->sreg1 = sp [0]->dreg; ins->type = STACK_PTR; MONO_ADD_INS (cfg->cbb, ins); cfg->flags |= MONO_CFG_HAS_ALLOCA; if (header->init_locals) ins->flags |= MONO_INST_INIT; MONO_START_BB (cfg, end_bb); EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr); ins->type = STACK_PTR; *sp++ = ins; break; } case MONO_CEE_ENDFILTER: { MonoExceptionClause *clause, *nearest; int cc; --sp; if ((sp != stack_start) || (sp [0]->type != STACK_I4)) UNVERIFIED; MONO_INST_NEW (cfg, ins, OP_ENDFILTER); ins->sreg1 = (*sp)->dreg; MONO_ADD_INS (cfg->cbb, ins); start_new_bblock = 1; nearest = NULL; for (cc = 0; cc < header->num_clauses; ++cc) { clause = &header->clauses [cc]; if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) && ((next_ip - header->code) > clause->data.filter_offset && (next_ip - header->code) <= clause->handler_offset) && (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) nearest = clause; } g_assert (nearest); if ((next_ip - header->code) != nearest->handler_offset) UNVERIFIED; break; } case MONO_CEE_UNALIGNED_: ins_flag |= MONO_INST_UNALIGNED; /* FIXME: record alignment? we can assume 1 for now */ break; case MONO_CEE_VOLATILE_: ins_flag |= MONO_INST_VOLATILE; break; case MONO_CEE_TAIL_: ins_flag |= MONO_INST_TAILCALL; cfg->flags |= MONO_CFG_HAS_TAILCALL; /* Can't inline tailcalls at this time */ inline_costs += 100000; break; case MONO_CEE_INITOBJ: --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); if (mini_class_is_reference (klass)) MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0); else mini_emit_initobj (cfg, *sp, NULL, klass); inline_costs += 1; break; case MONO_CEE_CONSTRAINED_: constrained_class = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (constrained_class); ins_has_side_effect = FALSE; break; case MONO_CEE_CPBLK: sp -= 3; mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag); ins_flag = 0; inline_costs += 1; break; case MONO_CEE_INITBLK: sp -= 3; mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag); ins_flag = 0; inline_costs += 1; break; case MONO_CEE_NO_: if (ip [2] & CEE_NO_TYPECHECK) ins_flag |= MONO_INST_NOTYPECHECK; if (ip [2] & CEE_NO_RANGECHECK) ins_flag |= MONO_INST_NORANGECHECK; if (ip [2] & CEE_NO_NULLCHECK) ins_flag |= MONO_INST_NONULLCHECK; break; case MONO_CEE_RETHROW: { MonoInst *load; int handler_offset = -1; for (i = 0; i < header->num_clauses; ++i) { MonoExceptionClause *clause = &header->clauses [i]; if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) { handler_offset = clause->handler_offset; break; } } cfg->cbb->flags |= BB_EXCEPTION_UNSAFE; if (handler_offset == -1) UNVERIFIED; EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0); MONO_INST_NEW (cfg, ins, OP_RETHROW); ins->sreg1 = load->dreg; MONO_ADD_INS (cfg->cbb, ins); MONO_INST_NEW (cfg, ins, OP_NOT_REACHED); MONO_ADD_INS (cfg->cbb, ins); sp = stack_start; link_bblock (cfg, cfg->cbb, end_bblock); start_new_bblock = 1; 
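/* Code following the rethrow is unreachable (OP_NOT_REACHED was added above). */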
break; } case MONO_CEE_MONO_RETHROW: { if (sp [-1]->type != STACK_OBJ) UNVERIFIED; MONO_INST_NEW (cfg, ins, OP_RETHROW); --sp; ins->sreg1 = sp [0]->dreg; cfg->cbb->out_of_line = TRUE; MONO_ADD_INS (cfg->cbb, ins); MONO_INST_NEW (cfg, ins, OP_NOT_REACHED); MONO_ADD_INS (cfg->cbb, ins); sp = stack_start; link_bblock (cfg, cfg->cbb, end_bblock); start_new_bblock = 1; /* This can complicate code generation for llvm since the return value might not be defined */ if (COMPILE_LLVM (cfg)) INLINE_FAILURE ("mono_rethrow"); break; } case MONO_CEE_SIZEOF: { guint32 val; int ialign; if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (m_class_get_image (method->klass)) && !generic_context) { MonoType *type = mono_type_create_from_typespec_checked (image, token, cfg->error); CHECK_CFG_ERROR; val = mono_type_size (type, &ialign); EMIT_NEW_ICONST (cfg, ins, val); } else { MonoClass *klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); if (mini_is_gsharedvt_klass (klass)) { ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_SIZEOF); ins->type = STACK_I4; } else { val = mono_type_size (m_class_get_byval_arg (klass), &ialign); EMIT_NEW_ICONST (cfg, ins, val); } } *sp++ = ins; break; } case MONO_CEE_REFANYTYPE: { MonoInst *src_var, *src; GSHAREDVT_FAILURE (il_op); --sp; // FIXME: src_var = get_vreg_to_inst (cfg, sp [0]->dreg); if (!src_var) src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg); EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype); EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (mono_defaults.typehandle_class), src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type)); *sp++ = ins; break; } case MONO_CEE_READONLY_: readonly = TRUE; break; case MONO_CEE_UNUSED56: case MONO_CEE_UNUSED57: case MONO_CEE_UNUSED70: case MONO_CEE_UNUSED: case MONO_CEE_UNUSED99: case MONO_CEE_UNUSED58: case MONO_CEE_UNUSED1: UNVERIFIED; default: g_warning ("opcode 0x%02x not handled", il_op); UNVERIFIED; } if (ins_has_side_effect) cfg->cbb->flags |= BB_HAS_SIDE_EFFECTS; } if (start_new_bblock != 1) UNVERIFIED; cfg->cbb->cil_length = ip - cfg->cbb->cil_code; if (cfg->cbb->next_bb) { /* This could already be set because of inlining, #693905 */ MonoBasicBlock *bb = cfg->cbb; while (bb->next_bb) bb = bb->next_bb; bb->next_bb = end_bblock; } else { cfg->cbb->next_bb = end_bblock; } #if defined(TARGET_POWERPC) || defined(TARGET_X86) if (cfg->compile_aot) /* FIXME: The plt slots require a GOT var even if the method doesn't use it */ mono_get_got_var (cfg); #endif #ifdef TARGET_WASM if (cfg->lmf_var && !cfg->deopt) { // mini_llvmonly_pop_lmf () might be called before emit_push_lmf () so initialize the LMF cfg->cbb = init_localsbb; EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL); int lmf_reg = ins->dreg; EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_IMM, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), 0); } #endif if (cfg->method == method && cfg->got_var) mono_emit_load_got_addr (cfg); if (init_localsbb) { cfg->cbb = init_localsbb; cfg->ip = NULL; for (i = 0; i < header->num_locals; ++i) { /* * Vtype initialization might need to be done after CEE_JIT_ATTACH, since it can make calls to memset (), * which need the trampoline code to work. 
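 * (This is why the loop below routes MONO_TYPE_ISSTRUCT locals to
 * init_localsbb2, which runs after any attach code, while all other locals
 * are initialized directly in init_localsbb.)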
*/ if (MONO_TYPE_ISSTRUCT (header->locals [i])) cfg->cbb = init_localsbb2; else cfg->cbb = init_localsbb; emit_init_local (cfg, i, header->locals [i], init_locals); } } if (cfg->init_ref_vars && cfg->method == method) { /* Emit initialization for ref vars */ // FIXME: Avoid duplication initialization for IL locals. for (i = 0; i < cfg->num_varinfo; ++i) { MonoInst *ins = cfg->varinfo [i]; if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ) MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL); } } if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) { cfg->cbb = init_localsbb; emit_push_lmf (cfg); } /* emit profiler enter code after a jit attach if there is one */ cfg->cbb = init_localsbb2; mini_profiler_emit_enter (cfg); cfg->cbb = init_localsbb; if (seq_points) { MonoBasicBlock *bb; /* * Make seq points at backward branch targets interruptable. */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT) bb->code->flags |= MONO_INST_SINGLE_STEP_LOC; } /* Add a sequence point for method entry/exit events */ if (seq_points && cfg->gen_sdb_seq_points) { NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE); MONO_ADD_INS (init_localsbb, ins); NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE); MONO_ADD_INS (cfg->bb_exit, ins); } /* * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because * the code they refer to was dead (#11880). */ if (sym_seq_points) { for (i = 0; i < header->code_size; ++i) { if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) { MonoInst *ins; NEW_SEQ_POINT (cfg, ins, i, FALSE); mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE); } } } cfg->ip = NULL; if (cfg->method == method) { compute_bb_regions (cfg); } else { MonoBasicBlock *bb; /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */ for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) { bb->real_offset = inline_offset; } } if (inline_costs < 0) { char *mname; /* Method is too large */ mname = mono_method_full_name (method, TRUE); mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname)); g_free (mname); } if ((cfg->verbose_level > 2) && (cfg->method == method)) mono_print_code (cfg, "AFTER METHOD-TO-IR"); goto cleanup; mono_error_exit: if (cfg->verbose_level > 3) g_print ("exiting due to error"); g_assert (!is_ok (cfg->error)); goto cleanup; exception_exit: if (cfg->verbose_level > 3) g_print ("exiting due to exception"); g_assert (cfg->exception_type != MONO_EXCEPTION_NONE); goto cleanup; unverified: if (cfg->verbose_level > 3) g_print ("exiting due to invalid il"); set_exception_type_from_invalid_il (cfg, method, ip); goto cleanup; cleanup: g_slist_free (class_inits); mono_basic_block_free (original_bb); cfg->dont_inline = g_list_remove (cfg->dont_inline, method); if (cfg->exception_type) return -1; else return inline_costs; } static int store_membase_reg_to_store_membase_imm (int opcode) { switch (opcode) { case OP_STORE_MEMBASE_REG: return OP_STORE_MEMBASE_IMM; case OP_STOREI1_MEMBASE_REG: return OP_STOREI1_MEMBASE_IMM; case OP_STOREI2_MEMBASE_REG: return OP_STOREI2_MEMBASE_IMM; case OP_STOREI4_MEMBASE_REG: return OP_STOREI4_MEMBASE_IMM; case OP_STOREI8_MEMBASE_REG: return OP_STOREI8_MEMBASE_IMM; default: g_assert_not_reached (); } return -1; } int mono_op_to_op_imm (int opcode) { switch (opcode) { case OP_IADD: return OP_IADD_IMM; case OP_ISUB: 
return OP_ISUB_IMM; case OP_IDIV: return OP_IDIV_IMM; case OP_IDIV_UN: return OP_IDIV_UN_IMM; case OP_IREM: return OP_IREM_IMM; case OP_IREM_UN: return OP_IREM_UN_IMM; case OP_IMUL: return OP_IMUL_IMM; case OP_IAND: return OP_IAND_IMM; case OP_IOR: return OP_IOR_IMM; case OP_IXOR: return OP_IXOR_IMM; case OP_ISHL: return OP_ISHL_IMM; case OP_ISHR: return OP_ISHR_IMM; case OP_ISHR_UN: return OP_ISHR_UN_IMM; case OP_LADD: return OP_LADD_IMM; case OP_LSUB: return OP_LSUB_IMM; case OP_LAND: return OP_LAND_IMM; case OP_LOR: return OP_LOR_IMM; case OP_LXOR: return OP_LXOR_IMM; case OP_LSHL: return OP_LSHL_IMM; case OP_LSHR: return OP_LSHR_IMM; case OP_LSHR_UN: return OP_LSHR_UN_IMM; #if SIZEOF_REGISTER == 8 case OP_LMUL: return OP_LMUL_IMM; case OP_LREM: return OP_LREM_IMM; #endif case OP_COMPARE: return OP_COMPARE_IMM; case OP_ICOMPARE: return OP_ICOMPARE_IMM; case OP_LCOMPARE: return OP_LCOMPARE_IMM; case OP_STORE_MEMBASE_REG: return OP_STORE_MEMBASE_IMM; case OP_STOREI1_MEMBASE_REG: return OP_STOREI1_MEMBASE_IMM; case OP_STOREI2_MEMBASE_REG: return OP_STOREI2_MEMBASE_IMM; case OP_STOREI4_MEMBASE_REG: return OP_STOREI4_MEMBASE_IMM; #if defined(TARGET_X86) || defined (TARGET_AMD64) case OP_X86_PUSH: return OP_X86_PUSH_IMM; case OP_X86_COMPARE_MEMBASE_REG: return OP_X86_COMPARE_MEMBASE_IMM; #endif #if defined(TARGET_AMD64) case OP_AMD64_ICOMPARE_MEMBASE_REG: return OP_AMD64_ICOMPARE_MEMBASE_IMM; #endif case OP_VOIDCALL_REG: return OP_VOIDCALL; case OP_CALL_REG: return OP_CALL; case OP_LCALL_REG: return OP_LCALL; case OP_FCALL_REG: return OP_FCALL; case OP_LOCALLOC: return OP_LOCALLOC_IMM; } return -1; } int mono_load_membase_to_load_mem (int opcode) { // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro #if defined(TARGET_X86) || defined(TARGET_AMD64) switch (opcode) { case OP_LOAD_MEMBASE: return OP_LOAD_MEM; case OP_LOADU1_MEMBASE: return OP_LOADU1_MEM; case OP_LOADU2_MEMBASE: return OP_LOADU2_MEM; case OP_LOADI4_MEMBASE: return OP_LOADI4_MEM; case OP_LOADU4_MEMBASE: return OP_LOADU4_MEM; #if SIZEOF_REGISTER == 8 case OP_LOADI8_MEMBASE: return OP_LOADI8_MEM; #endif } #endif return -1; } static int op_to_op_dest_membase (int store_opcode, int opcode) { #if defined(TARGET_X86) if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG))) return -1; switch (opcode) { case OP_IADD: return OP_X86_ADD_MEMBASE_REG; case OP_ISUB: return OP_X86_SUB_MEMBASE_REG; case OP_IAND: return OP_X86_AND_MEMBASE_REG; case OP_IOR: return OP_X86_OR_MEMBASE_REG; case OP_IXOR: return OP_X86_XOR_MEMBASE_REG; case OP_ADD_IMM: case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM; case OP_SUB_IMM: case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM; case OP_AND_IMM: case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM; case OP_OR_IMM: case OP_IOR_IMM: return OP_X86_OR_MEMBASE_IMM; case OP_XOR_IMM: case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM; case OP_MOVE: return OP_NOP; } #endif #if defined(TARGET_AMD64) if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG))) return -1; switch (opcode) { case OP_IADD: return OP_X86_ADD_MEMBASE_REG; case OP_ISUB: return OP_X86_SUB_MEMBASE_REG; case OP_IAND: return OP_X86_AND_MEMBASE_REG; case OP_IOR: return OP_X86_OR_MEMBASE_REG; case OP_IXOR: return OP_X86_XOR_MEMBASE_REG; case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM; case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM; case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM; case OP_IOR_IMM: return OP_X86_OR_MEMBASE_IMM; case OP_IXOR_IMM: return 
OP_X86_XOR_MEMBASE_IMM; case OP_LADD: return OP_AMD64_ADD_MEMBASE_REG; case OP_LSUB: return OP_AMD64_SUB_MEMBASE_REG; case OP_LAND: return OP_AMD64_AND_MEMBASE_REG; case OP_LOR: return OP_AMD64_OR_MEMBASE_REG; case OP_LXOR: return OP_AMD64_XOR_MEMBASE_REG; case OP_ADD_IMM: case OP_LADD_IMM: return OP_AMD64_ADD_MEMBASE_IMM; case OP_SUB_IMM: case OP_LSUB_IMM: return OP_AMD64_SUB_MEMBASE_IMM; case OP_AND_IMM: case OP_LAND_IMM: return OP_AMD64_AND_MEMBASE_IMM; case OP_OR_IMM: case OP_LOR_IMM: return OP_AMD64_OR_MEMBASE_IMM; case OP_XOR_IMM: case OP_LXOR_IMM: return OP_AMD64_XOR_MEMBASE_IMM; case OP_MOVE: return OP_NOP; } #endif return -1; } static int op_to_op_store_membase (int store_opcode, int opcode) { #if defined(TARGET_X86) || defined(TARGET_AMD64) switch (opcode) { case OP_ICEQ: if (store_opcode == OP_STOREI1_MEMBASE_REG) return OP_X86_SETEQ_MEMBASE; case OP_CNE: if (store_opcode == OP_STOREI1_MEMBASE_REG) return OP_X86_SETNE_MEMBASE; } #endif return -1; } static int op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode) { #ifdef TARGET_X86 /* FIXME: This has sign extension issues */ /* if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE)) return OP_X86_COMPARE_MEMBASE8_IMM; */ if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))) return -1; switch (opcode) { case OP_X86_PUSH: return OP_X86_PUSH_MEMBASE; case OP_COMPARE_IMM: case OP_ICOMPARE_IMM: return OP_X86_COMPARE_MEMBASE_IMM; case OP_COMPARE: case OP_ICOMPARE: return OP_X86_COMPARE_MEMBASE_REG; } #endif #ifdef TARGET_AMD64 /* FIXME: This has sign extension issues */ /* if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE)) return OP_X86_COMPARE_MEMBASE8_IMM; */ switch (opcode) { case OP_X86_PUSH: if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE)) return OP_X86_PUSH_MEMBASE; break; /* FIXME: This only works for 32 bit immediates case OP_COMPARE_IMM: case OP_LCOMPARE_IMM: if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE)) return OP_AMD64_COMPARE_MEMBASE_IMM; */ case OP_ICOMPARE_IMM: if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) return OP_AMD64_ICOMPARE_MEMBASE_IMM; break; case OP_COMPARE: case OP_LCOMPARE: if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE) return OP_AMD64_ICOMPARE_MEMBASE_REG; if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE)) return OP_AMD64_COMPARE_MEMBASE_REG; break; case OP_ICOMPARE: if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) return OP_AMD64_ICOMPARE_MEMBASE_REG; break; } #endif return -1; } static int op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode) { #ifdef TARGET_X86 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))) return -1; switch (opcode) { case OP_COMPARE: case OP_ICOMPARE: return OP_X86_COMPARE_REG_MEMBASE; case OP_IADD: return OP_X86_ADD_REG_MEMBASE; case OP_ISUB: return OP_X86_SUB_REG_MEMBASE; case OP_IAND: return OP_X86_AND_REG_MEMBASE; case OP_IOR: return OP_X86_OR_REG_MEMBASE; case OP_IXOR: return OP_X86_XOR_REG_MEMBASE; } #endif #ifdef TARGET_AMD64 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) { switch (opcode) { case OP_ICOMPARE: return OP_AMD64_ICOMPARE_REG_MEMBASE; case OP_IADD: return OP_X86_ADD_REG_MEMBASE; case 
OP_ISUB: return OP_X86_SUB_REG_MEMBASE; case OP_IAND: return OP_X86_AND_REG_MEMBASE; case OP_IOR: return OP_X86_OR_REG_MEMBASE; case OP_IXOR: return OP_X86_XOR_REG_MEMBASE; } } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) { switch (opcode) { case OP_COMPARE: case OP_LCOMPARE: return OP_AMD64_COMPARE_REG_MEMBASE; case OP_LADD: return OP_AMD64_ADD_REG_MEMBASE; case OP_LSUB: return OP_AMD64_SUB_REG_MEMBASE; case OP_LAND: return OP_AMD64_AND_REG_MEMBASE; case OP_LOR: return OP_AMD64_OR_REG_MEMBASE; case OP_LXOR: return OP_AMD64_XOR_REG_MEMBASE; } } #endif return -1; } int mono_op_to_op_imm_noemul (int opcode) { MONO_DISABLE_WARNING(4065) // switch with default but no case switch (opcode) { #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS) case OP_LSHR: case OP_LSHL: case OP_LSHR_UN: return -1; #endif #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV) case OP_IDIV: case OP_IDIV_UN: case OP_IREM: case OP_IREM_UN: return -1; #endif #if defined(MONO_ARCH_EMULATE_MUL_DIV) case OP_IMUL: return -1; #endif default: return mono_op_to_op_imm (opcode); } MONO_RESTORE_WARNING } gboolean mono_op_no_side_effects (int opcode) { /* FIXME: Add more instructions */ /* INEG sets the condition codes, and the OP_LNEG decomposition depends on this on x86 */ switch (opcode) { case OP_MOVE: case OP_FMOVE: case OP_VMOVE: case OP_XMOVE: case OP_RMOVE: case OP_VZERO: case OP_XZERO: case OP_ICONST: case OP_I8CONST: case OP_ADD_IMM: case OP_R8CONST: case OP_LADD_IMM: case OP_ISUB_IMM: case OP_IADD_IMM: case OP_LNEG: case OP_ISUB: case OP_CMOV_IGE: case OP_ISHL_IMM: case OP_ISHR_IMM: case OP_ISHR_UN_IMM: case OP_IAND_IMM: case OP_ICONV_TO_U1: case OP_ICONV_TO_I1: case OP_SEXT_I4: case OP_LCONV_TO_U1: case OP_ICONV_TO_U2: case OP_ICONV_TO_I2: case OP_LCONV_TO_I2: case OP_LDADDR: case OP_PHI: case OP_NOP: case OP_ZEXT_I4: case OP_NOT_NULL: case OP_IL_SEQ_POINT: case OP_RTTYPE: return TRUE; default: return FALSE; } } gboolean mono_ins_no_side_effects (MonoInst *ins) { if (mono_op_no_side_effects (ins->opcode)) return TRUE; if (ins->opcode == OP_AOTCONST) { MonoJumpInfoType type = (MonoJumpInfoType)(intptr_t)ins->inst_p1; // Some AOTCONSTs have side effects switch (type) { case MONO_PATCH_INFO_TYPE_FROM_HANDLE: case MONO_PATCH_INFO_LDSTR: case MONO_PATCH_INFO_VTABLE: case MONO_PATCH_INFO_METHOD_RGCTX: return TRUE; } } return FALSE; } /** * mono_handle_global_vregs: * * Make vregs used in more than one bblock 'global', i.e. allocate a variable * for them. 
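 *
 * Illustrative example (vreg numbers assumed): if R20 is written in BB3 and
 * read in BB5, a MonoInst is created for it via
 * mono_compile_create_var_for_vreg () so the register allocator can give it
 * a location that is valid in both blocks; a vreg touched by only one bblock
 * stays a cheap local vreg.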
*/ void mono_handle_global_vregs (MonoCompile *cfg) { gint32 *vreg_to_bb; MonoBasicBlock *bb; int i, pos; vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1)); #ifdef MONO_ARCH_SIMD_INTRINSICS if (cfg->uses_simd_intrinsics & MONO_CFG_USES_SIMD_INTRINSICS_SIMPLIFY_INDIRECTION) mono_simd_simplify_indirection (cfg); #endif /* Find local vregs used in more than one bb */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins = bb->code; int block_num = bb->block_num; if (cfg->verbose_level > 2) printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num); cfg->cbb = bb; for (; ins; ins = ins->next) { const char *spec = INS_INFO (ins->opcode); int regtype = 0, regindex; gint32 prev_bb; if (G_UNLIKELY (cfg->verbose_level > 2)) mono_print_ins (ins); g_assert (ins->opcode >= MONO_CEE_LAST); for (regindex = 0; regindex < 4; regindex ++) { int vreg = 0; if (regindex == 0) { regtype = spec [MONO_INST_DEST]; if (regtype == ' ') continue; vreg = ins->dreg; } else if (regindex == 1) { regtype = spec [MONO_INST_SRC1]; if (regtype == ' ') continue; vreg = ins->sreg1; } else if (regindex == 2) { regtype = spec [MONO_INST_SRC2]; if (regtype == ' ') continue; vreg = ins->sreg2; } else if (regindex == 3) { regtype = spec [MONO_INST_SRC3]; if (regtype == ' ') continue; vreg = ins->sreg3; } #if SIZEOF_REGISTER == 4 /* In the LLVM case, the long opcodes are not decomposed */ if (regtype == 'l' && !COMPILE_LLVM (cfg)) { /* * Since some instructions reference the original long vreg, * and some reference the two component vregs, it is quite hard * to determine when it needs to be global. So be conservative. */ if (!get_vreg_to_inst (cfg, vreg)) { mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg); if (cfg->verbose_level > 2) printf ("LONG VREG R%d made global.\n", vreg); } /* * Make the component vregs volatile since the optimizations can * get confused otherwise.
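 * (On 32-bit targets a long vreg R is split into the component vregs
 * MONO_LVREG_LS (R) and MONO_LVREG_MS (R) for the low and high words; both
 * halves are flagged volatile below.)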
*/ get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE; get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE; } #endif g_assert (vreg != -1); prev_bb = vreg_to_bb [vreg]; if (prev_bb == 0) { /* 0 is a valid block num */ vreg_to_bb [vreg] = block_num + 1; } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) { if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS))) continue; if (!get_vreg_to_inst (cfg, vreg)) { if (G_UNLIKELY (cfg->verbose_level > 2)) printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num); switch (regtype) { case 'i': if (vreg_is_ref (cfg, vreg)) mono_compile_create_var_for_vreg (cfg, mono_get_object_type (), OP_LOCAL, vreg); else mono_compile_create_var_for_vreg (cfg, mono_get_int_type (), OP_LOCAL, vreg); break; case 'l': mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg); break; case 'f': mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL, vreg); break; case 'v': case 'x': mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL, vreg); break; default: g_assert_not_reached (); } } /* Flag as having been used in more than one bb */ vreg_to_bb [vreg] = -1; } } } } /* If a variable is used in only one bblock, convert it into a local vreg */ for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *var = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); switch (var->type) { case STACK_I4: case STACK_OBJ: case STACK_PTR: case STACK_MP: case STACK_VTYPE: #if SIZEOF_REGISTER == 8 case STACK_I8: #endif #if !defined(TARGET_X86) /* Enabling this screws up the fp stack on x86 */ case STACK_R8: #endif if (mono_arch_is_soft_float ()) break; /* if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype)) break; */ /* Arguments are implicitly global */ /* Putting R4 vars into registers doesn't work currently */ /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */ if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (m_class_get_byval_arg (var->klass)->type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) { /* * Make that the variable's liveness interval doesn't contain a call, since * that would cause the lvreg to be spilled, making the whole optimization * useless. 
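 * (A cached lvreg cannot survive a call anyway: mono_spill_global_vars ()
 * below clears its vreg_to_lvreg cache at every MONO_IS_CALL instruction.)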
*/ /* This is too slow for JIT compilation */ #if 0 if (cfg->compile_aot && vreg_to_bb [var->dreg]) { MonoInst *ins; int def_index, call_index, ins_index; gboolean spilled = FALSE; def_index = -1; call_index = -1; ins_index = 0; for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) { const char *spec = INS_INFO (ins->opcode); if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg)) def_index = ins_index; if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) || ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) { if (call_index > def_index) { spilled = TRUE; break; } } if (MONO_IS_CALL (ins)) call_index = ins_index; ins_index ++; } if (spilled) break; } #endif if (G_UNLIKELY (cfg->verbose_level > 2)) printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx); var->flags |= MONO_INST_IS_DEAD; cfg->vreg_to_inst [var->dreg] = NULL; } break; } } /* * Compress the varinfo and vars tables so the liveness computation is faster and * takes up less space. */ pos = 0; for (i = 0; i < cfg->num_varinfo; ++i) { MonoInst *var = cfg->varinfo [i]; if (pos < i && cfg->locals_start == i) cfg->locals_start = pos; if (!(var->flags & MONO_INST_IS_DEAD)) { if (pos < i) { cfg->varinfo [pos] = cfg->varinfo [i]; cfg->varinfo [pos]->inst_c0 = pos; memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar)); cfg->vars [pos].idx = pos; #if SIZEOF_REGISTER == 4 if (cfg->varinfo [pos]->type == STACK_I8) { /* Modify the two component vars too */ MonoInst *var1; var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg)); var1->inst_c0 = pos; var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg)); var1->inst_c0 = pos; } #endif } pos ++; } } cfg->num_varinfo = pos; if (cfg->locals_start > cfg->num_varinfo) cfg->locals_start = cfg->num_varinfo; } /* * mono_allocate_gsharedvt_vars: * * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array. * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes. */ void mono_allocate_gsharedvt_vars (MonoCompile *cfg) { int i; cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg); for (i = 0; i < cfg->num_varinfo; ++i) { MonoInst *ins = cfg->varinfo [i]; int idx; if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) { if (i >= cfg->locals_start) { /* Local */ idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET); cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1; ins->opcode = OP_GSHAREDVT_LOCAL; ins->inst_imm = idx; } else { /* Arg */ cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1; ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET; } } } } /** * mono_spill_global_vars: * * Generate spill code for variables which are not allocated to registers, * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if * code is generated which could be optimized by the local optimization passes.
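 *
 * For example, when an OP_LDADDR is rewritten below into an OP_ADD_IMM on
 * the variable's base register, *need_local_opts is set so the caller knows
 * the new code is worth another local-optimization pass.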
*/ void mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts) { MonoBasicBlock *bb; char spec2 [16]; int orig_next_vreg; guint32 *vreg_to_lvreg; guint32 *lvregs; guint32 i, lvregs_len, lvregs_size; gboolean dest_has_lvreg = FALSE; MonoStackType stacktypes [128]; MonoInst **live_range_start, **live_range_end; MonoBasicBlock **live_range_start_bb, **live_range_end_bb; *need_local_opts = FALSE; memset (spec2, 0, sizeof (spec2)); /* FIXME: Move this function to mini.c */ stacktypes [(int)'i'] = STACK_PTR; stacktypes [(int)'l'] = STACK_I8; stacktypes [(int)'f'] = STACK_R8; #ifdef MONO_ARCH_SIMD_INTRINSICS stacktypes [(int)'x'] = STACK_VTYPE; #endif #if SIZEOF_REGISTER == 4 /* Create MonoInsts for longs */ for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) { switch (ins->type) { case STACK_R8: case STACK_I8: { MonoInst *tree; if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg)) break; g_assert (ins->opcode == OP_REGOFFSET); tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg)); g_assert (tree); tree->opcode = OP_REGOFFSET; tree->inst_basereg = ins->inst_basereg; tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET; tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg)); g_assert (tree); tree->opcode = OP_REGOFFSET; tree->inst_basereg = ins->inst_basereg; tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET; break; } default: break; } } } #endif if (cfg->compute_gc_maps) { /* registers need liveness info even for !non refs */ for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; if (ins->opcode == OP_REGVAR) ins->flags |= MONO_INST_GC_TRACK; } } /* FIXME: widening and truncation */ /* * As an optimization, when a variable allocated to the stack is first loaded into * an lvreg, we will remember the lvreg and use it the next time instead of loading * the variable again. */ orig_next_vreg = cfg->next_vreg; vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg); lvregs_size = 1024; lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size); lvregs_len = 0; /* * These arrays contain the first and last instructions accessing a given * variable. * Since we emit bblocks in the same order we process them here, and we * don't split live ranges, these will precisely describe the live range of * the variable, i.e. the instruction range where a valid value can be found * in the variables location. * The live range is computed using the liveness info computed by the liveness pass. * We can't use vmv->range, since that is an abstract live range, and we need * one which is instruction precise. * FIXME: Variables used in out-of-line bblocks have a hole in their live range. 
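 * (live_range_start [vreg] / live_range_end [vreg] hold the defining and
 * last-using instruction of each variable, and the parallel *_bb arrays
 * record the bblock each of those instructions lives in.)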
*/ /* FIXME: Only do this if debugging info is requested */ live_range_start = g_new0 (MonoInst*, cfg->next_vreg); live_range_end = g_new0 (MonoInst*, cfg->next_vreg); live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg); live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg); /* Add spill loads/stores */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins; if (cfg->verbose_level > 2) printf ("\nSPILL BLOCK %d:\n", bb->block_num); /* Clear vreg_to_lvreg array */ for (i = 0; i < lvregs_len; i++) vreg_to_lvreg [lvregs [i]] = 0; lvregs_len = 0; cfg->cbb = bb; MONO_BB_FOR_EACH_INS (bb, ins) { const char *spec = INS_INFO (ins->opcode); int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs; gboolean store, no_lvreg; int sregs [MONO_MAX_SRC_REGS]; if (G_UNLIKELY (cfg->verbose_level > 2)) mono_print_ins (ins); if (ins->opcode == OP_NOP) continue; /* * We handle LDADDR here as well, since it can only be decomposed * when variable addresses are known. */ if (ins->opcode == OP_LDADDR) { MonoInst *var = (MonoInst *)ins->inst_p0; if (var->opcode == OP_VTARG_ADDR) { /* Happens on SPARC/S390 where vtypes are passed by reference */ MonoInst *vtaddr = var->inst_left; if (vtaddr->opcode == OP_REGVAR) { ins->opcode = OP_MOVE; ins->sreg1 = vtaddr->dreg; } else if (var->inst_left->opcode == OP_REGOFFSET) { ins->opcode = OP_LOAD_MEMBASE; ins->inst_basereg = vtaddr->inst_basereg; ins->inst_offset = vtaddr->inst_offset; } else NOT_IMPLEMENTED; } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) { /* gsharedvt arg passed by ref */ g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET); ins->opcode = OP_LOAD_MEMBASE; ins->inst_basereg = var->inst_basereg; ins->inst_offset = var->inst_offset; } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) { MonoInst *load, *load2, *load3; int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1; int reg1, reg2, reg3; MonoInst *info_var = cfg->gsharedvt_info_var; MonoInst *locals_var = cfg->gsharedvt_locals_var; /* * gsharedvt local. * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx]. 
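 * (Concretely, the code below emits a load of the info var if it is not
 * already in a register, a load of entries [idx] from the
 * MonoGSharedVtMethodRuntimeInfo, a load or move of the locals area address,
 * and finally an OP_PADD of the last two.)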
*/ g_assert (var->opcode == OP_GSHAREDVT_LOCAL); g_assert (info_var); g_assert (locals_var); /* Mark the instruction used to compute the locals var as used */ cfg->gsharedvt_locals_var_ins = NULL; /* Load the offset */ if (info_var->opcode == OP_REGOFFSET) { reg1 = alloc_ireg (cfg); NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset); } else if (info_var->opcode == OP_REGVAR) { load = NULL; reg1 = info_var->dreg; } else { g_assert_not_reached (); } reg2 = alloc_ireg (cfg); NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P)); /* Load the locals area address */ reg3 = alloc_ireg (cfg); if (locals_var->opcode == OP_REGOFFSET) { NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset); } else if (locals_var->opcode == OP_REGVAR) { NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg); } else { g_assert_not_reached (); } /* Compute the address */ ins->opcode = OP_PADD; ins->sreg1 = reg3; ins->sreg2 = reg2; mono_bblock_insert_before_ins (bb, ins, load3); mono_bblock_insert_before_ins (bb, load3, load2); if (load) mono_bblock_insert_before_ins (bb, load2, load); } else { g_assert (var->opcode == OP_REGOFFSET); ins->opcode = OP_ADD_IMM; ins->sreg1 = var->inst_basereg; ins->inst_imm = var->inst_offset; } *need_local_opts = TRUE; spec = INS_INFO (ins->opcode); } if (ins->opcode < MONO_CEE_LAST) { mono_print_ins (ins); g_assert_not_reached (); } /* * Store opcodes have destbasereg in the dreg, but in reality, it is an * src register. * FIXME: */ if (MONO_IS_STORE_MEMBASE (ins)) { tmp_reg = ins->dreg; ins->dreg = ins->sreg2; ins->sreg2 = tmp_reg; store = TRUE; spec2 [MONO_INST_DEST] = ' '; spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1]; spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST]; spec2 [MONO_INST_SRC3] = ' '; spec = spec2; } else if (MONO_IS_STORE_MEMINDEX (ins)) g_assert_not_reached (); else store = FALSE; no_lvreg = FALSE; if (G_UNLIKELY (cfg->verbose_level > 2)) { printf ("\t %.3s %d", spec, ins->dreg); num_sregs = mono_inst_get_src_registers (ins, sregs); for (srcindex = 0; srcindex < num_sregs; ++srcindex) printf (" %d", sregs [srcindex]); printf ("\n"); } /***************/ /* DREG */ /***************/ regtype = spec [MONO_INST_DEST]; g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' '))); prev_dreg = -1; int dreg_using_dest_to_membase_op = -1; if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) { MonoInst *var = get_vreg_to_inst (cfg, ins->dreg); MonoInst *store_ins; int store_opcode; MonoInst *def_ins = ins; int dreg = ins->dreg; /* The original vreg */ store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype); if (var->opcode == OP_REGVAR) { ins->dreg = var->dreg; } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) { /* * Instead of emitting a load+store, use a _membase opcode. 
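 * E.g. on x86 an 'OP_IADD dreg <- dreg, sreg2' whose dreg lives at
 * [basereg + offset] can become OP_X86_ADD_MEMBASE_REG, as selected by
 * op_to_op_dest_membase () above.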
*/ g_assert (var->opcode == OP_REGOFFSET); if (ins->opcode == OP_MOVE) { NULLIFY_INS (ins); def_ins = NULL; } else { dreg_using_dest_to_membase_op = ins->dreg; ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode); ins->inst_basereg = var->inst_basereg; ins->inst_offset = var->inst_offset; ins->dreg = -1; } spec = INS_INFO (ins->opcode); } else { guint32 lvreg; g_assert (var->opcode == OP_REGOFFSET); prev_dreg = ins->dreg; /* Invalidate any previous lvreg for this vreg */ vreg_to_lvreg [ins->dreg] = 0; lvreg = 0; if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) { regtype = 'l'; store_opcode = OP_STOREI8_MEMBASE_REG; } ins->dreg = alloc_dreg (cfg, stacktypes [regtype]); #if SIZEOF_REGISTER != 8 if (regtype == 'l') { NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg)); mono_bblock_insert_after_ins (bb, ins, store_ins); NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg)); mono_bblock_insert_after_ins (bb, ins, store_ins); def_ins = store_ins; } else #endif { g_assert (store_opcode != OP_STOREV_MEMBASE); /* Try to fuse the store into the instruction itself */ /* FIXME: Add more instructions */ if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) { ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode); ins->inst_imm = ins->inst_c0; ins->inst_destbasereg = var->inst_basereg; ins->inst_offset = var->inst_offset; spec = INS_INFO (ins->opcode); } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) { ins->opcode = store_opcode; ins->inst_destbasereg = var->inst_basereg; ins->inst_offset = var->inst_offset; no_lvreg = TRUE; tmp_reg = ins->dreg; ins->dreg = ins->sreg2; ins->sreg2 = tmp_reg; store = TRUE; spec2 [MONO_INST_DEST] = ' '; spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1]; spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST]; spec2 [MONO_INST_SRC3] = ' '; spec = spec2; } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) { // FIXME: The backends expect the base reg to be in inst_basereg ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode); ins->dreg = -1; ins->inst_basereg = var->inst_basereg; ins->inst_offset = var->inst_offset; spec = INS_INFO (ins->opcode); } else { /* printf ("INS: "); mono_print_ins (ins); */ /* Create a store instruction */ NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg); /* Insert it after the instruction */ mono_bblock_insert_after_ins (bb, ins, store_ins); def_ins = store_ins; /* * We can't assign ins->dreg to var->dreg here, since the * sregs could use it. So set a flag, and do it after * the sregs. 
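 * (The flag is dest_has_lvreg; once the sregs below have been processed,
 * ins->dreg is recorded in vreg_to_lvreg [prev_dreg].)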
*/ if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) dest_has_lvreg = TRUE; } } } if (def_ins && !live_range_start [dreg]) { live_range_start [dreg] = def_ins; live_range_start_bb [dreg] = bb; } if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) { MonoInst *tmp; MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF); tmp->inst_c1 = dreg; mono_bblock_insert_after_ins (bb, def_ins, tmp); } } /************/ /* SREGS */ /************/ num_sregs = mono_inst_get_src_registers (ins, sregs); for (srcindex = 0; srcindex < 3; ++srcindex) { regtype = spec [MONO_INST_SRC1 + srcindex]; sreg = sregs [srcindex]; g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' '))); if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) { MonoInst *var = get_vreg_to_inst (cfg, sreg); MonoInst *use_ins = ins; MonoInst *load_ins; guint32 load_opcode; if (var->opcode == OP_REGVAR) { sregs [srcindex] = var->dreg; //mono_inst_set_src_registers (ins, sregs); live_range_end [sreg] = use_ins; live_range_end_bb [sreg] = bb; if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) { MonoInst *tmp; MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE); /* var->dreg is a hreg */ tmp->inst_c1 = sreg; mono_bblock_insert_after_ins (bb, ins, tmp); } continue; } g_assert (var->opcode == OP_REGOFFSET); load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype); g_assert (load_opcode != OP_LOADV_MEMBASE); if (vreg_to_lvreg [sreg]) { g_assert (vreg_to_lvreg [sreg] != -1); /* The variable is already loaded to an lvreg */ if (G_UNLIKELY (cfg->verbose_level > 2)) printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg); sregs [srcindex] = vreg_to_lvreg [sreg]; //mono_inst_set_src_registers (ins, sregs); continue; } /* Try to fuse the load into the instruction */ if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) { ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode); sregs [0] = var->inst_basereg; //mono_inst_set_src_registers (ins, sregs); ins->inst_offset = var->inst_offset; } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) { ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode); sregs [1] = var->inst_basereg; //mono_inst_set_src_registers (ins, sregs); ins->inst_offset = var->inst_offset; } else { if (MONO_IS_REAL_MOVE (ins)) { ins->opcode = OP_NOP; sreg = ins->dreg; } else { //printf ("%d ", srcindex); mono_print_ins (ins); sreg = alloc_dreg (cfg, stacktypes [regtype]); if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) { if (var->dreg == prev_dreg) { /* * sreg refers to the value loaded by the load * emitted below, but we need to use ins->dreg * since it refers to the store emitted earlier. 
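 * (prev_dreg, remembered while processing the destination above, is what
 * makes this reuse detectable here.)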
*/ sreg = ins->dreg; } g_assert (sreg != -1); if (var->dreg == dreg_using_dest_to_membase_op) { if (cfg->verbose_level > 2) printf ("\tCan't cache R%d because it's part of a dreg dest_membase optimization\n", var->dreg); } else { vreg_to_lvreg [var->dreg] = sreg; } if (lvregs_len >= lvregs_size) { guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2); memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size); lvregs = new_lvregs; lvregs_size *= 2; } lvregs [lvregs_len ++] = var->dreg; } } sregs [srcindex] = sreg; //mono_inst_set_src_registers (ins, sregs); #if SIZEOF_REGISTER != 8 if (regtype == 'l') { NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET); mono_bblock_insert_before_ins (bb, ins, load_ins); NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET); mono_bblock_insert_before_ins (bb, ins, load_ins); use_ins = load_ins; } else #endif { #if SIZEOF_REGISTER == 4 g_assert (load_opcode != OP_LOADI8_MEMBASE); #endif NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset); mono_bblock_insert_before_ins (bb, ins, load_ins); use_ins = load_ins; } if (cfg->verbose_level > 2) mono_print_ins_index (0, use_ins); } if (var->dreg < orig_next_vreg) { live_range_end [var->dreg] = use_ins; live_range_end_bb [var->dreg] = bb; } if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) { MonoInst *tmp; MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE); tmp->inst_c1 = var->dreg; mono_bblock_insert_after_ins (bb, ins, tmp); } } } mono_inst_set_src_registers (ins, sregs); if (dest_has_lvreg) { g_assert (ins->dreg != -1); vreg_to_lvreg [prev_dreg] = ins->dreg; if (lvregs_len >= lvregs_size) { guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2); memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size); lvregs = new_lvregs; lvregs_size *= 2; } lvregs [lvregs_len ++] = prev_dreg; dest_has_lvreg = FALSE; } if (store) { tmp_reg = ins->dreg; ins->dreg = ins->sreg2; ins->sreg2 = tmp_reg; } if (MONO_IS_CALL (ins)) { /* Clear vreg_to_lvreg array */ for (i = 0; i < lvregs_len; i++) vreg_to_lvreg [lvregs [i]] = 0; lvregs_len = 0; } else if (ins->opcode == OP_NOP) { ins->dreg = -1; MONO_INST_NULLIFY_SREGS (ins); } if (cfg->verbose_level > 2) mono_print_ins_index (1, ins); } /* Extend the live range based on the liveness info */ if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) { for (i = 0; i < cfg->num_varinfo; i ++) { MonoMethodVar *vi = MONO_VARINFO (cfg, i); if (vreg_is_volatile (cfg, vi->vreg)) /* The liveness info is incomplete */ continue; if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) { /* Live from at least the first ins of this bb */ live_range_start [vi->vreg] = bb->code; live_range_start_bb [vi->vreg] = bb; } if (mono_bitset_test_fast (bb->live_out_set, i)) { /* Live at least until the last ins of this bb */ live_range_end [vi->vreg] = bb->last_ins; live_range_end_bb [vi->vreg] = bb; } } } } /* * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them * by storing the current native offset into MonoMethodVar->live_range_start/end. 
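 * (Each OP_LIVERANGE_START/OP_LIVERANGE_END carries the variable index in
 * inst_c0 and the vreg in inst_c1, as filled in below.)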
*/ if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) { for (i = 0; i < cfg->num_varinfo; ++i) { int vreg = MONO_VARINFO (cfg, i)->vreg; MonoInst *ins; if (live_range_start [vreg]) { MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START); ins->inst_c0 = i; ins->inst_c1 = vreg; mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins); } if (live_range_end [vreg]) { MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END); ins->inst_c0 = i; ins->inst_c1 = vreg; if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins) mono_add_ins_to_end (live_range_end_bb [vreg], ins); else mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins); } } } if (cfg->gsharedvt_locals_var_ins) { /* Nullify if unused */ cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST; cfg->gsharedvt_locals_var_ins->inst_imm = 0; } g_free (live_range_start); g_free (live_range_end); g_free (live_range_start_bb); g_free (live_range_end_bb); } /** * FIXME: * - use 'iadd' instead of 'int_add' * - handling ovf opcodes: decompose in method_to_ir. * - unify iregs/fregs * -> partly done, the missing parts are: * - a more complete unification would involve unifying the hregs as well, so * code wouldn't need if (fp) all over the place. but that would mean the hregs * would no longer map to the machine hregs, so the code generators would need to * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks * wouldn't work any more. Duplicating the code in mono_local_regalloc () into * fp/non-fp branches speeds it up by about 15%. * - use sext/zext opcodes instead of shifts * - add OP_ICALL * - get rid of TEMPLOADs if possible and use vregs instead * - clean up usage of OP_P/OP_ opcodes * - cleanup usage of DUMMY_USE * - cleanup the setting of ins->type for MonoInst's which are pushed on the * stack * - set the stack type and allocate a dreg in the EMIT_NEW macros * - get rid of all the <foo>2 stuff when the new JIT is ready. * - make sure handle_stack_args () is called before the branch is emitted * - when the new IR is done, get rid of all unused stuff * - COMPARE/BEQ as separate instructions or unify them ? * - keeping them separate allows specialized compare instructions like * compare_imm, compare_membase * - most back ends unify fp compare+branch, fp compare+ceq * - integrate mono_save_args into inline_method * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2 * - handle long shift opts on 32 bit platforms somehow: they require * 3 sregs (2 for arg1 and 1 for arg2) * - make byref a 'normal' type. * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a * variable if needed. * - do not start a new IL level bblock when cfg->cbb is changed by a function call * like inline_method. * - remove inlining restrictions * - fix LNEG and enable cfold of INEG * - generalize x86 optimizations like ldelema as a peephole optimization * - add store_mem_imm for amd64 * - optimize the loading of the interruption flag in the managed->native wrappers * - avoid special handling of OP_NOP in passes * - move code inserting instructions into one function/macro. * - try a coalescing phase after liveness analysis * - add float -> vreg conversion + local optimizations on !x86 * - figure out how to handle decomposed branches during optimizations, i.e. * compare+branch, op_jump_table+op_br etc.
* - promote RuntimeXHandles to vregs * - vtype cleanups: * - add a NEW_VARLOADA_VREG macro * - the vtype optimizations are blocked by the LDADDR opcodes generated for * accessing vtype fields. * - get rid of I8CONST on 64 bit platforms * - dealing with the increase in code size due to branches created during opcode * decomposition: * - use extended basic blocks * - all parts of the JIT * - handle_global_vregs () && local regalloc * - avoid introducing global vregs during decomposition, like 'vtable' in isinst * - sources of increase in code size: * - vtypes * - long compares * - isinst and castclass * - lvregs not allocated to global registers even if used multiple times * - call cctors outside the JIT, to make -v output more readable and JIT timings more * meaningful. * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization) * - add all micro optimizations from the old JIT * - put tree optimizations into the deadce pass * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch * specific function. * - unify the float comparison opcodes with the other comparison opcodes, i.e. * fcompare + branchCC. * - create a helper function for allocating a stack slot, taking into account * MONO_CFG_HAS_SPILLUP. * - merge r68207. * - optimize mono_regstate2_alloc_int/float. * - fix the pessimistic handling of variables accessed in exception handler blocks. * - need to write a tree optimization pass, but the creation of trees is difficult, i.e. * parts of the tree could be separated by other instructions, killing the tree * arguments, or stores killing loads etc. Also, should we fold loads into other * instructions if the result of the load is used multiple times ? * - make the REM_IMM optimization in mini-x86.c arch-independent. * - LAST MERGE: 108395. * - when returning vtypes in registers, generate IR and append it to the end of the * last bb instead of doing it in the epilog. * - change the store opcodes so they use sreg1 instead of dreg to store the base register. */ /* NOTES ----- - When to decompose opcodes: - earlier: this makes some optimizations hard to implement, since the low level IR no longer contains the necessary information. But it is easier to do. - later: harder to implement, enables more optimizations. - Branches inside bblocks: - created when decomposing complex opcodes. - branches to another bblock: harmless, but not tracked by the branch optimizations, so need to branch to a label at the start of the bblock. - branches to inside the same bblock: very problematic, trips up the local reg allocator. Can be fixed by splitting the current bblock, but that is a complex operation, since some local vregs can become global vregs etc. - Local/global vregs: - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the local register allocator. - global vregs: used in more than one bblock. Have an associated MonoMethodVar structure, created by mono_create_var (). Assigned to hregs or the stack by the global register allocator. - When to do optimizations like alu->alu_imm: - earlier -> saves work later on since the IR will be smaller/simpler - later -> can work on more instructions - Handling of valuetypes: - When a vtype is pushed on the stack, a new temporary is created, an instruction computing its address (LDADDR) is emitted and pushed on the stack. Need to optimize cases when the vtype is used immediately as in argument passing, stloc etc.
- Instead of the to_end stuff in the old JIT, simply call the function handling the values on the stack before emitting the last instruction of the bb. */ #else /* !DISABLE_JIT */ MONO_EMPTY_SOURCE_FILE (method_to_ir); #endif /* !DISABLE_JIT */
/** * \file * Convert CIL to the JIT internal representation * * Author: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * * (C) 2002 Ximian, Inc. * Copyright 2003-2010 Novell, Inc (http://www.novell.com) * Copyright 2011 Xamarin, Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include <config.h> #include <glib.h> #include <mono/utils/mono-compiler.h> #include "mini.h" #ifndef DISABLE_JIT #include <signal.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <math.h> #include <string.h> #include <ctype.h> #ifdef HAVE_SYS_TIME_H #include <sys/time.h> #endif #ifdef HAVE_ALLOCA_H #include <alloca.h> #endif #include <mono/utils/memcheck.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/assembly.h> #include <mono/metadata/assembly-internals.h> #include <mono/metadata/attrdefs.h> #include <mono/metadata/loader.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/class.h> #include <mono/metadata/class-abi-details.h> #include <mono/metadata/object.h> #include <mono/metadata/exception.h> #include <mono/metadata/exception-internals.h> #include <mono/metadata/opcodes.h> #include <mono/metadata/mono-endian.h> #include <mono/metadata/tokentype.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/marshal.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/debug-internals.h> #include <mono/metadata/gc-internals.h> #include <mono/metadata/threads-types.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/profiler.h> #include <mono/metadata/monitor.h> #include <mono/utils/mono-memory-model.h> #include <mono/utils/mono-error-internals.h> #include <mono/metadata/mono-basic-block.h> #include <mono/metadata/reflection-internals.h> #include <mono/utils/mono-threads-coop.h> #include <mono/utils/mono-utils-debug.h> #include <mono/utils/mono-logger-internals.h> #include <mono/metadata/verify-internals.h> #include <mono/metadata/icall-decl.h> #include "mono/metadata/icall-signatures.h" #include "trace.h" #include "ir-emit.h" #include "jit-icalls.h" #include <mono/jit/jit.h> #include "seq-points.h" #include "aot-compiler.h" #include "mini-llvm.h" #include "mini-runtime.h" #include "llvmonly-runtime.h" #include "mono/utils/mono-tls-inline.h" #define BRANCH_COST 10 #define CALL_COST 10 /* Used for the JIT */ #define INLINE_LENGTH_LIMIT 20 /* * The aot and jit inline limits should be different, * since aot sees the whole program so we can let opt inline methods for us, * while the jit only sees one method, so we have to inline things ourselves. */ /* Used by LLVM AOT */ #define LLVM_AOT_INLINE_LENGTH_LIMIT 30 /* Used to LLVM JIT */ #define LLVM_JIT_INLINE_LENGTH_LIMIT 100 static const gboolean debug_tailcall = FALSE; // logging static const gboolean debug_tailcall_try_all = FALSE; // consider any call followed by ret gboolean mono_tailcall_print_enabled (void) { return debug_tailcall || MONO_TRACE_IS_TRACED (G_LOG_LEVEL_DEBUG, MONO_TRACE_TAILCALL); } void mono_tailcall_print (const char *format, ...) 
{ if (!mono_tailcall_print_enabled ()) return; va_list args; va_start (args, format); g_printv (format, args); va_end (args); } /* These have 'cfg' as an implicit argument */ #define INLINE_FAILURE(msg) do { \ if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \ inline_failure (cfg, msg); \ goto exception_exit; \ } \ } while (0) #define CHECK_CFG_EXCEPTION do {\ if (cfg->exception_type != MONO_EXCEPTION_NONE) \ goto exception_exit; \ } while (0) #define FIELD_ACCESS_FAILURE(method, field) do { \ field_access_failure ((cfg), (method), (field)); \ goto exception_exit; \ } while (0) #define GENERIC_SHARING_FAILURE(opcode) do { \ if (cfg->gshared) { \ gshared_failure (cfg, opcode, __FILE__, __LINE__); \ goto exception_exit; \ } \ } while (0) #define GSHAREDVT_FAILURE(opcode) do { \ if (cfg->gsharedvt) { \ gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \ goto exception_exit; \ } \ } while (0) #define OUT_OF_MEMORY_FAILURE do { \ mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \ mono_error_set_out_of_memory (cfg->error, ""); \ goto exception_exit; \ } while (0) #define DISABLE_AOT(cfg) do { \ if ((cfg)->verbose_level >= 2) \ printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \ (cfg)->disable_aot = TRUE; \ } while (0) #define LOAD_ERROR do { \ break_on_unverified (); \ mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \ goto exception_exit; \ } while (0) #define TYPE_LOAD_ERROR(klass) do { \ cfg->exception_ptr = klass; \ LOAD_ERROR; \ } while (0) #define CHECK_CFG_ERROR do {\ if (!is_ok (cfg->error)) { \ mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \ goto mono_error_exit; \ } \ } while (0) int mono_op_to_op_imm (int opcode); int mono_op_to_op_imm_noemul (int opcode); static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always, gboolean *is_empty); static MonoInst* convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins); /* helper methods signatures */ /* type loading helpers */ static GENERATE_GET_CLASS_WITH_CACHE (iequatable, "System", "IEquatable`1") static GENERATE_GET_CLASS_WITH_CACHE (geqcomparer, "System.Collections.Generic", "GenericEqualityComparer`1"); /* * Instruction metadata */ #ifdef MINI_OP #undef MINI_OP #endif #ifdef MINI_OP3 #undef MINI_OP3 #endif #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ', #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3, #define NONE ' ' #define IREG 'i' #define FREG 'f' #define VREG 'v' #define XREG 'x' #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == TARGET_SIZEOF_VOID_P #define LREG IREG #else #define LREG 'l' #endif /* keep in sync with the enum in mini.h */ const char mini_ins_info[] = { #include "mini-ops.h" }; #undef MINI_OP #undef MINI_OP3 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)), #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))), /* * This should contain the index of the last sreg + 1. This is not the same * as the number of sregs for opcodes like IA64_CMP_EQ_IMM. 
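 * (The two differ when an sreg slot in the middle of an opcode's spec is
 * unused: the value here is the highest used slot + 1, not the number of
 * populated slots.)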
*/ const gint8 mini_ins_sreg_counts[] = { #include "mini-ops.h" }; #undef MINI_OP #undef MINI_OP3 guint32 mono_alloc_ireg (MonoCompile *cfg) { return alloc_ireg (cfg); } guint32 mono_alloc_lreg (MonoCompile *cfg) { return alloc_lreg (cfg); } guint32 mono_alloc_freg (MonoCompile *cfg) { return alloc_freg (cfg); } guint32 mono_alloc_preg (MonoCompile *cfg) { return alloc_preg (cfg); } guint32 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type) { return alloc_dreg (cfg, stack_type); } /* * mono_alloc_ireg_ref: * * Allocate an IREG, and mark it as holding a GC ref. */ guint32 mono_alloc_ireg_ref (MonoCompile *cfg) { return alloc_ireg_ref (cfg); } /* * mono_alloc_ireg_mp: * * Allocate an IREG, and mark it as holding a managed pointer. */ guint32 mono_alloc_ireg_mp (MonoCompile *cfg) { return alloc_ireg_mp (cfg); } /* * mono_alloc_ireg_copy: * * Allocate an IREG with the same GC type as VREG. */ guint32 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg) { if (vreg_is_ref (cfg, vreg)) return alloc_ireg_ref (cfg); else if (vreg_is_mp (cfg, vreg)) return alloc_ireg_mp (cfg); else return alloc_ireg (cfg); } guint mono_type_to_regmove (MonoCompile *cfg, MonoType *type) { if (m_type_is_byref (type)) return OP_MOVE; type = mini_get_underlying_type (type); handle_enum: switch (type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_MOVE; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_MOVE; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_MOVE; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: return OP_MOVE; case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: return OP_MOVE; case MONO_TYPE_I8: case MONO_TYPE_U8: #if SIZEOF_REGISTER == 8 return OP_MOVE; #else return OP_LMOVE; #endif case MONO_TYPE_R4: return cfg->r4fp ? 
OP_RMOVE : OP_FMOVE; case MONO_TYPE_R8: return OP_FMOVE; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (type->data.klass)) { type = mono_class_enum_basetype_internal (type->data.klass); goto handle_enum; } if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type))) return OP_XMOVE; return OP_VMOVE; case MONO_TYPE_TYPEDBYREF: return OP_VMOVE; case MONO_TYPE_GENERICINST: if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type))) return OP_XMOVE; type = m_class_get_byval_arg (type->data.generic_class->container_class); goto handle_enum; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: g_assert (cfg->gshared); if (mini_type_var_is_vt (type)) return OP_VMOVE; else return mono_type_to_regmove (cfg, mini_get_underlying_type (type)); default: g_error ("unknown type 0x%02x in type_to_regstore", type->type); } return -1; } void mono_print_bb (MonoBasicBlock *bb, const char *msg) { int i; MonoInst *tree; GString *str = g_string_new (""); g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num); for (i = 0; i < bb->in_count; ++i) g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn); g_string_append_printf (str, ", OUT: "); for (i = 0; i < bb->out_count; ++i) g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn); g_string_append_printf (str, " ]\n"); g_print ("%s", str->str); g_string_free (str, TRUE); for (tree = bb->code; tree; tree = tree->next) mono_print_ins_index (-1, tree); } static MONO_NEVER_INLINE gboolean break_on_unverified (void) { if (mini_debug_options.break_on_unverified) { G_BREAKPOINT (); return TRUE; } return FALSE; } static void clear_cfg_error (MonoCompile *cfg) { mono_error_cleanup (cfg->error); error_init (cfg->error); } static MONO_NEVER_INLINE void field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field) { char *method_fname = mono_method_full_name (method, TRUE); char *field_fname = mono_field_full_name (field); mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); mono_error_set_generic_error (cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); g_free (method_fname); g_free (field_fname); } static MONO_NEVER_INLINE void inline_failure (MonoCompile *cfg, const char *msg) { if (cfg->verbose_level >= 2) printf ("inline failed: %s\n", msg); mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED); } static MONO_NEVER_INLINE void gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line) { if (cfg->verbose_level > 2) printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name (opcode), line); mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); } static MONO_NEVER_INLINE void gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line) { cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line); if (cfg->verbose_level >= 2) printf ("%s\n", cfg->exception_message); mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); } void mini_set_inline_failure (MonoCompile *cfg, const 
char *msg) { if (cfg->verbose_level >= 2) printf ("inline failed: %s\n", msg); mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED); } /* * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e. * foo<T> (int i) { ldarg.0; box T; } */ #define UNVERIFIED do { \ if (cfg->gsharedvt) { \ if (cfg->verbose_level > 2) \ printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \ mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \ goto exception_exit; \ } \ break_on_unverified (); \ goto unverified; \ } while (0) #define GET_BBLOCK(cfg,tblock,ip) do { \ (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \ if (!(tblock)) { \ if ((ip) >= end || (ip) < header->code) UNVERIFIED; \ NEW_BBLOCK (cfg, (tblock)); \ (tblock)->cil_code = (ip); \ ADD_BBLOCK (cfg, (tblock)); \ } \ } while (0) /* Emit conversions so both operands of a binary opcode are of the same type */ static void add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref) { MonoInst *arg1 = *arg1_ref; MonoInst *arg2 = *arg2_ref; if (cfg->r4fp && ((arg1->type == STACK_R4 && arg2->type == STACK_R8) || (arg1->type == STACK_R8 && arg2->type == STACK_R4))) { MonoInst *conv; /* Mixing r4/r8 is allowed by the spec */ if (arg1->type == STACK_R4) { int dreg = alloc_freg (cfg); EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg); conv->type = STACK_R8; ins->sreg1 = dreg; *arg1_ref = conv; } if (arg2->type == STACK_R4) { int dreg = alloc_freg (cfg); EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg); conv->type = STACK_R8; ins->sreg2 = dreg; *arg2_ref = conv; } } #if SIZEOF_REGISTER == 8 /* FIXME: Need to add many more cases */ if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { MonoInst *widen; int dr = alloc_preg (cfg); EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); (ins)->sreg2 = widen->dreg; } #endif } #define ADD_UNOP(op) do { \ MONO_INST_NEW (cfg, ins, (op)); \ sp--; \ ins->sreg1 = sp [0]->dreg; \ type_from_op (cfg, ins, sp [0], NULL); \ CHECK_TYPE (ins); \ (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \ MONO_ADD_INS ((cfg)->cbb, (ins)); \ *sp++ = mono_decompose_opcode (cfg, ins); \ } while (0) #define ADD_BINCOND(next_block) do { \ MonoInst *cmp; \ sp -= 2; \ MONO_INST_NEW(cfg, cmp, OP_COMPARE); \ cmp->sreg1 = sp [0]->dreg; \ cmp->sreg2 = sp [1]->dreg; \ add_widen_op (cfg, cmp, &sp [0], &sp [1]); \ type_from_op (cfg, cmp, sp [0], sp [1]); \ CHECK_TYPE (cmp); \ type_from_op (cfg, ins, sp [0], sp [1]); \ ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \ GET_BBLOCK (cfg, tblock, target); \ link_bblock (cfg, cfg->cbb, tblock); \ ins->inst_true_bb = tblock; \ if ((next_block)) { \ link_bblock (cfg, cfg->cbb, (next_block)); \ ins->inst_false_bb = (next_block); \ start_new_bblock = 1; \ } else { \ GET_BBLOCK (cfg, tblock, next_ip); \ link_bblock (cfg, cfg->cbb, tblock); \ ins->inst_false_bb = tblock; \ start_new_bblock = 2; \ } \ if (sp != stack_start) { \ handle_stack_args (cfg, stack_start, sp - stack_start); \ CHECK_UNVERIFIABLE (cfg); \ } \ MONO_ADD_INS (cfg->cbb, cmp); \ MONO_ADD_INS (cfg->cbb, ins); \ } while (0) /* * * link_bblock: Links two basic blocks * * links two basic blocks in the control flow graph, the 'from' * argument is the starting block and the 'to' argument is the block * that control flow continues to after 'from'.
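 *
 * Usage sketch (hypothetical bblocks, not part of the build):
 *
 *   link_bblock (cfg, bb1, bb2);   // bb1->out_bb gains bb2, bb2->in_bb gains bb1
 *   link_bblock (cfg, bb1, bb2);   // no-op: both edge lists already contain the pair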
*/ static void link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to) { MonoBasicBlock **newa; int i, found; #if 0 if (from->cil_code) { if (to->cil_code) printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code); else printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code); } else { if (to->cil_code) printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code); else printf ("edge from entry to exit\n"); } #endif found = FALSE; for (i = 0; i < from->out_count; ++i) { if (to == from->out_bb [i]) { found = TRUE; break; } } if (!found) { newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1)); for (i = 0; i < from->out_count; ++i) { newa [i] = from->out_bb [i]; } newa [i] = to; from->out_count++; from->out_bb = newa; } found = FALSE; for (i = 0; i < to->in_count; ++i) { if (from == to->in_bb [i]) { found = TRUE; break; } } if (!found) { newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1)); for (i = 0; i < to->in_count; ++i) { newa [i] = to->in_bb [i]; } newa [i] = from; to->in_count++; to->in_bb = newa; } } void mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to) { link_bblock (cfg, from, to); } static void mono_create_spvar_for_region (MonoCompile *cfg, int region); static void mark_bb_in_region (MonoCompile *cfg, guint region, uint32_t start, uint32_t end) { MonoBasicBlock *bb = cfg->cil_offset_to_bb [start]; //start must exist in cil_offset_to_bb as those are il offsets used by EH which should have GET_BBLOCK early. g_assert (bb); if (cfg->verbose_level > 1) g_print ("FIRST BB for %d is BB_%d\n", start, bb->block_num); for (; bb && bb->real_offset < end; bb = bb->next_bb) { //no one claimed this bb, take it. 
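		/* the checks below then prefer handler regions over try regions and never overwrite an existing handler region */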
if (bb->region == -1) { bb->region = region; continue; } //current region is an early handler, bail if ((bb->region & (0xf << 4)) != MONO_REGION_TRY) { continue; } //current region is a try, only overwrite if new region is a handler if ((region & (0xf << 4)) != MONO_REGION_TRY) { bb->region = region; } } if (cfg->spvars) mono_create_spvar_for_region (cfg, region); } static void compute_bb_regions (MonoCompile *cfg) { MonoBasicBlock *bb; MonoMethodHeader *header = cfg->header; int i; for (bb = cfg->bb_entry; bb; bb = bb->next_bb) bb->region = -1; for (i = 0; i < header->num_clauses; ++i) { MonoExceptionClause *clause = &header->clauses [i]; if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) mark_bb_in_region (cfg, ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags, clause->data.filter_offset, clause->handler_offset); guint handler_region; if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY) handler_region = ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags; else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) handler_region = ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags; else handler_region = ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags; mark_bb_in_region (cfg, handler_region, clause->handler_offset, clause->handler_offset + clause->handler_len); mark_bb_in_region (cfg, ((i + 1) << 8) | clause->flags, clause->try_offset, clause->try_offset + clause->try_len); } if (cfg->verbose_level > 2) { MonoBasicBlock *bb; for (bb = cfg->bb_entry; bb; bb = bb->next_bb) g_print ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region); } } static gboolean ip_in_finally_clause (MonoCompile *cfg, int offset) { MonoMethodHeader *header = cfg->header; MonoExceptionClause *clause; int i; for (i = 0; i < header->num_clauses; ++i) { clause = &header->clauses [i]; if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT) continue; if (MONO_OFFSET_IN_HANDLER (clause, offset)) return TRUE; } return FALSE; } /* Find clauses between ip and target, from inner to outer */ static GList* mono_find_leave_clauses (MonoCompile *cfg, guchar *ip, guchar *target) { MonoMethodHeader *header = cfg->header; MonoExceptionClause *clause; int i; GList *res = NULL; for (i = 0; i < header->num_clauses; ++i) { clause = &header->clauses [i]; if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) && (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) { MonoLeaveClause *leave = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoLeaveClause)); leave->index = i; leave->clause = clause; res = g_list_append_mempool (cfg->mempool, res, leave); } } return res; } static void mono_create_spvar_for_region (MonoCompile *cfg, int region) { MonoInst *var; var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region)); if (var) return; var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); /* prevent it from being register allocated */ var->flags |= MONO_INST_VOLATILE; g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var); } MonoInst * mono_find_exvar_for_offset (MonoCompile *cfg, int offset) { return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset)); } static MonoInst* mono_create_exvar_for_offset (MonoCompile *cfg, int offset) { MonoInst *var; var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset)); if (var) return var; var = mono_compile_create_var (cfg, mono_get_object_type (), OP_LOCAL); /* prevent it from being register allocated */ var->flags |= MONO_INST_VOLATILE; 
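	/* keyed by IL offset, so every handler clause covering this offset shares a single exception variable */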
g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var); return var; } /* * Returns the type used in the eval stack when @type is loaded. * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases. */ void mini_type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst) { MonoClass *klass; type = mini_get_underlying_type (type); inst->klass = klass = mono_class_from_mono_type_internal (type); if (m_type_is_byref (type)) { inst->type = STACK_MP; return; } handle_enum: switch (type->type) { case MONO_TYPE_VOID: inst->type = STACK_INV; return; case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: inst->type = STACK_I4; return; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: inst->type = STACK_PTR; return; case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: inst->type = STACK_OBJ; return; case MONO_TYPE_I8: case MONO_TYPE_U8: inst->type = STACK_I8; return; case MONO_TYPE_R4: inst->type = cfg->r4_stack_type; break; case MONO_TYPE_R8: inst->type = STACK_R8; return; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (type->data.klass)) { type = mono_class_enum_basetype_internal (type->data.klass); goto handle_enum; } else { inst->klass = klass; inst->type = STACK_VTYPE; return; } case MONO_TYPE_TYPEDBYREF: inst->klass = mono_defaults.typed_reference_class; inst->type = STACK_VTYPE; return; case MONO_TYPE_GENERICINST: type = m_class_get_byval_arg (type->data.generic_class->container_class); goto handle_enum; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: g_assert (cfg->gshared); if (mini_is_gsharedvt_type (type)) { g_assert (cfg->gsharedvt); inst->type = STACK_VTYPE; } else { mini_type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst); } return; default: g_error ("unknown type 0x%02x in eval stack type", type->type); } } /* * The following tables are used to quickly validate the IL code in type_from_op (). */ #define IF_P8(v) (SIZEOF_VOID_P == 8 ? 
v : STACK_INV) #define IF_P8_I8 IF_P8(STACK_I8) #define IF_P8_PTR IF_P8(STACK_PTR) static const char bin_num_table [STACK_MAX] [STACK_MAX] = { {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_I4, IF_P8_I8, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV}, {STACK_INV, IF_P8_I8, STACK_I8, IF_P8_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_PTR, IF_P8_PTR, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8}, {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4} }; static const char neg_table [] = { STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4 }; /* reduce the size of this table */ static const char bin_int_table [STACK_MAX] [STACK_MAX] = { {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_I4, IF_P8_I8, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, IF_P8_I8, STACK_I8, IF_P8_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_PTR, IF_P8_PTR, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV} }; #define P1 (SIZEOF_VOID_P == 8) static const char bin_comp_table [STACK_MAX] [STACK_MAX] = { /* Inv i L p F & O vt r4 */ {0}, {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */ {0, 0, 1,P1, 0, 0, 0, 0}, /* L, int64 */ {0, 1,P1, 1, 0, 2, 4, 0}, /* p, ptr */ {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */ {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */ {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */ {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */ {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */ }; #undef P1 /* reduce the size of this table */ static const char shift_table [STACK_MAX] [STACK_MAX] = { {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}, {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV} }; /* * Tables to map from the non-specific opcode to the matching * type-specific opcode. 
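 *
 * Worked example of the offset trick (values from binops_op_map below):
 * for an int32 add, type_from_op () computes
 *
 *   ins->opcode = CEE_ADD + binops_op_map [STACK_I4]
 *               = CEE_ADD + (OP_IADD - CEE_ADD)
 *               = OP_IADD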
*/ /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */ static const guint16 binops_op_map [STACK_MAX] = { 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD }; /* handles from CEE_NEG to CEE_CONV_U8 */ static const guint16 unops_op_map [STACK_MAX] = { 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG }; /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */ static const guint16 ovfops_op_map [STACK_MAX] = { 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2 }; /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */ static const guint16 ovf2ops_op_map [STACK_MAX] = { 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN }; /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */ static const guint16 ovf3ops_op_map [STACK_MAX] = { 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1 }; /* handles from CEE_BEQ to CEE_BLT_UN */ static const guint16 beqops_op_map [STACK_MAX] = { 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ }; /* handles from CEE_CEQ to CEE_CLT_UN */ static const guint16 ceqops_op_map [STACK_MAX] = { 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ }; /* * Sets ins->type (the type on the eval stack) according to the * type of the opcode and the arguments to it. * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV. * * FIXME: this function sets ins->type unconditionally in some cases, but * it should set it to invalid for some types (a conv.x on an object) */ static void type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2) { switch (ins->opcode) { /* binops */ case MONO_CEE_ADD: case MONO_CEE_SUB: case MONO_CEE_MUL: case MONO_CEE_DIV: case MONO_CEE_REM: /* FIXME: check unverifiable args for STACK_MP */ ins->type = bin_num_table [src1->type] [src2->type]; ins->opcode += binops_op_map [ins->type]; break; case MONO_CEE_DIV_UN: case MONO_CEE_REM_UN: case MONO_CEE_AND: case MONO_CEE_OR: case MONO_CEE_XOR: ins->type = bin_int_table [src1->type] [src2->type]; ins->opcode += binops_op_map [ins->type]; break; case MONO_CEE_SHL: case MONO_CEE_SHR: case MONO_CEE_SHR_UN: ins->type = shift_table [src1->type] [src2->type]; ins->opcode += binops_op_map [ins->type]; break; case OP_COMPARE: case OP_LCOMPARE: case OP_ICOMPARE: ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV; if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP)))) ins->opcode = OP_LCOMPARE; else if (src1->type == STACK_R4) ins->opcode = OP_RCOMPARE; else if (src1->type == STACK_R8) ins->opcode = OP_FCOMPARE; else ins->opcode = OP_ICOMPARE; break; case OP_ICOMPARE_IMM: ins->type = bin_comp_table [src1->type] [src1->type] ? 
STACK_I4 : STACK_INV; if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP)))) ins->opcode = OP_LCOMPARE_IMM; break; case MONO_CEE_BEQ: case MONO_CEE_BGE: case MONO_CEE_BGT: case MONO_CEE_BLE: case MONO_CEE_BLT: case MONO_CEE_BNE_UN: case MONO_CEE_BGE_UN: case MONO_CEE_BGT_UN: case MONO_CEE_BLE_UN: case MONO_CEE_BLT_UN: ins->opcode += beqops_op_map [src1->type]; break; case OP_CEQ: ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV; ins->opcode += ceqops_op_map [src1->type]; break; case OP_CGT: case OP_CGT_UN: case OP_CLT: case OP_CLT_UN: ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV; ins->opcode += ceqops_op_map [src1->type]; break; /* unops */ case MONO_CEE_NEG: ins->type = neg_table [src1->type]; ins->opcode += unops_op_map [ins->type]; break; case MONO_CEE_NOT: if (src1->type >= STACK_I4 && src1->type <= STACK_PTR) ins->type = src1->type; else ins->type = STACK_INV; ins->opcode += unops_op_map [ins->type]; break; case MONO_CEE_CONV_I1: case MONO_CEE_CONV_I2: case MONO_CEE_CONV_I4: case MONO_CEE_CONV_U4: ins->type = STACK_I4; ins->opcode += unops_op_map [src1->type]; break; case MONO_CEE_CONV_R_UN: ins->type = STACK_R8; switch (src1->type) { case STACK_I4: case STACK_PTR: ins->opcode = OP_ICONV_TO_R_UN; break; case STACK_I8: ins->opcode = OP_LCONV_TO_R_UN; break; case STACK_R4: ins->opcode = OP_RCONV_TO_R8; break; case STACK_R8: ins->opcode = OP_FMOVE; break; } break; case MONO_CEE_CONV_OVF_I1: case MONO_CEE_CONV_OVF_U1: case MONO_CEE_CONV_OVF_I2: case MONO_CEE_CONV_OVF_U2: case MONO_CEE_CONV_OVF_I4: case MONO_CEE_CONV_OVF_U4: ins->type = STACK_I4; ins->opcode += ovf3ops_op_map [src1->type]; break; case MONO_CEE_CONV_OVF_I_UN: case MONO_CEE_CONV_OVF_U_UN: ins->type = STACK_PTR; ins->opcode += ovf2ops_op_map [src1->type]; break; case MONO_CEE_CONV_OVF_I1_UN: case MONO_CEE_CONV_OVF_I2_UN: case MONO_CEE_CONV_OVF_I4_UN: case MONO_CEE_CONV_OVF_U1_UN: case MONO_CEE_CONV_OVF_U2_UN: case MONO_CEE_CONV_OVF_U4_UN: ins->type = STACK_I4; ins->opcode += ovf2ops_op_map [src1->type]; break; case MONO_CEE_CONV_U: ins->type = STACK_PTR; switch (src1->type) { case STACK_I4: ins->opcode = OP_ICONV_TO_U; break; case STACK_PTR: case STACK_MP: case STACK_OBJ: #if TARGET_SIZEOF_VOID_P == 8 ins->opcode = OP_LCONV_TO_U; #else ins->opcode = OP_MOVE; #endif break; case STACK_I8: ins->opcode = OP_LCONV_TO_U; break; case STACK_R8: if (TARGET_SIZEOF_VOID_P == 8) ins->opcode = OP_FCONV_TO_U8; else ins->opcode = OP_FCONV_TO_U4; break; case STACK_R4: if (TARGET_SIZEOF_VOID_P == 8) ins->opcode = OP_RCONV_TO_U8; else ins->opcode = OP_RCONV_TO_U4; break; } break; case MONO_CEE_CONV_I8: case MONO_CEE_CONV_U8: ins->type = STACK_I8; ins->opcode += unops_op_map [src1->type]; break; case MONO_CEE_CONV_OVF_I8: case MONO_CEE_CONV_OVF_U8: ins->type = STACK_I8; ins->opcode += ovf3ops_op_map [src1->type]; break; case MONO_CEE_CONV_OVF_U8_UN: case MONO_CEE_CONV_OVF_I8_UN: ins->type = STACK_I8; ins->opcode += ovf2ops_op_map [src1->type]; break; case MONO_CEE_CONV_R4: ins->type = cfg->r4_stack_type; ins->opcode += unops_op_map [src1->type]; break; case MONO_CEE_CONV_R8: ins->type = STACK_R8; ins->opcode += unops_op_map [src1->type]; break; case OP_CKFINITE: ins->type = STACK_R8; break; case MONO_CEE_CONV_U2: case MONO_CEE_CONV_U1: ins->type = STACK_I4; ins->opcode += ovfops_op_map [src1->type]; break; case MONO_CEE_CONV_I: case MONO_CEE_CONV_OVF_I: case MONO_CEE_CONV_OVF_U: ins->type 
= STACK_PTR; ins->opcode += ovfops_op_map [src1->type]; switch (ins->opcode) { case OP_FCONV_TO_I: ins->opcode = TARGET_SIZEOF_VOID_P == 4 ? OP_FCONV_TO_I4 : OP_FCONV_TO_I8; break; case OP_RCONV_TO_I: ins->opcode = TARGET_SIZEOF_VOID_P == 4 ? OP_RCONV_TO_I4 : OP_RCONV_TO_I8; break; default: break; } break; case MONO_CEE_ADD_OVF: case MONO_CEE_ADD_OVF_UN: case MONO_CEE_MUL_OVF: case MONO_CEE_MUL_OVF_UN: case MONO_CEE_SUB_OVF: case MONO_CEE_SUB_OVF_UN: ins->type = bin_num_table [src1->type] [src2->type]; ins->opcode += ovfops_op_map [src1->type]; if (ins->type == STACK_R8) ins->type = STACK_INV; break; case OP_LOAD_MEMBASE: ins->type = STACK_PTR; break; case OP_LOADI1_MEMBASE: case OP_LOADU1_MEMBASE: case OP_LOADI2_MEMBASE: case OP_LOADU2_MEMBASE: case OP_LOADI4_MEMBASE: case OP_LOADU4_MEMBASE: ins->type = STACK_PTR; break; case OP_LOADI8_MEMBASE: ins->type = STACK_I8; break; case OP_LOADR4_MEMBASE: ins->type = cfg->r4_stack_type; break; case OP_LOADR8_MEMBASE: ins->type = STACK_R8; break; default: g_error ("opcode 0x%04x not handled in type from op", ins->opcode); break; } if (ins->type == STACK_MP) { if (src1->type == STACK_MP) ins->klass = src1->klass; else ins->klass = mono_defaults.object_class; } } void mini_type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2) { type_from_op (cfg, ins, src1, src2); } static MonoClass* ldind_to_type (int op) { switch (op) { case MONO_CEE_LDIND_I1: return mono_defaults.sbyte_class; case MONO_CEE_LDIND_U1: return mono_defaults.byte_class; case MONO_CEE_LDIND_I2: return mono_defaults.int16_class; case MONO_CEE_LDIND_U2: return mono_defaults.uint16_class; case MONO_CEE_LDIND_I4: return mono_defaults.int32_class; case MONO_CEE_LDIND_U4: return mono_defaults.uint32_class; case MONO_CEE_LDIND_I8: return mono_defaults.int64_class; case MONO_CEE_LDIND_I: return mono_defaults.int_class; case MONO_CEE_LDIND_R4: return mono_defaults.single_class; case MONO_CEE_LDIND_R8: return mono_defaults.double_class; case MONO_CEE_LDIND_REF:return mono_defaults.object_class; //FIXME we should try to return a more specific type default: g_error ("Unknown ldind type %d", op); } } static MonoClass* stind_to_type (int op) { switch (op) { case MONO_CEE_STIND_I1: return mono_defaults.sbyte_class; case MONO_CEE_STIND_I2: return mono_defaults.int16_class; case MONO_CEE_STIND_I4: return mono_defaults.int32_class; case MONO_CEE_STIND_I8: return mono_defaults.int64_class; case MONO_CEE_STIND_I: return mono_defaults.int_class; case MONO_CEE_STIND_R4: return mono_defaults.single_class; case MONO_CEE_STIND_R8: return mono_defaults.double_class; case MONO_CEE_STIND_REF: return mono_defaults.object_class; default: g_error ("Unknown stind type %d", op); } } #if 0 static const char param_table [STACK_MAX] [STACK_MAX] = { {0}, }; static int check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig) { int i; if (sig->hasthis) { switch (args->type) { case STACK_I4: case STACK_I8: case STACK_R8: case STACK_VTYPE: case STACK_INV: return 0; } args++; } for (i = 0; i < sig->param_count; ++i) { switch (args [i].type) { case STACK_INV: return 0; case STACK_MP: if (m_type_is_byref (!sig->params [i])) return 0; continue; case STACK_OBJ: if (m_type_is_byref (sig->params [i])) return 0; switch (m_type_is_byref (sig->params [i])) { case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: break; default: return 0; } continue; case STACK_R8: if (m_type_is_byref (sig->params [i])) return 0; if 
(sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8) return 0; continue; case STACK_PTR: case STACK_I4: case STACK_I8: case STACK_VTYPE: break; } /*if (!param_table [args [i].type] [sig->params [i]->type]) return 0;*/ } return 1; } #endif /* * The got_var contains the address of the Global Offset Table when AOT * compiling. */ MonoInst * mono_get_got_var (MonoCompile *cfg) { if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only) return NULL; if (!cfg->got_var) { cfg->got_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); } return cfg->got_var; } static void mono_create_rgctx_var (MonoCompile *cfg) { if (!cfg->rgctx_var) { cfg->rgctx_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); /* force the var to be stack allocated */ if (!cfg->llvm_only) cfg->rgctx_var->flags |= MONO_INST_VOLATILE; } } static MonoInst * mono_get_mrgctx_var (MonoCompile *cfg) { g_assert (cfg->gshared); mono_create_rgctx_var (cfg); return cfg->rgctx_var; } static MonoInst * mono_get_vtable_var (MonoCompile *cfg) { g_assert (cfg->gshared); /* The mrgctx and the vtable are stored in the same var */ mono_create_rgctx_var (cfg); return cfg->rgctx_var; } static MonoType* type_from_stack_type (MonoInst *ins) { switch (ins->type) { case STACK_I4: return mono_get_int32_type (); case STACK_I8: return m_class_get_byval_arg (mono_defaults.int64_class); case STACK_PTR: return mono_get_int_type (); case STACK_R4: return m_class_get_byval_arg (mono_defaults.single_class); case STACK_R8: return m_class_get_byval_arg (mono_defaults.double_class); case STACK_MP: return m_class_get_this_arg (ins->klass); case STACK_OBJ: return mono_get_object_type (); case STACK_VTYPE: return m_class_get_byval_arg (ins->klass); default: g_error ("stack type %d to monotype not handled\n", ins->type); } return NULL; } MonoStackType mini_type_to_stack_type (MonoCompile *cfg, MonoType *t) { t = mini_type_get_underlying_type (t); switch (t->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: return STACK_I4; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: return STACK_PTR; case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: return STACK_OBJ; case MONO_TYPE_I8: case MONO_TYPE_U8: return STACK_I8; case MONO_TYPE_R4: return (MonoStackType)cfg->r4_stack_type; case MONO_TYPE_R8: return STACK_R8; case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: return STACK_VTYPE; case MONO_TYPE_GENERICINST: if (mono_type_generic_inst_is_valuetype (t)) return STACK_VTYPE; else return STACK_OBJ; break; default: g_assert_not_reached (); } return (MonoStackType)-1; } static MonoClass* array_access_to_klass (int opcode) { switch (opcode) { case MONO_CEE_LDELEM_U1: return mono_defaults.byte_class; case MONO_CEE_LDELEM_U2: return mono_defaults.uint16_class; case MONO_CEE_LDELEM_I: case MONO_CEE_STELEM_I: return mono_defaults.int_class; case MONO_CEE_LDELEM_I1: case MONO_CEE_STELEM_I1: return mono_defaults.sbyte_class; case MONO_CEE_LDELEM_I2: case MONO_CEE_STELEM_I2: return mono_defaults.int16_class; case MONO_CEE_LDELEM_I4: case MONO_CEE_STELEM_I4: return mono_defaults.int32_class; case MONO_CEE_LDELEM_U4: return mono_defaults.uint32_class; case MONO_CEE_LDELEM_I8: case MONO_CEE_STELEM_I8: return mono_defaults.int64_class; case MONO_CEE_LDELEM_R4: case MONO_CEE_STELEM_R4: return mono_defaults.single_class; case MONO_CEE_LDELEM_R8: case 
MONO_CEE_STELEM_R8: return mono_defaults.double_class; case MONO_CEE_LDELEM_REF: case MONO_CEE_STELEM_REF: return mono_defaults.object_class; default: g_assert_not_reached (); } return NULL; } /* * We try to share variables when possible */ static MonoInst * mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins) { MonoInst *res; int pos, vnum; MonoType *type; type = type_from_stack_type (ins); /* inlining can result in deeper stacks */ if (cfg->inline_depth || slot >= cfg->header->max_stack) return mono_compile_create_var (cfg, type, OP_LOCAL); pos = ins->type - 1 + slot * STACK_MAX; switch (ins->type) { case STACK_I4: case STACK_I8: case STACK_R8: case STACK_PTR: case STACK_MP: case STACK_OBJ: if ((vnum = cfg->intvars [pos])) return cfg->varinfo [vnum]; res = mono_compile_create_var (cfg, type, OP_LOCAL); cfg->intvars [pos] = res->inst_c0; break; default: res = mono_compile_create_var (cfg, type, OP_LOCAL); } return res; } static void mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key) { /* * Don't use this if a generic_context is set, since that means AOT can't * look up the method using just the image+token. * table == 0 means this is a reference made from a wrapper. */ if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) { MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken)); jump_info_token->image = image; jump_info_token->token = token; g_hash_table_insert (cfg->token_info_hash, key, jump_info_token); } } /* * This function is called to handle items that are left on the evaluation stack * at basic block boundaries. What happens is that we save the values to local variables * and we reload them later when first entering the target basic block (with the * handle_loaded_temps () function). * A single join point will use the same variables (stored in the array bb->out_stack or * bb->in_stack, if the basic block is before or after the join point). * * This function needs to be called _before_ emitting the last instruction of * the bb (i.e. before emitting a branch). * If the stack merge fails at a join point, cfg->unverifiable is set. */ static void handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count) { int i, bindex; MonoBasicBlock *bb = cfg->cbb; MonoBasicBlock *outb; MonoInst *inst, **locals; gboolean found; if (!count) return; if (cfg->verbose_level > 3) printf ("%d item(s) on exit from B%d\n", count, bb->block_num); if (!bb->out_scount) { bb->out_scount = count; //printf ("bblock %d has out:", bb->block_num); found = FALSE; for (i = 0; i < bb->out_count; ++i) { outb = bb->out_bb [i]; /* exception handlers are linked, but they should not be considered for stack args */ if (outb->flags & BB_EXCEPTION_HANDLER) continue; //printf (" %d", outb->block_num); if (outb->in_stack) { found = TRUE; bb->out_stack = outb->in_stack; break; } } //printf ("\n"); if (!found) { bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count); for (i = 0; i < count; ++i) { /* * try to reuse temps already allocated for this purpose, if they occupy the same * stack slot and if they are of the same type. * This won't cause conflicts since if 'local' is used to * store one of the values in the in_stack of a bblock, then * the same variable will be used for the same outgoing stack * slot as well.
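				 * (For example: two predecessors that each leave an int32 in
				 * slot 0 will store through the same interface var, so the join
				 * block reloads slot 0 from a single variable. A sketch with
				 * hypothetical bblocks.)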
* This doesn't work when inlining methods, since the bblocks * in the inlined methods do not inherit their in_stack from * the bblock they are inlined to. See bug #58863 for an * example. */ bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]); } } } for (i = 0; i < bb->out_count; ++i) { outb = bb->out_bb [i]; /* exception handlers are linked, but they should not be considered for stack args */ if (outb->flags & BB_EXCEPTION_HANDLER) continue; if (outb->in_scount) { if (outb->in_scount != bb->out_scount) { cfg->unverifiable = TRUE; return; } continue; /* check they are the same locals */ } outb->in_scount = count; outb->in_stack = bb->out_stack; } locals = bb->out_stack; cfg->cbb = bb; for (i = 0; i < count; ++i) { sp [i] = convert_value (cfg, locals [i]->inst_vtype, sp [i]); EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]); inst->cil_code = sp [i]->cil_code; sp [i] = locals [i]; if (cfg->verbose_level > 3) printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0); } /* * It is possible that the out bblocks already have in_stack assigned, and * the in_stacks differ. In this case, we will store to all the different * in_stacks. */ found = TRUE; bindex = 0; while (found) { /* Find a bblock which has a different in_stack */ found = FALSE; while (bindex < bb->out_count) { outb = bb->out_bb [bindex]; /* exception handlers are linked, but they should not be considered for stack args */ if (outb->flags & BB_EXCEPTION_HANDLER) { bindex++; continue; } if (outb->in_stack != locals) { for (i = 0; i < count; ++i) { sp [i] = convert_value (cfg, outb->in_stack [i]->inst_vtype, sp [i]); EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]); inst->cil_code = sp [i]->cil_code; sp [i] = locals [i]; if (cfg->verbose_level > 3) printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0); } locals = outb->in_stack; found = TRUE; break; } bindex ++; } } } MonoInst* mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data) { MonoInst *ins; if (cfg->compile_aot) { MONO_DISABLE_WARNING (4306) // 'type cast': conversion from 'MonoJumpInfoType' to 'MonoInst *' of greater size EMIT_NEW_AOTCONST (cfg, ins, patch_type, data); MONO_RESTORE_WARNING } else { MonoJumpInfo ji; gpointer target; ERROR_DECL (error); ji.type = patch_type; ji.data.target = data; target = mono_resolve_patch_target_ext (cfg->mem_manager, NULL, NULL, &ji, FALSE, error); mono_error_assert_ok (error); EMIT_NEW_PCONST (cfg, ins, target); } return ins; } static MonoInst* mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key) { int tls_offset = mono_tls_get_tls_offset (key); if (cfg->compile_aot) return NULL; if (tls_offset != -1 && mono_arch_have_fast_tls ()) { MonoInst *ins; MONO_INST_NEW (cfg, ins, OP_TLS_GET); ins->dreg = mono_alloc_preg (cfg); ins->inst_offset = tls_offset; return ins; } return NULL; } static MonoInst* mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key) { MonoInst *fast_tls = NULL; if (!mini_debug_options.use_fallback_tls) fast_tls = mono_create_fast_tls_getter (cfg, key); if (fast_tls) { MONO_ADD_INS (cfg->cbb, fast_tls); return fast_tls; } const MonoJitICallId jit_icall_id = mono_get_tls_key_to_jit_icall_id (key); if (cfg->compile_aot && !cfg->llvm_only) { MonoInst *addr; /* * tls getters are critical pieces of code and we don't want to resolve them * through the standard plt/tramp mechanism since we might expose ourselves * to crashes and infinite recursions. 
* Therefore the NOCALL part of MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, FALSE in is_plt_patch. */ EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, GUINT_TO_POINTER (jit_icall_id)); return mini_emit_calli (cfg, mono_icall_sig_ptr, NULL, addr, NULL, NULL); } else { return mono_emit_jit_icall_id (cfg, jit_icall_id, NULL); } } /* * emit_push_lmf: * * Emit IR to push the current LMF onto the LMF stack. */ static void emit_push_lmf (MonoCompile *cfg) { /* * Emit IR to push the LMF: * lmf_addr = <lmf_addr from tls> * lmf->lmf_addr = lmf_addr * lmf->prev_lmf = *lmf_addr * *lmf_addr = lmf */ MonoInst *ins, *lmf_ins; if (!cfg->lmf_ir) return; int lmf_reg, prev_lmf_reg; /* * Store lmf_addr in a variable, so it can be allocated to a global register. */ if (!cfg->lmf_addr_var) cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); if (!cfg->lmf_var) { MonoInst *lmf_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); lmf_var->flags |= MONO_INST_VOLATILE; lmf_var->flags |= MONO_INST_LMF; cfg->lmf_var = lmf_var; } lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR); g_assert (lmf_ins); lmf_ins->dreg = cfg->lmf_addr_var->dreg; EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL); lmf_reg = ins->dreg; prev_lmf_reg = alloc_preg (cfg); /* Save previous_lmf */ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0); if (cfg->deopt) /* Mark this as an LMFExt */ EMIT_NEW_BIALU_IMM (cfg, ins, OP_POR_IMM, prev_lmf_reg, prev_lmf_reg, 2); EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg); /* Set new lmf */ EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg); } /* * emit_pop_lmf: * * Emit IR to pop the current LMF from the LMF stack. */ static void emit_pop_lmf (MonoCompile *cfg) { int lmf_reg, lmf_addr_reg; MonoInst *ins; if (!cfg->lmf_ir) return; EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL); lmf_reg = ins->dreg; int prev_lmf_reg; /* * Emit IR to pop the LMF: * *(lmf->lmf_addr) = lmf->prev_lmf */ /* This could be called before emit_push_lmf () */ if (!cfg->lmf_addr_var) cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); lmf_addr_reg = cfg->lmf_addr_var->dreg; prev_lmf_reg = alloc_preg (cfg); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf)); if (cfg->deopt) /* Clear out the bit set by push_lmf () to mark this as LMFExt */ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PXOR_IMM, prev_lmf_reg, prev_lmf_reg, 2); EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg); } /* * target_type_is_incompatible: * @cfg: MonoCompile context * * Check that the item @arg on the evaluation stack can be stored * in the target type (can be a local, or field, etc). * The cfg arg can be used to check if we need verification or just * validity checks. * * Returns: non-0 value if arg can't be stored on a target. */ static int target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg) { MonoType *simple_type; MonoClass *klass; if (m_type_is_byref (target)) { /* FIXME: check that the pointed to types match */ if (arg->type == STACK_MP) { /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. 
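		   E.g. a MyEnum& argument can be stored into an int& target once both
		   sides are lowered to the underlying int32 (a sketch, hypothetical types).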
*/ MonoClass *target_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (mono_class_from_mono_type_internal (target)))); MonoClass *source_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass))); /* if the target is native int& or X* or same type */ if (target->type == MONO_TYPE_I || target->type == MONO_TYPE_PTR || target_class_lowered == source_class_lowered) return 0; /* Both are primitive type byrefs and the source points to a larger type that the destination */ if (MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (target_class_lowered)) && MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (source_class_lowered)) && mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered)) return 0; return 1; } if (arg->type == STACK_PTR) return 0; return 1; } simple_type = mini_get_underlying_type (target); switch (simple_type->type) { case MONO_TYPE_VOID: return 1; case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: if (arg->type != STACK_I4 && arg->type != STACK_PTR) return 1; return 0; case MONO_TYPE_PTR: /* STACK_MP is needed when setting pinned locals */ if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP) #if SIZEOF_VOID_P == 8 if (arg->type != STACK_I8) #endif return 1; return 0; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_FNPTR: /* * Some opcodes like ldloca returns 'transient pointers' which can be stored in * in native int. (#688008). */ if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP) return 1; return 0; case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: if (arg->type != STACK_OBJ) return 1; /* FIXME: check type compatibility */ return 0; case MONO_TYPE_I8: case MONO_TYPE_U8: if (arg->type != STACK_I8) #if SIZEOF_VOID_P == 8 if (arg->type != STACK_PTR) #endif return 1; return 0; case MONO_TYPE_R4: if (arg->type != cfg->r4_stack_type) return 1; return 0; case MONO_TYPE_R8: if (arg->type != STACK_R8) return 1; return 0; case MONO_TYPE_VALUETYPE: if (arg->type != STACK_VTYPE) return 1; klass = mono_class_from_mono_type_internal (simple_type); if (klass != arg->klass) return 1; return 0; case MONO_TYPE_TYPEDBYREF: if (arg->type != STACK_VTYPE) return 1; klass = mono_class_from_mono_type_internal (simple_type); if (klass != arg->klass) return 1; return 0; case MONO_TYPE_GENERICINST: if (mono_type_generic_inst_is_valuetype (simple_type)) { MonoClass *target_class; if (arg->type != STACK_VTYPE) return 1; klass = mono_class_from_mono_type_internal (simple_type); target_class = mono_class_from_mono_type_internal (target); /* The second cases is needed when doing partial sharing */ if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass)))) return 1; return 0; } else { if (arg->type != STACK_OBJ) return 1; /* FIXME: check type compatibility */ return 0; } case MONO_TYPE_VAR: case MONO_TYPE_MVAR: g_assert (cfg->gshared); if (mini_type_var_is_vt (simple_type)) { if (arg->type != STACK_VTYPE) return 1; } else { if (arg->type != STACK_OBJ) return 1; } return 0; default: g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type); } return 1; } /* * convert_value: * * Emit some implicit conversions which are not part of the .net spec, 
but are allowed by MS.NET. */ static MonoInst* convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins) { if (!cfg->r4fp) return ins; type = mini_get_underlying_type (type); switch (type->type) { case MONO_TYPE_R4: if (ins->type == STACK_R8) { int dreg = alloc_freg (cfg); MonoInst *conv; EMIT_NEW_UNALU (cfg, conv, OP_FCONV_TO_R4, dreg, ins->dreg); conv->type = STACK_R4; return conv; } break; case MONO_TYPE_R8: if (ins->type == STACK_R4) { int dreg = alloc_freg (cfg); MonoInst *conv; EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, ins->dreg); conv->type = STACK_R8; return conv; } break; default: break; } return ins; } /* * Prepare arguments for passing to a function call. * Return a non-zero value if the arguments can't be passed to the given * signature. * The type checks are not yet complete and some conversions may need * casts on 32 or 64 bit architectures. * * FIXME: implement this using target_type_is_incompatible () */ static gboolean check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args) { MonoType *simple_type; int i; if (sig->hasthis) { if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR) return TRUE; args++; } for (i = 0; i < sig->param_count; ++i) { if (m_type_is_byref (sig->params [i])) { if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR) return TRUE; continue; } simple_type = mini_get_underlying_type (sig->params [i]); handle_enum: switch (simple_type->type) { case MONO_TYPE_VOID: return TRUE; case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR) return TRUE; continue; case MONO_TYPE_I: case MONO_TYPE_U: if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ) return TRUE; continue; case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: if (args [i]->type != STACK_I4 && !(SIZEOF_VOID_P == 8 && args [i]->type == STACK_I8) && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ) return TRUE; continue; case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: if (args [i]->type != STACK_OBJ) return TRUE; continue; case MONO_TYPE_I8: case MONO_TYPE_U8: if (args [i]->type != STACK_I8 && !(SIZEOF_VOID_P == 8 && (args [i]->type == STACK_I4 || args [i]->type == STACK_PTR))) return TRUE; continue; case MONO_TYPE_R4: if (args [i]->type != cfg->r4_stack_type) return TRUE; continue; case MONO_TYPE_R8: if (args [i]->type != STACK_R8) return TRUE; continue; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (simple_type->data.klass)) { simple_type = mono_class_enum_basetype_internal (simple_type->data.klass); goto handle_enum; } if (args [i]->type != STACK_VTYPE) return TRUE; continue; case MONO_TYPE_TYPEDBYREF: if (args [i]->type != STACK_VTYPE) return TRUE; continue; case MONO_TYPE_GENERICINST: simple_type = m_class_get_byval_arg (simple_type->data.generic_class->container_class); goto handle_enum; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: /* gsharedvt */ if (args [i]->type != STACK_VTYPE) return TRUE; continue; default: g_error ("unknown type 0x%02x in check_call_signature", simple_type->type); } } return FALSE; } MonoJumpInfo * mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target) { MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo)); ji->ip.i = ip; ji->type = type; 
ji->data.target = target; return ji; } int mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass) { if (cfg->gshared) return mono_class_check_context_used (klass); else return 0; } int mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method) { if (cfg->gshared) return mono_method_check_context_used (method); else return 0; } /* * check_method_sharing: * * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD. */ static void check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx) { gboolean pass_vtable = FALSE; gboolean pass_mrgctx = FALSE; if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cmethod->klass)) && (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) { gboolean sharable = FALSE; if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) sharable = TRUE; /* * Pass vtable iff target method might * be shared, which means that sharing * is enabled for its class and its * context is sharable (and it's not a * generic method). */ if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst)) pass_vtable = TRUE; } if (mini_method_needs_mrgctx (cmethod)) { if (mini_method_is_default_method (cmethod)) pass_vtable = FALSE; else g_assert (!pass_vtable); if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) { pass_mrgctx = TRUE; } else { if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature_internal (cmethod))) pass_mrgctx = TRUE; } } if (out_pass_vtable) *out_pass_vtable = pass_vtable; if (out_pass_mrgctx) *out_pass_mrgctx = pass_mrgctx; } static gboolean direct_icalls_enabled (MonoCompile *cfg, MonoMethod *method) { if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls) return FALSE; if (method && cfg->compile_aot && mono_aot_direct_icalls_enabled_for_method (cfg, method)) return TRUE; /* LLVM on amd64 can't handle calls to non-32 bit addresses */ #ifdef TARGET_AMD64 if (cfg->compile_llvm && !cfg->llvm_only) return FALSE; #endif return FALSE; } MonoInst* mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args) { /* * Call the jit icall without a wrapper if possible. * The wrapper is needed to be able to do stack walks for asynchronously suspended * threads when debugging. */ if (direct_icalls_enabled (cfg, NULL)) { int costs; if (!info->wrapper_method) { info->wrapper_method = mono_marshal_get_icall_wrapper (info, TRUE); mono_memory_barrier (); } /* * Inline the wrapper method, which is basically a call to the C icall, and * an exception check. */ costs = inline_method (cfg, info->wrapper_method, NULL, args, NULL, il_offset, TRUE, NULL); g_assert (costs > 0); g_assert (!MONO_TYPE_IS_VOID (info->sig->ret)); return args [0]; } return mono_emit_jit_icall_id (cfg, mono_jit_icall_info_id (info), args); } static MonoInst* mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig) { if (!MONO_TYPE_IS_VOID (fsig->ret)) { if ((fsig->pinvoke || LLVM_ENABLED) && !m_type_is_byref (fsig->ret)) { int widen_op = -1; /* * Native code might return non register sized integers * without initializing the upper bits. 
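	 * E.g. (a sketch) a pinvoke returning an unsigned 8-bit value may leave
	 * garbage above bit 7, so an OP_ICONV_TO_U1 is emitted to zero-extend it.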
*/ switch (mono_type_to_load_membase (cfg, fsig->ret)) { case OP_LOADI1_MEMBASE: widen_op = OP_ICONV_TO_I1; break; case OP_LOADU1_MEMBASE: widen_op = OP_ICONV_TO_U1; break; case OP_LOADI2_MEMBASE: widen_op = OP_ICONV_TO_I2; break; case OP_LOADU2_MEMBASE: widen_op = OP_ICONV_TO_U2; break; default: break; } if (widen_op != -1) { int dreg = alloc_preg (cfg); MonoInst *widen; EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg); widen->type = ins->type; ins = widen; } } } return ins; } static MonoInst* emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type); static void emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee) { MonoInst *args [2]; args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD); args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD); mono_emit_jit_icall (cfg, mono_throw_method_access, args); } static void emit_bad_image_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee) { mono_emit_jit_icall (cfg, mono_throw_bad_image, NULL); } static void emit_not_supported_failure (MonoCompile *cfg) { mono_emit_jit_icall (cfg, mono_throw_not_supported, NULL); } static void emit_invalid_program_with_msg (MonoCompile *cfg, MonoError *error_msg, MonoMethod *caller, MonoMethod *callee) { g_assert (!is_ok (error_msg)); char *str = mono_mem_manager_strdup (cfg->mem_manager, mono_error_get_message (error_msg)); MonoInst *iargs[1]; if (cfg->compile_aot) EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str); else EMIT_NEW_PCONST (cfg, iargs [0], str); mono_emit_jit_icall (cfg, mono_throw_invalid_program, iargs); } // FIXME Consolidate the multiple functions named get_method_nofail. static MonoMethod* get_method_nofail (MonoClass *klass, const char *method_name, int num_params, int flags) { MonoMethod *method; ERROR_DECL (error); method = mono_class_get_method_from_name_checked (klass, method_name, num_params, flags, error); mono_error_assert_ok (error); g_assertf (method, "Could not lookup method %s in %s", method_name, m_class_get_name (klass)); return method; } MonoMethod* mini_get_memcpy_method (void) { static MonoMethod *memcpy_method = NULL; if (!memcpy_method) { memcpy_method = get_method_nofail (mono_defaults.string_class, "memcpy", 3, 0); if (!memcpy_method) g_error ("Old corlib found. Install a new one"); } return memcpy_method; } MonoInst* mini_emit_storing_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value) { MonoInst *store; /* * Add a release memory barrier so the object contents are flushed * to memory before storing the reference into another object. 
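	 *
	 * The emitted sequence is roughly (a sketch of the code below):
	 *   memory-barrier (REL)          -- skipped under weak_memory_model
	 *   *ptr = value                  -- OP_STORE_MEMBASE_REG
	 *   write-barrier (ptr, value)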
*/ if (!mini_debug_options.weak_memory_model) mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); EMIT_NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, ptr->dreg, 0, value->dreg); mini_emit_write_barrier (cfg, ptr, value); return store; } void mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value) { int card_table_shift_bits; target_mgreg_t card_table_mask; guint8 *card_table; MonoInst *dummy_use; int nursery_shift_bits; size_t nursery_size; if (!cfg->gen_write_barriers) return; //method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]) card_table = mono_gc_get_target_card_table (&card_table_shift_bits, &card_table_mask); mono_gc_get_nursery (&nursery_shift_bits, &nursery_size); if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) { MonoInst *wbarrier; MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER); wbarrier->sreg1 = ptr->dreg; wbarrier->sreg2 = value->dreg; MONO_ADD_INS (cfg->cbb, wbarrier); } else if (card_table) { int offset_reg = alloc_preg (cfg); int card_reg; MonoInst *ins; /* * We emit a fast light weight write barrier. This always marks cards as in the concurrent * collector case, so, for the serial collector, it might slightly slow down nursery * collections. We also expect that the host system and the target system have the same card * table configuration, which is the case if they have the same pointer size. */ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits); if (card_table_mask) MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask); /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support * IMM's larger than 32bits. */ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL); card_reg = ins->dreg; MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg); MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1); } else { MonoMethod *write_barrier = mono_gc_get_write_barrier (); mono_emit_method_call (cfg, write_barrier, &ptr, NULL); } EMIT_NEW_DUMMY_USE (cfg, dummy_use, value); } MonoMethod* mini_get_memset_method (void) { static MonoMethod *memset_method = NULL; if (!memset_method) { memset_method = get_method_nofail (mono_defaults.string_class, "memset", 3, 0); if (!memset_method) g_error ("Old corlib found. 
Install a new one"); } return memset_method; } void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass) { MonoInst *iargs [3]; int n; guint32 align; MonoMethod *memset_method; MonoInst *size_ins = NULL; MonoInst *bzero_ins = NULL; static MonoMethod *bzero_method; /* FIXME: Optimize this for the case when dest is an LDADDR */ mono_class_init_internal (klass); if (mini_is_gsharedvt_klass (klass)) { size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE); bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO); if (!bzero_method) bzero_method = get_method_nofail (mono_defaults.string_class, "bzero_aligned_1", 2, 0); g_assert (bzero_method); iargs [0] = dest; iargs [1] = size_ins; mini_emit_calli (cfg, mono_method_signature_internal (bzero_method), iargs, bzero_ins, NULL, NULL); return; } klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (klass))); n = mono_class_value_size (klass, &align); if (n <= TARGET_SIZEOF_VOID_P * 8) { mini_emit_memset (cfg, dest->dreg, 0, n, 0, align); } else { memset_method = mini_get_memset_method (); iargs [0] = dest; EMIT_NEW_ICONST (cfg, iargs [1], 0); EMIT_NEW_ICONST (cfg, iargs [2], n); mono_emit_method_call (cfg, memset_method, iargs, NULL); } } static gboolean context_used_is_mrgctx (MonoCompile *cfg, int context_used) { /* gshared dim methods use an mrgctx */ if (mini_method_is_default_method (cfg->method)) return context_used != 0; return context_used & MONO_GENERIC_CONTEXT_USED_METHOD; } /* * emit_get_rgctx: * * Emit IR to return either the vtable or the mrgctx. */ static MonoInst* emit_get_rgctx (MonoCompile *cfg, int context_used) { MonoMethod *method = cfg->method; g_assert (cfg->gshared); /* Data whose context contains method type vars is stored in the mrgctx */ if (context_used_is_mrgctx (cfg, context_used)) { MonoInst *mrgctx_loc, *mrgctx_var; g_assert (cfg->rgctx_access == MONO_RGCTX_ACCESS_MRGCTX); if (!mini_method_is_default_method (method)) g_assert (method->is_inflated && mono_method_get_context (method)->method_inst); if (cfg->llvm_only) { mrgctx_var = mono_get_mrgctx_var (cfg); } else { /* Volatile */ mrgctx_loc = mono_get_mrgctx_var (cfg); g_assert (mrgctx_loc->flags & MONO_INST_VOLATILE); EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0); } return mrgctx_var; } /* * The rest of the entries are stored in vtable->runtime_generic_context so * have to return a vtable. 
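	 * Three cases follow (summarizing the code below): given an mrgctx, load
	 * mrgctx->class_vtable; given a vtable, return it directly; otherwise
	 * load the vtable from the 'this' argument.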
*/ if (cfg->rgctx_access == MONO_RGCTX_ACCESS_MRGCTX) { MonoInst *mrgctx_loc, *mrgctx_var, *vtable_var; int vtable_reg; /* We are passed an mrgctx, return mrgctx->class_vtable */ if (cfg->llvm_only) { mrgctx_var = mono_get_mrgctx_var (cfg); } else { mrgctx_loc = mono_get_mrgctx_var (cfg); g_assert (mrgctx_loc->flags & MONO_INST_VOLATILE); EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0); } vtable_reg = alloc_preg (cfg); EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable)); vtable_var->type = STACK_PTR; return vtable_var; } else if (cfg->rgctx_access == MONO_RGCTX_ACCESS_VTABLE) { MonoInst *vtable_loc, *vtable_var; /* We are passed a vtable, return it */ if (cfg->llvm_only) { vtable_var = mono_get_vtable_var (cfg); } else { vtable_loc = mono_get_vtable_var (cfg); g_assert (vtable_loc->flags & MONO_INST_VOLATILE); EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0); } vtable_var->type = STACK_PTR; return vtable_var; } else { MonoInst *ins, *this_ins; int vtable_reg; /* We are passed a this pointer, return this->vtable */ EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, mono_get_object_type ()); vtable_reg = alloc_preg (cfg); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); return ins; } } static MonoJumpInfoRgctxEntry * mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type) { MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry)); if (in_mrgctx) res->d.method = method; else res->d.klass = method->klass; res->in_mrgctx = in_mrgctx; res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo)); res->data->type = patch_type; res->data->data.target = patch_data; res->info_type = info_type; return res; } static MonoInst* emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type); static MonoInst* emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry) { MonoInst *call; MonoInst *slot_ins; EMIT_NEW_AOTCONST (cfg, slot_ins, MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry); // Can't add basic blocks during interp entry mode if (cfg->disable_inline_rgctx_fetch || cfg->interp_entry_only) { MonoInst *args [2] = { rgctx, slot_ins }; if (entry->in_mrgctx) call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args); else call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args); return call; } MonoBasicBlock *slowpath_bb, *end_bb; MonoInst *ins, *res; int rgctx_reg, res_reg; /* * rgctx = vtable->runtime_generic_context; * if (rgctx) { * val = rgctx [slot + 1]; * if (val) * return val; * } * <slowpath> */ NEW_BBLOCK (cfg, end_bb); NEW_BBLOCK (cfg, slowpath_bb); if (entry->in_mrgctx) { rgctx_reg = rgctx->dreg; } else { rgctx_reg = alloc_preg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context)); // FIXME: Avoid this check by allocating the table when the vtable is created etc. 
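	/* a NULL runtime_generic_context means the per-vtable table has not been allocated yet -> take the slow path */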
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb); } int table_size = mono_class_rgctx_get_array_size (0, entry->in_mrgctx); if (entry->in_mrgctx) table_size -= MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / TARGET_SIZEOF_VOID_P; MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_ins->dreg, table_size - 1); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBGE, slowpath_bb); int shifted_slot_reg = alloc_ireg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISHL_IMM, shifted_slot_reg, slot_ins->dreg, TARGET_SIZEOF_VOID_P == 8 ? 3 : 2); int addr_reg = alloc_preg (cfg); EMIT_NEW_UNALU (cfg, ins, OP_MOVE, addr_reg, rgctx_reg); EMIT_NEW_BIALU (cfg, ins, OP_PADD, addr_reg, addr_reg, shifted_slot_reg); int val_reg = alloc_preg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, addr_reg, TARGET_SIZEOF_VOID_P + (entry->in_mrgctx ? MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT : 0)); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb); res_reg = alloc_preg (cfg); EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, val_reg); res = ins; MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); MONO_START_BB (cfg, slowpath_bb); slowpath_bb->out_of_line = TRUE; MonoInst *args[2] = { rgctx, slot_ins }; if (entry->in_mrgctx) call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args); else call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args); EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, call->dreg); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); MONO_START_BB (cfg, end_bb); return res; } /* * emit_rgctx_fetch: * * Emit IR to load the value of the rgctx entry ENTRY from the rgctx. */ static MonoInst* emit_rgctx_fetch (MonoCompile *cfg, int context_used, MonoJumpInfoRgctxEntry *entry) { MonoInst *rgctx = emit_get_rgctx (cfg, context_used); if (cfg->llvm_only) return emit_rgctx_fetch_inline (cfg, rgctx, entry); else return mini_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, mono_icall_sig_ptr_ptr, &rgctx); } /* * mini_emit_get_rgctx_klass: * * Emit IR to load the property RGCTX_TYPE of KLASS. If context_used is 0, emit * normal constants, else emit a load from the rgctx. 
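 * (Illustrative call site, added: MonoInst *vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VTABLE); this becomes EMIT_NEW_VTABLECONST when context_used is 0 and an rgctx fetch otherwise, as emit_class_init below shows.)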
*/ MonoInst* mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type) { if (!context_used) { MonoInst *ins; switch (rgctx_type) { case MONO_RGCTX_INFO_KLASS: EMIT_NEW_CLASSCONST (cfg, ins, klass); return ins; case MONO_RGCTX_INFO_VTABLE: { MonoVTable *vtable = mono_class_vtable_checked (klass, cfg->error); CHECK_CFG_ERROR; EMIT_NEW_VTABLECONST (cfg, ins, vtable); return ins; } default: g_assert_not_reached (); } }
// It's cheaper to load these from the gsharedvt info struct
if (cfg->llvm_only && cfg->gsharedvt) return mini_emit_get_gsharedvt_info_klass (cfg, klass, rgctx_type); MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_CLASS, klass, rgctx_type); return emit_rgctx_fetch (cfg, context_used, entry); mono_error_exit: return NULL; } static MonoInst* emit_get_rgctx_sig (MonoCompile *cfg, int context_used, MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type) { MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type); return emit_rgctx_fetch (cfg, context_used, entry); } static MonoInst* emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used, MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type) { MonoJumpInfoGSharedVtCall *call_info; MonoJumpInfoRgctxEntry *entry; call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall)); call_info->sig = sig; call_info->method = cmethod; entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type); return emit_rgctx_fetch (cfg, context_used, entry); } /* * emit_get_rgctx_virt_method: * * Return data for method VIRT_METHOD for a receiver of type KLASS. */ static MonoInst* emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used, MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type) { MonoJumpInfoVirtMethod *info; MonoJumpInfoRgctxEntry *entry; if (context_used == -1) context_used = mono_class_check_context_used (klass) | mono_method_check_context_used (virt_method); info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod)); info->klass = klass; info->method = virt_method; entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type); return emit_rgctx_fetch (cfg, context_used, entry); } static MonoInst* emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoGSharedVtMethodInfo *info) { MonoJumpInfoRgctxEntry *entry; entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO); return emit_rgctx_fetch (cfg, context_used, entry); } /* * emit_get_rgctx_method: * * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit * normal constants, else emit a load from the rgctx.
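 * (Added note: MONO_RGCTX_INFO_METHOD yields the MonoMethod* itself, while the FTNDESC/LLVMONLY_INTERP_ENTRY variants yield callable entry points; callers pick the info type that matches how they intend to invoke the result.)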
*/ static MonoInst* emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type) { if (context_used == -1) context_used = mono_method_check_context_used (cmethod); if (!context_used) { MonoInst *ins; switch (rgctx_type) { case MONO_RGCTX_INFO_METHOD: EMIT_NEW_METHODCONST (cfg, ins, cmethod); return ins; case MONO_RGCTX_INFO_METHOD_RGCTX: EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod); return ins; case MONO_RGCTX_INFO_METHOD_FTNDESC: EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHOD_FTNDESC, cmethod); return ins; case MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY: EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_LLVMONLY_INTERP_ENTRY, cmethod); return ins; default: g_assert_not_reached (); } } else {
// It's cheaper to load these from the gsharedvt info struct
if (cfg->llvm_only && cfg->gsharedvt) return emit_get_gsharedvt_info (cfg, cmethod, rgctx_type); MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type); return emit_rgctx_fetch (cfg, context_used, entry); } } static MonoInst* emit_get_rgctx_field (MonoCompile *cfg, int context_used, MonoClassField *field, MonoRgctxInfoType rgctx_type) {
// It's cheaper to load these from the gsharedvt info struct
if (cfg->llvm_only && cfg->gsharedvt) return emit_get_gsharedvt_info (cfg, field, rgctx_type); MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_FIELD, field, rgctx_type); return emit_rgctx_fetch (cfg, context_used, entry); } MonoInst* mini_emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type) { return emit_get_rgctx_method (cfg, context_used, cmethod, rgctx_type); } static int get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type) { MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info; MonoRuntimeGenericContextInfoTemplate *template_; int i, idx; g_assert (info); for (i = 0; i < info->num_entries; ++i) { MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i]; if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET) return i; } if (info->num_entries == info->count_entries) { MonoRuntimeGenericContextInfoTemplate *new_entries; int new_count_entries = info->count_entries ? info->count_entries * 2 : 16; new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries); memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries); info->entries = new_entries; info->count_entries = new_count_entries; } idx = info->num_entries; template_ = &info->entries [idx]; template_->info_type = rgctx_type; template_->data = data; info->num_entries ++; return idx; } /* * emit_get_gsharedvt_info: * * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
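 * (Added sketch: the emitted load is effectively result = info->entries [idx], where info is the MonoGSharedVtMethodRuntimeInfo held in cfg->gsharedvt_info_var and idx comes from get_gsharedvt_info_slot ().)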
*/ static MonoInst* emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type) { MonoInst *ins; int idx, dreg; idx = get_gsharedvt_info_slot (cfg, data, rgctx_type); /* Load info->entries [idx] */ dreg = alloc_preg (cfg); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P)); return ins; } MonoInst* mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type) { return emit_get_gsharedvt_info (cfg, m_class_get_byval_arg (klass), rgctx_type); } /* * On return the caller must check @klass for load errors. */ static void emit_class_init (MonoCompile *cfg, MonoClass *klass) { MonoInst *vtable_arg; int context_used; context_used = mini_class_check_context_used (cfg, klass); if (context_used) { vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VTABLE); } else { MonoVTable *vtable = mono_class_vtable_checked (klass, cfg->error); if (!is_ok (cfg->error)) { mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); return; } EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable); } if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) { MonoInst *ins; /* * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode, * so this doesn't have to clobber any regs and it doesn't break basic blocks. */ MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT); ins->sreg1 = vtable_arg->dreg; MONO_ADD_INS (cfg->cbb, ins); } else { int inited_reg; MonoBasicBlock *inited_bb; inited_reg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized)); NEW_BBLOCK (cfg, inited_bb); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb); cfg->cbb->out_of_line = TRUE; mono_emit_jit_icall (cfg, mono_generic_class_init, &vtable_arg); MONO_START_BB (cfg, inited_bb); } } static void emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack) { MonoInst *ins; if (cfg->gen_seq_points && cfg->method == method) { NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc); if (nonempty_stack) ins->flags |= MONO_INST_NONEMPTY_STACK; MONO_ADD_INS (cfg->cbb, ins); cfg->last_seq_point = ins; } } void mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check) { if (mini_debug_options.better_cast_details) { int vtable_reg = alloc_preg (cfg); int klass_reg = alloc_preg (cfg); MonoBasicBlock *is_null_bb = NULL; MonoInst *tls_get; if (null_check) { NEW_BBLOCK (cfg, is_null_bb); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb); } tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS); if (!tls_get) { fprintf (stderr, "error: --debug=casts not supported on this platform.\n."); exit (1); } MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable)); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass)); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg); MonoInst *class_ins = mini_emit_get_rgctx_klass (cfg, mini_class_check_context_used (cfg, klass), klass, MONO_RGCTX_INFO_KLASS); MONO_EMIT_NEW_STORE_MEMBASE (cfg, 
OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), class_ins->dreg); if (null_check) MONO_START_BB (cfg, is_null_bb); } } void mini_reset_cast_details (MonoCompile *cfg) { /* Reset the variables holding the cast details */ if (mini_debug_options.better_cast_details) { MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS); /* It is enough to reset the from field */ MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0); } } /* * On return the caller must check @array_class for load errors */ static void mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class) { int vtable_reg = alloc_preg (cfg); int context_used; context_used = mini_class_check_context_used (cfg, array_class); mini_save_cast_details (cfg, array_class, obj->dreg, FALSE); MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable)); if (context_used) { MonoInst *vtable_ins; vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE); MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg); } else { if (cfg->compile_aot) { int vt_reg; MonoVTable *vtable; if (!(vtable = mono_class_vtable_checked (array_class, cfg->error))) { mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); return; } vt_reg = alloc_preg (cfg); MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable); MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg); } else { MonoVTable *vtable; if (!(vtable = mono_class_vtable_checked (array_class, cfg->error))) { mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); return; } MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, (gssize)vtable); } } MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException"); mini_reset_cast_details (cfg); } /** * Handles unbox of a Nullable<T>. If context_used is non zero, then shared * generic code is generated. */ static MonoInst* handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used) { MonoMethod* method; if (m_class_is_enumtype (mono_class_get_nullable_param_internal (klass))) method = get_method_nofail (klass, "UnboxExact", 1, 0); else method = get_method_nofail (klass, "Unbox", 1, 0); g_assert (method); if (context_used) { MonoInst *rgctx, *addr; /* FIXME: What if the class is shared? We might not have to get the address of the method from the RGCTX. 
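 * (Added note: until then the shared case always calls indirectly: through an FTNDESC under llvm-only, or through MONO_RGCTX_INFO_GENERIC_METHOD_CODE plus an explicit rgctx argument otherwise, as the branches below show.)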
*/ if (cfg->llvm_only) { addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_FTNDESC); cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature_internal (method)); return mini_emit_llvmonly_calli (cfg, mono_method_signature_internal (method), &val, addr); } else { addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_GENERIC_METHOD_CODE); rgctx = emit_get_rgctx (cfg, context_used); return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx); } } else { gboolean pass_vtable, pass_mrgctx; MonoInst *rgctx_arg = NULL; check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx); g_assert (!pass_mrgctx); if (pass_vtable) { MonoVTable *vtable = mono_class_vtable_checked (method->klass, cfg->error); mono_error_assert_ok (cfg->error); EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable); } return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg); } } MonoInst* mini_handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst *val, int context_used) { MonoInst *add; int obj_reg; int vtable_reg = alloc_dreg (cfg ,STACK_PTR); int klass_reg = alloc_dreg (cfg ,STACK_PTR); int eclass_reg = alloc_dreg (cfg ,STACK_PTR); int rank_reg = alloc_dreg (cfg ,STACK_I4); obj_reg = val->dreg; MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable)); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank)); /* FIXME: generics */ g_assert (m_class_get_rank (klass) == 0); // Check rank == 0 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0); MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException"); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass)); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, m_class_offsetof_element_class ()); if (context_used) { MonoInst *element_class; /* This assertion is from the unboxcast insn */ g_assert (m_class_get_rank (klass) == 0); element_class = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_ELEMENT_KLASS); MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg); MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException"); } else { mini_save_cast_details (cfg, m_class_get_element_class (klass), obj_reg, FALSE); mini_emit_class_check (cfg, eclass_reg, m_class_get_element_class (klass)); mini_reset_cast_details (cfg); } NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, MONO_ABI_SIZEOF (MonoObject)); MONO_ADD_INS (cfg->cbb, add); add->type = STACK_MP; add->klass = klass; return add; } static MonoInst* handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj) { MonoInst *addr, *klass_inst, *is_ref, *args[16]; MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb; MonoInst *ins; int dreg, addr_reg; klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS); /* obj */ args [0] = obj; /* klass */ args [1] = klass_inst; /* CASTCLASS */ obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args); NEW_BBLOCK (cfg, is_ref_bb); NEW_BBLOCK (cfg, is_nullable_bb); NEW_BBLOCK (cfg, end_bb); is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, 
is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb); /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */ addr_reg = alloc_dreg (cfg, STACK_MP); /* Non-ref case */ /* UNBOX */ NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, MONO_ABI_SIZEOF (MonoObject)); MONO_ADD_INS (cfg->cbb, addr); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); /* Ref case */ MONO_START_BB (cfg, is_ref_bb); /* Save the ref to a temporary */ dreg = alloc_ireg (cfg); EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, m_class_get_byval_arg (klass)); addr->dreg = addr_reg; MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); /* Nullable case */ MONO_START_BB (cfg, is_nullable_bb); { MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX); MonoInst *unbox_call; MonoMethodSignature *unbox_sig; unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *))); unbox_sig->ret = m_class_get_byval_arg (klass); unbox_sig->param_count = 1; unbox_sig->params [0] = mono_get_object_type (); if (cfg->llvm_only) unbox_call = mini_emit_llvmonly_calli (cfg, unbox_sig, &obj, addr); else unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL); EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, m_class_get_byval_arg (klass)); addr->dreg = addr_reg; } MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); /* End */ MONO_START_BB (cfg, end_bb); /* LDOBJ */ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr_reg, 0); return ins; } /* * Returns NULL and set the cfg exception on error. */ static MonoInst* handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used) { MonoInst *iargs [2]; MonoJitICallId alloc_ftn; if (mono_class_get_flags (klass) & TYPE_ATTRIBUTE_ABSTRACT) { char* full_name = mono_type_get_full_name (klass); mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); mono_error_set_member_access (cfg->error, "Cannot create an abstract class: %s", full_name); g_free (full_name); return NULL; } if (context_used) { gboolean known_instance_size = !mini_is_gsharedvt_klass (klass); MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size); iargs [0] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VTABLE); alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new_specific; if (managed_alloc) { if (known_instance_size) { int size = mono_class_instance_size (klass); if (size < MONO_ABI_SIZEOF (MonoObject)) g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass)); EMIT_NEW_ICONST (cfg, iargs [1], size); } return mono_emit_method_call (cfg, managed_alloc, iargs, NULL); } return mono_emit_jit_icall_id (cfg, alloc_ftn, iargs); } if (cfg->compile_aot && cfg->cbb->out_of_line && m_class_get_type_token (klass) && m_class_get_image (klass) == mono_defaults.corlib && !mono_class_is_ginst (klass)) { /* This happens often in argument checking code, eg. throw new FooException... 
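 * (Added note: the out-of-line, corlib and non-generic-instance conditions confine this path to such throw blocks, where a token-based helper call is smaller than a per-class vtable relocation.)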
*/ /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */ EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (m_class_get_type_token (klass))); alloc_ftn = MONO_JIT_ICALL_mono_helper_newobj_mscorlib; } else { MonoVTable *vtable = mono_class_vtable_checked (klass, cfg->error); if (!is_ok (cfg->error)) { mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); return NULL; } MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE); if (managed_alloc) { int size = mono_class_instance_size (klass); if (size < MONO_ABI_SIZEOF (MonoObject)) g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass)); EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable); EMIT_NEW_ICONST (cfg, iargs [1], size); return mono_emit_method_call (cfg, managed_alloc, iargs, NULL); } alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new_specific; EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable); } return mono_emit_jit_icall_id (cfg, alloc_ftn, iargs); } /* * Returns NULL and set the cfg exception on error. */ MonoInst* mini_emit_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used) { MonoInst *alloc, *ins; if (G_UNLIKELY (m_class_is_byreflike (klass))) { mono_error_set_bad_image (cfg->error, m_class_get_image (cfg->method->klass), "Cannot box IsByRefLike type '%s.%s'", m_class_get_name_space (klass), m_class_get_name (klass)); mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); return NULL; } if (mono_class_is_nullable (klass)) { MonoMethod* method = get_method_nofail (klass, "Box", 1, 0); if (context_used) { if (cfg->llvm_only) { MonoMethodSignature *sig = mono_method_signature_internal (method); MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_FTNDESC); cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, sig); return mini_emit_llvmonly_calli (cfg, sig, &val, addr); } else { /* FIXME: What if the class is shared? We might not have to get the method address from the RGCTX. 
*/ MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_GENERIC_METHOD_CODE); MonoInst *rgctx = emit_get_rgctx (cfg, context_used); return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx); } } else { gboolean pass_vtable, pass_mrgctx; MonoInst *rgctx_arg = NULL; check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx); g_assert (!pass_mrgctx); if (pass_vtable) { MonoVTable *vtable = mono_class_vtable_checked (method->klass, cfg->error); mono_error_assert_ok (cfg->error); EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable); } return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg); } } if (mini_is_gsharedvt_klass (klass)) { MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb; MonoInst *res, *is_ref, *src_var, *addr; int dreg; dreg = alloc_ireg (cfg); NEW_BBLOCK (cfg, is_ref_bb); NEW_BBLOCK (cfg, is_nullable_bb); NEW_BBLOCK (cfg, end_bb); is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb); /* Non-ref case */ alloc = handle_alloc (cfg, klass, TRUE, context_used); if (!alloc) return NULL; EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg); ins->opcode = OP_STOREV_MEMBASE; EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg); res->type = STACK_OBJ; res->klass = klass; MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); /* Ref case */ MONO_START_BB (cfg, is_ref_bb); /* val is a vtype, so has to load the value manually */ src_var = get_vreg_to_inst (cfg, val->dreg); if (!src_var) src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, val->dreg); EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); /* Nullable case */ MONO_START_BB (cfg, is_nullable_bb); { MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_BOX); MonoInst *box_call; MonoMethodSignature *box_sig; /* * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot * construct that method at JIT time, so have to do things by hand. 
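 * (Added sketch: the hand-built box_sig below is effectively "object Box (T)", i.e. ret = object, param_count = 1, params [0] = m_class_get_byval_arg (klass).)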
*/ box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *))); box_sig->ret = mono_get_object_type (); box_sig->param_count = 1; box_sig->params [0] = m_class_get_byval_arg (klass); if (cfg->llvm_only) box_call = mini_emit_llvmonly_calli (cfg, box_sig, &val, addr); else box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL); EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg); res->type = STACK_OBJ; res->klass = klass; } MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); MONO_START_BB (cfg, end_bb); return res; } alloc = handle_alloc (cfg, klass, TRUE, context_used); if (!alloc) return NULL; EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg); return alloc; } static gboolean method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod) { if (cmethod->klass == mono_defaults.systemtype_class) { if (!strcmp (cmethod->name, "GetType")) return TRUE; } /* * In corelib code, methods which need to do a stack walk declare a StackCrawlMark local and pass it as an * argument until it reaches an icall. It's hard to detect which methods do that, especially with * StackCrawlMark.LookForMyCallersCaller, so for now, just hardcode the classes which contain the public * methods whose caller is needed. */ if (mono_is_corlib_image (m_class_get_image (cmethod->klass))) { const char *cname = m_class_get_name (cmethod->klass); if (!strcmp (cname, "Assembly") || !strcmp (cname, "AssemblyLoadContext") || (!strcmp (cname, "Activator"))) { if (!strcmp (cmethod->name, "op_Equality")) return FALSE; return TRUE; } } return FALSE; } G_GNUC_UNUSED MonoInst* mini_handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, int enum_val_reg, MonoInst *enum_flag) { MonoType *enum_type = mono_type_get_underlying_type (m_class_get_byval_arg (klass)); guint32 load_opc = mono_type_to_load_membase (cfg, enum_type); gboolean is_i4; switch (enum_type->type) { case MONO_TYPE_I8: case MONO_TYPE_U8:
#if SIZEOF_REGISTER == 8
case MONO_TYPE_I: case MONO_TYPE_U:
#endif
is_i4 = FALSE; break; default: is_i4 = TRUE; break; } { MonoInst *load = NULL, *and_, *cmp, *ceq; int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg); int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg); int dest_reg = alloc_ireg (cfg); if (enum_this) { EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0); } else { g_assert (enum_val_reg != -1); enum_reg = enum_val_reg; } EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg); EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg); EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1); ceq->type = STACK_I4; if (!is_i4) { load = load ?
mono_decompose_opcode (cfg, load) : NULL; and_ = mono_decompose_opcode (cfg, and_); cmp = mono_decompose_opcode (cfg, cmp); ceq = mono_decompose_opcode (cfg, ceq); } return ceq; } } static void emit_set_deopt_il_offset (MonoCompile *cfg, int offset) { MonoInst *ins; if (!(cfg->deopt && cfg->method == cfg->current_method)) return; EMIT_NEW_VARLOADA (cfg, ins, cfg->il_state_var, NULL); MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, ins->dreg, MONO_STRUCT_OFFSET (MonoMethodILState, il_offset), offset); } static MonoInst* emit_get_rgctx_dele_tramp (MonoCompile *cfg, int context_used, MonoClass *klass, MonoMethod *virt_method, gboolean _virtual, MonoRgctxInfoType rgctx_type) { MonoDelegateClassMethodPair *info; MonoJumpInfoRgctxEntry *entry; info = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair)); info->klass = klass; info->method = virt_method; info->is_virtual = _virtual; entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, info, rgctx_type); return emit_rgctx_fetch (cfg, context_used, entry); } /* * Returns NULL and set the cfg exception on error. */ static G_GNUC_UNUSED MonoInst* handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int target_method_context_used, int invoke_context_used, gboolean virtual_) { MonoInst *ptr; int dreg; gpointer trampoline; MonoInst *obj, *tramp_ins; guint8 **code_slot; if (virtual_ && !cfg->llvm_only) { MonoMethod *invoke = mono_get_delegate_invoke_internal (klass); g_assert (invoke); //FIXME verify & fix any issue with removing invoke_context_used restriction if (invoke_context_used || !mono_get_delegate_virtual_invoke_impl (mono_method_signature_internal (invoke), target_method_context_used ? 
NULL : method)) return NULL; } obj = handle_alloc (cfg, klass, FALSE, invoke_context_used); if (!obj) return NULL; /* Inline the contents of mono_delegate_ctor */ /* Set target field */ /* Optimize away setting of NULL target */ if (!MONO_INS_IS_PCONST_NULL (target)) { if (!(method->flags & METHOD_ATTRIBUTE_STATIC)) { MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target->dreg, 0); MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException"); } if (!mini_debug_options.weak_memory_model) mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg); if (cfg->gen_write_barriers) { dreg = alloc_preg (cfg); EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target)); mini_emit_write_barrier (cfg, ptr, target); } } /* Set method field */ if (!(target_method_context_used || invoke_context_used) && !cfg->llvm_only) {
//If compiling with gsharing enabled, it's faster to load the method from the delegate trampoline info than to use a rgctx slot
MonoInst *method_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg); } if (cfg->llvm_only) { if (virtual_) { MonoInst *args [ ] = { obj, target, emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD) }; mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate_virtual, args); return obj; } } /* * To avoid looking up the compiled code belonging to the target method * in mono_delegate_trampoline (), we allocate a per-domain memory slot to * store it, and we fill it after the method has been compiled. */ if (!method->dynamic && !cfg->llvm_only) { MonoInst *code_slot_ins; if (target_method_context_used) { code_slot_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE); } else { MonoJitMemoryManager *jit_mm = (MonoJitMemoryManager*)cfg->jit_mm; jit_mm_lock (jit_mm); if (!jit_mm->method_code_hash) jit_mm->method_code_hash = g_hash_table_new (NULL, NULL); code_slot = (guint8 **)g_hash_table_lookup (jit_mm->method_code_hash, method); if (!code_slot) { code_slot = (guint8 **)mono_mem_manager_alloc0 (jit_mm->mem_manager, sizeof (gpointer)); g_hash_table_insert (jit_mm->method_code_hash, method, code_slot); } jit_mm_unlock (jit_mm); code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method); } MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg); } if (target_method_context_used || invoke_context_used) { tramp_ins = emit_get_rgctx_dele_tramp (cfg, target_method_context_used | invoke_context_used, klass, method, virtual_, MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO);
//This is emitted as a constant store for the non-shared case.
//We copy from the delegate trampoline info as it's faster than a rgctx fetch
dreg = alloc_preg (cfg); if (!cfg->llvm_only) { MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method)); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), dreg); } } else if (cfg->compile_aot) { MonoDelegateClassMethodPair *del_tramp; del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair)); del_tramp->klass = klass; del_tramp->method = method; del_tramp->is_virtual = virtual_; EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp); } else { if (virtual_) trampoline = mono_create_delegate_virtual_trampoline (klass, method); else trampoline = mono_create_delegate_trampoline_info (klass, method); EMIT_NEW_PCONST (cfg, tramp_ins, trampoline); } if (cfg->llvm_only) { MonoInst *args [ ] = { obj, tramp_ins }; mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate, args); return obj; } /* Set invoke_impl field */ if (virtual_) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg); } else { dreg = alloc_preg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl)); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg); dreg = alloc_preg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr)); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg); } dreg = alloc_preg (cfg); MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg); /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */ return obj; } /* * handle_constrained_gsharedvt_call: * * Handle constrained calls where the receiver is a gsharedvt type. * Return the instruction representing the call. Set the cfg exception on failure. */ static MonoInst* handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class, gboolean *ref_emit_widen) { MonoInst *ins = NULL; gboolean emit_widen = *ref_emit_widen; gboolean supported; /* * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype. * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we * pack the arguments into an array, and do the rest of the work in an icall.
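 * (Illustrative example, added: for IL such as "constrained. !T callvirt Object::ToString()" on a gsharedvt receiver, the receiver, the method and the constrained class are handed to the mono_gsharedvt_constrained_call icall together with the packed argument array built below.)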
*/ supported = ((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!m_class_is_valuetype (cmethod->klass) && m_class_get_image (cmethod->klass) != mono_defaults.corlib)); if (supported) supported = (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)); if (supported) { if (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1)) { supported = TRUE; } else { supported = TRUE; for (int i = 0; i < fsig->param_count; ++i) { if (!(m_type_is_byref (fsig->params [i]) || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_IS_REFERENCE (fsig->params [i]) || MONO_TYPE_ISSTRUCT (fsig->params [i]) || mini_is_gsharedvt_type (fsig->params [i]))) supported = FALSE; } } } if (supported) { MonoInst *args [5]; /* * This case handles calls to * - object:ToString()/Equals()/GetHashCode(), * - System.IComparable<T>:CompareTo() * - System.IEquatable<T>:Equals () * plus some simple interface calls enough to support AsyncTaskMethodBuilder. */ if (fsig->hasthis) args [0] = sp [0]; else EMIT_NEW_PCONST (cfg, args [0], NULL); args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD); args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS); /* !fsig->hasthis is for the wrapper for the Object.GetType () icall or static virtual methods */ if ((fsig->hasthis || m_method_is_static (cmethod)) && fsig->param_count) { /* Call mono_gsharedvt_constrained_call (gpointer mp, MonoMethod *cmethod, MonoClass *klass, gboolean *deref_args, gpointer *args) */ gboolean has_gsharedvt = FALSE; for (int i = 0; i < fsig->param_count; ++i) { if (mini_is_gsharedvt_type (fsig->params [i])) has_gsharedvt = TRUE; } /* Pass an array of bools which signal whether the corresponding argument is a gsharedvt ref type */ if (has_gsharedvt) { MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM); ins->dreg = alloc_preg (cfg); ins->inst_imm = fsig->param_count; MONO_ADD_INS (cfg->cbb, ins); args [3] = ins; } else { EMIT_NEW_PCONST (cfg, args [3], 0); } /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */ MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM); ins->dreg = alloc_preg (cfg); ins->inst_imm = fsig->param_count * sizeof (target_mgreg_t); MONO_ADD_INS (cfg->cbb, ins); args [4] = ins; for (int i = 0; i < fsig->param_count; ++i) { int addr_reg; if (mini_is_gsharedvt_type (fsig->params [i])) { MonoInst *is_deref; int deref_arg_reg; ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (fsig->params [i]), MONO_RGCTX_INFO_CLASS_BOX_TYPE); deref_arg_reg = alloc_preg (cfg); /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */ EMIT_NEW_BIALU_IMM (cfg, is_deref, OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, i, is_deref->dreg); } else if (has_gsharedvt) { MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, args [3]->dreg, i, 0); } MonoInst *arg = sp [i + fsig->hasthis]; if (mini_is_gsharedvt_type (fsig->params [i]) || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_ISSTRUCT (fsig->params [i])) { EMIT_NEW_VARLOADA_VREG (cfg, ins, arg->dreg, fsig->params [i]); addr_reg = ins->dreg; EMIT_NEW_STORE_MEMBASE (cfg, ins,
OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), addr_reg); } else { EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), arg->dreg); } } } else { EMIT_NEW_ICONST (cfg, args [3], 0); EMIT_NEW_ICONST (cfg, args [4], 0); } ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args); emit_widen = FALSE; if (mini_is_gsharedvt_type (fsig->ret)) { ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type_internal (fsig->ret), ins); } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret))) { MonoInst *add; /* Unbox */ NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, MONO_ABI_SIZEOF (MonoObject)); MONO_ADD_INS (cfg->cbb, add); /* Load value */ NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0); MONO_ADD_INS (cfg->cbb, ins); /* ins represents the call result */ } } else { GSHAREDVT_FAILURE (CEE_CALLVIRT); } *ref_emit_widen = emit_widen; return ins; exception_exit: return NULL; } static void mono_emit_load_got_addr (MonoCompile *cfg) { MonoInst *getaddr, *dummy_use; if (!cfg->got_var || cfg->got_var_allocated) return; MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR); getaddr->cil_code = cfg->header->code; getaddr->dreg = cfg->got_var->dreg; /* Add it to the start of the first bblock */ if (cfg->bb_entry->code) { getaddr->next = cfg->bb_entry->code; cfg->bb_entry->code = getaddr; } else MONO_ADD_INS (cfg->bb_entry, getaddr); cfg->got_var_allocated = TRUE; /* * Add a dummy use to keep the got_var alive, since real uses might * only be generated by the back ends. * Add it to end_bblock, so the variable's lifetime covers the whole * method. * It would be better to make the usage of the got var explicit in all * cases when the backend needs it (i.e. calls, throw etc.), so this * wouldn't be needed. */ NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var); MONO_ADD_INS (cfg->bb_exit, dummy_use); } static MonoMethod* get_constrained_method (MonoCompile *cfg, MonoImage *image, guint32 token, MonoMethod *cil_method, MonoClass *constrained_class, MonoGenericContext *generic_context) { MonoMethod *cmethod = cil_method; gboolean constrained_is_generic_param = m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR || m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR; if (cfg->current_method->wrapper_type != MONO_WRAPPER_NONE) { if (cfg->verbose_level > 2) printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class)); if (!(constrained_is_generic_param && cfg->gshared)) { cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, cfg->error); CHECK_CFG_ERROR; } } else { if (cfg->verbose_level > 2) printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class)); if (constrained_is_generic_param && cfg->gshared) { /* * This is needed since get_method_constrained can't find * the method in klass representing a type var. * The type var is guaranteed to be a reference type in this * case. 
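 * (Added note: valuetype receivers constrained over a type var go through the gsharedvt machinery instead, which is why the assert below is only applied to non-gsharedvt classes.)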
*/ if (!mini_is_gsharedvt_klass (constrained_class)) g_assert (!m_class_is_valuetype (cmethod->klass)); } else { cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, cfg->error); CHECK_CFG_ERROR; } } return cmethod; mono_error_exit: return NULL; } static gboolean method_does_not_return (MonoMethod *method) { // FIXME: Under netcore, these are decorated with the [DoesNotReturn] attribute return m_class_get_image (method->klass) == mono_defaults.corlib && !strcmp (m_class_get_name (method->klass), "ThrowHelper") && strstr (method->name, "Throw") == method->name && !method->is_inflated; } static int inline_limit, llvm_jit_inline_limit, llvm_aot_inline_limit; static gboolean inline_limit_inited; static gboolean mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method) { MonoMethodHeaderSummary header; MonoVTable *vtable; int limit; #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK MonoMethodSignature *sig = mono_method_signature_internal (method); int i; #endif if (cfg->disable_inline) return FALSE; if (cfg->gsharedvt) return FALSE; if (cfg->inline_depth > 10) return FALSE; if (!mono_method_get_header_summary (method, &header)) return FALSE; /*runtime, icall and pinvoke are checked by summary call*/ if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) || (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) || header.has_clauses) return FALSE; if (method->flags & METHOD_ATTRIBUTE_REQSECOBJ) /* Used to mark methods containing StackCrawlMark locals */ return FALSE; /* also consider num_locals? */ /* Do the size check early to avoid creating vtables */ if (!inline_limit_inited) { char *inlinelimit; if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) { inline_limit = atoi (inlinelimit); llvm_jit_inline_limit = inline_limit; llvm_aot_inline_limit = inline_limit; g_free (inlinelimit); } else { inline_limit = INLINE_LENGTH_LIMIT; llvm_jit_inline_limit = LLVM_JIT_INLINE_LENGTH_LIMIT; llvm_aot_inline_limit = LLVM_AOT_INLINE_LENGTH_LIMIT; } inline_limit_inited = TRUE; } if (COMPILE_LLVM (cfg)) { if (cfg->compile_aot) limit = llvm_aot_inline_limit; else limit = llvm_jit_inline_limit; } else { limit = inline_limit; } if (header.code_size >= limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING)) return FALSE; /* * if we can initialize the class of the method right away, we do, * otherwise we don't allow inlining if the class needs initialization, * since it would mean inserting a call to mono_runtime_class_init() * inside the inlined code */ if (cfg->gshared && m_class_has_cctor (method->klass) && mini_class_check_context_used (cfg, method->klass)) return FALSE; { /* The AggressiveInlining hint is a good excuse to force that cctor to run. 
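 * (Added note: forcing the cctor here keeps mono_runtime_class_init () out of the inlined body, matching the constraint described above.)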
*/ if ((cfg->opt & MONO_OPT_AGGRESSIVE_INLINING) || method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) { if (m_class_has_cctor (method->klass)) { ERROR_DECL (error); vtable = mono_class_vtable_checked (method->klass, error); if (!is_ok (error)) { mono_error_cleanup (error); return FALSE; } if (!cfg->compile_aot) { if (!mono_runtime_class_init_full (vtable, error)) { mono_error_cleanup (error); return FALSE; } } } } else if (mono_class_is_before_field_init (method->klass)) { if (cfg->run_cctors && m_class_has_cctor (method->klass)) { ERROR_DECL (error); /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */ if (!m_class_get_runtime_vtable (method->klass)) /* No vtable created yet */ return FALSE; vtable = mono_class_vtable_checked (method->klass, error); if (!is_ok (error)) { mono_error_cleanup (error); return FALSE; } /* This makes it so that inlining cannot trigger */ /* .cctors: too many apps depend on them */ /* running with a specific order... */ if (! vtable->initialized) return FALSE; if (!mono_runtime_class_init_full (vtable, error)) { mono_error_cleanup (error); return FALSE; } } } else if (mono_class_needs_cctor_run (method->klass, NULL)) { ERROR_DECL (error); if (!m_class_get_runtime_vtable (method->klass)) /* No vtable created yet */ return FALSE; vtable = mono_class_vtable_checked (method->klass, error); if (!is_ok (error)) { mono_error_cleanup (error); return FALSE; } if (!vtable->initialized) return FALSE; } }
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
if (mono_arch_is_soft_float ()) { /* FIXME: */ if (sig->ret && sig->ret->type == MONO_TYPE_R4) return FALSE; for (i = 0; i < sig->param_count; ++i) if (!m_type_is_byref (sig->params [i]) && sig->params [i]->type == MONO_TYPE_R4) return FALSE; }
#endif
if (g_list_find (cfg->dont_inline, method)) return FALSE; if (mono_profiler_get_call_instrumentation_flags (method)) return FALSE; if (mono_profiler_coverage_instrumentation_enabled (method)) return FALSE; if (method_does_not_return (method)) return FALSE; return TRUE; } static gboolean mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable) { if (!cfg->compile_aot) { g_assert (vtable); if (vtable->initialized) return FALSE; } if (mono_class_is_before_field_init (klass)) { if (cfg->method == method) return FALSE; } if (!mono_class_needs_cctor_run (klass, method)) return FALSE; if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass)) /* The initialization is already done before the method is called */ return FALSE; return TRUE; } int mini_emit_sext_index_reg (MonoCompile *cfg, MonoInst *index) { int index_reg = index->dreg; int index2_reg;
#if SIZEOF_REGISTER == 8
/* The array reg is 64 bits but the index reg is only 32 */ if (COMPILE_LLVM (cfg)) { /* * abcrem can't handle the OP_SEXT_I4, so add this after abcrem, * during OP_BOUNDS_CHECK decomposition, and in the implementation * of OP_X86_LEA for llvm.
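 * (Added note: on 64-bit targets a 32-bit index must be sign-extended before the pointer arithmetic against the 64-bit array register; LLVM performs that extension itself, hence the pass-through in the COMPILE_LLVM branch here.)
 */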
*/ index2_reg = index_reg; } else { index2_reg = alloc_preg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg); } #else if (index->type == STACK_I8) { index2_reg = alloc_preg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg); } else { index2_reg = index_reg; } #endif return index2_reg; } MonoInst* mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck, gboolean bounded) { MonoInst *ins; guint32 size; int mult_reg, add_reg, array_reg, index2_reg, bounds_reg, lower_bound_reg, realidx2_reg; int context_used; if (mini_is_gsharedvt_variable_klass (klass)) { size = -1; } else { mono_class_init_internal (klass); size = mono_class_array_element_size (klass); } mult_reg = alloc_preg (cfg); array_reg = arr->dreg; realidx2_reg = index2_reg = mini_emit_sext_index_reg (cfg, index); if (bounded) { bounds_reg = alloc_preg (cfg); lower_bound_reg = alloc_preg (cfg); realidx2_reg = alloc_preg (cfg); MonoBasicBlock *is_null_bb = NULL; NEW_BBLOCK (cfg, is_null_bb); // gint32 lower_bound = 0; // if (arr->bounds) // lower_bound = arr->bounds.lower_bound; // realidx2 = index2 - lower_bound; MONO_EMIT_NEW_PCONST (cfg, lower_bound_reg, NULL); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds)); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, lower_bound_reg, bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound)); MONO_START_BB (cfg, is_null_bb); MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2_reg, lower_bound_reg); } if (bcheck) MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, realidx2_reg); #if defined(TARGET_X86) || defined(TARGET_AMD64) if (size == 1 || size == 2 || size == 4 || size == 8) { static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 }; EMIT_NEW_X86_LEA (cfg, ins, array_reg, realidx2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector)); ins->klass = klass; ins->type = STACK_MP; return ins; } #endif add_reg = alloc_ireg_mp (cfg); if (size == -1) { MonoInst *rgctx_ins; /* gsharedvt */ g_assert (cfg->gshared); context_used = mini_class_check_context_used (cfg, klass); g_assert (context_used); rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE); MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, realidx2_reg, rgctx_ins->dreg); } else { MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, realidx2_reg, size); } MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg); NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector)); ins->klass = klass; ins->type = STACK_MP; MONO_ADD_INS (cfg->cbb, ins); return ins; } static MonoInst* mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2) { int bounds_reg = alloc_preg (cfg); int add_reg = alloc_ireg_mp (cfg); int mult_reg = alloc_preg (cfg); int mult2_reg = alloc_preg (cfg); int low1_reg = alloc_preg (cfg); int low2_reg = alloc_preg (cfg); int high1_reg = alloc_preg (cfg); int high2_reg = alloc_preg (cfg); int realidx1_reg = alloc_preg (cfg); int realidx2_reg = alloc_preg (cfg); int sum_reg = alloc_preg (cfg); int index1, index2; MonoInst *ins; guint32 size; mono_class_init_internal (klass); size = mono_class_array_element_size (klass); index1 = index_ins1->dreg; index2 = index_ins2->dreg; #if SIZEOF_REGISTER 
== 8 /* The array reg is 64 bits but the index reg is only 32 */ if (COMPILE_LLVM (cfg)) { /* Not needed */ } else { int tmpreg = alloc_preg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1); index1 = tmpreg; tmpreg = alloc_preg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2); index2 = tmpreg; } #else // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ? #endif /* range checking */ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds)); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg, bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound)); MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg, bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length)); MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg); MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg, bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound)); MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg, bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length)); MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg); MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg); MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size); MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg); NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector)); ins->type = STACK_MP; ins->klass = klass; MONO_ADD_INS (cfg->cbb, ins); return ins; } static MonoInst* mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, guchar *ip, gboolean is_set) { int rank; MonoInst *addr; MonoMethod *addr_method; int element_size; MonoClass *eclass = m_class_get_element_class (cmethod->klass); gboolean bounded = m_class_get_byval_arg (cmethod->klass) ? m_class_get_byval_arg (cmethod->klass)->type == MONO_TYPE_ARRAY : FALSE; rank = mono_method_signature_internal (cmethod)->param_count - (is_set? 
1: 0); if (rank == 1) return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE, bounded); /* emit_ldelema_2 depends on OP_LMUL */ if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) { return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]); } if (mini_is_gsharedvt_variable_klass (eclass)) element_size = 0; else element_size = mono_class_array_element_size (eclass); addr_method = mono_marshal_get_array_address (rank, element_size); addr = mono_emit_method_call (cfg, addr_method, sp, NULL); return addr; } static gboolean mini_class_is_reference (MonoClass *klass) { return mini_type_is_reference (m_class_get_byval_arg (klass)); } MonoInst* mini_emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks) { if (safety_checks && mini_class_is_reference (klass) && !(MONO_INS_IS_PCONST_NULL (sp [2]))) { MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class); MonoMethod *helper; MonoInst *iargs [3]; if (sp [0]->type != STACK_OBJ) return NULL; if (sp [2]->type != STACK_OBJ) return NULL; iargs [2] = sp [2]; iargs [1] = sp [1]; iargs [0] = sp [0]; MonoClass *array_class = sp [0]->klass; if (array_class && m_class_get_rank (array_class) == 1) { MonoClass *eclass = m_class_get_element_class (array_class); if (m_class_is_sealed (eclass)) { helper = mono_marshal_get_virtual_stelemref (array_class); /* Make a non-virtual call if possible */ return mono_emit_method_call (cfg, helper, iargs, NULL); } } helper = mono_marshal_get_virtual_stelemref (obj_array); if (!helper->slot) mono_class_setup_vtable (obj_array); g_assert (helper->slot); return mono_emit_method_call (cfg, helper, iargs, sp [0]); } else { MonoInst *ins; if (mini_is_gsharedvt_variable_klass (klass)) { MonoInst *addr; // FIXME-VT: OP_ICONST optimization addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE); EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg); ins->opcode = OP_STOREV_MEMBASE; } else if (sp [1]->opcode == OP_ICONST) { int array_reg = sp [0]->dreg; int index_reg = sp [1]->dreg; int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector); if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg) && sp [1]->inst_c0 < 0) MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg); if (safety_checks) MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg); EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset, sp [2]->dreg); } else { MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks, FALSE); if (!mini_debug_options.weak_memory_model && mini_class_is_reference (klass)) mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg); if (mini_class_is_reference (klass)) mini_emit_write_barrier (cfg, addr, sp [2]); } return ins; } } MonoInst* mini_emit_memory_barrier (MonoCompile *cfg, int kind) { MonoInst *ins = NULL; MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER); MONO_ADD_INS (cfg->cbb, ins); ins->backend.memory_barrier_kind = kind; return ins; } /* * This entry point could be used later for arbitrary method * redirection. 
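 * (Added note: the only redirection currently performed is String.FastAllocateString below, which is rewritten into a direct call to the managed GC allocator when one is available.)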
 */
inline static MonoInst*
mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
					MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
{
	if (method->klass == mono_defaults.string_class) {
		/* managed string allocation support */
		if (strcmp (method->name, "FastAllocateString") == 0) {
			MonoInst *iargs [2];
			MonoVTable *vtable = mono_class_vtable_checked (method->klass, cfg->error);
			MonoMethod *managed_alloc = NULL;

			mono_error_assert_ok (cfg->error); /* Should not fail since it is System.String */
#ifndef MONO_CROSS_COMPILE
			managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
#endif
			if (!managed_alloc)
				return NULL;
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			iargs [1] = args [0];
			return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
		}
	}
	return NULL;
}

static void
mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
{
	MonoInst *store, *temp;
	int i;

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];

		/*
		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
		 * would be different from the MonoInst's used to represent arguments, and
		 * the ldelema implementation can't deal with that.
		 * Solution: When ldelema is used on an inline argument, create a var for
		 * it, emit ldelema on that var, and emit the saving code below in
		 * inline_method () if needed.
		 */
		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
		cfg->args [i] = temp;
		/* This uses cfg->args [i] which is set by the preceding line */
		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
		store->cil_code = sp [0]->cil_code;
		sp++;
	}
}

#define MONO_INLINE_CALLED_LIMITED_METHODS 1
#define MONO_INLINE_CALLER_LIMITED_METHODS 1

#if (MONO_INLINE_CALLED_LIMITED_METHODS)
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	int strncmp_result;
	static const char *limit = NULL;

	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		if (limit_string != NULL)
			limit = limit_string;
		else
			limit = "";
	}

	if (limit [0] != '\0') {
		char *called_method_name = mono_method_full_name (called_method, TRUE);

		strncmp_result = strncmp (called_method_name, limit, strlen (limit));
		g_free (called_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif

#if (MONO_INLINE_CALLER_LIMITED_METHODS)
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	int strncmp_result;
	static const char *limit = NULL;

	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;
		} else {
			limit = "";
		}
	}

	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);

		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif

void
mini_emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
{
	static double r8_0 = 0.0;
	static float r4_0 = 0.0;
	MonoInst *ins;
	int t;

	rtype = mini_get_underlying_type (rtype);
	t = rtype->type;

	if (m_type_is_byref (rtype)) {
		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
		MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
		MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
	} else if (cfg->r4fp && t
== MONO_TYPE_R4) { MONO_INST_NEW (cfg, ins, OP_R4CONST); ins->type = STACK_R4; ins->inst_p0 = (void*)&r4_0; ins->dreg = dreg; MONO_ADD_INS (cfg->cbb, ins); } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) { MONO_INST_NEW (cfg, ins, OP_R8CONST); ins->type = STACK_R8; ins->inst_p0 = (void*)&r8_0; ins->dreg = dreg; MONO_ADD_INS (cfg->cbb, ins); } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) || ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) { MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype)); } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) { MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype)); } else { MONO_EMIT_NEW_PCONST (cfg, dreg, NULL); } } static void emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype) { int t; rtype = mini_get_underlying_type (rtype); t = rtype->type; if (m_type_is_byref (rtype)) { MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST); } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) { MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST); } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) { MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST); } else if (cfg->r4fp && t == MONO_TYPE_R4) { MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST); } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) { MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST); } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) || ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) { MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO); } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) { MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO); } else { mini_emit_init_rvar (cfg, dreg, rtype); } } /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */ static void emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init) { MonoInst *var = cfg->locals [local]; if (COMPILE_SOFT_FLOAT (cfg)) { MonoInst *store; int reg = alloc_dreg (cfg, (MonoStackType)var->type); mini_emit_init_rvar (cfg, reg, type); EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins); } else { if (init) mini_emit_init_rvar (cfg, var->dreg, type); else emit_dummy_init_rvar (cfg, var->dreg, type); } } int mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always) { return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always, NULL); } /* * inline_method: * * Return the cost of inlining CMETHOD, or zero if it should not be inlined. 
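 * A nonzero result is added by callers into their running inline cost
 * total (see e.g. handle_ctor_call below).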
*/ static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always, gboolean *is_empty) { ERROR_DECL (error); MonoInst *ins, *rvar = NULL; MonoMethodHeader *cheader; MonoBasicBlock *ebblock, *sbblock; int i, costs; MonoInst **prev_locals, **prev_args; MonoType **prev_arg_types; guint prev_real_offset; GHashTable *prev_cbb_hash; MonoBasicBlock **prev_cil_offset_to_bb; MonoBasicBlock *prev_cbb; const guchar *prev_ip; guchar *prev_cil_start; guint32 prev_cil_offset_to_bb_len; MonoMethod *prev_current_method; MonoGenericContext *prev_generic_context; gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE; g_assert (cfg->exception_type == MONO_EXCEPTION_NONE); #if (MONO_INLINE_CALLED_LIMITED_METHODS) if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod)) return 0; #endif #if (MONO_INLINE_CALLER_LIMITED_METHODS) if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method)) return 0; #endif if (!fsig) fsig = mono_method_signature_internal (cmethod); if (cfg->verbose_level > 2) printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE)); if (!cmethod->inline_info) { cfg->stat_inlineable_methods++; cmethod->inline_info = 1; } if (is_empty) *is_empty = FALSE; /* allocate local variables */ cheader = mono_method_get_header_checked (cmethod, error); if (!cheader) { if (inline_always) { mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); mono_error_move (cfg->error, error); } else { mono_error_cleanup (error); } return 0; } if (is_empty && cheader->code_size == 1 && cheader->code [0] == CEE_RET) *is_empty = TRUE; /* allocate space to store the return value */ if (!MONO_TYPE_IS_VOID (fsig->ret)) { rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL); } prev_locals = cfg->locals; cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*)); for (i = 0; i < cheader->num_locals; ++i) cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL); /* allocate start and end blocks */ /* This is needed so if the inline is aborted, we can clean up */ NEW_BBLOCK (cfg, sbblock); sbblock->real_offset = real_offset; NEW_BBLOCK (cfg, ebblock); ebblock->block_num = cfg->num_bblocks++; ebblock->real_offset = real_offset; prev_args = cfg->args; prev_arg_types = cfg->arg_types; prev_ret_var_set = cfg->ret_var_set; prev_real_offset = cfg->real_offset; prev_cbb_hash = cfg->cbb_hash; prev_cil_offset_to_bb = cfg->cil_offset_to_bb; prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len; prev_cil_start = cfg->cil_start; prev_ip = cfg->ip; prev_cbb = cfg->cbb; prev_current_method = cfg->current_method; prev_generic_context = cfg->generic_context; prev_disable_inline = cfg->disable_inline; cfg->ret_var_set = FALSE; cfg->inline_depth ++; if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) virtual_ = TRUE; costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_); ret_var_set = cfg->ret_var_set; cfg->real_offset = prev_real_offset; cfg->cbb_hash = prev_cbb_hash; cfg->cil_offset_to_bb = prev_cil_offset_to_bb; cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len; cfg->cil_start = prev_cil_start; cfg->ip = prev_ip; cfg->locals = prev_locals; cfg->args = prev_args; cfg->arg_types = prev_arg_types; cfg->current_method = prev_current_method; cfg->generic_context = 
	prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->disable_inline = prev_disable_inline;
	cfg->inline_depth --;

	if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		mono_error_assert_ok (cfg->error);

		cfg->stat_inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 * optimizations.
		 */
		if (prev_cbb->out_count == 1)
			mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];

			if (prev->next_bb == ebblock) {
				mono_merge_basic_blocks (cfg, prev, ebblock);
				cfg->cbb = prev;
				if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
					mono_merge_basic_blocks (cfg, prev_cbb, prev);
					cfg->cbb = prev_cbb;
				}
			} else {
				/* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
				cfg->cbb = ebblock;
			}
		} else {
			/*
			 * It's possible that the rvar is set in some prev bblock, but not in others.
			 * (#1835).
			 */
			if (rvar) {
				MonoBasicBlock *bb;

				for (i = 0; i < ebblock->in_count; ++i) {
					bb = ebblock->in_bb [i];

					if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
						cfg->cbb = bb;

						mini_emit_init_rvar (cfg, rvar->dreg, fsig->ret);
					}
				}
			}

			cfg->cbb = ebblock;
		}

		if (rvar) {
			/*
			 * If the inlined method contains only a throw, then the ret var is not
			 * set, so set it to a dummy value.
			 */
			if (!ret_var_set)
				mini_emit_init_rvar (cfg, rvar->dreg, fsig->ret);

			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
			*sp++ = ins;
		}
		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
		return costs + 1;
	} else {
		if (cfg->verbose_level > 2) {
			const char *msg = mono_error_get_message (cfg->error);
			printf ("INLINE ABORTED %s (cost %d) %s\n", mono_method_full_name (cmethod, TRUE), costs, msg ? msg : "");
		}
		cfg->exception_type = MONO_EXCEPTION_NONE;
		clear_cfg_error (cfg);

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
	}
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
	return 0;
}

/*
 * Some of these comments may well be out-of-date.
 * Design decisions: we do a single pass over the IL code (and we do bblock
 * splitting/merging in the few cases when it's required: a back jump to an IL
 * address that was not already seen as bblock starting point).
 * Code is validated as we go (full verification is still better left to metadata/verify.c).
 * Complex operations are decomposed into simpler ones right away. We need to let the
 * arch-specific code peek and poke inside this process somehow (except when the
 * optimizations can take advantage of the full semantic info of coarse opcodes).
 * All the opcodes of the form opcode.s are 'normalized' to opcode.
 * MonoInst->opcode initially is the IL opcode or some simplification of that
 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
 * opcode with value bigger than OP_LAST.
* At this point the IR can be handed over to an interpreter, a dumb code generator * or to the optimizing code generator that will translate it to SSA form. * * Profiling directed optimizations. * We may compile by default with few or no optimizations and instrument the code * or the user may indicate what methods to optimize the most either in a config file * or through repeated runs where the compiler applies offline the optimizations to * each method and then decides if it was worth it. */ #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED #define CHECK_STACK_OVF() if (((sp - stack_start) + 1) > header->max_stack) UNVERIFIED #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED #define CHECK_OPSIZE(size) if ((size) < 1 || ip + (size) > end) UNVERIFIED #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass)) /* offset from br.s -> br like opcodes */ #define BIG_BRANCH_OFFSET 13 static gboolean ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip) { MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start]; return b == NULL || b == bb; } static int get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, guchar *start, guchar *end, guchar **pos) { guchar *ip = start; guchar *target; int i; guint cli_addr; MonoBasicBlock *bblock; const MonoOpcode *opcode; while (ip < end) { cli_addr = ip - start; i = mono_opcode_value ((const guint8 **)&ip, end); if (i < 0) UNVERIFIED; opcode = &mono_opcodes [i]; switch (opcode->argument) { case MonoInlineNone: ip++; break; case MonoInlineString: case MonoInlineType: case MonoInlineField: case MonoInlineMethod: case MonoInlineTok: case MonoInlineSig: case MonoShortInlineR: case MonoInlineI: ip += 5; break; case MonoInlineVar: ip += 3; break; case MonoShortInlineVar: case MonoShortInlineI: ip += 2; break; case MonoShortInlineBrTarget: target = start + cli_addr + 2 + (signed char)ip [1]; GET_BBLOCK (cfg, bblock, target); ip += 2; if (ip < end) GET_BBLOCK (cfg, bblock, ip); break; case MonoInlineBrTarget: target = start + cli_addr + 5 + (gint32)read32 (ip + 1); GET_BBLOCK (cfg, bblock, target); ip += 5; if (ip < end) GET_BBLOCK (cfg, bblock, ip); break; case MonoInlineSwitch: { guint32 n = read32 (ip + 1); guint32 j; ip += 5; cli_addr += 5 + 4 * n; target = start + cli_addr; GET_BBLOCK (cfg, bblock, target); for (j = 0; j < n; ++j) { target = start + cli_addr + (gint32)read32 (ip); GET_BBLOCK (cfg, bblock, target); ip += 4; } break; } case MonoInlineR: case MonoInlineI8: ip += 9; break; default: g_assert_not_reached (); } if (i == CEE_THROW) { guchar *bb_start = ip - 1; /* Find the start of the bblock containing the throw */ bblock = NULL; while ((bb_start >= start) && !bblock) { bblock = cfg->cil_offset_to_bb [(bb_start) - start]; bb_start --; } if (bblock) bblock->out_of_line = 1; } } return 0; unverified: exception_exit: *pos = ip; return 1; } static MonoMethod * mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error) { MonoMethod *method; error_init (error); if (m->wrapper_type != MONO_WRAPPER_NONE) { method = (MonoMethod *)mono_method_get_wrapper_data (m, token); if (context) { method = mono_class_inflate_generic_method_checked (method, context, error); } } 
	else {
		method = mono_get_method_checked (m_class_get_image (m->klass), token, klass, context, error);
	}

	return method;
}

static MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
{
	ERROR_DECL (error);
	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? cfg->error : error);

	if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (m_class_get_byval_arg (method->klass))) {
		mono_error_set_bad_image (cfg->error, m_class_get_image (cfg->method->klass), "Method with open type while not compiling gshared");
		method = NULL;
	}

	if (!method && !cfg)
		mono_error_cleanup (error); /* FIXME don't swallow the error */

	return method;
}

static MonoMethodSignature*
mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
{
	MonoMethodSignature *fsig;

	error_init (error);
	if (method->wrapper_type != MONO_WRAPPER_NONE) {
		fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
	} else {
		fsig = mono_metadata_parse_signature_checked (m_class_get_image (method->klass), token, error);
		return_val_if_nok (error, NULL);
	}
	if (context) {
		fsig = mono_inflate_generic_signature(fsig, context, error);
	}
	return fsig;
}

/*
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
 */
static MonoMethod*
get_original_method (MonoMethod *method)
{
	if (method->wrapper_type == MONO_WRAPPER_NONE)
		return method;

	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
		return NULL;

	/* in other cases we need to find the original method */
	return mono_marshal_method_from_wrapper (method);
}

static guchar*
il_read_op (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op)
// If ip is desired_il_op, return the next ip, else NULL.
{
	if (G_LIKELY (ip < end) && G_UNLIKELY (*ip == first_byte)) {
		MonoOpcodeEnum il_op = MonoOpcodeEnum_Invalid;
		// mono_opcode_value_and_size updates ip, but not in the expected way.
		const guchar *temp_ip = ip;
		const int size = mono_opcode_value_and_size (&temp_ip, end, &il_op);
		return (G_LIKELY (size > 0) && G_UNLIKELY (il_op == desired_il_op)) ? (ip + size) : NULL;
	}
	return NULL;
}

static guchar*
il_read_op_and_token (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, guint32 *token)
{
	ip = il_read_op (ip, end, first_byte, desired_il_op);
	if (ip)
		*token = read32 (ip - 4); // could be +1 or +2 from start
	return ip;
}

static guchar*
il_read_branch_and_target (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, int size, guchar **target)
{
	ip = il_read_op (ip, end, first_byte, desired_il_op);
	if (ip) {
		gint32 delta = 0;
		switch (size) {
		case 1:
			delta = (signed char)ip [-1];
			break;
		case 4:
			delta = (gint32)read32 (ip - 4);
			break;
		}
		// FIXME verify it is within the function and start of an instruction.
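		// Note: ip already points past the whole branch instruction here, and
		// CLI branch offsets are relative to the following instruction, so the
		// absolute target is simply ip + delta.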
*target = ip + delta; return ip; } return NULL; } #define il_read_brtrue(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE, MONO_CEE_BRTRUE, 4, target)) #define il_read_brtrue_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE_S, MONO_CEE_BRTRUE_S, 1, target)) #define il_read_brfalse(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE, MONO_CEE_BRFALSE, 4, target)) #define il_read_brfalse_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE_S, MONO_CEE_BRFALSE_S, 1, target)) #define il_read_dup(ip, end) (il_read_op (ip, end, CEE_DUP, MONO_CEE_DUP)) #define il_read_newobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_NEW_OBJ, MONO_CEE_NEWOBJ, token)) #define il_read_ldtoken(ip, end, token) (il_read_op_and_token (ip, end, CEE_LDTOKEN, MONO_CEE_LDTOKEN, token)) #define il_read_call(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALL, MONO_CEE_CALL, token)) #define il_read_callvirt(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALLVIRT, MONO_CEE_CALLVIRT, token)) #define il_read_initobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_INITOBJ, token)) #define il_read_constrained(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_CONSTRAINED_, token)) #define il_read_unbox_any(ip, end, token) (il_read_op_and_token (ip, end, CEE_UNBOX_ANY, MONO_CEE_UNBOX_ANY, token)) /* * Check that the IL instructions at ip are the array initialization * sequence and return the pointer to the data and the size. */ static const char* initialize_array_data (MonoCompile *cfg, MonoMethod *method, gboolean aot, guchar *ip, guchar *end, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token, MonoOpcodeEnum *il_op, guchar **next_ip) { /* * newarr[System.Int32] * dup * ldtoken field valuetype ... * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle) */ guint32 token; guint32 field_token; if ((ip = il_read_dup (ip, end)) && ip_in_bb (cfg, cfg->cbb, ip) && (ip = il_read_ldtoken (ip, end, &field_token)) && IS_FIELD_DEF (field_token) && ip_in_bb (cfg, cfg->cbb, ip) && (ip = il_read_call (ip, end, &token))) { ERROR_DECL (error); guint32 rva; const char *data_ptr; int size = 0; MonoMethod *cmethod; MonoClass *dummy_class; MonoClassField *field = mono_field_from_token_checked (m_class_get_image (method->klass), field_token, &dummy_class, NULL, error); int dummy_align; if (!field) { mono_error_cleanup (error); /* FIXME don't swallow the error */ return NULL; } *out_field_token = field_token; cmethod = mini_get_method (NULL, method, token, NULL, NULL); if (!cmethod) return NULL; if (strcmp (cmethod->name, "InitializeArray") || strcmp (m_class_get_name (cmethod->klass), "RuntimeHelpers") || m_class_get_image (cmethod->klass) != mono_defaults.corlib) return NULL; switch (mini_get_underlying_type (m_class_get_byval_arg (klass))->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: size = 1; break; /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? 
 */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2;
			break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4;
			break;
		case MONO_TYPE_R8:
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8;
			break;
#endif
		default:
			return NULL;
		}
		size *= len;
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		MonoImage *method_klass_image = m_class_get_image (method->klass);
		if (!image_is_dynamic (method_klass_image)) {
			guint32 field_index = mono_metadata_token_index (field_token);
			mono_metadata_field_info (method_klass_image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method_klass_image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				data_ptr = (const char *)GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			data_ptr = mono_field_get_data (field);
		}
		if (!data_ptr)
			return NULL;
		*il_op = MONO_CEE_CALL;
		*next_ip = ip;
		return data_ptr;
	}
	return NULL;
}

static void
set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, guchar *ip)
{
	ERROR_DECL (error);
	char *method_fname = mono_method_full_name (method, TRUE);
	char *method_code;
	MonoMethodHeader *header = mono_method_get_header_checked (method, error);

	if (!header) {
		method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (error));
		mono_error_cleanup (error);
	} else if (header->code_size == 0)
		method_code = g_strdup ("method body is empty.");
	else
		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
	mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
	g_free (method_fname);
	g_free (method_code);
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
}

guint32
mono_type_to_stloc_coerce (MonoType *type)
{
	if (m_type_is_byref (type))
		return 0;

	type = mini_get_underlying_type (type);
handle_enum:
	switch (type->type) {
	case MONO_TYPE_I1:
		return OP_ICONV_TO_I1;
	case MONO_TYPE_U1:
		return OP_ICONV_TO_U1;
	case MONO_TYPE_I2:
		return OP_ICONV_TO_I2;
	case MONO_TYPE_U2:
		return OP_ICONV_TO_U2;
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
	case MONO_TYPE_TYPEDBYREF:
	case MONO_TYPE_GENERICINST:
		return 0;
	case MONO_TYPE_VALUETYPE:
		if (m_class_is_enumtype (type->data.klass)) {
			type = mono_class_enum_basetype_internal (type->data.klass);
			goto handle_enum;
		}
		return 0;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR: //TODO I believe we don't need to handle gsharedvt as there won't be a match and, for example, u1 is not covariant to u32
		return 0;
	default:
		g_error ("unknown type 0x%02x in mono_type_to_stloc_coerce", type->type);
	}
	return -1;
}

static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
	MonoInst *ins;
	guint32 coerce_op = mono_type_to_stloc_coerce (header->locals [n]);

	if (coerce_op) {
		if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
			if (cfg->verbose_level > 2)
				printf ("Found existing coercion is enough for stloc\n");
		} else {
			MONO_INST_NEW (cfg, ins, coerce_op);
			ins->dreg = alloc_ireg (cfg);
			ins->sreg1 = sp [0]->dreg;
			ins->type = STACK_I4;
			ins->klass = mono_class_from_mono_type_internal (header->locals [n]);
			MONO_ADD_INS (cfg->cbb, ins);
			*sp = mono_decompose_opcode (cfg, ins);
		}
	}

	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
	if (!cfg->deopt && (opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
		/* Optimize reg-reg moves away */
		/*
		 * Can't optimize other opcodes, since sp[0] might point to
		 * the last ins of a decomposed opcode.
		 */
		sp [0]->dreg = (cfg)->locals [n]->dreg;
	} else {
		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
	}
}

static void
emit_starg_ir (MonoCompile *cfg, MonoInst **sp, int n)
{
	MonoInst *ins;
	guint32 coerce_op = mono_type_to_stloc_coerce (cfg->arg_types [n]);

	if (coerce_op) {
		if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
			if (cfg->verbose_level > 2)
				printf ("Found existing coercion is enough for starg\n");
		} else {
			MONO_INST_NEW (cfg, ins, coerce_op);
			ins->dreg = alloc_ireg (cfg);
			ins->sreg1 = sp [0]->dreg;
			ins->type = STACK_I4;
			ins->klass = mono_class_from_mono_type_internal (cfg->arg_types [n]);
			MONO_ADD_INS (cfg->cbb, ins);
			*sp = mono_decompose_opcode (cfg, ins);
		}
	}

	EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
}

/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.
 */
static guchar *
emit_optimized_ldloca_ir (MonoCompile *cfg, guchar *ip, guchar *end, int local)
{
	guint32 token;
	MonoClass *klass;
	MonoType *type;
	guchar *start = ip;

	if ((ip = il_read_initobj (ip, end, &token)) && ip_in_bb (cfg, cfg->cbb, start + 1)) {
		/* From the INITOBJ case */
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		type = mini_get_underlying_type (m_class_get_byval_arg (klass));
		emit_init_local (cfg, local, type, TRUE);
		return ip;
	}
 exception_exit:
	return NULL;
}

static MonoInst*
handle_call_res_devirt (MonoCompile *cfg, MonoMethod *cmethod, MonoInst *call_res)
{
	/*
	 * Devirt EqualityComparer.Default.Equals () calls for some types.
	 * The corefx code expects these calls to be devirtualized.
	 * This depends on the implementation of EqualityComparer.Default, which is
	 * in mcs/class/referencesource/mscorlib/system/collections/generic/equalitycomparer.cs
	 */
	if (m_class_get_image (cmethod->klass) == mono_defaults.corlib &&
		!strcmp (m_class_get_name (cmethod->klass), "EqualityComparer`1") &&
		!strcmp (cmethod->name, "get_Default")) {
		MonoType *param_type = mono_class_get_generic_class (cmethod->klass)->context.class_inst->type_argv [0];
		MonoClass *inst;
		MonoGenericContext ctx;
		ERROR_DECL (error);

		memset (&ctx, 0, sizeof (ctx));

		MonoType *args [ ] = { param_type };
		ctx.class_inst = mono_metadata_get_generic_inst (1, args);

		inst = mono_class_inflate_generic_class_checked (mono_class_get_iequatable_class (), &ctx, error);
		mono_error_assert_ok (error);

		/* EqualityComparer<T>.Default returns specific types depending on T */
		// FIXME: Add more
		/* 1. Implements IEquatable<T> */
		/*
		 * Can't use this for string/byte as it might use a different comparer:
		 *
		 * // Specialize type byte for performance reasons
		 * if (t == typeof(byte)) {
		 *     return (EqualityComparer<T>)(object)(new ByteEqualityComparer());
		 * }
		 * #if MOBILE
		 * // Breaks .net serialization compatibility
		 * if (t == typeof (string))
		 *     return (EqualityComparer<T>)(object)new InternalStringComparer ();
		 * #endif
		 */
		if (mono_class_is_assignable_from_internal (inst, mono_class_from_mono_type_internal (param_type)) && param_type->type != MONO_TYPE_U1 && param_type->type != MONO_TYPE_STRING) {
			MonoInst *typed_objref;
			MonoClass *gcomparer_inst;

			memset (&ctx, 0, sizeof (ctx));

			args [0] = param_type;
			ctx.class_inst = mono_metadata_get_generic_inst (1, args);

			MonoClass *gcomparer = mono_class_get_geqcomparer_class ();
			g_assert (gcomparer);
			gcomparer_inst = mono_class_inflate_generic_class_checked (gcomparer, &ctx, error);
			if (is_ok (error)) {
				MONO_INST_NEW (cfg, typed_objref, OP_TYPED_OBJREF);
				typed_objref->type = STACK_OBJ;
				typed_objref->dreg = alloc_ireg_ref (cfg);
				typed_objref->sreg1 = call_res->dreg;
				typed_objref->klass = gcomparer_inst;
				MONO_ADD_INS (cfg->cbb, typed_objref);

				call_res = typed_objref;

				/* Force decompose */
				cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
				cfg->cbb->needs_decompose = TRUE;
			}
		}
	}

	return call_res;
}

static gboolean
is_exception_class (MonoClass *klass)
{
	if (G_LIKELY (m_class_get_supertypes (klass)))
		return mono_class_has_parent_fast (klass, mono_defaults.exception_class);
	while (klass) {
		if (klass == mono_defaults.exception_class)
			return TRUE;
		klass = m_class_get_parent (klass);
	}
	return FALSE;
}

/*
 * is_jit_optimizer_disabled:
 *
 * Determine whether M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set.
 */
static gboolean
is_jit_optimizer_disabled (MonoMethod *m)
{
	MonoAssembly *ass = m_class_get_image (m->klass)->assembly;

	g_assert (ass);
	if (ass->jit_optimizer_disabled_inited)
		return ass->jit_optimizer_disabled;
	return mono_assembly_is_jit_optimizer_disabled (ass);
}

gboolean
mono_is_supported_tailcall_helper (gboolean value, const char *svalue)
{
	if (!value)
		mono_tailcall_print ("%s %s\n", __func__, svalue);
	return value;
}

static gboolean
mono_is_not_supported_tailcall_helper (gboolean value, const char *svalue, MonoMethod *method, MonoMethod *cmethod)
{
	// Return value, printing if it inhibits tailcall.
	if (value && mono_tailcall_print_enabled ()) {
		const char *lparen = strchr (svalue, ' ') ? "(" : "";
		const char *rparen = *lparen ? ")" : "";
		mono_tailcall_print ("%s %s -> %s %s%s%s:%d\n", __func__, method->name, cmethod->name, lparen, svalue, rparen, value);
	}
	return value;
}

#define IS_NOT_SUPPORTED_TAILCALL(x) (mono_is_not_supported_tailcall_helper((x), #x, method, cmethod))

static gboolean
is_supported_tailcall (MonoCompile *cfg, const guint8 *ip, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig,
	gboolean virtual_, gboolean extra_arg, gboolean *ptailcall_calli)
{
	// Some checks apply to "regular", some to "calli", some to both.
	// To ease burden on caller, always compute regular and calli.
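	// Start out assuming both forms are supported; the checks below
	// progressively clear tailcall and/or tailcall_calli. The calli answer is
	// reported through *ptailcall_calli, the regular one via the return value.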
	gboolean tailcall = TRUE;
	gboolean tailcall_calli = TRUE;

	if (IS_NOT_SUPPORTED_TAILCALL (virtual_ && !cfg->backend->have_op_tailcall_membase))
		tailcall = FALSE;

	if (IS_NOT_SUPPORTED_TAILCALL (!cfg->backend->have_op_tailcall_reg))
		tailcall_calli = FALSE;

	if (!tailcall && !tailcall_calli)
		goto exit;

	// FIXME in calli, there is no type for the this parameter,
	// so we assume it might be valuetype; in future we should issue a range
	// check, so rule out pointing to frame (for other reference parameters also)

	if (   IS_NOT_SUPPORTED_TAILCALL (cmethod && fsig->hasthis && m_class_is_valuetype (cmethod->klass)) // This might point to the current method's stack. Emit range check?
		|| IS_NOT_SUPPORTED_TAILCALL (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
		|| IS_NOT_SUPPORTED_TAILCALL (fsig->pinvoke) // i.e. if !cmethod (calli)
		|| IS_NOT_SUPPORTED_TAILCALL (cfg->method->save_lmf)
		|| IS_NOT_SUPPORTED_TAILCALL (!cmethod && fsig->hasthis) // FIXME could be valuetype to current frame; range check
		|| IS_NOT_SUPPORTED_TAILCALL (cmethod && cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)

		// http://www.mono-project.com/docs/advanced/runtime/docs/generic-sharing/
		//
		// 1. Non-generic non-static methods of reference types have access to the
		//    RGCTX via the "this" argument (this->vtable->rgctx).
		// 2. a. Non-generic static methods of reference types and b. non-generic methods
		//    of value types need to be passed a pointer to the caller's class's VTable in the MONO_ARCH_RGCTX_REG register.
		// 3. Generic methods need to be passed a pointer to the MRGCTX in the MONO_ARCH_RGCTX_REG register
		//
		// That is what vtable_arg is here (always?).
		//
		// Passing vtable_arg uses (requires?) a volatile non-parameter register,
		// such as AMD64 rax, r10, r11, or the return register on many architectures.
		// ARM32 does not always clearly have such a register. ARM32's return register
		// is a parameter register.
		// iPhone could use r9 except on old systems. iPhone/ARM32 is not particularly
		// important. Linux/arm32 is less clear.
		// ARM32's scratch r12 might work but only with much collateral change.
		//
		// Imagine F1 calls F2, and F2 tailcalls F3.
		// F2 and F3 are managed. F1 is native.
		// Without a tailcall, F2 can save and restore everything needed for F1.
		// However if the extra parameter were in a non-volatile, such as ARM32 V5/R8,
		// F3 cannot easily restore it for F1 in the current scheme, where the extra
		// parameter is not merely an extra parameter, but is passed "outside of the ABI".
		//
		// If all native to managed transitions are intercepted and wrapped (w/o tailcall),
		// then they can preserve this register and the rest of the managed callgraph
		// can treat it as volatile.
		//
		// Interface method dispatch has the same problem (imt_arg).

		|| IS_NOT_SUPPORTED_TAILCALL (extra_arg && !cfg->backend->have_volatile_non_param_register)
		|| IS_NOT_SUPPORTED_TAILCALL (cfg->gsharedvt)
		) {
		tailcall_calli = FALSE;
		tailcall = FALSE;
		goto exit;
	}

	for (int i = 0; i < fsig->param_count; ++i) {
		if (IS_NOT_SUPPORTED_TAILCALL (m_type_is_byref (fsig->params [i]) || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)) {
			tailcall_calli = FALSE;
			tailcall = FALSE; // These can point to the current method's stack. Emit range check?
			goto exit;
		}
	}

	MonoMethodSignature *caller_signature;
	MonoMethodSignature *callee_signature;
	caller_signature = mono_method_signature_internal (method);
	callee_signature = cmethod ?
mono_method_signature_internal (cmethod) : fsig; g_assert (caller_signature); g_assert (callee_signature); // Require an exact match on return type due to various conversions in emit_move_return_value that would be skipped. // The main troublesome conversions are double <=> float. // CoreCLR allows some conversions here, such as integer truncation. // As well I <=> I[48] and U <=> U[48] would be ok, for matching size. if (IS_NOT_SUPPORTED_TAILCALL (mini_get_underlying_type (caller_signature->ret)->type != mini_get_underlying_type (callee_signature->ret)->type) || IS_NOT_SUPPORTED_TAILCALL (!mono_arch_tailcall_supported (cfg, caller_signature, callee_signature, virtual_))) { tailcall_calli = FALSE; tailcall = FALSE; goto exit; } /* Debugging support */ #if 0 if (!mono_debug_count ()) { tailcall_calli = FALSE; tailcall = FALSE; goto exit; } #endif // See check_sp in mini_emit_calli_full. if (tailcall_calli && IS_NOT_SUPPORTED_TAILCALL (mini_should_check_stack_pointer (cfg))) tailcall_calli = FALSE; exit: mono_tailcall_print ("tail.%s %s -> %s tailcall:%d tailcall_calli:%d gshared:%d extra_arg:%d virtual_:%d\n", mono_opcode_name (*ip), method->name, cmethod ? cmethod->name : "calli", tailcall, tailcall_calli, cfg->gshared, extra_arg, virtual_); *ptailcall_calli = tailcall_calli; return tailcall; } /* * is_addressable_valuetype_load * * Returns true if a previous load can be done without doing an extra copy, given the new instruction ip and the type of the object being loaded ldtype */ static gboolean is_addressable_valuetype_load (MonoCompile* cfg, guint8* ip, MonoType* ldtype) { /* Avoid loading a struct just to load one of its fields */ gboolean is_load_instruction = (*ip == CEE_LDFLD); gboolean is_in_previous_bb = ip_in_bb(cfg, cfg->cbb, ip); gboolean is_struct = MONO_TYPE_ISSTRUCT(ldtype); return is_load_instruction && is_in_previous_bb && is_struct; } /* * handle_ctor_call: * * Handle calls made to ctors from NEWOBJ opcodes. 
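 * Depending on generic sharing, the ctor may be inlined, invoked indirectly
 * through an RGCTX-provided address, or called directly.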
*/ static void handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp, guint8 *ip, int *inline_costs) { MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins; if (cmethod && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) { g_assert (MONO_TYPE_IS_VOID (fsig->ret)); CHECK_CFG_EXCEPTION; return; } if (mono_class_generic_sharing_enabled (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE)) { MonoRgctxAccess access = mini_get_rgctx_access_for_method (cmethod); if (access == MONO_RGCTX_ACCESS_MRGCTX) { mono_class_vtable_checked (cmethod->klass, cfg->error); CHECK_CFG_ERROR; CHECK_TYPELOAD (cmethod->klass); vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX); } else if (access == MONO_RGCTX_ACCESS_VTABLE) { vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE); CHECK_CFG_ERROR; CHECK_TYPELOAD (cmethod->klass); } else { g_assert (access == MONO_RGCTX_ACCESS_THIS); } } /* Avoid virtual calls to ctors if possible */ if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg && mono_method_check_inlining (cfg, cmethod) && !mono_class_is_subclass_of_internal (cmethod->klass, mono_defaults.exception_class, FALSE)) { int costs; if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, NULL))) { cfg->real_offset += 5; *inline_costs += costs - 5; } else { INLINE_FAILURE ("inline failure"); // FIXME-VT: Clean this up if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) GSHAREDVT_FAILURE(*ip); mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL); } } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) { MonoInst *addr; addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE); if (cfg->llvm_only) { // FIXME: Avoid initializing vtable_arg mini_emit_llvmonly_calli (cfg, fsig, sp, addr); } else { mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg); } } else if (context_used && ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) || !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) { MonoInst *cmethod_addr; /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */ if (cfg->llvm_only) { MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_FTNDESC); mini_emit_llvmonly_calli (cfg, fsig, sp, addr); } else { cmethod_addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE); mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg); } } else { INLINE_FAILURE ("ctor call"); ins = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, vtable_arg); } exception_exit: mono_error_exit: return; } typedef struct { MonoMethod *method; gboolean inst_tailcall; } HandleCallData; /* * handle_constrained_call: * * Handle constrained calls. Return a MonoInst* representing the call or NULL. * May overwrite sp [0] and modify the ref_... parameters. 
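 * In particular, sp [0] can be replaced by a boxed copy of the receiver when
 * the callee turns out to be defined on object/ValueType/Enum.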
*/ static MonoInst* handle_constrained_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoClass *constrained_class, MonoInst **sp, HandleCallData *cdata, MonoMethod **ref_cmethod, gboolean *ref_virtual, gboolean *ref_emit_widen) { MonoInst *ins, *addr; MonoMethod *method = cdata->method; gboolean constrained_partial_call = FALSE; gboolean constrained_is_generic_param = m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR || m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR; MonoType *gshared_constraint = NULL; if (constrained_is_generic_param && cfg->gshared) { if (!mini_is_gsharedvt_klass (constrained_class)) { g_assert (!m_class_is_valuetype (cmethod->klass)); if (!mini_type_is_reference (m_class_get_byval_arg (constrained_class))) constrained_partial_call = TRUE; MonoType *t = m_class_get_byval_arg (constrained_class); MonoGenericParam *gparam = t->data.generic_param; gshared_constraint = gparam->gshared_constraint; } } if (mini_is_gsharedvt_klass (constrained_class)) { if ((cmethod->klass != mono_defaults.object_class) && m_class_is_valuetype (constrained_class) && m_class_is_valuetype (cmethod->klass)) { /* The 'Own method' case below */ } else if (m_class_get_image (cmethod->klass) != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !m_class_is_valuetype (cmethod->klass)) { /* 'The type parameter is instantiated as a reference type' case below. */ } else { ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, ref_emit_widen); CHECK_CFG_EXCEPTION; g_assert (ins); if (cdata->inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall constrained_class %s -> %s\n", method->name, cmethod->name); return ins; } } if (m_method_is_static (cmethod)) { /* Call to an abstract static method, handled normally */ return NULL; } else if (constrained_partial_call) { gboolean need_box = TRUE; /* * The receiver is a valuetype, but the exact type is not known at compile time. This means the * called method is not known at compile time either. The called method could end up being * one of the methods on the parent classes (object/valuetype/enum), in which case we need * to box the receiver. * A simple solution would be to box always and make a normal virtual call, but that would * be bad performance wise. */ if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass) && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT)) { /* * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary. */ /* If the method is not abstract, it's a default interface method, and we need to box */ need_box = FALSE; } if (gshared_constraint && MONO_TYPE_IS_PRIMITIVE (gshared_constraint) && cmethod->klass == mono_defaults.object_class && !strcmp (cmethod->name, "GetHashCode")) { /* * The receiver is constrained to a primitive type or an enum with the same basetype. * Enum.GetHashCode () returns the hash code of the underlying type (see comments in Enum.cs), * so the constrained call can be replaced with a normal call to the basetype GetHashCode () * method. 
			 */
			MonoClass *gshared_constraint_class = mono_class_from_mono_type_internal (gshared_constraint);
			cmethod = get_method_nofail (gshared_constraint_class, cmethod->name, 0, 0);
			g_assert (cmethod);
			*ref_cmethod = cmethod;
			*ref_virtual = FALSE;
			if (cfg->verbose_level)
				printf (" -> %s\n", mono_method_get_full_name (cmethod));
			return NULL;
		}

		if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
			(cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class)) {
			/* The called method is not virtual, i.e. Object:GetType (); the receiver is a vtype and has to be boxed */
			EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
			ins->klass = constrained_class;
			sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
			CHECK_CFG_EXCEPTION;
		} else if (need_box) {
			MonoInst *box_type;
			MonoBasicBlock *is_ref_bb, *end_bb;
			MonoInst *nonbox_call, *addr;

			/*
			 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
			 * if needed.
			 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
			 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
			 */
			addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);

			NEW_BBLOCK (cfg, is_ref_bb);
			NEW_BBLOCK (cfg, end_bb);

			box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);

			/* Non-ref case */
			if (cfg->llvm_only)
				/* addr is an ftndesc in this case */
				nonbox_call = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
			else
				nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);

			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

			/* Ref case */
			MONO_START_BB (cfg, is_ref_bb);
			EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
			ins->klass = constrained_class;
			sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
			CHECK_CFG_EXCEPTION;
			if (cfg->llvm_only)
				ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
			else
				ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);

			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

			MONO_START_BB (cfg, end_bb);
			cfg->cbb = end_bb;

			nonbox_call->dreg = ins->dreg;
			if (cdata->inst_tailcall) // FIXME
				mono_tailcall_print ("missed tailcall constrained_partial_need_box %s -> %s\n", method->name, cmethod->name);
			return ins;
		} else {
			g_assert (mono_class_is_interface (cmethod->klass));
			addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
			if (cfg->llvm_only)
				ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
			else
				ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
			if (cdata->inst_tailcall) // FIXME
				mono_tailcall_print ("missed tailcall constrained_partial %s -> %s\n", method->name, cmethod->name);
			return ins;
		}
	} else if (!m_class_is_valuetype (constrained_class)) {
		int dreg = alloc_ireg_ref (cfg);

		/*
		 * The type parameter is instantiated as a reference
		 * type.
We have a managed pointer on the stack, so * we need to dereference it here. */ EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0); ins->type = STACK_OBJ; sp [0] = ins; } else if (cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class) { /* * The type parameter is instantiated as a valuetype, * but that type doesn't override the method we're * calling, so we need to box `this'. */ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0); ins->klass = constrained_class; sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class)); CHECK_CFG_EXCEPTION; } else { if (cmethod->klass != constrained_class) { /* Enums/default interface methods */ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0); ins->klass = constrained_class; sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class)); CHECK_CFG_EXCEPTION; } *ref_virtual = FALSE; } exception_exit: return NULL; } static void emit_setret (MonoCompile *cfg, MonoInst *val) { MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret); MonoInst *ins; if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) { MonoInst *ret_addr; if (!cfg->vret_addr) { EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val); } else { EMIT_NEW_RETLOADA (cfg, ret_addr); MonoClass *ret_class = mono_class_from_mono_type_internal (ret_type); if (MONO_CLASS_IS_SIMD (cfg, ret_class)) EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREX_MEMBASE, ret_addr->dreg, 0, val->dreg); else EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg); ins->klass = ret_class; } } else { #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK if (COMPILE_SOFT_FLOAT (cfg) && !m_type_is_byref (ret_type) && ret_type->type == MONO_TYPE_R4) { MonoInst *conv; MonoInst *iargs [ ] = { val }; conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs); mono_arch_emit_setret (cfg, cfg->method, conv); } else { mono_arch_emit_setret (cfg, cfg->method, val); } #else mono_arch_emit_setret (cfg, cfg->method, val); #endif } } /* * Emit a call to enter the interpreter for methods with filter clauses. */ static void emit_llvmonly_interp_entry (MonoCompile *cfg, MonoMethodHeader *header) { MonoInst *ins; MonoInst **iargs; MonoMethodSignature *sig = mono_method_signature_internal (cfg->method); MonoInst *ftndesc; cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, sig); /* * Emit a call to the interp entry function. We emit it here instead of the llvm backend since * calling conventions etc. are easier to handle here. The LLVM backend will only emit the * entry/exit bblocks. */ g_assert (cfg->cbb == cfg->bb_init); if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (sig)) { /* * Would have to generate a gsharedvt out wrapper which calls the interp entry wrapper, but * the gsharedvt out wrapper might not exist if the caller is also a gsharedvt method since * the concrete signature of the call might not exist in the program. * So transition directly to the interpreter without the wrappers. 
		 */
		MonoInst *args_ins;
		MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
		ins->dreg = alloc_preg (cfg);
		ins->inst_imm = sig->param_count * sizeof (target_mgreg_t);
		MONO_ADD_INS (cfg->cbb, ins);
		args_ins = ins;

		for (int i = 0; i < sig->hasthis + sig->param_count; ++i) {
			MonoInst *arg_addr_ins;
			EMIT_NEW_VARLOADA ((cfg), arg_addr_ins, cfg->args [i], cfg->arg_types [i]);
			EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args_ins->dreg, i * sizeof (target_mgreg_t), arg_addr_ins->dreg);
		}
		MonoInst *ret_var = NULL;
		MonoInst *ret_arg_ins;
		if (!MONO_TYPE_IS_VOID (sig->ret)) {
			ret_var = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
			EMIT_NEW_VARLOADA (cfg, ret_arg_ins, ret_var, sig->ret);
		} else {
			EMIT_NEW_PCONST (cfg, ret_arg_ins, NULL);
		}

		iargs = g_newa (MonoInst*, 3);
		iargs [0] = emit_get_rgctx_method (cfg, -1, cfg->method, MONO_RGCTX_INFO_INTERP_METHOD);
		iargs [1] = ret_arg_ins;
		iargs [2] = args_ins;
		mono_emit_jit_icall_id (cfg, MONO_JIT_ICALL_mini_llvmonly_interp_entry_gsharedvt, iargs);

		if (!MONO_TYPE_IS_VOID (sig->ret))
			EMIT_NEW_VARLOAD (cfg, ins, ret_var, sig->ret);
		else
			ins = NULL;
	} else {
		/* Obtain the interp entry function */
		ftndesc = emit_get_rgctx_method (cfg, -1, cfg->method, MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY);

		/* Call it */
		iargs = g_newa (MonoInst*, sig->param_count + 1);
		for (int i = 0; i < sig->param_count + sig->hasthis; ++i)
			EMIT_NEW_ARGLOAD (cfg, iargs [i], i);

		ins = mini_emit_llvmonly_calli (cfg, sig, iargs, ftndesc);
	}

	/* Do a normal return */
	if (cfg->ret) {
		emit_setret (cfg, ins);
		/*
		 * Since only bb_entry/bb_exit is emitted if interp_entry_only is set,
		 * it's possible that the return value becomes an OP_PHI node whose inputs
		 * are not emitted. Make it volatile to prevent that.
		 */
		cfg->ret->flags |= MONO_INST_VOLATILE;
	}

	MONO_INST_NEW (cfg, ins, OP_BR);
	ins->inst_target_bb = cfg->bb_exit;
	MONO_ADD_INS (cfg->cbb, ins);
	link_bblock (cfg, cfg->cbb, cfg->bb_exit);
}

typedef union _MonoOpcodeParameter {
	gint32 i32;
	gint64 i64;
	float f;
	double d;
	guchar *branch_target;
} MonoOpcodeParameter;

typedef struct _MonoOpcodeInfo {
	guint constant : 4; // private
	gint pops : 3; // public -1 means variable
	gint pushes : 3; // public -1 means variable
} MonoOpcodeInfo;

static const MonoOpcodeInfo*
mono_opcode_decode (guchar *ip, guint op_size, MonoOpcodeEnum il_op, MonoOpcodeParameter *parameter)
{
#define Push0 (0)
#define Pop0 (0)
#define Push1 (1)
#define Pop1 (1)
#define PushI (1)
#define PopI (1)
#define PushI8 (1)
#define PopI8 (1)
#define PushRef (1)
#define PopRef (1)
#define PushR4 (1)
#define PopR4 (1)
#define PushR8 (1)
#define PopR8 (1)
#define VarPush (-1)
#define VarPop (-1)

	static const MonoOpcodeInfo mono_opcode_info [ ] = {
#define OPDEF(name, str, pops, pushes, param, param_constant, a, b, c, flow) {param_constant + 1, pops, pushes },
#include "mono/cil/opcode.def"
#undef OPDEF
	};

#undef Push0
#undef Pop0
#undef Push1
#undef Pop1
#undef PushI
#undef PopI
#undef PushI8
#undef PopI8
#undef PushRef
#undef PopRef
#undef PushR4
#undef PopR4
#undef PushR8
#undef PopR8
#undef VarPush
#undef VarPop

	gint32 delta;
	guchar *next_ip = ip + op_size;

	const MonoOpcodeInfo *info = &mono_opcode_info [il_op];

	switch (mono_opcodes [il_op].argument) {
	case MonoInlineNone:
		parameter->i32 = (int)info->constant - 1;
		break;
	case MonoInlineString:
	case MonoInlineType:
	case MonoInlineField:
	case MonoInlineMethod:
	case MonoInlineTok:
	case MonoInlineSig:
	case MonoShortInlineR:
	case MonoInlineI:
		parameter->i32 = read32 (next_ip - 4);
		// FIXME check token type?
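		// next_ip points one byte past the whole instruction, so fixed-size
		// immediates are read backwards from it (here, 4 bytes at next_ip - 4).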
		break;
	case MonoShortInlineI:
		parameter->i32 = (signed char)next_ip [-1];
		break;
	case MonoInlineVar:
		parameter->i32 = read16 (next_ip - 2);
		break;
	case MonoShortInlineVar:
		parameter->i32 = next_ip [-1];
		break;
	case MonoInlineR:
	case MonoInlineI8:
		parameter->i64 = read64 (next_ip - 8);
		break;
	case MonoShortInlineBrTarget:
		delta = (signed char)next_ip [-1];
		goto branch_target;
	case MonoInlineBrTarget:
		delta = (gint32)read32 (next_ip - 4);
branch_target:
		parameter->branch_target = delta + next_ip;
		break;
	case MonoInlineSwitch: // complicated
		break;
	default:
		g_error ("%s %d %d\n", __func__, il_op, mono_opcodes [il_op].argument);
	}
	return info;
}

/*
 * mono_method_to_ir:
 *
 * Translate the .NET IL into linear IR.
 *
 * @start_bblock: if not NULL, the starting basic block, used during inlining.
 * @end_bblock: if not NULL, the ending basic block, used during inlining.
 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
 * @inline_args: if not NULL, contains the arguments to the inline call
 * @inline_offset: if not zero, the real IL offset of the inline call site.
 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
 *
 * This method is used to turn ECMA IL into Mono's internal Linear IR
 * representation. It is used both for entire methods and for
 * inlining existing methods. In the former case, the @start_bblock,
 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
 * inline_offset is set to zero.
 *
 * Returns: the inline cost, or -1 if there was an error processing this method.
 */
int
mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
		   MonoInst *return_var, MonoInst **inline_args,
		   guint inline_offset, gboolean is_virtual_call)
{
	ERROR_DECL (error);
	// Buffer to hold parameters to mono_new_array, instead of varargs.
MonoInst *array_new_localalloc_ins = NULL; MonoInst *ins, **sp, **stack_start; MonoBasicBlock *tblock = NULL; MonoBasicBlock *init_localsbb = NULL, *init_localsbb2 = NULL; MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL; MonoMethod *method_definition; MonoInst **arg_array; MonoMethodHeader *header; MonoImage *image; guint32 token, ins_flag; MonoClass *klass; MonoClass *constrained_class = NULL; gboolean save_last_error = FALSE; guchar *ip, *end, *target, *err_pos; MonoMethodSignature *sig; MonoGenericContext *generic_context = NULL; MonoGenericContainer *generic_container = NULL; MonoType **param_types; int i, n, start_new_bblock, dreg; int num_calls = 0, inline_costs = 0; guint num_args; GSList *class_inits = NULL; gboolean dont_verify, dont_verify_stloc, readonly = FALSE; int context_used; gboolean init_locals, seq_points, skip_dead_blocks; gboolean sym_seq_points = FALSE; MonoDebugMethodInfo *minfo; MonoBitSet *seq_point_locs = NULL; MonoBitSet *seq_point_set_locs = NULL; const char *ovf_exc = NULL; gboolean emitted_funccall_seq_point = FALSE; gboolean detached_before_ret = FALSE; gboolean ins_has_side_effect; if (!cfg->disable_inline) cfg->disable_inline = (method->iflags & METHOD_IMPL_ATTRIBUTE_NOOPTIMIZATION) || is_jit_optimizer_disabled (method); cfg->current_method = method; image = m_class_get_image (method->klass); /* serialization and xdomain stuff may need access to private fields and methods */ dont_verify = FALSE; dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */ dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP; dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE; /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */ dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_OTHER; dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED; dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF; header = mono_method_get_header_checked (method, cfg->error); if (!header) { mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); goto exception_exit; } else { cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header); } generic_container = mono_method_get_generic_container (method); sig = mono_method_signature_internal (method); num_args = sig->hasthis + sig->param_count; ip = (guchar*)header->code; cfg->cil_start = ip; end = ip + header->code_size; cfg->stat_cil_code_size += header->code_size; seq_points = cfg->gen_seq_points && cfg->method == method; if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) { /* We could hit a seq point before attaching to the JIT (#8338) */ seq_points = FALSE; } if (method->wrapper_type == MONO_WRAPPER_OTHER) { WrapperInfo *info = mono_marshal_get_wrapper_info (method); if (info->subtype == WRAPPER_SUBTYPE_INTERP_IN) { /* We could hit a seq point before attaching to the JIT (#8338) */ seq_points = FALSE; } } if (cfg->prof_coverage) { if (cfg->compile_aot) g_error ("Coverage profiling is not supported with AOT."); INLINE_FAILURE ("coverage profiling"); cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size); } if ((cfg->gen_sdb_seq_points && cfg->method == method) || cfg->prof_coverage) { minfo = mono_debug_lookup_method (method); if (minfo) { MonoSymSeqPoint *sps; int i, n_il_offsets; mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets); 
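			/*
			 * Roughly: seq_point_locs marks every IL offset that may receive a
			 * sequence point, while seq_point_set_locs later records the offsets
			 * where one was actually emitted; both bitsets are sized to the
			 * method's IL code.
			 */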
seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0); seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0); sym_seq_points = TRUE; for (i = 0; i < n_il_offsets; ++i) { if (sps [i].il_offset < header->code_size) mono_bitset_set_fast (seq_point_locs, sps [i].il_offset); } g_free (sps); MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method); if (asyncMethod) { for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++) { mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]); mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]); } mono_debug_free_method_async_debug_info (asyncMethod); } } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (m_class_get_image (method->klass))) { /* Methods without line number info like auto-generated property accessors */ seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0); seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0); sym_seq_points = TRUE; } } /* * Methods without init_locals set could cause asserts in various passes * (#497220). To work around this, we emit dummy initialization opcodes * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported * on some platforms. */ if (cfg->opt & MONO_OPT_UNSAFE) init_locals = header->init_locals; else init_locals = TRUE; method_definition = method; while (method_definition->is_inflated) { MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition; method_definition = imethod->declaring; } if (sig->is_inflated) generic_context = mono_method_get_context (method); else if (generic_container) generic_context = &generic_container->context; cfg->generic_context = generic_context; if (!cfg->gshared) g_assert (!sig->has_type_parameters); if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) { g_assert (method->is_inflated); g_assert (mono_method_get_context (method)->method_inst); } if (method->is_inflated && mono_method_get_context (method)->method_inst) g_assert (sig->generic_param_count); if (cfg->method == method) { cfg->real_offset = 0; } else { cfg->real_offset = inline_offset; } cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size); cfg->cil_offset_to_bb_len = header->code_size; if (cfg->verbose_level > 2) printf ("method to IR %s\n", mono_method_full_name (method, TRUE)); param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args); if (sig->hasthis) param_types [0] = m_class_is_valuetype (method->klass) ? 
m_class_get_this_arg (method->klass) : m_class_get_byval_arg (method->klass); for (n = 0; n < sig->param_count; ++n) param_types [n + sig->hasthis] = sig->params [n]; cfg->arg_types = param_types; cfg->dont_inline = g_list_prepend (cfg->dont_inline, method); if (cfg->method == method) { /* ENTRY BLOCK */ NEW_BBLOCK (cfg, start_bblock); cfg->bb_entry = start_bblock; start_bblock->cil_code = NULL; start_bblock->cil_length = 0; /* EXIT BLOCK */ NEW_BBLOCK (cfg, end_bblock); cfg->bb_exit = end_bblock; end_bblock->cil_code = NULL; end_bblock->cil_length = 0; end_bblock->flags |= BB_INDIRECT_JUMP_TARGET; g_assert (cfg->num_bblocks == 2); arg_array = cfg->args; if (header->num_clauses) { cfg->spvars = g_hash_table_new (NULL, NULL); cfg->exvars = g_hash_table_new (NULL, NULL); } cfg->clause_is_dead = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * header->num_clauses); /* handle exception clauses */ for (i = 0; i < header->num_clauses; ++i) { MonoBasicBlock *try_bb; MonoExceptionClause *clause = &header->clauses [i]; GET_BBLOCK (cfg, try_bb, ip + clause->try_offset); try_bb->real_offset = clause->try_offset; try_bb->try_start = TRUE; GET_BBLOCK (cfg, tblock, ip + clause->handler_offset); tblock->real_offset = clause->handler_offset; tblock->flags |= BB_EXCEPTION_HANDLER; if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY) mono_create_exvar_for_offset (cfg, clause->handler_offset); /* * Linking the try block with the EH block hinders inlining as we won't be able to * merge the bblocks from inlining and produce an artificial hole for no good reason. */ if (COMPILE_LLVM (cfg)) link_bblock (cfg, try_bb, tblock); if (*(ip + clause->handler_offset) == CEE_POP) tblock->flags |= BB_EXCEPTION_DEAD_OBJ; if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY || clause->flags == MONO_EXCEPTION_CLAUSE_FILTER || clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) { MONO_INST_NEW (cfg, ins, OP_START_HANDLER); MONO_ADD_INS (tblock, ins); if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) { /* finally clauses already have a seq point */ /* seq points for filter clauses are emitted below */ NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE); MONO_ADD_INS (tblock, ins); } /* todo: is a fault block unsafe to optimize? 
 */
			if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
				tblock->flags |= BB_EXCEPTION_UNSAFE;
		}

		/*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
		  while (p < end) {
			printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
		 }*/

		/* catch and filter blocks get the exception object on the stack */
		if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
		    clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
			/* mostly like handle_stack_args (), but just sets the input args */
			/* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
			tblock->in_scount = 1;
			tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
			tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);

			cfg->cbb = tblock;

#ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
			/* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
			if (!cfg->compile_llvm) {
				MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
				ins->dreg = tblock->in_stack [0]->dreg;
				MONO_ADD_INS (tblock, ins);
			}
#else
			MonoInst *dummy_use;

			/*
			 * Add a dummy use for the exvar so its liveness info will be
			 * correct.
			 */
			EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
#endif

			if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
				NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
				MONO_ADD_INS (tblock, ins);
			}

			if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
				GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
				tblock->flags |= BB_EXCEPTION_HANDLER;
				tblock->real_offset = clause->data.filter_offset;
				tblock->in_scount = 1;
				tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
				/* The filter block shares the exvar with the handler block */
				tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
				MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
				MONO_ADD_INS (tblock, ins);
			}
		}

		if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
		    clause->data.catch_class &&
		    cfg->gshared &&
		    mono_class_check_context_used (clause->data.catch_class)) {
			/*
			 * In shared generic code with catch
			 * clauses containing type variables
			 * the exception handling code has to
			 * be able to get to the rgctx.
			 * Therefore we have to make sure that
			 * the vtable/mrgctx argument (for
			 * static or generic methods) or the
			 * "this" argument (for non-static
			 * methods) is live.
			 */
			if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
			    mini_method_get_context (method)->method_inst ||
			    m_class_is_valuetype (method->klass)) {
				mono_get_vtable_var (cfg);
			} else {
				MonoInst *dummy_use;

				EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
			}
		}
	}
} else {
	arg_array = g_newa (MonoInst*, num_args);
	cfg->cbb = start_bblock;
	cfg->args = arg_array;
	mono_save_args (cfg, sig, inline_args);
}

if (cfg->method == method && cfg->self_init && cfg->compile_aot && !COMPILE_LLVM (cfg)) {
	MonoMethod *wrapper;
	MonoInst *args [2];
	int idx;

	/*
	 * Emit code to initialize this method by calling the init wrapper emitted by LLVM.
	 * This is not efficient right now, but it's only used for the methods which fail
	 * LLVM compilation.
* FIXME: Optimize this */ g_assert (!cfg->gshared); wrapper = mono_marshal_get_aot_init_wrapper (AOT_INIT_METHOD); /* Emit this into the entry bb so it comes before the GC safe point which depends on an inited GOT */ cfg->cbb = cfg->bb_entry; idx = mono_aot_get_method_index (cfg->method); EMIT_NEW_ICONST (cfg, args [0], idx); /* Dummy */ EMIT_NEW_ICONST (cfg, args [1], 0); mono_emit_method_call (cfg, wrapper, args, NULL); } if (cfg->llvm_only && cfg->interp && cfg->method == method && !cfg->deopt) { if (header->num_clauses) { for (int i = 0; i < header->num_clauses; ++i) { MonoExceptionClause *clause = &header->clauses [i]; /* Finally clauses are checked after the remove_finally pass */ if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) cfg->interp_entry_only = TRUE; } } } /* we use a separate basic block for the initialization code */ NEW_BBLOCK (cfg, init_localsbb); if (cfg->method == method) cfg->bb_init = init_localsbb; init_localsbb->real_offset = cfg->real_offset; start_bblock->next_bb = init_localsbb; link_bblock (cfg, start_bblock, init_localsbb); init_localsbb2 = init_localsbb; cfg->cbb = init_localsbb; if (cfg->gsharedvt && cfg->method == method) { MonoGSharedVtMethodInfo *info; MonoInst *var, *locals_var; int dreg; info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo)); info->method = cfg->method; info->count_entries = 16; info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries); cfg->gsharedvt_info = info; var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); /* prevent it from being register allocated */ //var->flags |= MONO_INST_VOLATILE; cfg->gsharedvt_info_var = var; ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info); MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg); /* Allocate locals */ locals_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); /* prevent it from being register allocated */ //locals_var->flags |= MONO_INST_VOLATILE; cfg->gsharedvt_locals_var = locals_var; dreg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size)); MONO_INST_NEW (cfg, ins, OP_LOCALLOC); ins->dreg = locals_var->dreg; ins->sreg1 = dreg; MONO_ADD_INS (cfg->cbb, ins); cfg->gsharedvt_locals_var_ins = ins; cfg->flags |= MONO_CFG_HAS_ALLOCA; /* if (init_locals) ins->flags |= MONO_INST_INIT; */ if (cfg->llvm_only) { init_localsbb = cfg->cbb; init_localsbb2 = cfg->cbb; } } if (cfg->deopt) { /* * Push an LMFExt frame which points to a MonoMethodILState structure. 
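 * (Illustrative note: through this LMF entry the runtime can later locate the
 * frame's IL-level state, e.g. when deoptimizing the frame to the interpreter;
 * the consumer side lives outside this function.)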
*/ emit_push_lmf (cfg); /* The type doesn't matter, the llvm backend will use the correct type */ MonoInst *il_state_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); il_state_var->flags |= MONO_INST_VOLATILE; cfg->il_state_var = il_state_var; EMIT_NEW_VARLOADA (cfg, ins, cfg->il_state_var, NULL); int il_state_addr_reg = ins->dreg; /* il_state->method = method */ MonoInst *method_ins = emit_get_rgctx_method (cfg, -1, cfg->method, MONO_RGCTX_INFO_METHOD); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, il_state_addr_reg, MONO_STRUCT_OFFSET (MonoMethodILState, method), method_ins->dreg); EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL); int lmf_reg = ins->dreg; /* lmf->kind = MONO_LMFEXT_IL_STATE */ MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, lmf_reg, MONO_STRUCT_OFFSET (MonoLMFExt, kind), MONO_LMFEXT_IL_STATE); /* lmf->il_state = il_state */ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMFExt, il_state), il_state_addr_reg); /* emit_get_rgctx_method () might create new bblocks */ if (cfg->llvm_only) { init_localsbb = cfg->cbb; init_localsbb2 = cfg->cbb; } } if (cfg->llvm_only && cfg->interp && cfg->method == method) { if (cfg->interp_entry_only) emit_llvmonly_interp_entry (cfg, header); } /* FIRST CODE BLOCK */ NEW_BBLOCK (cfg, tblock); tblock->cil_code = ip; cfg->cbb = tblock; cfg->ip = ip; init_localsbb->next_bb = cfg->cbb; link_bblock (cfg, init_localsbb, cfg->cbb); ADD_BBLOCK (cfg, tblock); CHECK_CFG_EXCEPTION; if (header->code_size == 0) UNVERIFIED; if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) { ip = err_pos; UNVERIFIED; } if (cfg->method == method) { int breakpoint_id = mono_debugger_method_has_breakpoint (method); if (breakpoint_id) { MONO_INST_NEW (cfg, ins, OP_BREAK); MONO_ADD_INS (cfg->cbb, ins); } mono_debug_init_method (cfg, cfg->cbb, breakpoint_id); } for (n = 0; n < header->num_locals; ++n) { if (header->locals [n]->type == MONO_TYPE_VOID && !m_type_is_byref (header->locals [n])) UNVERIFIED; } class_inits = NULL; /* We force the vtable variable here for all shared methods for the possibility that they might show up in a stack trace where their exact instantiation is needed. */ if (cfg->gshared && method == cfg->method) { if ((method->flags & METHOD_ATTRIBUTE_STATIC) || mini_method_get_context (method)->method_inst || m_class_is_valuetype (method->klass)) { mono_get_vtable_var (cfg); } else { /* FIXME: Is there a better way to do this? We need the variable live for the duration of the whole method. 
*/ cfg->args [0]->flags |= MONO_INST_VOLATILE; } } /* add a check for this != NULL to inlined methods */ if (is_virtual_call) { MonoInst *arg_ins; // // This is just a hack to avoid checks in empty methods which could get inlined // into finally clauses preventing the removal of empty finally clauses, since all // variables in finally clauses are marked volatile so the check can't be removed // if (!(cfg->llvm_only && m_class_is_valuetype (method->klass) && header->code_size == 1 && header->code [0] == CEE_RET)) { NEW_ARGLOAD (cfg, arg_ins, 0); MONO_ADD_INS (cfg->cbb, arg_ins); MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg); } } skip_dead_blocks = !dont_verify; if (skip_dead_blocks) { original_bb = bb = mono_basic_block_split (method, cfg->error, header); CHECK_CFG_ERROR; g_assert (bb); } /* we use a spare stack slot in SWITCH and NEWOBJ and others */ stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1)); ins_flag = 0; start_new_bblock = 0; MonoOpcodeEnum il_op; il_op = MonoOpcodeEnum_Invalid; emit_set_deopt_il_offset (cfg, ip - cfg->cil_start); for (guchar *next_ip = ip; ip < end; ip = next_ip) { MonoOpcodeEnum previous_il_op = il_op; const guchar *tmp_ip = ip; const int op_size = mono_opcode_value_and_size (&tmp_ip, end, &il_op); CHECK_OPSIZE (op_size); next_ip += op_size; if (cfg->method == method) cfg->real_offset = ip - header->code; else cfg->real_offset = inline_offset; cfg->ip = ip; context_used = 0; if (start_new_bblock) { cfg->cbb->cil_length = ip - cfg->cbb->cil_code; if (start_new_bblock == 2) { g_assert (ip == tblock->cil_code); } else { GET_BBLOCK (cfg, tblock, ip); } cfg->cbb->next_bb = tblock; cfg->cbb = tblock; start_new_bblock = 0; for (i = 0; i < cfg->cbb->in_scount; ++i) { if (cfg->verbose_level > 3) printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0); EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0); *sp++ = ins; } if (class_inits) g_slist_free (class_inits); class_inits = NULL; emit_set_deopt_il_offset (cfg, ip - cfg->cil_start); } else { if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) { link_bblock (cfg, cfg->cbb, tblock); if (sp != stack_start) { handle_stack_args (cfg, stack_start, sp - stack_start); sp = stack_start; CHECK_UNVERIFIABLE (cfg); } cfg->cbb->next_bb = tblock; cfg->cbb = tblock; for (i = 0; i < cfg->cbb->in_scount; ++i) { if (cfg->verbose_level > 3) printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0); EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0); *sp++ = ins; } g_slist_free (class_inits); class_inits = NULL; emit_set_deopt_il_offset (cfg, ip - cfg->cil_start); } } /* * Methods with AggressiveInline flag could be inlined even if the class has a cctor. * This might create a branch so emit it in the first code bblock instead of into initlocals_bb. 
 */
		if (ip - header->code == 0 && cfg->method != method && cfg->compile_aot && (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && mono_class_needs_cctor_run (method->klass, method)) {
			emit_class_init (cfg, method->klass);
		}

		if (skip_dead_blocks) {
			int ip_offset = ip - header->code;

			if (ip_offset == bb->end)
				bb = bb->next;

			if (bb->dead) {
				g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/

				if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);

				if (ip_offset + op_size == bb->end) {
					MONO_INST_NEW (cfg, ins, OP_NOP);
					MONO_ADD_INS (cfg->cbb, ins);
					start_new_bblock = 1;
				}
				continue;
			}
		}
		/*
		 * Sequence points are points where the debugger can place a breakpoint.
		 * Currently, we generate these automatically at points where the IL
		 * stack is empty.
		 */
		if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
			/*
			 * Make methods interruptable at the beginning, and at the targets of
			 * backward branches.
			 * Also, do this at the start of every bblock in methods with clauses too,
			 * to be able to handle instructions with imprecise control flow like
			 * throw/endfinally.
			 * Backward branches are handled at the end of method-to-ir ().
			 */
			gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
			gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);

			/* Avoid sequence points on empty IL like .volatile */
			// FIXME: Enable this
			//if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
			NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
			if ((sp != stack_start) && !sym_seq_point)
				ins->flags |= MONO_INST_NONEMPTY_STACK;
			MONO_ADD_INS (cfg->cbb, ins);

			if (sym_seq_points)
				mono_bitset_set_fast (seq_point_set_locs, ip - header->code);

			if (cfg->prof_coverage) {
				guint32 cil_offset = ip - header->code;
				gpointer counter = &cfg->coverage_info->data [cil_offset].count;
				cfg->coverage_info->data [cil_offset].cil_code = ip;

				if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) {
					MonoInst *one_ins, *load_ins;

					EMIT_NEW_PCONST (cfg, load_ins, counter);
					EMIT_NEW_ICONST (cfg, one_ins, 1);
					MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4);
					ins->dreg = mono_alloc_ireg (cfg);
					ins->inst_basereg = load_ins->dreg;
					ins->inst_offset = 0;
					ins->sreg2 = one_ins->dreg;
					ins->type = STACK_I4;
					MONO_ADD_INS (cfg->cbb, ins);
				} else {
					EMIT_NEW_PCONST (cfg, ins, counter);
					MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
				}
			}
		}

		cfg->cbb->real_offset = cfg->real_offset;

		if (cfg->verbose_level > 3)
			printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));

		/*
		 * This is used to compute BB_HAS_SIDE_EFFECTS, which is used for the elimination of
		 * foreach finally clauses, so only IL opcodes which occur in such clauses
		 * need to set this.
		 */
		ins_has_side_effect = TRUE;

		// Variables shared by CEE_CALLI CEE_CALL CEE_CALLVIRT CEE_JMP.
		// Initialize to either what they all need or zero.
		gboolean emit_widen = TRUE;
		gboolean tailcall = FALSE;
		gboolean common_call = FALSE;
		MonoInst *keep_this_alive = NULL;
		MonoMethod *cmethod = NULL;
		MonoMethodSignature *fsig = NULL;

		// These are used only in CALL/CALLVIRT but must be initialized also for CALLI,
		// since it jumps into CALL/CALLVIRT.
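		// (The CEE_CALLI handler exits via `goto calli_end', which lands in the
		// shared call epilogue that consumes these.)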
gboolean need_seq_point = FALSE; gboolean push_res = TRUE; gboolean skip_ret = FALSE; gboolean tailcall_remove_ret = FALSE; // FIXME split 500 lines load/store field into separate file/function. MonoOpcodeParameter parameter; const MonoOpcodeInfo* info = mono_opcode_decode (ip, op_size, il_op, &parameter); g_assert (info); n = parameter.i32; token = parameter.i32; target = parameter.branch_target; // Check stack size for push/pop except variable cases -- -1 like call/ret/newobj. const int pushes = info->pushes; const int pops = info->pops; if (pushes >= 0 && pops >= 0) { g_assert (pushes - pops <= 1); if (pushes - pops == 1) CHECK_STACK_OVF (); } if (pops >= 0) CHECK_STACK (pops); switch (il_op) { case MONO_CEE_NOP: if (seq_points && !sym_seq_points && sp != stack_start) { /* * The C# compiler uses these nops to notify the JIT that it should * insert seq points. */ NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE); MONO_ADD_INS (cfg->cbb, ins); } if (cfg->keep_cil_nops) MONO_INST_NEW (cfg, ins, OP_HARD_NOP); else MONO_INST_NEW (cfg, ins, OP_NOP); MONO_ADD_INS (cfg->cbb, ins); emitted_funccall_seq_point = FALSE; ins_has_side_effect = FALSE; break; case MONO_CEE_BREAK: if (mini_should_insert_breakpoint (cfg->method)) { ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL); } else { MONO_INST_NEW (cfg, ins, OP_NOP); MONO_ADD_INS (cfg->cbb, ins); } break; case MONO_CEE_LDARG_0: case MONO_CEE_LDARG_1: case MONO_CEE_LDARG_2: case MONO_CEE_LDARG_3: case MONO_CEE_LDARG_S: case MONO_CEE_LDARG: CHECK_ARG (n); if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, cfg->arg_types[n])) { EMIT_NEW_ARGLOADA (cfg, ins, n); } else { EMIT_NEW_ARGLOAD (cfg, ins, n); } *sp++ = ins; break; case MONO_CEE_LDLOC_0: case MONO_CEE_LDLOC_1: case MONO_CEE_LDLOC_2: case MONO_CEE_LDLOC_3: case MONO_CEE_LDLOC_S: case MONO_CEE_LDLOC: CHECK_LOCAL (n); if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, header->locals[n])) { EMIT_NEW_LOCLOADA (cfg, ins, n); } else { EMIT_NEW_LOCLOAD (cfg, ins, n); } *sp++ = ins; break; case MONO_CEE_STLOC_0: case MONO_CEE_STLOC_1: case MONO_CEE_STLOC_2: case MONO_CEE_STLOC_3: case MONO_CEE_STLOC_S: case MONO_CEE_STLOC: CHECK_LOCAL (n); --sp; *sp = convert_value (cfg, header->locals [n], *sp); if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp)) UNVERIFIED; emit_stloc_ir (cfg, sp, header, n); inline_costs += 1; break; case MONO_CEE_LDARGA_S: case MONO_CEE_LDARGA: CHECK_ARG (n); NEW_ARGLOADA (cfg, ins, n); MONO_ADD_INS (cfg->cbb, ins); *sp++ = ins; break; case MONO_CEE_STARG_S: case MONO_CEE_STARG: --sp; CHECK_ARG (n); *sp = convert_value (cfg, param_types [n], *sp); if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp)) UNVERIFIED; emit_starg_ir (cfg, sp, n); break; case MONO_CEE_LDLOCA: case MONO_CEE_LDLOCA_S: { guchar *tmp_ip; CHECK_LOCAL (n); if ((tmp_ip = emit_optimized_ldloca_ir (cfg, next_ip, end, n))) { next_ip = tmp_ip; il_op = MONO_CEE_INITOBJ; inline_costs += 1; break; } ins_has_side_effect = FALSE; EMIT_NEW_LOCLOADA (cfg, ins, n); *sp++ = ins; break; } case MONO_CEE_LDNULL: EMIT_NEW_PCONST (cfg, ins, NULL); ins->type = STACK_OBJ; *sp++ = ins; break; case MONO_CEE_LDC_I4_M1: case MONO_CEE_LDC_I4_0: case MONO_CEE_LDC_I4_1: case MONO_CEE_LDC_I4_2: case MONO_CEE_LDC_I4_3: case MONO_CEE_LDC_I4_4: case MONO_CEE_LDC_I4_5: case MONO_CEE_LDC_I4_6: case MONO_CEE_LDC_I4_7: case MONO_CEE_LDC_I4_8: case MONO_CEE_LDC_I4_S: case MONO_CEE_LDC_I4: EMIT_NEW_ICONST (cfg, ins, n); *sp++ = ins; 
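			/* Both the implicit LDC_I4_<x> forms and the operand-carrying forms arrive
			   here with the constant already in `n': mono_opcode_decode () either folded
			   it in from the per-opcode constant table (MonoInlineNone case) or read the
			   operand bytes. */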
break; case MONO_CEE_LDC_I8: MONO_INST_NEW (cfg, ins, OP_I8CONST); ins->type = STACK_I8; ins->dreg = alloc_dreg (cfg, STACK_I8); ins->inst_l = parameter.i64; MONO_ADD_INS (cfg->cbb, ins); *sp++ = ins; break; case MONO_CEE_LDC_R4: { float *f; gboolean use_aotconst = FALSE; #ifdef TARGET_POWERPC /* FIXME: Clean this up */ if (cfg->compile_aot) use_aotconst = TRUE; #endif /* FIXME: we should really allocate this only late in the compilation process */ f = (float *)mono_mem_manager_alloc (cfg->mem_manager, sizeof (float)); if (use_aotconst) { MonoInst *cons; int dreg; EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f); dreg = alloc_freg (cfg); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0); ins->type = cfg->r4_stack_type; } else { MONO_INST_NEW (cfg, ins, OP_R4CONST); ins->type = cfg->r4_stack_type; ins->dreg = alloc_dreg (cfg, STACK_R8); ins->inst_p0 = f; MONO_ADD_INS (cfg->cbb, ins); } *f = parameter.f; *sp++ = ins; break; } case MONO_CEE_LDC_R8: { double *d; gboolean use_aotconst = FALSE; #ifdef TARGET_POWERPC /* FIXME: Clean this up */ if (cfg->compile_aot) use_aotconst = TRUE; #endif /* FIXME: we should really allocate this only late in the compilation process */ d = (double *)mono_mem_manager_alloc (cfg->mem_manager, sizeof (double)); if (use_aotconst) { MonoInst *cons; int dreg; EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d); dreg = alloc_freg (cfg); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0); ins->type = STACK_R8; } else { MONO_INST_NEW (cfg, ins, OP_R8CONST); ins->type = STACK_R8; ins->dreg = alloc_dreg (cfg, STACK_R8); ins->inst_p0 = d; MONO_ADD_INS (cfg->cbb, ins); } *d = parameter.d; *sp++ = ins; break; } case MONO_CEE_DUP: { MonoInst *temp, *store; MonoClass *klass; sp--; ins = *sp; klass = ins->klass; temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL); EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins); EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0); ins->klass = klass; *sp++ = ins; EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0); ins->klass = klass; *sp++ = ins; inline_costs += 2; break; } case MONO_CEE_POP: --sp; #ifdef TARGET_X86 if (sp [0]->type == STACK_R8) /* we need to pop the value from the x86 FP stack */ MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg); #endif break; case MONO_CEE_JMP: { MonoCallInst *call; int i, n; INLINE_FAILURE ("jmp"); GSHAREDVT_FAILURE (il_op); if (stack_start != sp) UNVERIFIED; /* FIXME: check the signature matches */ cmethod = mini_get_method (cfg, method, token, NULL, generic_context); CHECK_CFG_ERROR; if (cfg->gshared && mono_method_check_context_used (cmethod)) GENERIC_SHARING_FAILURE (CEE_JMP); mini_profiler_emit_tail_call (cfg, cmethod); fsig = mono_method_signature_internal (cmethod); n = fsig->param_count + fsig->hasthis; if (cfg->llvm_only) { MonoInst **args; args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n); for (i = 0; i < n; ++i) EMIT_NEW_ARGLOAD (cfg, args [i], i); ins = mini_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL); /* * The code in mono-basic-block.c treats the rest of the code as dead, but we * have to emit a normal return since llvm expects it. 
 */
			if (cfg->ret)
				emit_setret (cfg, ins);
			MONO_INST_NEW (cfg, ins, OP_BR);
			ins->inst_target_bb = end_bblock;
			MONO_ADD_INS (cfg->cbb, ins);
			link_bblock (cfg, cfg->cbb, end_bblock);
			break;
		} else {
			/* Handle tailcalls similarly to calls */
			DISABLE_AOT (cfg);

			mini_emit_tailcall_parameters (cfg, fsig);
			MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
			call->method = cmethod;
			// FIXME Other initialization of the tailcall field occurs after
			// it is used. So this is the only "real" use and needs more attention.
			call->tailcall = TRUE;
			call->signature = fsig;
			call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
			call->inst.inst_p0 = cmethod;
			for (i = 0; i < n; ++i)
				EMIT_NEW_ARGLOAD (cfg, call->args [i], i);

			if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
				call->vret_var = cfg->vret_addr;

			mono_arch_emit_call (cfg, call);
			cfg->param_area = MAX(cfg->param_area, call->stack_usage);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
		}

		start_new_bblock = 1;
		break;
	}
	case MONO_CEE_CALLI: {
		// FIXME tail.calli is problematic because the this pointer's type
		// is not in the signature, and we cannot check for a byref valuetype.
		MonoInst *addr;
		MonoInst *callee = NULL;

		// Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT.
		common_call = TRUE; // i.e. skip_ret/push_res/seq_point logic
		cmethod = NULL;
		gboolean const inst_tailcall = G_UNLIKELY (debug_tailcall_try_all
						? (next_ip < end && next_ip [0] == CEE_RET)
						: ((ins_flag & MONO_INST_TAILCALL) != 0));
		ins = NULL;

		//GSHAREDVT_FAILURE (il_op);
		CHECK_STACK (1);
		--sp;
		addr = *sp;
		g_assert (addr);
		fsig = mini_get_signature (method, token, generic_context, cfg->error);
		CHECK_CFG_ERROR;

		if (method->dynamic && fsig->pinvoke) {
			MonoInst *args [3];

			/*
			 * This is a call through a function pointer using a pinvoke
			 * signature. Have to create a wrapper and call that instead.
			 * FIXME: This is very slow, need to create a wrapper at JIT time
			 * instead based on the signature.
			 */
			EMIT_NEW_IMAGECONST (cfg, args [0], ((MonoDynamicMethod*)method)->assembly->image);
			EMIT_NEW_PCONST (cfg, args [1], fsig);
			args [2] = addr;
			// FIXME tailcall?
			addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
		}

		if (!method->dynamic && fsig->pinvoke &&
		    !method->wrapper_type) {
			/* MONO_WRAPPER_DYNAMIC_METHOD dynamic method handled above in the
			   method->dynamic case; for other wrapper types assume the code knows
			   what it's doing and added its own GC transitions */

			gboolean skip_gc_trans = fsig->suppress_gc_transition;
			if (!skip_gc_trans) {
#if 0
				fprintf (stderr, "generating wrapper for calli in method %s with wrapper type %s\n", method->name, mono_wrapper_type_to_str (method->wrapper_type));
#endif
				/* Call the wrapper that will do the GC transition instead */
				MonoMethod *wrapper = mono_marshal_get_native_func_wrapper_indirect (method->klass, fsig, cfg->compile_aot);

				fsig = mono_method_signature_internal (wrapper);

				n = fsig->param_count - 1; /* wrapper has extra fnptr param */

				CHECK_STACK (n);

				/* move the args to allow room for 'this' in the first position */
				while (n--) {
					--sp;
					sp [1] = sp [0];
				}

				sp[0] = addr; /* n+1 args, first arg is the address of the indirect method to call */

				g_assert (!fsig->hasthis && !fsig->pinvoke);

				ins = mono_emit_method_call (cfg, wrapper, /*args*/sp, NULL);
				goto calli_end;
			}
		}

		n = fsig->param_count + fsig->hasthis;

		CHECK_STACK (n);

		//g_assert (!virtual_ || fsig->hasthis);

		sp -= n;

		if (!(cfg->method->wrapper_type && cfg->method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD) && check_call_signature (cfg, fsig, sp)) {
			if (break_on_unverified ())
				check_call_signature (cfg, fsig, sp); // Again, step through it.
			UNVERIFIED;
		}

		inline_costs += CALL_COST * MIN(10, num_calls++);

		/*
		 * Making generic calls out of gsharedvt methods.
		 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
		 * patching gshared method addresses into a gsharedvt method.
		 */
		if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
			/*
			 * We pass the address to the gsharedvt trampoline in the rgctx reg
			 */
			callee = addr;
			g_assert (addr); // Doubles as boolean after tailcall check.
		}

		inst_tailcall && is_supported_tailcall (cfg, ip, method, NULL, fsig,
					FALSE/*virtual irrelevant*/, addr != NULL, &tailcall);

		if (save_last_error)
			mono_emit_jit_icall (cfg, mono_marshal_clear_last_error, NULL);

		if (callee) {
			if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
				/* Not tested */
				GSHAREDVT_FAILURE (il_op);

			if (cfg->llvm_only)
				// FIXME:
				GSHAREDVT_FAILURE (il_op);

			addr = emit_get_rgctx_sig (cfg, context_used, fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
			ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, callee, tailcall);
			goto calli_end;
		}

		/* Prevent inlining of methods with indirect calls */
		INLINE_FAILURE ("indirect call");

		if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
			MonoJumpInfoType info_type;
			gpointer info_data;

			/*
			 * Instead of emitting an indirect call, emit a direct call
			 * with the contents of the aotconst as the patch info.
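			 * (For instance, an address that is an OP_AOTCONST carrying
			 * MONO_PATCH_INFO_ICALL_ADDR becomes the direct mini_emit_abs_call ()
			 * below, and the now-unused address instruction is nullified.)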
*/ if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) { info_type = (MonoJumpInfoType)addr->inst_c1; info_data = addr->inst_p0; } else { info_type = (MonoJumpInfoType)addr->inst_right->inst_c1; info_data = addr->inst_right->inst_left; } if (info_type == MONO_PATCH_INFO_ICALL_ADDR) { // non-JIT icall, mostly builtin, but also user-extensible tailcall = FALSE; ins = (MonoInst*)mini_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp); NULLIFY_INS (addr); goto calli_end; } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR || info_type == MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR) { tailcall = FALSE; ins = (MonoInst*)mini_emit_abs_call (cfg, info_type, info_data, fsig, sp); NULLIFY_INS (addr); goto calli_end; } } if (cfg->llvm_only && !(cfg->method->wrapper_type && cfg->method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)) ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr); else ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, NULL, tailcall); goto calli_end; } case MONO_CEE_CALL: case MONO_CEE_CALLVIRT: { MonoInst *addr; addr = NULL; int array_rank; array_rank = 0; gboolean virtual_; virtual_ = il_op == MONO_CEE_CALLVIRT; gboolean pass_imt_from_rgctx; pass_imt_from_rgctx = FALSE; MonoInst *imt_arg; imt_arg = NULL; gboolean pass_vtable; pass_vtable = FALSE; gboolean pass_mrgctx; pass_mrgctx = FALSE; MonoInst *vtable_arg; vtable_arg = NULL; gboolean check_this; check_this = FALSE; gboolean delegate_invoke; delegate_invoke = FALSE; gboolean direct_icall; direct_icall = FALSE; gboolean tailcall_calli; tailcall_calli = FALSE; gboolean noreturn; noreturn = FALSE; gboolean gshared_static_virtual; gshared_static_virtual = FALSE; #ifdef TARGET_WASM gboolean needs_stack_walk; needs_stack_walk = FALSE; #endif // Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT. common_call = FALSE; // variables to help in assertions gboolean called_is_supported_tailcall; called_is_supported_tailcall = FALSE; MonoMethod *tailcall_method; tailcall_method = NULL; MonoMethod *tailcall_cmethod; tailcall_cmethod = NULL; MonoMethodSignature *tailcall_fsig; tailcall_fsig = NULL; gboolean tailcall_virtual; tailcall_virtual = FALSE; gboolean tailcall_extra_arg; tailcall_extra_arg = FALSE; gboolean inst_tailcall; inst_tailcall = G_UNLIKELY (debug_tailcall_try_all ? 
(next_ip < end && next_ip [0] == CEE_RET) : ((ins_flag & MONO_INST_TAILCALL) != 0)); ins = NULL; /* Used to pass arguments to called functions */ HandleCallData cdata; memset (&cdata, 0, sizeof (HandleCallData)); cmethod = mini_get_method (cfg, method, token, NULL, generic_context); CHECK_CFG_ERROR; if (cfg->verbose_level > 3) printf ("cmethod = %s\n", mono_method_get_full_name (cmethod)); MonoMethod *cil_method; cil_method = cmethod; if (constrained_class) { if (m_method_is_static (cil_method) && mini_class_check_context_used (cfg, constrained_class)) { /* get_constrained_method () doesn't work on the gparams used by generic sharing */ // FIXME: Other configurations //if (!cfg->gsharedvt) // GENERIC_SHARING_FAILURE (CEE_CALL); gshared_static_virtual = TRUE; } else { cmethod = get_constrained_method (cfg, image, token, cil_method, constrained_class, generic_context); CHECK_CFG_ERROR; if (m_class_is_enumtype (constrained_class) && !strcmp (cmethod->name, "GetHashCode")) { /* Use the corresponding method from the base type to avoid boxing */ MonoType *base_type = mono_class_enum_basetype_internal (constrained_class); g_assert (base_type); constrained_class = mono_class_from_mono_type_internal (base_type); cmethod = get_method_nofail (constrained_class, cmethod->name, 0, 0); g_assert (cmethod); } } } if (!dont_verify && !cfg->skip_visibility) { MonoMethod *target_method = cil_method; if (method->is_inflated) { MonoGenericContainer *container = mono_method_get_generic_container(method_definition); MonoGenericContext *context = (container != NULL ? &container->context : NULL); target_method = mini_get_method_allow_open (method, token, NULL, context, cfg->error); CHECK_CFG_ERROR; } if (!mono_method_can_access_method (method_definition, target_method) && !mono_method_can_access_method (method, cil_method)) emit_method_access_failure (cfg, method, cil_method); } if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) { if (cfg->interp && !cfg->interp_entry_only) { /* Use the interpreter instead */ cfg->exception_message = g_strdup ("stack walk"); cfg->disable_llvm = TRUE; } #ifdef TARGET_WASM else { needs_stack_walk = TRUE; } #endif } if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT) && !gshared_static_virtual) { if (!mono_class_is_interface (method->klass)) emit_bad_image_failure (cfg, method, cil_method); else virtual_ = TRUE; } if (!m_class_is_inited (cmethod->klass)) if (!mono_class_init_internal (cmethod->klass)) TYPE_LOAD_ERROR (cmethod->klass); fsig = mono_method_signature_internal (cmethod); if (!fsig) LOAD_ERROR; if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL && mini_class_is_system_array (cmethod->klass)) { array_rank = m_class_get_rank (cmethod->klass); } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && direct_icalls_enabled (cfg, cmethod)) { direct_icall = TRUE; } else if (fsig->pinvoke) { if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) { /* * Avoid calling mono_marshal_get_native_wrapper () too early, it might call managed * callbacks on netcore. 
 */
				fsig = mono_metadata_signature_dup_mempool (cfg->mempool, fsig);
				fsig->pinvoke = FALSE;
			} else {
				MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
				fsig = mono_method_signature_internal (wrapper);
			}
		} else if (constrained_class) {
		} else {
			fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, cfg->error);
			CHECK_CFG_ERROR;
		}

		if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
			cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);

		/* See code below */
		if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) {
			MonoBasicBlock *tbb;

			GET_BBLOCK (cfg, tbb, next_ip);
			if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
				/*
				 * We want to extend the try block to cover the call, but we can't do it if the
				 * call is made directly since it's followed by an exception check.
				 */
				direct_icall = FALSE;
			}
		}

		mono_save_token_info (cfg, image, token, cil_method);

		if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code)))
			need_seq_point = TRUE;

		/* Don't support calls made using type arguments for now */
		/*
		if (cfg->gsharedvt) {
			if (mini_is_gsharedvt_signature (fsig))
				GSHAREDVT_FAILURE (il_op);
		}
		*/

		if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
			g_assert_not_reached ();

		n = fsig->param_count + fsig->hasthis;

		if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
			UNVERIFIED;

		if (!cfg->gshared)
			g_assert (!mono_method_check_context_used (cmethod));

		CHECK_STACK (n);

		//g_assert (!virtual_ || fsig->hasthis);

		sp -= n;

		if (virtual_ && cmethod && sp [0] && sp [0]->opcode == OP_TYPED_OBJREF) {
			ERROR_DECL (error);

			MonoMethod *new_cmethod = mono_class_get_virtual_method (sp [0]->klass, cmethod, error);
			if (is_ok (error)) {
				cmethod = new_cmethod;
				virtual_ = FALSE;
			} else {
				mono_error_cleanup (error);
			}
		}

		if (cmethod && method_does_not_return (cmethod)) {
			cfg->cbb->out_of_line = TRUE;
			noreturn = TRUE;
		}

		cdata.method = method;
		cdata.inst_tailcall = inst_tailcall;

		/*
		 * We have the `constrained.' prefix opcode.
		 */
		if (constrained_class) {
			ins = handle_constrained_call (cfg, cmethod, fsig, constrained_class, sp, &cdata, &cmethod, &virtual_, &emit_widen);
			CHECK_CFG_EXCEPTION;
			if (!gshared_static_virtual)
				constrained_class = NULL;
			if (ins)
				goto call_end;
		}

		for (int i = 0; i < fsig->param_count; ++i)
			sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]);

		if (check_call_signature (cfg, fsig, sp)) {
			if (break_on_unverified ())
				check_call_signature (cfg, fsig, sp); // Again, step through it.
			UNVERIFIED;
		}

		if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
			delegate_invoke = TRUE;

		/*
		 * Implement a workaround for the inherent races involved in locking:
		 * Monitor.Enter ()
		 * try {
		 * } finally {
		 * Monitor.Exit ()
		 * }
		 * If a thread abort happens between the call to Monitor.Enter () and the start of the
		 * try block, the Exit () won't be executed, see:
		 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
		 * To work around this, we extend such try blocks to include the last x bytes
		 * of the Monitor.Enter () call.
		 */
		if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) {
			MonoBasicBlock *tbb;

			GET_BBLOCK (cfg, tbb, next_ip);
			/*
			 * Only extend try blocks with a finally, to avoid catching exceptions thrown
			 * from Monitor.Enter like ArgumentNullException.
			 */
			if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
				/* Mark this bblock as needing to be extended */
				tbb->extend_try_block = TRUE;
			}
		}

		/* Conversion to a JIT intrinsic */
		gboolean ins_type_initialized;
		if ((ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp, &ins_type_initialized))) {
			if (!MONO_TYPE_IS_VOID (fsig->ret)) {
				if (!ins_type_initialized)
					mini_type_to_eval_stack_type ((cfg), fsig->ret, ins);
				emit_widen = FALSE;
			}
			// FIXME This is only missed if in fact the intrinsic involves a call.
			if (inst_tailcall) // FIXME
				mono_tailcall_print ("missed tailcall intrins %s -> %s\n", method->name, cmethod->name);
			goto call_end;
		}
		CHECK_CFG_ERROR;

		/*
		 * If the callee is a shared method, then its static cctor
		 * might not get called after the call was patched.
		 */
		if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
			emit_class_init (cfg, cmethod->klass);
			CHECK_TYPELOAD (cmethod->klass);
		}

		/* Inlining */
		if ((cfg->opt & MONO_OPT_INLINE) && !inst_tailcall &&
		    (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
		    mono_method_check_inlining (cfg, cmethod)) {
			int costs;
			gboolean always = FALSE;
			gboolean is_empty = FALSE;

			if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) {
				/* Prevent inlining of methods that call wrappers */
				INLINE_FAILURE ("wrapper call");
				// FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
				// Neither pinvoke nor icall are likely to be tailcalled.
				cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
				always = TRUE;
			}

			costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &is_empty);
			if (costs) {
				cfg->real_offset += 5;

				if (!MONO_TYPE_IS_VOID (fsig->ret))
					/* *sp is already set by inline_method */
					ins = *sp;

				inline_costs += costs;
				// FIXME This is missed if the inlinee contains tail calls that
				// would work, but not once inlined into the caller.
				// This matchingness could be a factor in inlining.
				// i.e. Do not inline if it hurts tailcall, do inline
				// if it helps and/or is neutral, and helps performance
				// using usual heuristics.
				// Note that inlining will expose multiple tailcall opportunities
				// so the tradeoff is not obvious. If we can tailcall anything
				// like desktop, then this factor mostly falls away, except
				// that inlining can affect tailcall performance due to
				// signature match/mismatch.
if (inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall inline %s -> %s\n", method->name, cmethod->name); if (is_empty) ins_has_side_effect = FALSE; goto call_end; } } check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx); if (cfg->gshared) { MonoGenericContext *cmethod_context = mono_method_get_context (cmethod); context_used = mini_method_check_context_used (cfg, cmethod); if (!context_used && gshared_static_virtual) context_used = mini_class_check_context_used (cfg, constrained_class); if (context_used && mono_class_is_interface (cmethod->klass) && !m_method_is_static (cmethod)) { /* Generic method interface calls are resolved via a helper function and don't need an imt. */ if (!cmethod_context || !cmethod_context->method_inst) pass_imt_from_rgctx = TRUE; } /* * If a shared method calls another * shared method then the caller must * have a generic sharing context * because the magic trampoline * requires it. FIXME: We shouldn't * have to force the vtable/mrgctx * variable here. Instead there * should be a flag in the cfg to * request a generic sharing context. */ if (context_used && ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cfg->method->klass))) mono_get_vtable_var (cfg); } if (pass_vtable) { if (context_used) { vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE); } else { MonoVTable *vtable = mono_class_vtable_checked (cmethod->klass, cfg->error); CHECK_CFG_ERROR; CHECK_TYPELOAD (cmethod->klass); EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable); } } if (pass_mrgctx) { g_assert (!vtable_arg); if (!cfg->compile_aot) { /* * emit_get_rgctx_method () calls mono_class_vtable () so check * for type load errors before. */ mono_class_setup_vtable (cmethod->klass); CHECK_TYPELOAD (cmethod->klass); } vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX); if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod))) { if (virtual_) check_this = TRUE; virtual_ = FALSE; } } if (pass_imt_from_rgctx) { g_assert (!pass_vtable); imt_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD); g_assert (imt_arg); } if (check_this) MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg); /* Calling virtual generic methods */ // These temporaries help detangle "pure" computation of // inputs to is_supported_tailcall from side effects, so that // is_supported_tailcall can be computed just once. gboolean virtual_generic; virtual_generic = FALSE; gboolean virtual_generic_imt; virtual_generic_imt = FALSE; if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_METHOD_IS_FINAL (cmethod) && fsig->generic_param_count && !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) && !cfg->llvm_only) { g_assert (fsig->is_inflated); virtual_generic = TRUE; /* Prevent inlining of methods that contain indirect calls */ INLINE_FAILURE ("virtual generic call"); if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) GSHAREDVT_FAILURE (il_op); if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) { virtual_generic_imt = TRUE; g_assert (!imt_arg); if (!context_used) g_assert (cmethod->is_inflated); imt_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD); g_assert (imt_arg); virtual_ = TRUE; vtable_arg = NULL; } } // Capture some intent before computing tailcall. 
gboolean make_generic_call_out_of_gsharedvt_method; gboolean will_have_imt_arg; make_generic_call_out_of_gsharedvt_method = FALSE; will_have_imt_arg = FALSE; /* * Making generic calls out of gsharedvt methods. * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid * patching gshared method addresses into a gsharedvt method. */ if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) && !(m_class_get_rank (cmethod->klass) && m_class_get_byval_arg (cmethod->klass)->type != MONO_TYPE_SZARRAY) && (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) { make_generic_call_out_of_gsharedvt_method = TRUE; if (virtual_) { if (fsig->generic_param_count) { will_have_imt_arg = TRUE; } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) { will_have_imt_arg = TRUE; } } } /* Tail prefix / tailcall optimization */ /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests. Inlining and stack traces are not guaranteed however. */ /* FIXME: runtime generic context pointer for jumps? */ /* FIXME: handle this for generic sharing eventually */ // tailcall means "the backend can and will handle it". // inst_tailcall means the tail. prefix is present. tailcall_extra_arg = vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass); tailcall = inst_tailcall && is_supported_tailcall (cfg, ip, method, cmethod, fsig, virtual_, tailcall_extra_arg, &tailcall_calli); // Writes to imt_arg, vtable_arg, virtual_, cmethod, must not occur from here (inputs to is_supported_tailcall). // Capture values to later assert they don't change. called_is_supported_tailcall = TRUE; tailcall_method = method; tailcall_cmethod = cmethod; tailcall_fsig = fsig; tailcall_virtual = virtual_; if (virtual_generic) { if (virtual_generic_imt) { if (tailcall) { /* Prevent inlining of methods with tailcalls (the call stack would be altered) */ INLINE_FAILURE ("tailcall"); } common_call = TRUE; goto call_end; } MonoInst *this_temp, *this_arg_temp, *store; MonoInst *iargs [4]; this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL); NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]); MONO_ADD_INS (cfg->cbb, store); /* FIXME: This should be a managed pointer */ this_arg_temp = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0); iargs [1] = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD); EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0); addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs); EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0); ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL); if (inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall virtual generic %s -> %s\n", method->name, cmethod->name); goto call_end; } CHECK_CFG_ERROR; /* Tail recursion elimination */ if (((cfg->opt & MONO_OPT_TAILCALL) || inst_tailcall) && il_op == MONO_CEE_CALL && cmethod == method && next_ip < end && next_ip [0] == CEE_RET && !vtable_arg) { gboolean has_vtargs = FALSE; int i; /* Prevent inlining of methods with tailcalls (the call stack would be altered) */ INLINE_FAILURE ("tailcall"); /* keep it simple */ for (i = fsig->param_count - 1; !has_vtargs && i >= 0; i--) has_vtargs = MONO_TYPE_ISSTRUCT (mono_method_signature_internal (cmethod)->params [i]); if (!has_vtargs) { if (need_seq_point) { 
					emit_seq_point (cfg, method, ip, FALSE, TRUE);
					need_seq_point = FALSE;
				}
				for (i = 0; i < n; ++i)
					EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);

				mini_profiler_emit_tail_call (cfg, cmethod);

				MONO_INST_NEW (cfg, ins, OP_BR);
				MONO_ADD_INS (cfg->cbb, ins);
				tblock = start_bblock->out_bb [0];
				link_bblock (cfg, cfg->cbb, tblock);
				ins->inst_target_bb = tblock;
				start_new_bblock = 1;

				/* skip the CEE_RET, too */
				if (ip_in_bb (cfg, cfg->cbb, next_ip))
					skip_ret = TRUE;
				push_res = FALSE;
				need_seq_point = FALSE;
				goto call_end;
			}
		}

		inline_costs += CALL_COST * MIN(10, num_calls++);

		/*
		 * Synchronized wrappers.
		 * It's hard to determine where to replace a method with its synchronized
		 * wrapper without causing an infinite recursion. The current solution is
		 * to add the synchronized wrapper in the trampolines, and to
		 * change the called method to a dummy wrapper, and resolve that wrapper
		 * to the real method in mono_jit_compile_method ().
		 */
		if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
			MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
			if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig)) {
				// FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
				cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
			}
		}

		/*
		 * Making generic calls out of gsharedvt methods.
		 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
		 * patching gshared method addresses into a gsharedvt method.
		 */
		if (make_generic_call_out_of_gsharedvt_method) {
			if (virtual_) {
				//if (mono_class_is_interface (cmethod->klass))
				//GSHAREDVT_FAILURE (il_op);
				// disable for possible remoting calls
				if (fsig->hasthis && method->klass == mono_defaults.object_class)
					GSHAREDVT_FAILURE (il_op);
				if (fsig->generic_param_count) {
					/* virtual generic call */
					g_assert (!imt_arg);
					g_assert (will_have_imt_arg);
					/* Same as the virtual generic case above */
					imt_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
					g_assert (imt_arg);
				} else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
					/* This can happen when we call a fully instantiated iface method */
					g_assert (will_have_imt_arg);
					imt_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
					g_assert (imt_arg);
				}
				/* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
				vtable_arg = NULL;
			}

			if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
				keep_this_alive = sp [0];

			MonoRgctxInfoType info_type;
			if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
				info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
			else
				info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
			addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);

			if (cfg->llvm_only) {
				// FIXME: Avoid initializing vtable_arg
				ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
				if (inst_tailcall) // FIXME
					mono_tailcall_print ("missed tailcall llvmonly gsharedvt %s -> %s\n", method->name, cmethod->name);
			} else {
				tailcall = tailcall_calli;
				ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall);
				tailcall_remove_ret |= tailcall;
			}
			goto call_end;
		}

		/* Generic sharing */

		/*
		 * Calls to generic methods from shared code cannot go through the trampoline infrastructure
		 * in some cases, because the called method might end up being different on every
call. * Load the called method address from the rgctx and do an indirect call in these cases. * Use this if the callee is gsharedvt sharable too, since * at runtime we might find an instantiation so the call cannot * be patched (the 'no_patch' code path in mini-trampolines.c). */ gboolean gshared_indirect; gshared_indirect = context_used && !imt_arg && !array_rank && !delegate_invoke; if (gshared_indirect) gshared_indirect = (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) || !mono_class_generic_sharing_enabled (cmethod->klass) || gshared_static_virtual); if (gshared_indirect) gshared_indirect = (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)); if (gshared_indirect) { INLINE_FAILURE ("gshared"); g_assert (cfg->gshared && cmethod); g_assert (!addr); if (fsig->hasthis) MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg); if (cfg->llvm_only) { if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) { /* Handled in handle_constrained_gsharedvt_call () */ g_assert (!gshared_static_virtual); addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER); } else { if (gshared_static_virtual) addr = emit_get_rgctx_virt_method (cfg, -1, constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE); else addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_FTNDESC); } // FIXME: Avoid initializing imt_arg/vtable_arg ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr); if (inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall context_used_llvmonly %s -> %s\n", method->name, cmethod->name); } else { if (gshared_static_virtual) { /* * cmethod is a static interface method, the actual called method at runtime * needs to be computed using constrained_class and cmethod. */ addr = emit_get_rgctx_virt_method (cfg, -1, constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE); } else { addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE); } if (inst_tailcall) mono_tailcall_print ("%s tailcall_calli#2 %s -> %s\n", tailcall_calli ? 
"making" : "missed", method->name, cmethod->name); tailcall = tailcall_calli; ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall); tailcall_remove_ret |= tailcall; } goto call_end; } /* Direct calls to icalls */ if (direct_icall) { MonoMethod *wrapper; int costs; /* Inline the wrapper */ wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot); costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE, NULL); g_assert (costs > 0); cfg->real_offset += 5; if (!MONO_TYPE_IS_VOID (fsig->ret)) /* *sp is already set by inline_method */ ins = *sp; inline_costs += costs; if (inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall direct_icall %s -> %s\n", method->name, cmethod->name); goto call_end; } /* Array methods */ if (array_rank) { MonoInst *addr; if (strcmp (cmethod->name, "Set") == 0) { /* array Set */ MonoInst *val = sp [fsig->param_count]; if (val->type == STACK_OBJ) { MonoInst *iargs [ ] = { sp [0], val }; mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs); } addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE); if (!mini_debug_options.weak_memory_model && val->type == STACK_OBJ) mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg); if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val)) mini_emit_write_barrier (cfg, addr, val); if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass)) GSHAREDVT_FAILURE (il_op); } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */ addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE); EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0); } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */ if (!m_class_is_valuetype (m_class_get_element_class (cmethod->klass)) && !readonly) mini_emit_check_array_type (cfg, sp [0], cmethod->klass); CHECK_TYPELOAD (cmethod->klass); readonly = FALSE; addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE); ins = addr; } else { g_assert_not_reached (); } emit_widen = FALSE; if (inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall array_rank %s -> %s\n", method->name, cmethod->name); goto call_end; } ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL); if (ins) { if (inst_tailcall) // FIXME mono_tailcall_print ("missed tailcall redirect %s -> %s\n", method->name, cmethod->name); goto call_end; } /* Tail prefix / tailcall optimization */ if (tailcall) { /* Prevent inlining of methods with tailcalls (the call stack would be altered) */ INLINE_FAILURE ("tailcall"); } /* * Virtual calls in llvm-only mode. 
*/ if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) { ins = mini_emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp); goto call_end; } /* Common call */ if (!(cfg->opt & MONO_OPT_AGGRESSIVE_INLINING) && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !method_does_not_return (cmethod)) INLINE_FAILURE ("call"); common_call = TRUE; #ifdef TARGET_WASM /* Push an LMF so these frames can be enumerated during stack walks by mono_arch_unwind_frame () */ if (needs_stack_walk && !cfg->deopt) { MonoInst *method_ins; int lmf_reg; emit_push_lmf (cfg); EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL); lmf_reg = ins->dreg; /* The lmf->method field will be used to look up the MonoJitInfo for this method */ method_ins = emit_get_rgctx_method (cfg, mono_method_check_context_used (cfg->method), cfg->method, MONO_RGCTX_INFO_METHOD); EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, method), method_ins->dreg); } #endif call_end: // Check that the decision to tailcall would not have changed. g_assert (!called_is_supported_tailcall || tailcall_method == method); // FIXME? cmethod does change, weaken the assert if we weren't tailcalling anyway. // If this still fails, restructure the code, or call tailcall_supported again and assert no change. g_assert (!called_is_supported_tailcall || !tailcall || tailcall_cmethod == cmethod); g_assert (!called_is_supported_tailcall || tailcall_fsig == fsig); g_assert (!called_is_supported_tailcall || tailcall_virtual == virtual_); g_assert (!called_is_supported_tailcall || tailcall_extra_arg == (vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass))); if (common_call) // FIXME goto call_end && !common_call often skips tailcall processing. ins = mini_emit_method_call_full (cfg, cmethod, fsig, tailcall, sp, virtual_ ? sp [0] : NULL, imt_arg, vtable_arg); /* * Handle devirt of some A.B.C calls by replacing the result of A.B with a OP_TYPED_OBJREF instruction, so the .C * call can be devirtualized above. */ if (cmethod) ins = handle_call_res_devirt (cfg, cmethod, ins); #ifdef TARGET_WASM if (common_call && needs_stack_walk && !cfg->deopt) /* If an exception is thrown, the LMF is popped by a call to mini_llvmonly_pop_lmf () */ emit_pop_lmf (cfg); #endif if (noreturn) { MONO_INST_NEW (cfg, ins, OP_NOT_REACHED); MONO_ADD_INS (cfg->cbb, ins); } calli_end: if ((tailcall_remove_ret || (common_call && tailcall)) && !cfg->llvm_only) { link_bblock (cfg, cfg->cbb, end_bblock); start_new_bblock = 1; // FIXME: Eliminate unreachable epilogs /* * OP_TAILCALL has no return value, so skip the CEE_RET if it is * only reachable from this call. */ GET_BBLOCK (cfg, tblock, next_ip); if (tblock == cfg->cbb || tblock->in_count == 0) skip_ret = TRUE; push_res = FALSE; need_seq_point = FALSE; } if (ins_flag & MONO_INST_TAILCALL) mini_test_tailcall (cfg, tailcall); /* End of call, INS should contain the result of the call, if any */ if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) { g_assert (ins); if (emit_widen) *sp++ = mono_emit_widen_call_res (cfg, ins, fsig); else *sp++ = ins; } if (save_last_error) { save_last_error = FALSE; #ifdef TARGET_WIN32 // Making icalls etc could clobber the value so emit inline code // to read last error on Windows. 
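// Roughly: ins = OP_GET_LAST_ERROR (captures GetLastError () into an I4 vreg), then the
// value is handed straight to mono_marshal_set_last_error_windows before any other icall
// can clobber it.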
MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR); ins->dreg = alloc_dreg (cfg, STACK_I4); ins->type = STACK_I4; MONO_ADD_INS (cfg->cbb, ins); mono_emit_jit_icall (cfg, mono_marshal_set_last_error_windows, &ins); #else mono_emit_jit_icall (cfg, mono_marshal_set_last_error, NULL); #endif } if (keep_this_alive) { MonoInst *dummy_use; /* See mini_emit_method_call_full () */ EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive); } if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) { /* * Clang can convert these calls to tailcalls which screw up the stack * walk. This happens even when the -fno-optimize-sibling-calls * option is passed to clang. * Work around this by emitting a dummy call. */ mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL); } CHECK_CFG_EXCEPTION; if (skip_ret) { // FIXME When not followed by CEE_RET, correct behavior is to raise an exception. g_assert (next_ip [0] == CEE_RET); next_ip += 1; il_op = MonoOpcodeEnum_Invalid; // Call or ret? Unclear. } ins_flag = 0; constrained_class = NULL; if (need_seq_point) { // check if this is a nested call; if so, flag the previous call's seq point as a nested call (only for non-native methods) if (!(method->flags & METHOD_IMPL_ATTRIBUTE_NATIVE)) { if (emitted_funccall_seq_point) { if (cfg->last_seq_point) cfg->last_seq_point->flags |= MONO_INST_NESTED_CALL; } else emitted_funccall_seq_point = TRUE; } emit_seq_point (cfg, method, next_ip, FALSE, TRUE); } break; } case MONO_CEE_RET: if (!detached_before_ret) mini_profiler_emit_leave (cfg, sig->ret->type != MONO_TYPE_VOID ? sp [-1] : NULL); g_assert (!method_does_not_return (method)); if (cfg->method != method) { /* return from inlined method */ /* * If in_count == 0, that means the ret is unreachable due to * being preceded by a throw. In that case, inline_method () will * handle setting the return value * (test case: test_0_inline_throw ()). */ if (return_var && cfg->cbb->in_count) { MonoType *ret_type = mono_method_signature_internal (method)->ret; MonoInst *store; CHECK_STACK (1); --sp; *sp = convert_value (cfg, ret_type, *sp); if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp)) UNVERIFIED; //g_assert (returnvar != -1); EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp); cfg->ret_var_set = TRUE; } } else { if (cfg->lmf_var && cfg->cbb->in_count && (!cfg->llvm_only || cfg->deopt)) emit_pop_lmf (cfg); if (cfg->ret) { MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (method)->ret); if (seq_points && !sym_seq_points) { /* * Place a seq point here too even though the IL stack is not * empty, so a step over on * call <FOO> * ret * will work correctly.
*/ NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE); MONO_ADD_INS (cfg->cbb, ins); } g_assert (!return_var); CHECK_STACK (1); --sp; *sp = convert_value (cfg, ret_type, *sp); if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp)) UNVERIFIED; emit_setret (cfg, *sp); } } if (sp != stack_start) UNVERIFIED; MONO_INST_NEW (cfg, ins, OP_BR); ins->inst_target_bb = end_bblock; MONO_ADD_INS (cfg->cbb, ins); link_bblock (cfg, cfg->cbb, end_bblock); start_new_bblock = 1; break; case MONO_CEE_BR_S: MONO_INST_NEW (cfg, ins, OP_BR); GET_BBLOCK (cfg, tblock, target); link_bblock (cfg, cfg->cbb, tblock); ins->inst_target_bb = tblock; if (sp != stack_start) { handle_stack_args (cfg, stack_start, sp - stack_start); sp = stack_start; CHECK_UNVERIFIABLE (cfg); } MONO_ADD_INS (cfg->cbb, ins); start_new_bblock = 1; inline_costs += BRANCH_COST; break; case MONO_CEE_BEQ_S: case MONO_CEE_BGE_S: case MONO_CEE_BGT_S: case MONO_CEE_BLE_S: case MONO_CEE_BLT_S: case MONO_CEE_BNE_UN_S: case MONO_CEE_BGE_UN_S: case MONO_CEE_BGT_UN_S: case MONO_CEE_BLE_UN_S: case MONO_CEE_BLT_UN_S: MONO_INST_NEW (cfg, ins, il_op + BIG_BRANCH_OFFSET); ADD_BINCOND (NULL); sp = stack_start; inline_costs += BRANCH_COST; break; case MONO_CEE_BR: MONO_INST_NEW (cfg, ins, OP_BR); GET_BBLOCK (cfg, tblock, target); link_bblock (cfg, cfg->cbb, tblock); ins->inst_target_bb = tblock; if (sp != stack_start) { handle_stack_args (cfg, stack_start, sp - stack_start); sp = stack_start; CHECK_UNVERIFIABLE (cfg); } MONO_ADD_INS (cfg->cbb, ins); start_new_bblock = 1; inline_costs += BRANCH_COST; break; case MONO_CEE_BRFALSE_S: case MONO_CEE_BRTRUE_S: case MONO_CEE_BRFALSE: case MONO_CEE_BRTRUE: { MonoInst *cmp; gboolean is_true = il_op == MONO_CEE_BRTRUE_S || il_op == MONO_CEE_BRTRUE; if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8) UNVERIFIED; sp--; GET_BBLOCK (cfg, tblock, target); link_bblock (cfg, cfg->cbb, tblock); GET_BBLOCK (cfg, tblock, next_ip); link_bblock (cfg, cfg->cbb, tblock); if (sp != stack_start) { handle_stack_args (cfg, stack_start, sp - stack_start); CHECK_UNVERIFIABLE (cfg); } MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM); cmp->sreg1 = sp [0]->dreg; type_from_op (cfg, cmp, sp [0], NULL); CHECK_TYPE (cmp); #if SIZEOF_REGISTER == 4 if (cmp->opcode == OP_LCOMPARE_IMM) { /* Convert it to OP_LCOMPARE */ MONO_INST_NEW (cfg, ins, OP_I8CONST); ins->type = STACK_I8; ins->dreg = alloc_dreg (cfg, STACK_I8); ins->inst_l = 0; MONO_ADD_INS (cfg->cbb, ins); cmp->opcode = OP_LCOMPARE; cmp->sreg2 = ins->dreg; } #endif MONO_ADD_INS (cfg->cbb, cmp); MONO_INST_NEW (cfg, ins, is_true ? 
CEE_BNE_UN : CEE_BEQ); type_from_op (cfg, ins, sp [0], NULL); MONO_ADD_INS (cfg->cbb, ins); ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2); GET_BBLOCK (cfg, tblock, target); ins->inst_true_bb = tblock; GET_BBLOCK (cfg, tblock, next_ip); ins->inst_false_bb = tblock; start_new_bblock = 2; sp = stack_start; inline_costs += BRANCH_COST; break; } case MONO_CEE_BEQ: case MONO_CEE_BGE: case MONO_CEE_BGT: case MONO_CEE_BLE: case MONO_CEE_BLT: case MONO_CEE_BNE_UN: case MONO_CEE_BGE_UN: case MONO_CEE_BGT_UN: case MONO_CEE_BLE_UN: case MONO_CEE_BLT_UN: MONO_INST_NEW (cfg, ins, il_op); ADD_BINCOND (NULL); sp = stack_start; inline_costs += BRANCH_COST; break; case MONO_CEE_SWITCH: { MonoInst *src1; MonoBasicBlock **targets; MonoBasicBlock *default_bblock; MonoJumpInfoBBTable *table; int offset_reg = alloc_preg (cfg); int target_reg = alloc_preg (cfg); int table_reg = alloc_preg (cfg); int sum_reg = alloc_preg (cfg); gboolean use_op_switch; n = read32 (ip + 1); --sp; src1 = sp [0]; if ((src1->type != STACK_I4) && (src1->type != STACK_PTR)) UNVERIFIED; ip += 5; GET_BBLOCK (cfg, default_bblock, next_ip); default_bblock->flags |= BB_INDIRECT_JUMP_TARGET; targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n); for (i = 0; i < n; ++i) { GET_BBLOCK (cfg, tblock, next_ip + (gint32)read32 (ip)); targets [i] = tblock; targets [i]->flags |= BB_INDIRECT_JUMP_TARGET; ip += 4; } if (sp != stack_start) { /* * Link the current bb with the targets as well, so handle_stack_args * will set their in_stack correctly. */ link_bblock (cfg, cfg->cbb, default_bblock); for (i = 0; i < n; ++i) link_bblock (cfg, cfg->cbb, targets [i]); handle_stack_args (cfg, stack_start, sp - stack_start); sp = stack_start; CHECK_UNVERIFIABLE (cfg); /* Undo the links */ mono_unlink_bblock (cfg, cfg->cbb, default_bblock); for (i = 0; i < n; ++i) mono_unlink_bblock (cfg, cfg->cbb, targets [i]); } MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock); for (i = 0; i < n; ++i) link_bblock (cfg, cfg->cbb, targets [i]); table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable)); table->table = targets; table->table_size = n; use_op_switch = FALSE; #ifdef TARGET_ARM /* ARM implements SWITCH statements differently */ /* FIXME: Make it use the generic implementation */ if (!cfg->compile_aot) use_op_switch = TRUE; #endif if (COMPILE_LLVM (cfg)) use_op_switch = TRUE; cfg->cbb->has_jump_table = 1; if (use_op_switch) { MONO_INST_NEW (cfg, ins, OP_SWITCH); ins->sreg1 = src1->dreg; ins->inst_p0 = table; ins->inst_many_bb = targets; ins->klass = (MonoClass *)GUINT_TO_POINTER (n); MONO_ADD_INS (cfg->cbb, ins); } else { if (TARGET_SIZEOF_VOID_P == 8) MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3); else MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2); #if SIZEOF_REGISTER == 8 /* The upper word might not be zero, and we add it to a 64 bit address later */ MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg); #endif if (cfg->compile_aot) { MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH); } else { MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE); ins->inst_c1 = MONO_PATCH_INFO_SWITCH; ins->inst_p0 = table; ins->dreg = table_reg; MONO_ADD_INS (cfg->cbb, ins); } /* FIXME: Use load_memindex */ MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, 
sum_reg, 0); MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg); } start_new_bblock = 1; inline_costs += BRANCH_COST * 2; break; } case MONO_CEE_LDIND_I1: case MONO_CEE_LDIND_U1: case MONO_CEE_LDIND_I2: case MONO_CEE_LDIND_U2: case MONO_CEE_LDIND_I4: case MONO_CEE_LDIND_U4: case MONO_CEE_LDIND_I8: case MONO_CEE_LDIND_I: case MONO_CEE_LDIND_R4: case MONO_CEE_LDIND_R8: case MONO_CEE_LDIND_REF: --sp; if (!(ins_flag & MONO_INST_NONULLCHECK)) MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, FALSE); ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (ldind_to_type (il_op)), sp [0], 0, ins_flag); *sp++ = ins; ins_flag = 0; break; case MONO_CEE_STIND_REF: case MONO_CEE_STIND_I1: case MONO_CEE_STIND_I2: case MONO_CEE_STIND_I4: case MONO_CEE_STIND_I8: case MONO_CEE_STIND_R4: case MONO_CEE_STIND_R8: case MONO_CEE_STIND_I: { sp -= 2; if (il_op == MONO_CEE_STIND_REF && sp [1]->type != STACK_OBJ) { /* stind.ref must only be used with object references. */ UNVERIFIED; } if (il_op == MONO_CEE_STIND_R4 && sp [1]->type == STACK_R8) sp [1] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.single_class), sp [1]); mini_emit_memory_store (cfg, m_class_get_byval_arg (stind_to_type (il_op)), sp [0], sp [1], ins_flag); ins_flag = 0; inline_costs += 1; break; } case MONO_CEE_MUL: MONO_INST_NEW (cfg, ins, il_op); sp -= 2; ins->sreg1 = sp [0]->dreg; ins->sreg2 = sp [1]->dreg; type_from_op (cfg, ins, sp [0], sp [1]); CHECK_TYPE (ins); ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); /* Use the immediate opcodes if possible */ int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode); if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->inst_c0)) { if (imm_opcode != -1) { ins->opcode = imm_opcode; ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0); ins->sreg2 = -1; NULLIFY_INS (sp [1]); } } MONO_ADD_INS ((cfg)->cbb, (ins)); *sp++ = mono_decompose_opcode (cfg, ins); break; case MONO_CEE_ADD: case MONO_CEE_SUB: case MONO_CEE_DIV: case MONO_CEE_DIV_UN: case MONO_CEE_REM: case MONO_CEE_REM_UN: case MONO_CEE_AND: case MONO_CEE_OR: case MONO_CEE_XOR: case MONO_CEE_SHL: case MONO_CEE_SHR: case MONO_CEE_SHR_UN: { MONO_INST_NEW (cfg, ins, il_op); sp -= 2; ins->sreg1 = sp [0]->dreg; ins->sreg2 = sp [1]->dreg; type_from_op (cfg, ins, sp [0], sp [1]); CHECK_TYPE (ins); add_widen_op (cfg, ins, &sp [0], &sp [1]); ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); /* Use the immediate opcodes if possible */ int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode); if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->opcode == OP_ICONST ? 
sp [1]->inst_c0 : sp [1]->inst_l)) { if (imm_opcode != -1) { ins->opcode = imm_opcode; if (sp [1]->opcode == OP_I8CONST) { #if SIZEOF_REGISTER == 8 ins->inst_imm = sp [1]->inst_l; #else ins->inst_l = sp [1]->inst_l; #endif } else { ins->inst_imm = (gssize)(sp [1]->inst_c0); } ins->sreg2 = -1; /* Might be followed by an instruction added by add_widen_op */ if (sp [1]->next == NULL) NULLIFY_INS (sp [1]); } } MONO_ADD_INS ((cfg)->cbb, (ins)); *sp++ = mono_decompose_opcode (cfg, ins); break; } case MONO_CEE_NEG: case MONO_CEE_NOT: case MONO_CEE_CONV_I1: case MONO_CEE_CONV_I2: case MONO_CEE_CONV_I4: case MONO_CEE_CONV_R4: case MONO_CEE_CONV_R8: case MONO_CEE_CONV_U4: case MONO_CEE_CONV_I8: case MONO_CEE_CONV_U8: case MONO_CEE_CONV_OVF_I8: case MONO_CEE_CONV_OVF_U8: case MONO_CEE_CONV_R_UN: /* Special case this earlier so we have long constants in the IR */ if ((il_op == MONO_CEE_CONV_I8 || il_op == MONO_CEE_CONV_U8) && (sp [-1]->opcode == OP_ICONST)) { int data = sp [-1]->inst_c0; sp [-1]->opcode = OP_I8CONST; sp [-1]->type = STACK_I8; #if SIZEOF_REGISTER == 8 if (il_op == MONO_CEE_CONV_U8) sp [-1]->inst_c0 = (guint32)data; else sp [-1]->inst_c0 = data; #else if (il_op == MONO_CEE_CONV_U8) sp [-1]->inst_l = (guint32)data; else sp [-1]->inst_l = data; #endif sp [-1]->dreg = alloc_dreg (cfg, STACK_I8); } else { ADD_UNOP (il_op); } break; case MONO_CEE_CONV_OVF_I4: case MONO_CEE_CONV_OVF_I1: case MONO_CEE_CONV_OVF_I2: case MONO_CEE_CONV_OVF_I: case MONO_CEE_CONV_OVF_I1_UN: case MONO_CEE_CONV_OVF_I2_UN: case MONO_CEE_CONV_OVF_I4_UN: case MONO_CEE_CONV_OVF_I8_UN: case MONO_CEE_CONV_OVF_I_UN: if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) { /* floats are always signed, _UN has no effect */ ADD_UNOP (CEE_CONV_OVF_I8); if (il_op == MONO_CEE_CONV_OVF_I1_UN) ADD_UNOP (MONO_CEE_CONV_OVF_I1); else if (il_op == MONO_CEE_CONV_OVF_I2_UN) ADD_UNOP (MONO_CEE_CONV_OVF_I2); else if (il_op == MONO_CEE_CONV_OVF_I4_UN) ADD_UNOP (MONO_CEE_CONV_OVF_I4); else if (il_op == MONO_CEE_CONV_OVF_I8_UN) ; else ADD_UNOP (il_op); } else { ADD_UNOP (il_op); } break; case MONO_CEE_CONV_OVF_U1: case MONO_CEE_CONV_OVF_U2: case MONO_CEE_CONV_OVF_U4: case MONO_CEE_CONV_OVF_U: case MONO_CEE_CONV_OVF_U1_UN: case MONO_CEE_CONV_OVF_U2_UN: case MONO_CEE_CONV_OVF_U4_UN: case MONO_CEE_CONV_OVF_U8_UN: case MONO_CEE_CONV_OVF_U_UN: if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) { /* floats are always signed, _UN has no effect */ ADD_UNOP (CEE_CONV_OVF_U8); ADD_UNOP (il_op); } else { ADD_UNOP (il_op); } break; case MONO_CEE_CONV_U2: case MONO_CEE_CONV_U1: case MONO_CEE_CONV_U: case MONO_CEE_CONV_I: ADD_UNOP (il_op); CHECK_CFG_EXCEPTION; break; case MONO_CEE_ADD_OVF: case MONO_CEE_ADD_OVF_UN: case MONO_CEE_MUL_OVF: case MONO_CEE_MUL_OVF_UN: case MONO_CEE_SUB_OVF: case MONO_CEE_SUB_OVF_UN: MONO_INST_NEW (cfg, ins, il_op); sp -= 2; ins->sreg1 = sp [0]->dreg; ins->sreg2 = sp [1]->dreg; type_from_op (cfg, ins, sp [0], sp [1]); CHECK_TYPE (ins); if (ovf_exc) ins->inst_exc_name = ovf_exc; else ins->inst_exc_name = "OverflowException"; /* Have to insert a widening op */ add_widen_op (cfg, ins, &sp [0], &sp [1]); ins->dreg = alloc_dreg (cfg, (MonoStackType)(ins)->type); MONO_ADD_INS ((cfg)->cbb, ins); /* The opcode might be emulated, so need to special case this */ if (ovf_exc && mono_find_jit_opcode_emulation (ins->opcode)) { switch (ins->opcode) { case OP_IMUL_OVF_UN: /* This opcode is just a placeholder, it will be emulated also */ ins->opcode = OP_IMUL_OVF_UN_OOM; break; case OP_LMUL_OVF_UN: /* This opcode is just a 
placeholder, it will be emulated also */ ins->opcode = OP_LMUL_OVF_UN_OOM; break; default: g_assert_not_reached (); } } ovf_exc = NULL; *sp++ = mono_decompose_opcode (cfg, ins); break; case MONO_CEE_CPOBJ: GSHAREDVT_FAILURE (il_op); GSHAREDVT_FAILURE (*ip); klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); sp -= 2; mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag); ins_flag = 0; break; case MONO_CEE_LDOBJ: { int loc_index = -1; int stloc_len = 0; --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); /* Optimize the common ldobj+stloc combination */ if (next_ip < end) { switch (next_ip [0]) { case MONO_CEE_STLOC_S: CHECK_OPSIZE (7); loc_index = next_ip [1]; stloc_len = 2; break; case MONO_CEE_STLOC_0: case MONO_CEE_STLOC_1: case MONO_CEE_STLOC_2: case MONO_CEE_STLOC_3: loc_index = next_ip [0] - CEE_STLOC_0; stloc_len = 1; break; default: break; } } if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, next_ip)) { CHECK_LOCAL (loc_index); EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), sp [0]->dreg, 0); ins->dreg = cfg->locals [loc_index]->dreg; ins->flags |= ins_flag; il_op = (MonoOpcodeEnum)next_ip [0]; next_ip += stloc_len; if (ins_flag & MONO_INST_VOLATILE) { /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ); } ins_flag = 0; break; } /* Optimize the ldobj+stobj combination */ if (next_ip + 4 < end && next_ip [0] == CEE_STOBJ && ip_in_bb (cfg, cfg->cbb, next_ip) && read32 (next_ip + 1) == token) { CHECK_STACK (1); sp --; mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag); il_op = (MonoOpcodeEnum)next_ip [0]; next_ip += 5; ins_flag = 0; break; } ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (klass), sp [0], 0, ins_flag); *sp++ = ins; ins_flag = 0; inline_costs += 1; break; } case MONO_CEE_LDSTR: if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) { EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n)); ins->type = STACK_OBJ; *sp = ins; } else if (method->wrapper_type != MONO_WRAPPER_NONE) { MonoInst *iargs [1]; char *str = (char *)mono_method_get_wrapper_data (method, n); if (cfg->compile_aot) EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str); else EMIT_NEW_PCONST (cfg, iargs [0], str); *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper_internal, iargs); } else { { if (cfg->cbb->out_of_line) { MonoInst *iargs [2]; if (image == mono_defaults.corlib) { /* * Avoid relocations in AOT and save some space by using a * version of helper_ldstr specialized to mscorlib. 
*/ EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n)); *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs); } else { /* Avoid creating the string object */ EMIT_NEW_IMAGECONST (cfg, iargs [0], image); EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n)); *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs); } } else if (cfg->compile_aot) { NEW_LDSTRCONST (cfg, ins, image, n); *sp = ins; MONO_ADD_INS (cfg->cbb, ins); } else { NEW_PCONST (cfg, ins, NULL); ins->type = STACK_OBJ; ins->inst_p0 = mono_ldstr_checked (image, mono_metadata_token_index (n), cfg->error); CHECK_CFG_ERROR; if (!ins->inst_p0) OUT_OF_MEMORY_FAILURE; *sp = ins; MONO_ADD_INS (cfg->cbb, ins); } } } sp++; break; case MONO_CEE_NEWOBJ: { MonoInst *iargs [2]; MonoMethodSignature *fsig; MonoInst this_ins; MonoInst *alloc; MonoInst *vtable_arg = NULL; cmethod = mini_get_method (cfg, method, token, NULL, generic_context); CHECK_CFG_ERROR; fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, cfg->error); CHECK_CFG_ERROR; mono_save_token_info (cfg, image, token, cmethod); if (!mono_class_init_internal (cmethod->klass)) TYPE_LOAD_ERROR (cmethod->klass); context_used = mini_method_check_context_used (cfg, cmethod); if (!dont_verify && !cfg->skip_visibility) { MonoMethod *cil_method = cmethod; MonoMethod *target_method = cil_method; if (method->is_inflated) { MonoGenericContainer *container = mono_method_get_generic_container(method_definition); MonoGenericContext *context = (container != NULL ? &container->context : NULL); target_method = mini_get_method_allow_open (method, token, NULL, context, cfg->error); CHECK_CFG_ERROR; } if (!mono_method_can_access_method (method_definition, target_method) && !mono_method_can_access_method (method, cil_method)) emit_method_access_failure (cfg, method, cil_method); } if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) { emit_class_init (cfg, cmethod->klass); CHECK_TYPELOAD (cmethod->klass); } /* if (cfg->gsharedvt) { if (mini_is_gsharedvt_variable_signature (sig)) GSHAREDVT_FAILURE (il_op); } */ n = fsig->param_count; CHECK_STACK (n); /* * Generate smaller code for the common newobj <exception> instruction in * argument checking code. 
if (cfg->cbb->out_of_line && m_class_get_image (cmethod->klass) == mono_defaults.corlib && is_exception_class (cmethod->klass) && n <= 2 && ((n < 1) || (!m_type_is_byref (fsig->params [0]) && fsig->params [0]->type == MONO_TYPE_STRING)) && ((n < 2) || (!m_type_is_byref (fsig->params [1]) && fsig->params [1]->type == MONO_TYPE_STRING))) { MonoInst *iargs [3]; sp -= n; EMIT_NEW_ICONST (cfg, iargs [0], m_class_get_type_token (cmethod->klass)); switch (n) { case 0: *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs); break; case 1: iargs [1] = sp [0]; *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs); break; case 2: iargs [1] = sp [0]; iargs [2] = sp [1]; *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs); break; default: g_assert_not_reached (); } inline_costs += 5; break; } /* move the args to allow room for 'this' in the first position */ while (n--) { --sp; sp [1] = sp [0]; } for (int i = 0; i < fsig->param_count; ++i) sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]); /* check_call_signature () requires sp[0] to be set */ this_ins.type = STACK_OBJ; sp [0] = &this_ins; if (check_call_signature (cfg, fsig, sp)) UNVERIFIED; iargs [0] = NULL; if (mini_class_is_system_array (cmethod->klass)) { *sp = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD); MonoJitICallId function = MONO_JIT_ICALL_ZeroIsReserved; int rank = m_class_get_rank (cmethod->klass); int n = fsig->param_count; /* Optimize the common cases, use ctor using length for each rank (no lbound). */ if (n == rank) { switch (n) { case 1: function = MONO_JIT_ICALL_mono_array_new_1; break; case 2: function = MONO_JIT_ICALL_mono_array_new_2; break; case 3: function = MONO_JIT_ICALL_mono_array_new_3; break; case 4: function = MONO_JIT_ICALL_mono_array_new_4; break; default: break; } } /* Regular case, rank > 4 or length, lbound specified per rank. */ if (function == MONO_JIT_ICALL_ZeroIsReserved) { // FIXME Maximum value of param_count? Realistically 64. Fits in imm? if (!array_new_localalloc_ins) { MONO_INST_NEW (cfg, array_new_localalloc_ins, OP_LOCALLOC_IMM); array_new_localalloc_ins->dreg = alloc_preg (cfg); cfg->flags |= MONO_CFG_HAS_ALLOCA; MONO_ADD_INS (init_localsbb, array_new_localalloc_ins); } array_new_localalloc_ins->inst_imm = MAX (array_new_localalloc_ins->inst_imm, n * sizeof (target_mgreg_t)); int dreg = array_new_localalloc_ins->dreg; if (2 * rank == n) { /* [lbound, length, lbound, length, ...] * mono_array_new_n_icall expects a non-interleaved list of * lbounds and lengths, so deinterleave here. */ for (int l = 0; l < 2; ++l) { int src = l; int dst = l * rank; for (int r = 0; r < rank; ++r, src += 2, ++dst) { NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, dreg, dst * sizeof (target_mgreg_t), sp [src + 1]->dreg); MONO_ADD_INS (cfg->cbb, ins); } } } else { /* [length, length, length, ...] */ for (int i = 0; i < n; ++i) { NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, dreg, i * sizeof (target_mgreg_t), sp [i + 1]->dreg); MONO_ADD_INS (cfg->cbb, ins); } } EMIT_NEW_ICONST (cfg, ins, n); sp [1] = ins; EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), dreg); ins->type = STACK_PTR; sp [2] = ins; // FIXME Adjust sp by n - 3? Attempts failed.
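// General case: the coordinates were spilled into the localloc'd scratch buffer above;
// pass (method, count, buffer) to the n-dimensional allocator icall.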
function = MONO_JIT_ICALL_mono_array_new_n_icall; } alloc = mono_emit_jit_icall_id (cfg, function, sp); } else if (cmethod->string_ctor) { g_assert (!context_used); g_assert (!vtable_arg); /* we simply pass a null pointer */ EMIT_NEW_PCONST (cfg, *sp, NULL); /* now call the string ctor */ alloc = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL); } else { if (m_class_is_valuetype (cmethod->klass)) { iargs [0] = mono_compile_create_var (cfg, m_class_get_byval_arg (cmethod->klass), OP_LOCAL); mini_emit_init_rvar (cfg, iargs [0]->dreg, m_class_get_byval_arg (cmethod->klass)); EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0); alloc = NULL; /* * The code generated by mini_emit_virtual_call () expects * iargs [0] to be a boxed instance, but luckily the vcall * will be transformed into a normal call there. */ } else if (context_used) { alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used); *sp = alloc; } else { MonoVTable *vtable = NULL; if (!cfg->compile_aot) vtable = mono_class_vtable_checked (cmethod->klass, cfg->error); CHECK_CFG_ERROR; CHECK_TYPELOAD (cmethod->klass); /* * TypeInitializationExceptions thrown from the mono_runtime_class_init * call in mono_jit_runtime_invoke () can abort the finalizer thread. * As a workaround, we call class cctors before allocating objects. */ if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) { emit_class_init (cfg, cmethod->klass); if (cfg->verbose_level > 2) printf ("class %s.%s needs init call for ctor\n", m_class_get_name_space (cmethod->klass), m_class_get_name (cmethod->klass)); class_inits = g_slist_prepend (class_inits, cmethod->klass); } alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0); *sp = alloc; } CHECK_CFG_EXCEPTION; /*for handle_alloc*/ if (alloc) MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg); /* Now call the actual ctor */ int ctor_inline_costs = 0; handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &ctor_inline_costs); // don't contribute to inline_costs if ctor has [MethodImpl(MethodImplOptions.AggressiveInlining)] if (!COMPILE_LLVM(cfg) || !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING)) inline_costs += ctor_inline_costs; CHECK_CFG_EXCEPTION; } if (alloc == NULL) { /* Valuetype */ EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0); mini_type_to_eval_stack_type (cfg, m_class_get_byval_arg (ins->klass), ins); *sp++= ins; } else { *sp++ = alloc; } inline_costs += 5; if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code))) emit_seq_point (cfg, method, next_ip, FALSE, TRUE); break; } case MONO_CEE_CASTCLASS: case MONO_CEE_ISINST: { --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); if (sp [0]->type != STACK_OBJ) UNVERIFIED; MONO_INST_NEW (cfg, ins, (il_op == MONO_CEE_ISINST) ?
OP_ISINST : OP_CASTCLASS); ins->dreg = alloc_preg (cfg); ins->sreg1 = (*sp)->dreg; ins->klass = klass; ins->type = STACK_OBJ; MONO_ADD_INS (cfg->cbb, ins); CHECK_CFG_EXCEPTION; *sp++ = ins; cfg->flags |= MONO_CFG_HAS_TYPE_CHECK; break; } case MONO_CEE_UNBOX_ANY: { MonoInst *res, *addr; --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); mono_save_token_info (cfg, image, token, klass); context_used = mini_class_check_context_used (cfg, klass); if (mini_is_gsharedvt_klass (klass)) { res = handle_unbox_gsharedvt (cfg, klass, *sp); inline_costs += 2; } else if (mini_class_is_reference (klass)) { if (MONO_INS_IS_PCONST_NULL (*sp)) { EMIT_NEW_PCONST (cfg, res, NULL); res->type = STACK_OBJ; } else { MONO_INST_NEW (cfg, res, OP_CASTCLASS); res->dreg = alloc_preg (cfg); res->sreg1 = (*sp)->dreg; res->klass = klass; res->type = STACK_OBJ; MONO_ADD_INS (cfg->cbb, res); cfg->flags |= MONO_CFG_HAS_TYPE_CHECK; } } else if (mono_class_is_nullable (klass)) { res = handle_unbox_nullable (cfg, *sp, klass, context_used); } else { addr = mini_handle_unbox (cfg, klass, *sp, context_used); /* LDOBJ */ EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0); res = ins; inline_costs += 2; } *sp ++ = res; break; } case MONO_CEE_BOX: { MonoInst *val; MonoClass *enum_class; MonoMethod *has_flag; MonoMethodSignature *has_flag_sig; --sp; val = *sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); mono_save_token_info (cfg, image, token, klass); context_used = mini_class_check_context_used (cfg, klass); if (mini_class_is_reference (klass)) { *sp++ = val; break; } val = convert_value (cfg, m_class_get_byval_arg (klass), val); if (klass == mono_defaults.void_class) UNVERIFIED; if (target_type_is_incompatible (cfg, m_class_get_byval_arg (klass), val)) UNVERIFIED; /* frequent check in generic code: box (struct), brtrue */ /* * Look for: * * <push int/long ptr> * <push int/long> * box MyFlags * constrained. MyFlags * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum) * * If we find this sequence and the operand types on box and constrained * are equal, we can emit a specialized instruction sequence instead of * the very slow HasFlag () call. * This code sequence is generated by older mcs/csc, the newer one is handled in * emit_inst_for_method (). */ guint32 constrained_token; guint32 callvirt_token; if ((cfg->opt & MONO_OPT_INTRINS) && // FIXME ip_in_bb as we go? next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) && (ip = il_read_constrained (next_ip, end, &constrained_token)) && ip_in_bb (cfg, cfg->cbb, ip) && (ip = il_read_callvirt (ip, end, &callvirt_token)) && ip_in_bb (cfg, cfg->cbb, ip) && m_class_is_enumtype (klass) && (enum_class = mini_get_class (method, constrained_token, generic_context)) && (has_flag = mini_get_method (cfg, method, callvirt_token, NULL, generic_context)) && has_flag->klass == mono_defaults.enum_class && !strcmp (has_flag->name, "HasFlag") && (has_flag_sig = mono_method_signature_internal (has_flag)) && has_flag_sig->hasthis && has_flag_sig->param_count == 1) { CHECK_TYPELOAD (enum_class); if (enum_class == klass) { MonoInst *enum_this, *enum_flag; next_ip = ip; il_op = MONO_CEE_CALLVIRT; --sp; enum_this = sp [0]; enum_flag = sp [1]; *sp++ = mini_handle_enum_has_flag (cfg, klass, enum_this, -1, enum_flag); break; } } guint32 unbox_any_token; /* * Common in generic code: * box T1, unbox.any T2.
*/ if ((cfg->opt & MONO_OPT_INTRINS) && next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) && (ip = il_read_unbox_any (next_ip, end, &unbox_any_token))) { MonoClass *unbox_klass = mini_get_class (method, unbox_any_token, generic_context); CHECK_TYPELOAD (unbox_klass); if (klass == unbox_klass) { next_ip = ip; *sp++ = val; break; } } // Optimize // // box // call object::GetType() // guint32 gettype_token; if ((ip = il_read_call(next_ip, end, &gettype_token)) && ip_in_bb (cfg, cfg->cbb, ip)) { MonoMethod* gettype_method = mini_get_method (cfg, method, gettype_token, NULL, generic_context); if (!strcmp (gettype_method->name, "GetType") && gettype_method->klass == mono_defaults.object_class) { mono_class_init_internal(klass); if (mono_class_get_checked (m_class_get_image (klass), m_class_get_type_token (klass), error) == klass) { if (cfg->compile_aot) { EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, m_class_get_image (klass), m_class_get_type_token (klass), generic_context); } else { MonoType *klass_type = m_class_get_byval_arg (klass); MonoReflectionType* reflection_type = mono_type_get_object_checked (klass_type, cfg->error); EMIT_NEW_PCONST (cfg, ins, reflection_type); } ins->type = STACK_OBJ; ins->klass = mono_defaults.systemtype_class; *sp++ = ins; next_ip = ip; break; } } } // Optimize // // box // ldnull // ceq (or cgt.un) // // to just // // ldc.i4.0 (or 1) guchar* ldnull_ip; if ((ldnull_ip = il_read_op (next_ip, end, CEE_LDNULL, MONO_CEE_LDNULL)) && ip_in_bb (cfg, cfg->cbb, ldnull_ip)) { gboolean is_eq = FALSE, is_neq = FALSE; if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CEQ))) is_eq = TRUE; else if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CGT_UN))) is_neq = TRUE; if ((is_eq || is_neq) && ip_in_bb (cfg, cfg->cbb, ip) && !mono_class_is_nullable (klass) && !mini_is_gsharedvt_klass (klass)) { next_ip = ip; il_op = (MonoOpcodeEnum) (is_eq ? CEE_LDC_I4_0 : CEE_LDC_I4_1); EMIT_NEW_ICONST (cfg, ins, is_eq ? 0 : 1); ins->type = STACK_I4; *sp++ = ins; break; } } guint32 isinst_tk = 0; if ((ip = il_read_op_and_token (next_ip, end, CEE_ISINST, MONO_CEE_ISINST, &isinst_tk)) && ip_in_bb (cfg, cfg->cbb, ip)) { MonoClass *isinst_class = mini_get_class (method, isinst_tk, generic_context); if (!mono_class_is_nullable (klass) && !mono_class_is_nullable (isinst_class) && !mini_is_gsharedvt_variable_klass (klass) && !mini_is_gsharedvt_variable_klass (isinst_class) && !mono_class_is_open_constructed_type (m_class_get_byval_arg (klass)) && !mono_class_is_open_constructed_type (m_class_get_byval_arg (isinst_class))) { // Optimize // // box // isinst [Type] // brfalse/brtrue // // to // // ldc.i4.0 (or 1) // brfalse/brtrue // guchar* br_ip = NULL; if ((br_ip = il_read_brtrue (ip, end, &target)) || (br_ip = il_read_brtrue_s (ip, end, &target)) || (br_ip = il_read_brfalse (ip, end, &target)) || (br_ip = il_read_brfalse_s (ip, end, &target))) { gboolean isinst = mono_class_is_assignable_from_internal (isinst_class, klass); next_ip = ip; il_op = (MonoOpcodeEnum) (isinst ? CEE_LDC_I4_1 : CEE_LDC_I4_0); EMIT_NEW_ICONST (cfg, ins, isinst ? 
1 : 0); ins->type = STACK_I4; *sp++ = ins; break; } // Optimize // // box // isinst [Type] // ldnull // ceq/cgt.un // // to // // ldc.i4.0 (or 1) // guchar* ldnull_ip = NULL; if ((ldnull_ip = il_read_op (ip, end, CEE_LDNULL, MONO_CEE_LDNULL)) && ip_in_bb (cfg, cfg->cbb, ldnull_ip)) { gboolean is_eq = FALSE, is_neq = FALSE; if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CEQ))) is_eq = TRUE; else if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CGT_UN))) is_neq = TRUE; if ((is_eq || is_neq) && ip_in_bb (cfg, cfg->cbb, ip) && !mono_class_is_nullable (klass) && !mini_is_gsharedvt_klass (klass)) { gboolean isinst = mono_class_is_assignable_from_internal (isinst_class, klass); next_ip = ip; if (is_eq) isinst = !isinst; il_op = (MonoOpcodeEnum) (isinst ? CEE_LDC_I4_1 : CEE_LDC_I4_0); EMIT_NEW_ICONST (cfg, ins, isinst ? 1 : 0); ins->type = STACK_I4; *sp++ = ins; break; } } // Optimize // // box // isinst [Type] // unbox.any // // to // // nop // guchar* unbox_ip = NULL; guint32 unbox_token = 0; if ((unbox_ip = il_read_unbox_any (ip, end, &unbox_token)) && ip_in_bb (cfg, cfg->cbb, unbox_ip)) { MonoClass *unbox_klass = mini_get_class (method, unbox_token, generic_context); CHECK_TYPELOAD (unbox_klass); if (!mono_class_is_nullable (unbox_klass) && !mini_is_gsharedvt_klass (unbox_klass) && klass == isinst_class && klass == unbox_klass) { *sp++ = val; next_ip = unbox_ip; break; } } } } gboolean is_true; // FIXME: LLVM can't handle the inconsistent bb linking if (!mono_class_is_nullable (klass) && !mini_is_gsharedvt_klass (klass) && next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) && ( (is_true = !!(ip = il_read_brtrue (next_ip, end, &target))) || (is_true = !!(ip = il_read_brtrue_s (next_ip, end, &target))) || (ip = il_read_brfalse (next_ip, end, &target)) || (ip = il_read_brfalse_s (next_ip, end, &target)))) { int dreg; MonoBasicBlock *true_bb, *false_bb; il_op = (MonoOpcodeEnum)next_ip [0]; next_ip = ip; if (cfg->verbose_level > 3) { printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL)); printf ("<box+brtrue opt>\n"); } /* * We need to link both bblocks, since it is needed for handling stack * arguments correctly (See test_0_box_brtrue_opt_regress_81102). * Branching to only one of them would lead to inconsistencies, so * generate an ICONST+BRTRUE, the branch opts will get rid of them. */ GET_BBLOCK (cfg, true_bb, target); GET_BBLOCK (cfg, false_bb, next_ip); mono_link_bblock (cfg, cfg->cbb, true_bb); mono_link_bblock (cfg, cfg->cbb, false_bb); if (sp != stack_start) { handle_stack_args (cfg, stack_start, sp - stack_start); sp = stack_start; CHECK_UNVERIFIABLE (cfg); } if (COMPILE_LLVM (cfg)) { dreg = alloc_ireg (cfg); MONO_EMIT_NEW_ICONST (cfg, dreg, 0); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1); MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb); } else { /* The JIT can't eliminate the iconst+compare */ MONO_INST_NEW (cfg, ins, OP_BR); ins->inst_target_bb = is_true ? 
true_bb : false_bb; MONO_ADD_INS (cfg->cbb, ins); } start_new_bblock = 1; break; } if (m_class_is_enumtype (klass) && !mini_is_gsharedvt_klass (klass) && !(val->type == STACK_I8 && TARGET_SIZEOF_VOID_P == 4)) { /* Can't do this with 64 bit enums on 32 bit since the vtype decomp pass is run after the long decomp pass */ if (val->opcode == OP_ICONST) { MONO_INST_NEW (cfg, ins, OP_BOX_ICONST); ins->type = STACK_OBJ; ins->klass = klass; ins->inst_c0 = val->inst_c0; ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type); } else { MONO_INST_NEW (cfg, ins, OP_BOX); ins->type = STACK_OBJ; ins->klass = klass; ins->sreg1 = val->dreg; ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type); } MONO_ADD_INS (cfg->cbb, ins); *sp++ = ins; } else { *sp++ = mini_emit_box (cfg, val, klass, context_used); } CHECK_CFG_EXCEPTION; inline_costs += 1; break; } case MONO_CEE_UNBOX: { --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); mono_save_token_info (cfg, image, token, klass); context_used = mini_class_check_context_used (cfg, klass); if (mono_class_is_nullable (klass)) { MonoInst *val; val = handle_unbox_nullable (cfg, *sp, klass, context_used); EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), m_class_get_byval_arg (val->klass)); *sp++= ins; } else { ins = mini_handle_unbox (cfg, klass, *sp, context_used); *sp++ = ins; } inline_costs += 2; break; } case MONO_CEE_LDFLD: case MONO_CEE_LDFLDA: case MONO_CEE_STFLD: case MONO_CEE_LDSFLD: case MONO_CEE_LDSFLDA: case MONO_CEE_STSFLD: { MonoClassField *field; guint foffset; gboolean is_instance; gpointer addr = NULL; gboolean is_special_static; MonoType *ftype; MonoInst *store_val = NULL; MonoInst *thread_ins; is_instance = (il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDFLDA || il_op == MONO_CEE_STFLD); if (is_instance) { if (il_op == MONO_CEE_STFLD) { sp -= 2; store_val = sp [1]; } else { --sp; } if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8) UNVERIFIED; if (il_op != MONO_CEE_LDFLD && sp [0]->type == STACK_VTYPE) UNVERIFIED; } else { if (il_op == MONO_CEE_STSFLD) { sp--; store_val = sp [0]; } } if (method->wrapper_type != MONO_WRAPPER_NONE) { field = (MonoClassField *)mono_method_get_wrapper_data (method, token); klass = m_field_get_parent (field); } else { klass = NULL; field = mono_field_from_token_checked (image, token, &klass, generic_context, cfg->error); if (!field) CHECK_TYPELOAD (klass); CHECK_CFG_ERROR; } if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field)) FIELD_ACCESS_FAILURE (method, field); mono_class_init_internal (klass); mono_class_setup_fields (klass); ftype = mono_field_get_type_internal (field); /* * LDFLD etc. is usable on static fields as well, so convert those cases to * the static case. */ if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) { switch (il_op) { case MONO_CEE_LDFLD: il_op = MONO_CEE_LDSFLD; break; case MONO_CEE_STFLD: il_op = MONO_CEE_STSFLD; break; case MONO_CEE_LDFLDA: il_op = MONO_CEE_LDSFLDA; break; default: g_assert_not_reached (); } is_instance = FALSE; } context_used = mini_class_check_context_used (cfg, klass); if (il_op == MONO_CEE_LDSFLD) { ins = mini_emit_inst_for_field_load (cfg, field); if (ins) { *sp++ = ins; goto field_access_end; } } /* INSTANCE CASE */ if (is_instance) g_assert (field->offset); foffset = m_class_is_valuetype (klass) ?
field->offset - MONO_ABI_SIZEOF (MonoObject): field->offset; if (il_op == MONO_CEE_STFLD) { sp [1] = convert_value (cfg, field->type, sp [1]); if (target_type_is_incompatible (cfg, field->type, sp [1])) UNVERIFIED; { MonoInst *store; MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ()); if (ins_flag & MONO_INST_VOLATILE) { /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); } if (mini_is_gsharedvt_klass (klass)) { MonoInst *offset_ins; context_used = mini_class_check_context_used (cfg, klass); offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET); /* The value is offset by 1 */ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1); dreg = alloc_ireg_mp (cfg); EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg); if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) { store = mini_emit_storing_write_barrier (cfg, ins, sp [1]); } else { /* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */ EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg); } } else { if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) { /* insert call to write barrier */ MonoInst *ptr; int dreg; dreg = alloc_ireg_mp (cfg); EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset); store = mini_emit_storing_write_barrier (cfg, ptr, sp [1]); } else { EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg); } } if (sp [0]->opcode != OP_LDADDR) store->flags |= MONO_INST_FAULT; store->flags |= ins_flag; } goto field_access_end; } if (is_instance) { if (sp [0]->type == STACK_VTYPE) { MonoInst *var; /* Have to compute the address of the variable */ var = get_vreg_to_inst (cfg, sp [0]->dreg); if (!var) var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, sp [0]->dreg); else g_assert (var->klass == klass); EMIT_NEW_VARLOADA (cfg, ins, var, m_class_get_byval_arg (var->klass)); sp [0] = ins; } if (il_op == MONO_CEE_LDFLDA) { if (sp [0]->type == STACK_OBJ) { MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0); MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException"); } dreg = alloc_ireg_mp (cfg); if (mini_is_gsharedvt_klass (klass)) { MonoInst *offset_ins; offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET); /* The value is offset by 1 */ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1); EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg); } else { EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset); } ins->klass = mono_class_from_mono_type_internal (field->type); ins->type = STACK_MP; *sp++ = ins; } else { MonoInst *load; MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ()); #ifdef MONO_ARCH_SIMD_INTRINSICS if (sp [0]->opcode == OP_LDADDR && m_class_is_simd_type (klass) && cfg->opt & MONO_OPT_SIMD) { ins = mono_emit_simd_field_load (cfg, field, sp [0]); if (ins) { *sp++ = ins; goto field_access_end; } } #endif MonoInst *field_add_inst = sp [0]; if (mini_is_gsharedvt_klass (klass)) { MonoInst *offset_ins; offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET); /* The value is offset by 1 */ EMIT_NEW_BIALU_IMM (cfg, 
ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1); EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg); foffset = 0; } load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag); if (sp [0]->opcode != OP_LDADDR) load->flags |= MONO_INST_FAULT; *sp++ = load; } } if (is_instance) goto field_access_end; /* STATIC CASE */ context_used = mini_class_check_context_used (cfg, klass); if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) { mono_error_set_field_missing (cfg->error, m_field_get_parent (field), field->name, NULL, "Using static instructions with literal field"); CHECK_CFG_ERROR; } /* The special_static_fields field is init'd in mono_class_vtable, so it needs * to be called here. */ if (!context_used) { mono_class_vtable_checked (klass, cfg->error); CHECK_CFG_ERROR; CHECK_TYPELOAD (klass); } addr = mono_special_static_field_get_offset (field, cfg->error); CHECK_CFG_ERROR; CHECK_TYPELOAD (klass); is_special_static = mono_class_field_is_special_static (field); if (is_special_static && ((gsize)addr & 0x80000000) == 0) thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD); else thread_ins = NULL; /* Generate IR to compute the field address */ if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))) { /* * Fast access to TLS data * Inline version of get_thread_static_data () in * threads.c. */ guint32 offset; int idx, static_data_reg, array_reg, dreg; static_data_reg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data)); if (cfg->compile_aot || context_used) { int offset_reg, offset2_reg, idx_reg; /* For TLS variables, this will return the TLS offset */ if (context_used) { MonoInst *addr_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET); /* The value is offset by 1 */ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, addr_ins->dreg, addr_ins->dreg, 1); } else { EMIT_NEW_SFLDACONST (cfg, ins, field); } offset_reg = ins->dreg; MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff); idx_reg = alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, TARGET_SIZEOF_VOID_P == 8 ? 
3 : 2); MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg); array_reg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0); offset2_reg = alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff); dreg = alloc_ireg (cfg); EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg); } else { offset = (gsize)addr & 0x7fffffff; idx = offset & 0x3f; array_reg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * TARGET_SIZEOF_VOID_P); dreg = alloc_ireg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff)); } } else if ((cfg->compile_aot && is_special_static) || (context_used && is_special_static)) { MonoInst *iargs [1]; g_assert (m_field_get_parent (field)); if (context_used) { iargs [0] = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_CLASS_FIELD); } else { EMIT_NEW_FIELDCONST (cfg, iargs [0], field); } ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs); } else if (context_used) { MonoInst *static_data; /* g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n", method->klass->name_space, method->klass->name, method->name, depth, field->offset); */ if (mono_class_needs_cctor_run (klass, method)) emit_class_init (cfg, klass); /* * The pointer we're computing here is * * super_info.static_data + field->offset */ static_data = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_STATIC_DATA); if (mini_is_gsharedvt_klass (klass)) { MonoInst *offset_ins; offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET); /* The value is offset by 1 */ EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1); dreg = alloc_ireg_mp (cfg); EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg); } else if (field->offset == 0) { ins = static_data; } else { int addr_reg = mono_alloc_preg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset); } } else if (cfg->compile_aot && addr) { MonoInst *iargs [1]; g_assert (m_field_get_parent (field)); EMIT_NEW_FIELDCONST (cfg, iargs [0], field); ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs); } else { MonoVTable *vtable = NULL; if (!cfg->compile_aot) vtable = mono_class_vtable_checked (klass, cfg->error); CHECK_CFG_ERROR; CHECK_TYPELOAD (klass); if (!addr) { if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) { if (!(g_slist_find (class_inits, klass))) { emit_class_init (cfg, klass); if (cfg->verbose_level > 2) printf ("class %s.%s needs init call for %s\n", m_class_get_name_space (klass), m_class_get_name (klass), mono_field_get_name (field)); class_inits = g_slist_prepend (class_inits, klass); } } else { if (cfg->run_cctors) { /* This ensures that inlining cannot trigger .cctors: */ /* too many apps depend on them */ /* running in a specific order...
*/ g_assert (vtable); if (!vtable->initialized && m_class_has_cctor (vtable->klass)) INLINE_FAILURE ("class init"); if (!mono_runtime_class_init_full (vtable, cfg->error)) { mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); goto exception_exit; } } } if (cfg->compile_aot) EMIT_NEW_SFLDACONST (cfg, ins, field); else { g_assert (vtable); addr = mono_static_field_get_addr (vtable, field); g_assert (addr); EMIT_NEW_PCONST (cfg, ins, addr); } } else { MonoInst *iargs [1]; EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr)); ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs); } } /* Generate IR to do the actual load/store operation */ if ((il_op == MONO_CEE_STFLD || il_op == MONO_CEE_STSFLD)) { if (ins_flag & MONO_INST_VOLATILE) { /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); } else if (!mini_debug_options.weak_memory_model && mini_type_is_reference (ftype)) { mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL); } } if (il_op == MONO_CEE_LDSFLDA) { ins->klass = mono_class_from_mono_type_internal (ftype); ins->type = STACK_PTR; *sp++ = ins; } else if (il_op == MONO_CEE_STSFLD) { MonoInst *store; EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg); store->flags |= ins_flag; } else { gboolean is_const = FALSE; MonoVTable *vtable = NULL; gpointer addr = NULL; if (!context_used) { vtable = mono_class_vtable_checked (klass, cfg->error); CHECK_CFG_ERROR; CHECK_TYPELOAD (klass); } if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) || (!context_used && !cfg->compile_aot && vtable->initialized))) { int ro_type = ftype->type; if (!addr) addr = mono_static_field_get_addr (vtable, field); if (ro_type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (ftype->data.klass)) { ro_type = mono_class_enum_basetype_internal (ftype->data.klass)->type; } GSHAREDVT_FAILURE (il_op); /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/ is_const = TRUE; switch (ro_type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_U1: EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr)); sp++; break; case MONO_TYPE_I1: EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr)); sp++; break; case MONO_TYPE_CHAR: case MONO_TYPE_U2: EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr)); sp++; break; case MONO_TYPE_I2: EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr)); sp++; break; case MONO_TYPE_I4: EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr)); sp++; break; case MONO_TYPE_U4: EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr)); sp++; break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr)); mini_type_to_eval_stack_type ((cfg), field->type, *sp); sp++; break; case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_CLASS: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: if (!mono_gc_is_moving ()) { EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr)); mini_type_to_eval_stack_type ((cfg), field->type, *sp); sp++; } else { is_const = FALSE; } break; case MONO_TYPE_I8: case MONO_TYPE_U8: EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr)); sp++; break; case MONO_TYPE_R4: case MONO_TYPE_R8: case MONO_TYPE_VALUETYPE: default: is_const = FALSE; break; } } if (!is_const) { MonoInst *load; EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0); load->flags |= ins_flag; *sp++ = load; } } field_access_end: if ((il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDSFLD) &&
(ins_flag & MONO_INST_VOLATILE)) { /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */ mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ); } ins_flag = 0; break; } case MONO_CEE_STOBJ: sp -= 2; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); /* FIXME: should check item at sp [1] is compatible with the type of the store. */ mini_emit_memory_store (cfg, m_class_get_byval_arg (klass), sp [0], sp [1], ins_flag); ins_flag = 0; inline_costs += 1; break; /* * Array opcodes */ case MONO_CEE_NEWARR: { MonoInst *len_ins; const char *data_ptr; int data_size = 0; guint32 field_token; --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); if (m_class_get_byval_arg (klass)->type == MONO_TYPE_VOID) UNVERIFIED; context_used = mini_class_check_context_used (cfg, klass); #ifndef TARGET_S390X if (sp [0]->type == STACK_I8 && TARGET_SIZEOF_VOID_P == 4) { MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4); ins->sreg1 = sp [0]->dreg; ins->type = STACK_I4; ins->dreg = alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, ins); *sp = mono_decompose_opcode (cfg, ins); } #else /* The array allocator expects a 64-bit input, and we cannot rely on the high bits of a 32-bit result, so we have to extend. */ if (sp [0]->type == STACK_I4 && TARGET_SIZEOF_VOID_P == 8) { MONO_INST_NEW (cfg, ins, OP_ICONV_TO_I8); ins->sreg1 = sp [0]->dreg; ins->type = STACK_I8; ins->dreg = alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, ins); *sp = mono_decompose_opcode (cfg, ins); } #endif if (context_used) { MonoInst *args [3]; MonoClass *array_class = mono_class_create_array (klass, 1); MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class); /* FIXME: Use OP_NEWARR and decompose later to help abcrem */ /* vtable */ args [0] = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE); /* array len */ args [1] = sp [0]; if (managed_alloc) ins = mono_emit_method_call (cfg, managed_alloc, args, NULL); else ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args); } else { /* Decompose later since it is needed by abcrem */ MonoClass *array_type = mono_class_create_array (klass, 1); mono_class_vtable_checked (array_type, cfg->error); CHECK_CFG_ERROR; CHECK_TYPELOAD (array_type); MONO_INST_NEW (cfg, ins, OP_NEWARR); ins->dreg = alloc_ireg_ref (cfg); ins->sreg1 = sp [0]->dreg; ins->inst_newa_class = klass; ins->type = STACK_OBJ; ins->klass = array_type; MONO_ADD_INS (cfg->cbb, ins); cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE; cfg->cbb->needs_decompose = TRUE; /* Needed so mono_emit_load_get_addr () gets called */ mono_get_got_var (cfg); } len_ins = sp [0]; ip += 5; *sp++ = ins; inline_costs += 1; /* * we inline/optimize the initialization sequence if possible. 
* we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing * for small sizes open code the memcpy * ensure the rva field is big enough */ if ((cfg->opt & MONO_OPT_INTRINS) && next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (cfg, method, cfg->compile_aot, next_ip, end, klass, len_ins->inst_c0, &data_size, &field_token, &il_op, &next_ip))) { MonoMethod *memcpy_method = mini_get_memcpy_method (); MonoInst *iargs [3]; int add_reg = alloc_ireg_mp (cfg); EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector)); if (cfg->compile_aot) { EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, m_class_get_image (method->klass), GPOINTER_TO_UINT(field_token), STACK_PTR, NULL); } else { EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr); } EMIT_NEW_ICONST (cfg, iargs [2], data_size); mono_emit_method_call (cfg, memcpy_method, iargs, NULL); } break; } case MONO_CEE_LDLEN: --sp; if (sp [0]->type != STACK_OBJ) UNVERIFIED; MONO_INST_NEW (cfg, ins, OP_LDLEN); ins->dreg = alloc_preg (cfg); ins->sreg1 = sp [0]->dreg; ins->inst_imm = MONO_STRUCT_OFFSET (MonoArray, max_length); ins->type = STACK_I4; /* This flag will be inherited by the decomposition */ ins->flags |= MONO_INST_FAULT | MONO_INST_INVARIANT_LOAD; MONO_ADD_INS (cfg->cbb, ins); cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE; cfg->cbb->needs_decompose = TRUE; MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, sp [0]->dreg); *sp++ = ins; break; case MONO_CEE_LDELEMA: sp -= 2; if (sp [0]->type != STACK_OBJ) UNVERIFIED; cfg->flags |= MONO_CFG_HAS_LDELEMA; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); /* we need to make sure that this array is exactly the type it needs * to be for correctness. 
the wrappers are lax with their usage * so we need to ignore them here */ if (!m_class_is_valuetype (klass) && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) { MonoClass *array_class = mono_class_create_array (klass, 1); mini_emit_check_array_type (cfg, sp [0], array_class); CHECK_TYPELOAD (array_class); } readonly = FALSE; ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE); *sp++ = ins; break; case MONO_CEE_LDELEM: case MONO_CEE_LDELEM_I1: case MONO_CEE_LDELEM_U1: case MONO_CEE_LDELEM_I2: case MONO_CEE_LDELEM_U2: case MONO_CEE_LDELEM_I4: case MONO_CEE_LDELEM_U4: case MONO_CEE_LDELEM_I8: case MONO_CEE_LDELEM_I: case MONO_CEE_LDELEM_R4: case MONO_CEE_LDELEM_R8: case MONO_CEE_LDELEM_REF: { MonoInst *addr; sp -= 2; if (il_op == MONO_CEE_LDELEM) { klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); mono_class_init_internal (klass); } else klass = array_access_to_klass (il_op); if (sp [0]->type != STACK_OBJ) UNVERIFIED; cfg->flags |= MONO_CFG_HAS_LDELEMA; if (mini_is_gsharedvt_variable_klass (klass)) { // FIXME-VT: OP_ICONST optimization addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE); EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0); ins->opcode = OP_LOADV_MEMBASE; } else if (sp [1]->opcode == OP_ICONST) { int array_reg = sp [0]->dreg; int index_reg = sp [1]->dreg; int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector); if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg)) MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg); MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg); EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset); } else { addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE); EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0); } *sp++ = ins; break; } case MONO_CEE_STELEM_I: case MONO_CEE_STELEM_I1: case MONO_CEE_STELEM_I2: case MONO_CEE_STELEM_I4: case MONO_CEE_STELEM_I8: case MONO_CEE_STELEM_R4: case MONO_CEE_STELEM_R8: case MONO_CEE_STELEM_REF: case MONO_CEE_STELEM: { sp -= 3; cfg->flags |= MONO_CFG_HAS_LDELEMA; if (il_op == MONO_CEE_STELEM) { klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); mono_class_init_internal (klass); } else klass = array_access_to_klass (il_op); if (sp [0]->type != STACK_OBJ) UNVERIFIED; sp [2] = convert_value (cfg, m_class_get_byval_arg (klass), sp [2]); mini_emit_array_store (cfg, klass, sp, TRUE); inline_costs += 1; break; } case MONO_CEE_CKFINITE: { --sp; if (cfg->llvm_only) { MonoInst *iargs [1]; iargs [0] = sp [0]; *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs); } else { sp [0] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.double_class), sp [0]); MONO_INST_NEW (cfg, ins, OP_CKFINITE); ins->sreg1 = sp [0]->dreg; ins->dreg = alloc_freg (cfg); ins->type = STACK_R8; MONO_ADD_INS (cfg->cbb, ins); *sp++ = mono_decompose_opcode (cfg, ins); } break; } case MONO_CEE_REFANYVAL: { MonoInst *src_var, *src; int klass_reg = alloc_preg (cfg); int dreg = alloc_preg (cfg); GSHAREDVT_FAILURE (il_op); MONO_INST_NEW (cfg, ins, il_op); --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); context_used = mini_class_check_context_used (cfg, klass); // FIXME: src_var = get_vreg_to_inst (cfg, sp [0]->dreg); if (!src_var) src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg 
(mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg); EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass)); if (context_used) { MonoInst *klass_ins; klass_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS); // FIXME: MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg); MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException"); } else { mini_emit_class_check (cfg, klass_reg, klass); } EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value)); ins->type = STACK_MP; ins->klass = klass; *sp++ = ins; break; } case MONO_CEE_MKREFANY: { MonoInst *loc, *addr; GSHAREDVT_FAILURE (il_op); MONO_INST_NEW (cfg, ins, il_op); --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); context_used = mini_class_check_context_used (cfg, klass); loc = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL); EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0); MonoInst *const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS); int type_reg = alloc_preg (cfg); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, m_class_offsetof_byval_arg ()); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg); EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0); ins->type = STACK_VTYPE; ins->klass = mono_defaults.typed_reference_class; *sp++ = ins; break; } case MONO_CEE_LDTOKEN: { gpointer handle; MonoClass *handle_class; if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) { handle = mono_method_get_wrapper_data (method, n); handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1); if (handle_class == mono_defaults.typehandle_class) handle = m_class_get_byval_arg ((MonoClass*)handle); } else { handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, cfg->error); CHECK_CFG_ERROR; } if (!handle) LOAD_ERROR; mono_class_init_internal (handle_class); if (cfg->gshared) { if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF || mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) { /* This case handles ldtoken of an open type, like for typeof(Gen<>). 
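 * A TYPEDEF/TYPEREF token always denotes the open definition, which is shared by every instantiation, so no rgctx lookup is needed here.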
*/ context_used = 0; } else if (handle_class == mono_defaults.typehandle_class) { context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type_internal ((MonoType *)handle)); } else if (handle_class == mono_defaults.fieldhandle_class) context_used = mini_class_check_context_used (cfg, m_field_get_parent (((MonoClassField*)handle))); else if (handle_class == mono_defaults.methodhandle_class) context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle); else g_assert_not_reached (); } { if ((next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && ((next_ip [0] == CEE_CALL) || (next_ip [0] == CEE_CALLVIRT)) && (cmethod = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context)) && (cmethod->klass == mono_defaults.systemtype_class) && (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) { MonoClass *tclass = mono_class_from_mono_type_internal ((MonoType *)handle); mono_class_init_internal (tclass); // Optimize to true/false if next instruction is `call instance bool Type::get_IsValueType()` guchar *is_vt_ip; guint32 is_vt_token; if ((is_vt_ip = il_read_call (next_ip + 5, end, &is_vt_token)) && ip_in_bb (cfg, cfg->cbb, is_vt_ip)) { MonoMethod *is_vt_method = mini_get_method (cfg, method, is_vt_token, NULL, generic_context); if (is_vt_method->klass == mono_defaults.systemtype_class && !mini_is_gsharedvt_variable_klass (tclass) && !mono_class_is_open_constructed_type (m_class_get_byval_arg (tclass)) && !strcmp ("get_IsValueType", is_vt_method->name)) { next_ip = is_vt_ip; EMIT_NEW_ICONST (cfg, ins, m_class_is_valuetype (tclass) ? 1 : 0); ins->type = STACK_I4; *sp++ = ins; break; } } if (context_used) { MONO_INST_NEW (cfg, ins, OP_RTTYPE); ins->dreg = alloc_ireg_ref (cfg); ins->inst_p0 = tclass; ins->type = STACK_OBJ; MONO_ADD_INS (cfg->cbb, ins); cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE; cfg->cbb->needs_decompose = TRUE; } else if (cfg->compile_aot) { if (method->wrapper_type) { error_init (error); //got to do it since there are multiple conditionals below if (mono_class_get_checked (m_class_get_image (tclass), m_class_get_type_token (tclass), error) == tclass && !generic_context) { /* Special case for static synchronized wrappers */ EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, m_class_get_image (tclass), m_class_get_type_token (tclass), generic_context); } else { mono_error_cleanup (error); /* FIXME don't swallow the error */ /* FIXME: n is not a normal token */ DISABLE_AOT (cfg); EMIT_NEW_PCONST (cfg, ins, NULL); } } else { EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context); } } else { MonoReflectionType *rt = mono_type_get_object_checked ((MonoType *)handle, cfg->error); CHECK_CFG_ERROR; EMIT_NEW_PCONST (cfg, ins, rt); } ins->type = STACK_OBJ; ins->klass = mono_defaults.runtimetype_class; il_op = (MonoOpcodeEnum)next_ip [0]; next_ip += 5; } else { MonoInst *addr, *vtvar; vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (handle_class), OP_LOCAL); if (context_used) { if (handle_class == mono_defaults.typehandle_class) { ins = mini_emit_get_rgctx_klass (cfg, context_used, mono_class_from_mono_type_internal ((MonoType *)handle), MONO_RGCTX_INFO_TYPE); } else if (handle_class == mono_defaults.methodhandle_class) { ins = emit_get_rgctx_method (cfg, context_used, (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD); } else if (handle_class == mono_defaults.fieldhandle_class) { ins = emit_get_rgctx_field (cfg, context_used, (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD); } else { g_assert_not_reached (); } } else if 
(cfg->compile_aot) { EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context); } else { EMIT_NEW_PCONST (cfg, ins, handle); } EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg); EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0); } } *sp++ = ins; break; } case MONO_CEE_THROW: if (sp [-1]->type != STACK_OBJ) UNVERIFIED; MONO_INST_NEW (cfg, ins, OP_THROW); --sp; ins->sreg1 = sp [0]->dreg; cfg->cbb->out_of_line = TRUE; MONO_ADD_INS (cfg->cbb, ins); MONO_INST_NEW (cfg, ins, OP_NOT_REACHED); MONO_ADD_INS (cfg->cbb, ins); sp = stack_start; link_bblock (cfg, cfg->cbb, end_bblock); start_new_bblock = 1; /* This can complicate code generation for llvm since the return value might not be defined */ if (COMPILE_LLVM (cfg)) INLINE_FAILURE ("throw"); break; case MONO_CEE_ENDFINALLY: if (!ip_in_finally_clause (cfg, ip - header->code)) UNVERIFIED; /* mono_save_seq_point_info () depends on this */ if (sp != stack_start) emit_seq_point (cfg, method, ip, FALSE, FALSE); MONO_INST_NEW (cfg, ins, OP_ENDFINALLY); MONO_ADD_INS (cfg->cbb, ins); start_new_bblock = 1; ins_has_side_effect = FALSE; /* * Control will leave the method so empty the stack, otherwise * the next basic block will start with a nonempty stack. */ while (sp != stack_start) { sp--; } break; case MONO_CEE_LEAVE: case MONO_CEE_LEAVE_S: { GList *handlers; /* empty the stack */ g_assert (sp >= stack_start); sp = stack_start; /* * If this leave statement is in a catch block, check for a * pending exception, and rethrow it if necessary. * We avoid doing this in runtime invoke wrappers, since those are called * by native code which expects the wrapper to catch all exceptions. */ for (i = 0; i < header->num_clauses; ++i) { MonoExceptionClause *clause = &header->clauses [i]; /* * Use <= in the final comparison to handle clauses with multiple * leave statements, like in bug #78024. * The ordering of the exception clauses guarantees that we find the * innermost clause. */ if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((il_op == MONO_CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) { MonoInst *exc_ins; MonoBasicBlock *dont_throw; /* MonoInst *load; NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0); */ exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL); NEW_BBLOCK (cfg, dont_throw); /* * Currently, we always rethrow the abort exception, despite the * fact that this is not correct. See thread6.cs for an example. * But propagating the abort exception is more important than * getting the semantics right. */ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw); MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg); MONO_START_BB (cfg, dont_throw); } } #ifdef ENABLE_LLVM cfg->cbb->try_end = (intptr_t)(ip - header->code); #endif if ((handlers = mono_find_leave_clauses (cfg, ip, target))) { GList *tmp; /* * For each finally clause that we exit we need to invoke the finally block. * After each invocation we need to add try holes for all the clauses that * we already exited.
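 * (A try hole marks a range of generated IR that must not be attributed to an already-exited clause in the EH tables.)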
*/ for (tmp = handlers; tmp; tmp = tmp->next) { MonoLeaveClause *leave = (MonoLeaveClause *) tmp->data; MonoExceptionClause *clause = leave->clause; if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) continue; MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset); MonoBasicBlock *dont_throw; /* * Emit instrumentation code before linking the basic blocks below as this * will alter cfg->cbb. */ mini_profiler_emit_call_finally (cfg, header, ip, leave->index, clause); tblock = cfg->cil_offset_to_bb [clause->handler_offset]; g_assert (tblock); link_bblock (cfg, cfg->cbb, tblock); MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0); MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER); ins->inst_target_bb = tblock; ins->inst_eh_blocks = tmp; MONO_ADD_INS (cfg->cbb, ins); cfg->cbb->has_call_handler = 1; /* Throw exception if exvar is set */ /* FIXME Do we need this for calls from catch/filter ? */ NEW_BBLOCK (cfg, dont_throw); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw); mono_emit_jit_icall (cfg, ves_icall_thread_finish_async_abort, NULL); cfg->cbb->clause_holes = tmp; MONO_START_BB (cfg, dont_throw); cfg->cbb->clause_holes = tmp; if (COMPILE_LLVM (cfg)) { MonoBasicBlock *target_bb; /* * Link the finally bblock with the target, since it will * conceptually branch there. */ GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1); GET_BBLOCK (cfg, target_bb, target); link_bblock (cfg, tblock, target_bb); } } } MONO_INST_NEW (cfg, ins, OP_BR); MONO_ADD_INS (cfg->cbb, ins); GET_BBLOCK (cfg, tblock, target); link_bblock (cfg, cfg->cbb, tblock); ins->inst_target_bb = tblock; start_new_bblock = 1; break; } /* * Mono specific opcodes */ case MONO_CEE_MONO_ICALL: { g_assert (method->wrapper_type != MONO_WRAPPER_NONE); const MonoJitICallId jit_icall_id = (MonoJitICallId)token; MonoJitICallInfo * const info = mono_find_jit_icall_info (jit_icall_id); CHECK_STACK (info->sig->param_count); sp -= info->sig->param_count; if (token == MONO_JIT_ICALL_mono_threads_attach_coop) { MonoInst *addr; MonoBasicBlock *next_bb; if (cfg->compile_aot) { /* * This is called on unattached threads, so it cannot go through the trampoline * infrastructure. Use an indirect call through a got slot initialized at load time * instead. */ EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, GUINT_TO_POINTER (jit_icall_id)); ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL); } else { ins = mono_emit_jit_icall_id (cfg, jit_icall_id, sp); } /* * Parts of the initlocals code needs to come after this, since it might call methods like memset. * Also profiling needs to be after attach. 
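 * init_localsbb2 records the bblock that follows the attach call so that such code can be emitted there later.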
 */ init_localsbb2 = cfg->cbb; NEW_BBLOCK (cfg, next_bb); MONO_START_BB (cfg, next_bb); } else { if (token == MONO_JIT_ICALL_mono_threads_detach_coop) { /* can't emit profiling code after a detach, so emit it now */ mini_profiler_emit_leave (cfg, NULL); detached_before_ret = TRUE; } ins = mono_emit_jit_icall_id (cfg, jit_icall_id, sp); } if (!MONO_TYPE_IS_VOID (info->sig->ret)) *sp++ = ins; inline_costs += CALL_COST * MIN(10, num_calls++); break; } MonoJumpInfoType ldptr_type; case MONO_CEE_MONO_LDPTR_CARD_TABLE: ldptr_type = MONO_PATCH_INFO_GC_CARD_TABLE_ADDR; goto mono_ldptr; case MONO_CEE_MONO_LDPTR_NURSERY_START: ldptr_type = MONO_PATCH_INFO_GC_NURSERY_START; goto mono_ldptr; case MONO_CEE_MONO_LDPTR_NURSERY_BITS: ldptr_type = MONO_PATCH_INFO_GC_NURSERY_BITS; goto mono_ldptr; case MONO_CEE_MONO_LDPTR_INT_REQ_FLAG: ldptr_type = MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG; goto mono_ldptr; case MONO_CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT: ldptr_type = MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT; mono_ldptr: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); ins = mini_emit_runtime_constant (cfg, ldptr_type, NULL); *sp++ = ins; inline_costs += CALL_COST * MIN(10, num_calls++); break; case MONO_CEE_MONO_LDPTR: { gpointer ptr; g_assert (method->wrapper_type != MONO_WRAPPER_NONE); ptr = mono_method_get_wrapper_data (method, token); EMIT_NEW_PCONST (cfg, ins, ptr); *sp++ = ins; inline_costs += CALL_COST * MIN(10, num_calls++); /* Can't embed random pointers into AOT code */ DISABLE_AOT (cfg); break; } case MONO_CEE_MONO_JIT_ICALL_ADDR: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, GUINT_TO_POINTER (token)); *sp++ = ins; inline_costs += CALL_COST * MIN(10, num_calls++); break; case MONO_CEE_MONO_ICALL_ADDR: { MonoMethod *cmethod; gpointer ptr; g_assert (method->wrapper_type != MONO_WRAPPER_NONE); cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token); if (cfg->compile_aot) { if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) { /* * This is generated by emit_native_wrapper () to resolve the pinvoke address * before the call; it's not needed when using direct pinvoke. * This is not an optimization, but it's used to avoid looking up pinvokes * on platforms which don't support dlopen ().
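 * Emitting a NULL constant is safe here: the CEE_POP that follows discards the unused address.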
*/ EMIT_NEW_PCONST (cfg, ins, NULL); } else { EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod); } } else { ptr = mono_lookup_internal_call (cmethod); g_assert (ptr); EMIT_NEW_PCONST (cfg, ins, ptr); } *sp++ = ins; break; } case MONO_CEE_MONO_VTADDR: { g_assert (method->wrapper_type != MONO_WRAPPER_NONE); MonoInst *src_var, *src; --sp; // FIXME: src_var = get_vreg_to_inst (cfg, sp [0]->dreg); EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype); *sp++ = src; break; } case MONO_CEE_MONO_NEWOBJ: { g_assert (method->wrapper_type != MONO_WRAPPER_NONE); MonoInst *iargs [2]; klass = (MonoClass *)mono_method_get_wrapper_data (method, token); mono_class_init_internal (klass); NEW_CLASSCONST (cfg, iargs [0], klass); MONO_ADD_INS (cfg->cbb, iargs [0]); *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs); inline_costs += CALL_COST * MIN(10, num_calls++); break; } case MONO_CEE_MONO_OBJADDR: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); --sp; MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = alloc_ireg_mp (cfg); ins->sreg1 = sp [0]->dreg; ins->type = STACK_MP; MONO_ADD_INS (cfg->cbb, ins); *sp++ = ins; break; case MONO_CEE_MONO_LDNATIVEOBJ: /* * Similar to LDOBJ, but instead load the unmanaged * representation of the vtype to the stack. */ g_assert (method->wrapper_type != MONO_WRAPPER_NONE); --sp; klass = (MonoClass *)mono_method_get_wrapper_data (method, token); g_assert (m_class_is_valuetype (klass)); mono_class_init_internal (klass); { MonoInst *src, *dest, *temp; src = sp [0]; temp = mono_compile_create_var (cfg, m_class_get_byval_arg (klass), OP_LOCAL); temp->backend.is_pinvoke = 1; EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0); mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0); EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0); dest->type = STACK_VTYPE; dest->klass = klass; *sp ++ = dest; } break; case MONO_CEE_MONO_RETOBJ: { /* * Same as RET, but return the native representation of a vtype * to the caller. 
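 * This is only valid inside a pinvoke wrapper, hence the signature->pinvoke assertion below.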
*/ g_assert (method->wrapper_type != MONO_WRAPPER_NONE); g_assert (cfg->ret); g_assert (mono_method_signature_internal (method)->pinvoke); --sp; klass = (MonoClass *)mono_method_get_wrapper_data (method, token); if (!cfg->vret_addr) { g_assert (cfg->ret_var_is_local); EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype); } else { EMIT_NEW_RETLOADA (cfg, ins); } mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0); if (sp != stack_start) UNVERIFIED; if (!detached_before_ret) mini_profiler_emit_leave (cfg, sp [0]); MONO_INST_NEW (cfg, ins, OP_BR); ins->inst_target_bb = end_bblock; MONO_ADD_INS (cfg->cbb, ins); link_bblock (cfg, cfg->cbb, end_bblock); start_new_bblock = 1; break; } case MONO_CEE_MONO_SAVE_LMF: case MONO_CEE_MONO_RESTORE_LMF: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); break; case MONO_CEE_MONO_CLASSCONST: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token)); *sp++ = ins; inline_costs += CALL_COST * MIN(10, num_calls++); break; case MONO_CEE_MONO_METHODCONST: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); EMIT_NEW_METHODCONST (cfg, ins, mono_method_get_wrapper_data (method, token)); *sp++ = ins; break; case MONO_CEE_MONO_PINVOKE_ADDR_CACHE: { g_assert (method->wrapper_type != MONO_WRAPPER_NONE); MonoMethod *pinvoke_method = (MonoMethod*)mono_method_get_wrapper_data (method, token); /* This is a memory slot used by the wrapper */ if (cfg->compile_aot) { EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHOD_PINVOKE_ADDR_CACHE, pinvoke_method); } else { gpointer addr = mono_mem_manager_alloc0 (cfg->mem_manager, sizeof (gpointer)); EMIT_NEW_PCONST (cfg, ins, addr); } *sp++ = ins; break; } case MONO_CEE_MONO_NOT_TAKEN: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); cfg->cbb->out_of_line = TRUE; break; case MONO_CEE_MONO_TLS: { MonoTlsKey key; g_assert (method->wrapper_type != MONO_WRAPPER_NONE); key = (MonoTlsKey)n; g_assert (key < TLS_KEY_NUM); ins = mono_create_tls_get (cfg, key); g_assert (ins); ins->type = STACK_PTR; *sp++ = ins; break; } case MONO_CEE_MONO_DYN_CALL: { MonoCallInst *call; /* It would be easier to call a trampoline, but that would put an * extra frame on the stack, confusing exception handling. So * implement it inline using an opcode for now. 
 */ g_assert (method->wrapper_type != MONO_WRAPPER_NONE); if (!cfg->dyn_call_var) { cfg->dyn_call_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); /* prevent it from being register allocated */ cfg->dyn_call_var->flags |= MONO_INST_VOLATILE; } /* Has to use a call inst since local regalloc expects it */ MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL); ins = (MonoInst*)call; sp -= 2; ins->sreg1 = sp [0]->dreg; ins->sreg2 = sp [1]->dreg; MONO_ADD_INS (cfg->cbb, ins); cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area); /* OP_DYN_CALL might need to allocate a dynamically sized param area */ cfg->flags |= MONO_CFG_HAS_ALLOCA; inline_costs += CALL_COST * MIN(10, num_calls++); break; } case MONO_CEE_MONO_MEMORY_BARRIER: { g_assert (method->wrapper_type != MONO_WRAPPER_NONE); mini_emit_memory_barrier (cfg, (int)n); break; } case MONO_CEE_MONO_ATOMIC_STORE_I4: { g_assert (method->wrapper_type != MONO_WRAPPER_NONE); g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4)); sp -= 2; MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4); ins->dreg = sp [0]->dreg; ins->sreg1 = sp [1]->dreg; ins->backend.memory_barrier_kind = (int)n; MONO_ADD_INS (cfg->cbb, ins); break; } case MONO_CEE_MONO_LD_DELEGATE_METHOD_PTR: { CHECK_STACK (1); --sp; dreg = alloc_preg (cfg); EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); *sp++ = ins; break; } case MONO_CEE_MONO_CALLI_EXTRA_ARG: { MonoInst *addr; MonoMethodSignature *fsig; MonoInst *arg; /* * This is the same as CEE_CALLI, but passes an additional argument * to the called method in llvmonly mode. * This is only used by delegate invoke wrappers to call the * actual delegate method. */ g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE); ins = NULL; cmethod = NULL; CHECK_STACK (1); --sp; addr = *sp; fsig = mini_get_signature (method, token, generic_context, cfg->error); CHECK_CFG_ERROR; if (cfg->llvm_only) cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig); n = fsig->param_count + fsig->hasthis + 1; CHECK_STACK (n); sp -= n; arg = sp [n - 1]; if (cfg->llvm_only) { /* * The lowest bit of 'arg' determines whether the callee uses the gsharedvt * cconv. This is set by mono_init_delegate (). */ if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) { MonoInst *callee = addr; MonoInst *call, *localloc_ins; MonoBasicBlock *is_gsharedvt_bb, *end_bb; int low_bit_reg = alloc_preg (cfg); NEW_BBLOCK (cfg, is_gsharedvt_bb); NEW_BBLOCK (cfg, end_bb); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb); /* Normal case: callee uses a normal cconv, have to add an out wrapper */ addr = emit_get_rgctx_sig (cfg, context_used, fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI); /* * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
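 * The pair is packed into a two-slot stack buffer (the OP_LOCALLOC_IMM below) whose address becomes that extra argument.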
*/ MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM); ins->dreg = alloc_preg (cfg); ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P; MONO_ADD_INS (cfg->cbb, ins); localloc_ins = ins; cfg->flags |= MONO_CFG_HAS_ALLOCA; MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg); call = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */ MONO_START_BB (cfg, is_gsharedvt_bb); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1); ins = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee); ins->dreg = call->dreg; MONO_START_BB (cfg, end_bb); } else { /* Caller uses a normal calling conv */ MonoInst *callee = addr; MonoInst *call, *localloc_ins; MonoBasicBlock *is_gsharedvt_bb, *end_bb; int low_bit_reg = alloc_preg (cfg); NEW_BBLOCK (cfg, is_gsharedvt_bb); NEW_BBLOCK (cfg, end_bb); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb); /* Normal case: callee uses a normal cconv, no conversion is needed */ call = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */ MONO_START_BB (cfg, is_gsharedvt_bb); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1); NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig); MONO_ADD_INS (cfg->cbb, addr); /* * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg. */ MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM); ins->dreg = alloc_preg (cfg); ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P; MONO_ADD_INS (cfg->cbb, ins); localloc_ins = ins; cfg->flags |= MONO_CFG_HAS_ALLOCA; MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg); ins = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr); ins->dreg = call->dreg; MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); MONO_START_BB (cfg, end_bb); } } else { /* Same as CEE_CALLI */ if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) { /* * We pass the address to the gsharedvt trampoline in the rgctx reg */ MonoInst *callee = addr; addr = emit_get_rgctx_sig (cfg, context_used, fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI); ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee); } else { ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL); } } if (!MONO_TYPE_IS_VOID (fsig->ret)) *sp++ = mono_emit_widen_call_res (cfg, ins, fsig); CHECK_CFG_EXCEPTION; ins_flag = 0; constrained_class = NULL; break; } case MONO_CEE_MONO_LDDOMAIN: { MonoDomain *domain = mono_get_root_domain (); g_assert (method->wrapper_type != MONO_WRAPPER_NONE); EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : domain); *sp++ = ins; break; } case MONO_CEE_MONO_SAVE_LAST_ERROR: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); // Just an IL prefix, setting this flag, picked up by call instructions. 
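/* The next call instruction emitted while this flag is set saves the native error state (errno / GetLastError) immediately after the call, before it can be clobbered. */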
save_last_error = TRUE; break; case MONO_CEE_MONO_GET_RGCTX_ARG: g_assert (method->wrapper_type != MONO_WRAPPER_NONE); mono_create_rgctx_var (cfg); MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = alloc_dreg (cfg, STACK_PTR); ins->sreg1 = cfg->rgctx_var->dreg; ins->type = STACK_PTR; MONO_ADD_INS (cfg->cbb, ins); *sp++ = ins; break; case MONO_CEE_MONO_GET_SP: { /* Used by COOP only, so this is good enough */ MonoInst *var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); EMIT_NEW_VARLOADA (cfg, ins, var, NULL); *sp++ = ins; break; } case MONO_CEE_MONO_REMAP_OVF_EXC: /* Remap the exception thrown by the next _OVF opcode */ g_assert (method->wrapper_type != MONO_WRAPPER_NONE); ovf_exc = (const char*)mono_method_get_wrapper_data (method, token); break; case MONO_CEE_ARGLIST: { /* somewhat similar to LDTOKEN */ MonoInst *addr, *vtvar; vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.argumenthandle_class), OP_LOCAL); EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0); EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg); EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0); ins->type = STACK_VTYPE; ins->klass = mono_defaults.argumenthandle_class; *sp++ = ins; break; } case MONO_CEE_CEQ: case MONO_CEE_CGT: case MONO_CEE_CGT_UN: case MONO_CEE_CLT: case MONO_CEE_CLT_UN: { MonoInst *cmp, *arg1, *arg2; sp -= 2; arg1 = sp [0]; arg2 = sp [1]; /* * The following transforms: * CEE_CEQ into OP_CEQ * CEE_CGT into OP_CGT * CEE_CGT_UN into OP_CGT_UN * CEE_CLT into OP_CLT * CEE_CLT_UN into OP_CLT_UN */ MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]); MONO_INST_NEW (cfg, ins, cmp->opcode); cmp->sreg1 = arg1->dreg; cmp->sreg2 = arg2->dreg; type_from_op (cfg, cmp, arg1, arg2); CHECK_TYPE (cmp); add_widen_op (cfg, cmp, &arg1, &arg2); if ((arg1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP)))) cmp->opcode = OP_LCOMPARE; else if (arg1->type == STACK_R4) cmp->opcode = OP_RCOMPARE; else if (arg1->type == STACK_R8) cmp->opcode = OP_FCOMPARE; else cmp->opcode = OP_ICOMPARE; MONO_ADD_INS (cfg->cbb, cmp); ins->type = STACK_I4; ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type); type_from_op (cfg, ins, arg1, arg2); if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) { /* * The backends expect the fceq opcodes to do the * comparison too. 
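 * The separate compare is therefore nullified below and its source registers are transferred onto the fceq opcode.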
*/ ins->sreg1 = cmp->sreg1; ins->sreg2 = cmp->sreg2; NULLIFY_INS (cmp); } MONO_ADD_INS (cfg->cbb, ins); *sp++ = ins; break; } case MONO_CEE_LDFTN: { MonoInst *argconst; MonoMethod *cil_method; cmethod = mini_get_method (cfg, method, n, NULL, generic_context); CHECK_CFG_ERROR; if (constrained_class) { if (m_method_is_static (cmethod) && mini_class_check_context_used (cfg, constrained_class)) // FIXME: GENERIC_SHARING_FAILURE (CEE_LDFTN); cmethod = get_constrained_method (cfg, image, n, cmethod, constrained_class, generic_context); constrained_class = NULL; CHECK_CFG_ERROR; } mono_class_init_internal (cmethod->klass); mono_save_token_info (cfg, image, n, cmethod); context_used = mini_method_check_context_used (cfg, cmethod); cil_method = cmethod; if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod)) emit_method_access_failure (cfg, method, cil_method); const gboolean has_unmanaged_callers_only = cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_method_has_unmanaged_callers_only_attribute (cmethod); /* * Optimize the common case of ldftn+delegate creation */ if ((sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) { MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context); if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) { MonoInst *target_ins, *handle_ins; MonoMethod *invoke; int invoke_context_used; if (G_UNLIKELY (has_unmanaged_callers_only)) { mono_error_set_not_supported (cfg->error, "Cannot create delegate from method with UnmanagedCallersOnlyAttribute"); CHECK_CFG_ERROR; } invoke = mono_get_delegate_invoke_internal (ctor_method->klass); if (!invoke || !mono_method_signature_internal (invoke)) LOAD_ERROR; invoke_context_used = mini_method_check_context_used (cfg, invoke); target_ins = sp [-1]; if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) { /*BAD IMPL: We must not add a null check for virtual invoke delegates.*/ if (mono_method_signature_internal (invoke)->param_count == mono_method_signature_internal (cmethod)->param_count) { MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0); MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException"); } } if ((invoke_context_used == 0 || !cfg->gsharedvt) || cfg->llvm_only) { if (cfg->verbose_level > 3) g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL)); if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, FALSE))) { sp --; *sp = handle_ins; CHECK_CFG_EXCEPTION; sp ++; next_ip += 5; il_op = MONO_CEE_NEWOBJ; break; } else { CHECK_CFG_ERROR; } } } } /* UnmanagedCallersOnlyAttribute means ldftn should return a method callable from native */ if (G_UNLIKELY (has_unmanaged_callers_only)) { if (G_UNLIKELY (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) { // Follow CoreCLR, disallow [UnmanagedCallersOnly] and [DllImport] to be used // together emit_not_supported_failure (cfg); EMIT_NEW_PCONST (cfg, ins, NULL); *sp++ = ins; inline_costs += CALL_COST * MIN(10, num_calls++); break; } MonoClass *delegate_klass = NULL; MonoGCHandle target_handle = 0; ERROR_DECL (wrapper_error); MonoMethod *wrapped_cmethod; wrapped_cmethod = mono_marshal_get_managed_wrapper (cmethod, delegate_klass, target_handle, wrapper_error); if (!is_ok (wrapper_error)) { /* if we couldn't create a wrapper because cmethod 
isn't supposed to have an UnmanagedCallersOnly attribute, follow CoreCLR behavior and throw when the method with the ldftn is executing, not when it is being compiled. */ emit_invalid_program_with_msg (cfg, wrapper_error, method, cmethod); mono_error_cleanup (wrapper_error); EMIT_NEW_PCONST (cfg, ins, NULL); *sp++ = ins; inline_costs += CALL_COST * MIN(10, num_calls++); break; } else { cmethod = wrapped_cmethod; } } argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD); ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst); *sp++ = ins; inline_costs += CALL_COST * MIN(10, num_calls++); break; } case MONO_CEE_LDVIRTFTN: { MonoInst *args [2]; cmethod = mini_get_method (cfg, method, n, NULL, generic_context); CHECK_CFG_ERROR; mono_class_init_internal (cmethod->klass); context_used = mini_method_check_context_used (cfg, cmethod); /* * Optimize the common case of ldvirtftn+delegate creation */ if (previous_il_op == MONO_CEE_DUP && (sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) { MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context); if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) { MonoInst *target_ins, *handle_ins; MonoMethod *invoke; int invoke_context_used; const gboolean is_virtual = (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) != 0; invoke = mono_get_delegate_invoke_internal (ctor_method->klass); if (!invoke || !mono_method_signature_internal (invoke)) LOAD_ERROR; invoke_context_used = mini_method_check_context_used (cfg, invoke); target_ins = sp [-1]; if (invoke_context_used == 0 || !cfg->gsharedvt || cfg->llvm_only) { if (cfg->verbose_level > 3) g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL)); if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, is_virtual))) { sp -= 2; *sp = handle_ins; CHECK_CFG_EXCEPTION; next_ip += 5; previous_il_op = MONO_CEE_NEWOBJ; sp ++; break; } else { CHECK_CFG_ERROR; } } } } --sp; args [0] = *sp; args [1] = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD); if (context_used) *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args); else *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args); inline_costs += CALL_COST * MIN(10, num_calls++); break; } case MONO_CEE_LOCALLOC: { MonoBasicBlock *non_zero_bb, *end_bb; int alloc_ptr = alloc_preg (cfg); --sp; if (sp != stack_start) UNVERIFIED; if (cfg->method != method) /* * Inlining this into a loop in a parent could lead to * stack overflows which is different behavior than the * non-inlined case, thus disable inlining in this case. 
*/ INLINE_FAILURE("localloc"); NEW_BBLOCK (cfg, non_zero_bb); NEW_BBLOCK (cfg, end_bb); /* if size != zero */ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb); //size is zero, so result is NULL MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL); MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb); MONO_START_BB (cfg, non_zero_bb); MONO_INST_NEW (cfg, ins, OP_LOCALLOC); ins->dreg = alloc_ptr; ins->sreg1 = sp [0]->dreg; ins->type = STACK_PTR; MONO_ADD_INS (cfg->cbb, ins); cfg->flags |= MONO_CFG_HAS_ALLOCA; if (header->init_locals) ins->flags |= MONO_INST_INIT; MONO_START_BB (cfg, end_bb); EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr); ins->type = STACK_PTR; *sp++ = ins; break; } case MONO_CEE_ENDFILTER: { MonoExceptionClause *clause, *nearest; int cc; --sp; if ((sp != stack_start) || (sp [0]->type != STACK_I4)) UNVERIFIED; MONO_INST_NEW (cfg, ins, OP_ENDFILTER); ins->sreg1 = (*sp)->dreg; MONO_ADD_INS (cfg->cbb, ins); start_new_bblock = 1; nearest = NULL; for (cc = 0; cc < header->num_clauses; ++cc) { clause = &header->clauses [cc]; if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) && ((next_ip - header->code) > clause->data.filter_offset && (next_ip - header->code) <= clause->handler_offset) && (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) nearest = clause; } g_assert (nearest); if ((next_ip - header->code) != nearest->handler_offset) UNVERIFIED; break; } case MONO_CEE_UNALIGNED_: ins_flag |= MONO_INST_UNALIGNED; /* FIXME: record alignment? we can assume 1 for now */ break; case MONO_CEE_VOLATILE_: ins_flag |= MONO_INST_VOLATILE; break; case MONO_CEE_TAIL_: ins_flag |= MONO_INST_TAILCALL; cfg->flags |= MONO_CFG_HAS_TAILCALL; /* Can't inline tailcalls at this time */ inline_costs += 100000; break; case MONO_CEE_INITOBJ: --sp; klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); if (mini_class_is_reference (klass)) MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0); else mini_emit_initobj (cfg, *sp, NULL, klass); inline_costs += 1; break; case MONO_CEE_CONSTRAINED_: constrained_class = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (constrained_class); ins_has_side_effect = FALSE; break; case MONO_CEE_CPBLK: sp -= 3; mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag); ins_flag = 0; inline_costs += 1; break; case MONO_CEE_INITBLK: sp -= 3; mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag); ins_flag = 0; inline_costs += 1; break; case MONO_CEE_NO_: if (ip [2] & CEE_NO_TYPECHECK) ins_flag |= MONO_INST_NOTYPECHECK; if (ip [2] & CEE_NO_RANGECHECK) ins_flag |= MONO_INST_NORANGECHECK; if (ip [2] & CEE_NO_NULLCHECK) ins_flag |= MONO_INST_NONULLCHECK; break; case MONO_CEE_RETHROW: { MonoInst *load; int handler_offset = -1; for (i = 0; i < header->num_clauses; ++i) { MonoExceptionClause *clause = &header->clauses [i]; if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) { handler_offset = clause->handler_offset; break; } } cfg->cbb->flags |= BB_EXCEPTION_UNSAFE; if (handler_offset == -1) UNVERIFIED; EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0); MONO_INST_NEW (cfg, ins, OP_RETHROW); ins->sreg1 = load->dreg; MONO_ADD_INS (cfg->cbb, ins); MONO_INST_NEW (cfg, ins, OP_NOT_REACHED); MONO_ADD_INS (cfg->cbb, ins); sp = stack_start; link_bblock (cfg, cfg->cbb, end_bblock); start_new_bblock = 1; 
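/* Control cannot fall through a rethrow: OP_NOT_REACHED terminates this bblock, and any IL that follows starts a new basic block with an empty stack. */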
break; } case MONO_CEE_MONO_RETHROW: { if (sp [-1]->type != STACK_OBJ) UNVERIFIED; MONO_INST_NEW (cfg, ins, OP_RETHROW); --sp; ins->sreg1 = sp [0]->dreg; cfg->cbb->out_of_line = TRUE; MONO_ADD_INS (cfg->cbb, ins); MONO_INST_NEW (cfg, ins, OP_NOT_REACHED); MONO_ADD_INS (cfg->cbb, ins); sp = stack_start; link_bblock (cfg, cfg->cbb, end_bblock); start_new_bblock = 1; /* This can complicate code generation for llvm since the return value might not be defined */ if (COMPILE_LLVM (cfg)) INLINE_FAILURE ("mono_rethrow"); break; } case MONO_CEE_SIZEOF: { guint32 val; int ialign; if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (m_class_get_image (method->klass)) && !generic_context) { MonoType *type = mono_type_create_from_typespec_checked (image, token, cfg->error); CHECK_CFG_ERROR; val = mono_type_size (type, &ialign); EMIT_NEW_ICONST (cfg, ins, val); } else { MonoClass *klass = mini_get_class (method, token, generic_context); CHECK_TYPELOAD (klass); if (mini_is_gsharedvt_klass (klass)) { ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_SIZEOF); ins->type = STACK_I4; } else { val = mono_type_size (m_class_get_byval_arg (klass), &ialign); EMIT_NEW_ICONST (cfg, ins, val); } } *sp++ = ins; break; } case MONO_CEE_REFANYTYPE: { MonoInst *src_var, *src; GSHAREDVT_FAILURE (il_op); --sp; // FIXME: src_var = get_vreg_to_inst (cfg, sp [0]->dreg); if (!src_var) src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg); EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype); EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (mono_defaults.typehandle_class), src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type)); *sp++ = ins; break; } case MONO_CEE_READONLY_: readonly = TRUE; break; case MONO_CEE_UNUSED56: case MONO_CEE_UNUSED57: case MONO_CEE_UNUSED70: case MONO_CEE_UNUSED: case MONO_CEE_UNUSED99: case MONO_CEE_UNUSED58: case MONO_CEE_UNUSED1: UNVERIFIED; default: g_warning ("opcode 0x%02x not handled", il_op); UNVERIFIED; } if (ins_has_side_effect) cfg->cbb->flags |= BB_HAS_SIDE_EFFECTS; } if (start_new_bblock != 1) UNVERIFIED; cfg->cbb->cil_length = ip - cfg->cbb->cil_code; if (cfg->cbb->next_bb) { /* This could already be set because of inlining, #693905 */ MonoBasicBlock *bb = cfg->cbb; while (bb->next_bb) bb = bb->next_bb; bb->next_bb = end_bblock; } else { cfg->cbb->next_bb = end_bblock; } #if defined(TARGET_POWERPC) || defined(TARGET_X86) if (cfg->compile_aot) /* FIXME: The plt slots require a GOT var even if the method doesn't use it */ mono_get_got_var (cfg); #endif #ifdef TARGET_WASM if (cfg->lmf_var && !cfg->deopt) { // mini_llvmonly_pop_lmf () might be called before emit_push_lmf () so initialize the LMF cfg->cbb = init_localsbb; EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL); int lmf_reg = ins->dreg; EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_IMM, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), 0); } #endif if (cfg->method == method && cfg->got_var) mono_emit_load_got_addr (cfg); if (init_localsbb) { cfg->cbb = init_localsbb; cfg->ip = NULL; for (i = 0; i < header->num_locals; ++i) { /* * Vtype initialization might need to be done after CEE_JIT_ATTACH, since it can make calls to memset (), * which need the trampoline code to work. 
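 * That is why struct locals are routed to init_localsbb2 below rather than init_localsbb.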
 */ if (MONO_TYPE_ISSTRUCT (header->locals [i])) cfg->cbb = init_localsbb2; else cfg->cbb = init_localsbb; emit_init_local (cfg, i, header->locals [i], init_locals); } } if (cfg->init_ref_vars && cfg->method == method) { /* Emit initialization for ref vars */ // FIXME: Avoid duplicate initialization for IL locals. for (i = 0; i < cfg->num_varinfo; ++i) { MonoInst *ins = cfg->varinfo [i]; if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ) MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL); } } if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) { cfg->cbb = init_localsbb; emit_push_lmf (cfg); } /* emit profiler enter code after a jit attach if there is one */ cfg->cbb = init_localsbb2; mini_profiler_emit_enter (cfg); cfg->cbb = init_localsbb; if (seq_points) { MonoBasicBlock *bb; /* * Make seq points at backward branch targets interruptible. */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT) bb->code->flags |= MONO_INST_SINGLE_STEP_LOC; } /* Add a sequence point for method entry/exit events */ if (seq_points && cfg->gen_sdb_seq_points) { NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE); MONO_ADD_INS (init_localsbb, ins); NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE); MONO_ADD_INS (cfg->bb_exit, ins); } /* * Add seq points for IL offsets which have line number info but for which no seq point was generated during JITting because * the code they refer to was dead (#11880). */ if (sym_seq_points) { for (i = 0; i < header->code_size; ++i) { if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) { MonoInst *ins; NEW_SEQ_POINT (cfg, ins, i, FALSE); mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE); } } } cfg->ip = NULL; if (cfg->method == method) { compute_bb_regions (cfg); } else { MonoBasicBlock *bb; /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */ for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) { bb->real_offset = inline_offset; } } if (inline_costs < 0) { char *mname; /* Method is too large */ mname = mono_method_full_name (method, TRUE); mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname)); g_free (mname); } if ((cfg->verbose_level > 2) && (cfg->method == method)) mono_print_code (cfg, "AFTER METHOD-TO-IR"); goto cleanup; mono_error_exit: if (cfg->verbose_level > 3) g_print ("exiting due to error"); g_assert (!is_ok (cfg->error)); goto cleanup; exception_exit: if (cfg->verbose_level > 3) g_print ("exiting due to exception"); g_assert (cfg->exception_type != MONO_EXCEPTION_NONE); goto cleanup; unverified: if (cfg->verbose_level > 3) g_print ("exiting due to invalid il"); set_exception_type_from_invalid_il (cfg, method, ip); goto cleanup; cleanup: g_slist_free (class_inits); mono_basic_block_free (original_bb); cfg->dont_inline = g_list_remove (cfg->dont_inline, method); if (cfg->exception_type) return -1; else return inline_costs; } static int store_membase_reg_to_store_membase_imm (int opcode) { switch (opcode) { case OP_STORE_MEMBASE_REG: return OP_STORE_MEMBASE_IMM; case OP_STOREI1_MEMBASE_REG: return OP_STOREI1_MEMBASE_IMM; case OP_STOREI2_MEMBASE_REG: return OP_STOREI2_MEMBASE_IMM; case OP_STOREI4_MEMBASE_REG: return OP_STOREI4_MEMBASE_IMM; case OP_STOREI8_MEMBASE_REG: return OP_STOREI8_MEMBASE_IMM; default: g_assert_not_reached (); } return -1; } int mono_op_to_op_imm (int opcode) { switch (opcode) { case OP_IADD: return OP_IADD_IMM; case OP_ISUB:
return OP_ISUB_IMM; case OP_IDIV: return OP_IDIV_IMM; case OP_IDIV_UN: return OP_IDIV_UN_IMM; case OP_IREM: return OP_IREM_IMM; case OP_IREM_UN: return OP_IREM_UN_IMM; case OP_IMUL: return OP_IMUL_IMM; case OP_IAND: return OP_IAND_IMM; case OP_IOR: return OP_IOR_IMM; case OP_IXOR: return OP_IXOR_IMM; case OP_ISHL: return OP_ISHL_IMM; case OP_ISHR: return OP_ISHR_IMM; case OP_ISHR_UN: return OP_ISHR_UN_IMM; case OP_LADD: return OP_LADD_IMM; case OP_LSUB: return OP_LSUB_IMM; case OP_LAND: return OP_LAND_IMM; case OP_LOR: return OP_LOR_IMM; case OP_LXOR: return OP_LXOR_IMM; case OP_LSHL: return OP_LSHL_IMM; case OP_LSHR: return OP_LSHR_IMM; case OP_LSHR_UN: return OP_LSHR_UN_IMM; #if SIZEOF_REGISTER == 8 case OP_LMUL: return OP_LMUL_IMM; case OP_LREM: return OP_LREM_IMM; #endif case OP_COMPARE: return OP_COMPARE_IMM; case OP_ICOMPARE: return OP_ICOMPARE_IMM; case OP_LCOMPARE: return OP_LCOMPARE_IMM; case OP_STORE_MEMBASE_REG: return OP_STORE_MEMBASE_IMM; case OP_STOREI1_MEMBASE_REG: return OP_STOREI1_MEMBASE_IMM; case OP_STOREI2_MEMBASE_REG: return OP_STOREI2_MEMBASE_IMM; case OP_STOREI4_MEMBASE_REG: return OP_STOREI4_MEMBASE_IMM; #if defined(TARGET_X86) || defined (TARGET_AMD64) case OP_X86_PUSH: return OP_X86_PUSH_IMM; case OP_X86_COMPARE_MEMBASE_REG: return OP_X86_COMPARE_MEMBASE_IMM; #endif #if defined(TARGET_AMD64) case OP_AMD64_ICOMPARE_MEMBASE_REG: return OP_AMD64_ICOMPARE_MEMBASE_IMM; #endif case OP_VOIDCALL_REG: return OP_VOIDCALL; case OP_CALL_REG: return OP_CALL; case OP_LCALL_REG: return OP_LCALL; case OP_FCALL_REG: return OP_FCALL; case OP_LOCALLOC: return OP_LOCALLOC_IMM; } return -1; } int mono_load_membase_to_load_mem (int opcode) { // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro #if defined(TARGET_X86) || defined(TARGET_AMD64) switch (opcode) { case OP_LOAD_MEMBASE: return OP_LOAD_MEM; case OP_LOADU1_MEMBASE: return OP_LOADU1_MEM; case OP_LOADU2_MEMBASE: return OP_LOADU2_MEM; case OP_LOADI4_MEMBASE: return OP_LOADI4_MEM; case OP_LOADU4_MEMBASE: return OP_LOADU4_MEM; #if SIZEOF_REGISTER == 8 case OP_LOADI8_MEMBASE: return OP_LOADI8_MEM; #endif } #endif return -1; } static int op_to_op_dest_membase (int store_opcode, int opcode) { #if defined(TARGET_X86) if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG))) return -1; switch (opcode) { case OP_IADD: return OP_X86_ADD_MEMBASE_REG; case OP_ISUB: return OP_X86_SUB_MEMBASE_REG; case OP_IAND: return OP_X86_AND_MEMBASE_REG; case OP_IOR: return OP_X86_OR_MEMBASE_REG; case OP_IXOR: return OP_X86_XOR_MEMBASE_REG; case OP_ADD_IMM: case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM; case OP_SUB_IMM: case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM; case OP_AND_IMM: case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM; case OP_OR_IMM: case OP_IOR_IMM: return OP_X86_OR_MEMBASE_IMM; case OP_XOR_IMM: case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM; case OP_MOVE: return OP_NOP; } #endif #if defined(TARGET_AMD64) if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG))) return -1; switch (opcode) { case OP_IADD: return OP_X86_ADD_MEMBASE_REG; case OP_ISUB: return OP_X86_SUB_MEMBASE_REG; case OP_IAND: return OP_X86_AND_MEMBASE_REG; case OP_IOR: return OP_X86_OR_MEMBASE_REG; case OP_IXOR: return OP_X86_XOR_MEMBASE_REG; case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM; case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM; case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM; case OP_IOR_IMM: return OP_X86_OR_MEMBASE_IMM; case OP_IXOR_IMM: return 
OP_X86_XOR_MEMBASE_IMM; case OP_LADD: return OP_AMD64_ADD_MEMBASE_REG; case OP_LSUB: return OP_AMD64_SUB_MEMBASE_REG; case OP_LAND: return OP_AMD64_AND_MEMBASE_REG; case OP_LOR: return OP_AMD64_OR_MEMBASE_REG; case OP_LXOR: return OP_AMD64_XOR_MEMBASE_REG; case OP_ADD_IMM: case OP_LADD_IMM: return OP_AMD64_ADD_MEMBASE_IMM; case OP_SUB_IMM: case OP_LSUB_IMM: return OP_AMD64_SUB_MEMBASE_IMM; case OP_AND_IMM: case OP_LAND_IMM: return OP_AMD64_AND_MEMBASE_IMM; case OP_OR_IMM: case OP_LOR_IMM: return OP_AMD64_OR_MEMBASE_IMM; case OP_XOR_IMM: case OP_LXOR_IMM: return OP_AMD64_XOR_MEMBASE_IMM; case OP_MOVE: return OP_NOP; } #endif return -1; } static int op_to_op_store_membase (int store_opcode, int opcode) { #if defined(TARGET_X86) || defined(TARGET_AMD64) switch (opcode) { case OP_ICEQ: if (store_opcode == OP_STOREI1_MEMBASE_REG) return OP_X86_SETEQ_MEMBASE; case OP_CNE: if (store_opcode == OP_STOREI1_MEMBASE_REG) return OP_X86_SETNE_MEMBASE; } #endif return -1; } static int op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode) { #ifdef TARGET_X86 /* FIXME: This has sign extension issues */ /* if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE)) return OP_X86_COMPARE_MEMBASE8_IMM; */ if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))) return -1; switch (opcode) { case OP_X86_PUSH: return OP_X86_PUSH_MEMBASE; case OP_COMPARE_IMM: case OP_ICOMPARE_IMM: return OP_X86_COMPARE_MEMBASE_IMM; case OP_COMPARE: case OP_ICOMPARE: return OP_X86_COMPARE_MEMBASE_REG; } #endif #ifdef TARGET_AMD64 /* FIXME: This has sign extension issues */ /* if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE)) return OP_X86_COMPARE_MEMBASE8_IMM; */ switch (opcode) { case OP_X86_PUSH: if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE)) return OP_X86_PUSH_MEMBASE; break; /* FIXME: This only works for 32 bit immediates case OP_COMPARE_IMM: case OP_LCOMPARE_IMM: if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE)) return OP_AMD64_COMPARE_MEMBASE_IMM; */ case OP_ICOMPARE_IMM: if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) return OP_AMD64_ICOMPARE_MEMBASE_IMM; break; case OP_COMPARE: case OP_LCOMPARE: if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE) return OP_AMD64_ICOMPARE_MEMBASE_REG; if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE)) return OP_AMD64_COMPARE_MEMBASE_REG; break; case OP_ICOMPARE: if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) return OP_AMD64_ICOMPARE_MEMBASE_REG; break; } #endif return -1; } static int op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode) { #ifdef TARGET_X86 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))) return -1; switch (opcode) { case OP_COMPARE: case OP_ICOMPARE: return OP_X86_COMPARE_REG_MEMBASE; case OP_IADD: return OP_X86_ADD_REG_MEMBASE; case OP_ISUB: return OP_X86_SUB_REG_MEMBASE; case OP_IAND: return OP_X86_AND_REG_MEMBASE; case OP_IOR: return OP_X86_OR_REG_MEMBASE; case OP_IXOR: return OP_X86_XOR_REG_MEMBASE; } #endif #ifdef TARGET_AMD64 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) { switch (opcode) { case OP_ICOMPARE: return OP_AMD64_ICOMPARE_REG_MEMBASE; case OP_IADD: return OP_X86_ADD_REG_MEMBASE; case 
OP_ISUB: return OP_X86_SUB_REG_MEMBASE; case OP_IAND: return OP_X86_AND_REG_MEMBASE; case OP_IOR: return OP_X86_OR_REG_MEMBASE; case OP_IXOR: return OP_X86_XOR_REG_MEMBASE; } } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) { switch (opcode) { case OP_COMPARE: case OP_LCOMPARE: return OP_AMD64_COMPARE_REG_MEMBASE; case OP_LADD: return OP_AMD64_ADD_REG_MEMBASE; case OP_LSUB: return OP_AMD64_SUB_REG_MEMBASE; case OP_LAND: return OP_AMD64_AND_REG_MEMBASE; case OP_LOR: return OP_AMD64_OR_REG_MEMBASE; case OP_LXOR: return OP_AMD64_XOR_REG_MEMBASE; } } #endif return -1; } int mono_op_to_op_imm_noemul (int opcode) { MONO_DISABLE_WARNING(4065) // switch with default but no case switch (opcode) { #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS) case OP_LSHR: case OP_LSHL: case OP_LSHR_UN: return -1; #endif #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV) case OP_IDIV: case OP_IDIV_UN: case OP_IREM: case OP_IREM_UN: return -1; #endif #if defined(MONO_ARCH_EMULATE_MUL_DIV) case OP_IMUL: return -1; #endif default: return mono_op_to_op_imm (opcode); } MONO_RESTORE_WARNING } gboolean mono_op_no_side_effects (int opcode) { /* FIXME: Add more instructions */ /* INEG sets the condition codes, and the OP_LNEG decomposition depends on this on x86 */ switch (opcode) { case OP_MOVE: case OP_FMOVE: case OP_VMOVE: case OP_XMOVE: case OP_RMOVE: case OP_VZERO: case OP_XZERO: case OP_ICONST: case OP_I8CONST: case OP_ADD_IMM: case OP_R8CONST: case OP_LADD_IMM: case OP_ISUB_IMM: case OP_IADD_IMM: case OP_LNEG: case OP_ISUB: case OP_CMOV_IGE: case OP_ISHL_IMM: case OP_ISHR_IMM: case OP_ISHR_UN_IMM: case OP_IAND_IMM: case OP_ICONV_TO_U1: case OP_ICONV_TO_I1: case OP_SEXT_I4: case OP_LCONV_TO_U1: case OP_ICONV_TO_U2: case OP_ICONV_TO_I2: case OP_LCONV_TO_I2: case OP_LDADDR: case OP_PHI: case OP_NOP: case OP_ZEXT_I4: case OP_NOT_NULL: case OP_IL_SEQ_POINT: case OP_RTTYPE: return TRUE; default: return FALSE; } } gboolean mono_ins_no_side_effects (MonoInst *ins) { if (mono_op_no_side_effects (ins->opcode)) return TRUE; if (ins->opcode == OP_AOTCONST) { MonoJumpInfoType type = (MonoJumpInfoType)(intptr_t)ins->inst_p1; // Some AOTCONSTs have side effects switch (type) { case MONO_PATCH_INFO_TYPE_FROM_HANDLE: case MONO_PATCH_INFO_LDSTR: case MONO_PATCH_INFO_VTABLE: case MONO_PATCH_INFO_METHOD_RGCTX: return TRUE; } } return FALSE; } /** * mono_handle_global_vregs: * * Make vregs used in more than one bblock 'global', i.e. allocate a variable * for them. 
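 *
 * As a hedged illustration (not part of the original source), a vreg whose
 * definition and use land in different bblocks gets a backing variable:
 *
 *   BB0:  R20 <- iconst 42         ; R20 defined in BB0
 *   BB1:  R21 <- int_add R20, R22  ; R20 used in BB1 -> R20 is made global
 *
 * A vreg referenced by only one bblock stays local and is later assigned a
 * hard register by the local register allocator.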
*/ void mono_handle_global_vregs (MonoCompile *cfg) { gint32 *vreg_to_bb; MonoBasicBlock *bb; int i, pos; vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1); #ifdef MONO_ARCH_SIMD_INTRINSICS if (cfg->uses_simd_intrinsics & MONO_CFG_USES_SIMD_INTRINSICS_SIMPLIFY_INDIRECTION) mono_simd_simplify_indirection (cfg); #endif /* Find local vregs used in more than one bb */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins = bb->code; int block_num = bb->block_num; if (cfg->verbose_level > 2) printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num); cfg->cbb = bb; for (; ins; ins = ins->next) { const char *spec = INS_INFO (ins->opcode); int regtype = 0, regindex; gint32 prev_bb; if (G_UNLIKELY (cfg->verbose_level > 2)) mono_print_ins (ins); g_assert (ins->opcode >= MONO_CEE_LAST); for (regindex = 0; regindex < 4; regindex ++) { int vreg = 0; if (regindex == 0) { regtype = spec [MONO_INST_DEST]; if (regtype == ' ') continue; vreg = ins->dreg; } else if (regindex == 1) { regtype = spec [MONO_INST_SRC1]; if (regtype == ' ') continue; vreg = ins->sreg1; } else if (regindex == 2) { regtype = spec [MONO_INST_SRC2]; if (regtype == ' ') continue; vreg = ins->sreg2; } else if (regindex == 3) { regtype = spec [MONO_INST_SRC3]; if (regtype == ' ') continue; vreg = ins->sreg3; } #if SIZEOF_REGISTER == 4 /* In the LLVM case, the long opcodes are not decomposed */ if (regtype == 'l' && !COMPILE_LLVM (cfg)) { /* * Since some instructions reference the original long vreg, * and some reference the two component vregs, it is quite hard * to determine when it needs to be global. So be conservative. */ if (!get_vreg_to_inst (cfg, vreg)) { mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg); if (cfg->verbose_level > 2) printf ("LONG VREG R%d made global.\n", vreg); } /* * Make the component vregs volatile since the optimizations can * get confused otherwise. 
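 *
 * (A hedged note, not in the original: on 32-bit targets a 64-bit vreg R is
 * shadowed by two component vregs, roughly
 *
 *   low word:  MONO_LVREG_LS (R)
 *   high word: MONO_LVREG_MS (R)
 *
 * and the statements below pin both components as volatile.)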
*/ get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE; get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE; } #endif g_assert (vreg != -1); prev_bb = vreg_to_bb [vreg]; if (prev_bb == 0) { /* 0 is a valid block num */ vreg_to_bb [vreg] = block_num + 1; } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) { if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS))) continue; if (!get_vreg_to_inst (cfg, vreg)) { if (G_UNLIKELY (cfg->verbose_level > 2)) printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num); switch (regtype) { case 'i': if (vreg_is_ref (cfg, vreg)) mono_compile_create_var_for_vreg (cfg, mono_get_object_type (), OP_LOCAL, vreg); else mono_compile_create_var_for_vreg (cfg, mono_get_int_type (), OP_LOCAL, vreg); break; case 'l': mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg); break; case 'f': mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL, vreg); break; case 'v': case 'x': mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL, vreg); break; default: g_assert_not_reached (); } } /* Flag as having been used in more than one bb */ vreg_to_bb [vreg] = -1; } } } } /* If a variable is used in only one bblock, convert it into a local vreg */ for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *var = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); switch (var->type) { case STACK_I4: case STACK_OBJ: case STACK_PTR: case STACK_MP: case STACK_VTYPE: #if SIZEOF_REGISTER == 8 case STACK_I8: #endif #if !defined(TARGET_X86) /* Enabling this screws up the fp stack on x86 */ case STACK_R8: #endif if (mono_arch_is_soft_float ()) break; /* if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype)) break; */ /* Arguments are implicitly global */ /* Putting R4 vars into registers doesn't work currently */ /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */ if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (m_class_get_byval_arg (var->klass)->type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) { /* * Make sure that the variable's liveness interval doesn't contain a call, since * that would cause the lvreg to be spilled, making the whole optimization * useless. 
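 *
 * Sketch of the failure mode this guards against (illustrative only, not
 * from the original source):
 *
 *   R30 <- loadi4_membase [<slot of var>]  ; lvreg caches the variable
 *   call <method>                          ; call forces R30 to be spilled
 *   R31 <- int_add R30, R32                ; cached value reloaded anyway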
*/ /* This is too slow for JIT compilation */ #if 0 if (cfg->compile_aot && vreg_to_bb [var->dreg]) { MonoInst *ins; int def_index, call_index, ins_index; gboolean spilled = FALSE; def_index = -1; call_index = -1; ins_index = 0; for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) { const char *spec = INS_INFO (ins->opcode); if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg)) def_index = ins_index; if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) || ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) { if (call_index > def_index) { spilled = TRUE; break; } } if (MONO_IS_CALL (ins)) call_index = ins_index; ins_index ++; } if (spilled) break; } #endif if (G_UNLIKELY (cfg->verbose_level > 2)) printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx); var->flags |= MONO_INST_IS_DEAD; cfg->vreg_to_inst [var->dreg] = NULL; } break; } } /* * Compress the varinfo and vars tables so the liveness computation is faster and * takes up less space. */ pos = 0; for (i = 0; i < cfg->num_varinfo; ++i) { MonoInst *var = cfg->varinfo [i]; if (pos < i && cfg->locals_start == i) cfg->locals_start = pos; if (!(var->flags & MONO_INST_IS_DEAD)) { if (pos < i) { cfg->varinfo [pos] = cfg->varinfo [i]; cfg->varinfo [pos]->inst_c0 = pos; memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar)); cfg->vars [pos].idx = pos; #if SIZEOF_REGISTER == 4 if (cfg->varinfo [pos]->type == STACK_I8) { /* Modify the two component vars too */ MonoInst *var1; var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg)); var1->inst_c0 = pos; var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg)); var1->inst_c0 = pos; } #endif } pos ++; } } cfg->num_varinfo = pos; if (cfg->locals_start > cfg->num_varinfo) cfg->locals_start = cfg->num_varinfo; } /* * mono_allocate_gsharedvt_vars: * * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array. * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes. */ void mono_allocate_gsharedvt_vars (MonoCompile *cfg) { int i; cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg); for (i = 0; i < cfg->num_varinfo; ++i) { MonoInst *ins = cfg->varinfo [i]; int idx; if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) { if (i >= cfg->locals_start) { /* Local */ idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET); cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1; ins->opcode = OP_GSHAREDVT_LOCAL; ins->inst_imm = idx; } else { /* Arg */ cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1; ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET; } } } } /** * mono_spill_global_vars: * * Generate spill code for variables which are not allocated to registers, * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if * code is generated which could be optimized by the local optimization passes. 
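 *
 * Illustrative before/after (a sketch, not taken from the original source):
 * with R16, R17 and R18 all allocated to stack slots, an instruction such as
 *
 *   R16 <- int_add R17, R18
 *
 * is rewritten into load/op/store form, roughly
 *
 *   R33 <- loadi4_membase [<slot of R17>]
 *   R34 <- loadi4_membase [<slot of R18>]
 *   R35 <- int_add R33, R34
 *   storei4_membase [<slot of R16>] <- R35
 *
 * unless one of the fused _membase forms handled below applies.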
*/ void mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts) { MonoBasicBlock *bb; char spec2 [16]; int orig_next_vreg; guint32 *vreg_to_lvreg; guint32 *lvregs; guint32 i, lvregs_len, lvregs_size; gboolean dest_has_lvreg = FALSE; MonoStackType stacktypes [128]; MonoInst **live_range_start, **live_range_end; MonoBasicBlock **live_range_start_bb, **live_range_end_bb; *need_local_opts = FALSE; memset (spec2, 0, sizeof (spec2)); /* FIXME: Move this function to mini.c */ stacktypes [(int)'i'] = STACK_PTR; stacktypes [(int)'l'] = STACK_I8; stacktypes [(int)'f'] = STACK_R8; #ifdef MONO_ARCH_SIMD_INTRINSICS stacktypes [(int)'x'] = STACK_VTYPE; #endif #if SIZEOF_REGISTER == 4 /* Create MonoInsts for longs */ for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) { switch (ins->type) { case STACK_R8: case STACK_I8: { MonoInst *tree; if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg)) break; g_assert (ins->opcode == OP_REGOFFSET); tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg)); g_assert (tree); tree->opcode = OP_REGOFFSET; tree->inst_basereg = ins->inst_basereg; tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET; tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg)); g_assert (tree); tree->opcode = OP_REGOFFSET; tree->inst_basereg = ins->inst_basereg; tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET; break; } default: break; } } } #endif if (cfg->compute_gc_maps) { /* registers need liveness info even for !non refs */ for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; if (ins->opcode == OP_REGVAR) ins->flags |= MONO_INST_GC_TRACK; } } /* FIXME: widening and truncation */ /* * As an optimization, when a variable allocated to the stack is first loaded into * an lvreg, we will remember the lvreg and use it the next time instead of loading * the variable again. */ orig_next_vreg = cfg->next_vreg; vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg); lvregs_size = 1024; lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size); lvregs_len = 0; /* * These arrays contain the first and last instructions accessing a given * variable. * Since we emit bblocks in the same order we process them here, and we * don't split live ranges, these will precisely describe the live range of * the variable, i.e. the instruction range where a valid value can be found * in the variables location. * The live range is computed using the liveness info computed by the liveness pass. * We can't use vmv->range, since that is an abstract live range, and we need * one which is instruction precise. * FIXME: Variables used in out-of-line bblocks have a hole in their live range. 
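 *
 * (Hedged sketch, not in the original: for a vreg V,
 *
 *   live_range_start [V] = first ins writing V, e.g. "R40 <- iconst 1"
 *   live_range_end   [V] = last ins reading V,  e.g. "R41 <- int_add R41, R40"
 *
 * so the pair brackets the instruction range in which V's location holds a
 * valid value.)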
*/ /* FIXME: Only do this if debugging info is requested */ live_range_start = g_new0 (MonoInst*, cfg->next_vreg); live_range_end = g_new0 (MonoInst*, cfg->next_vreg); live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg); live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg); /* Add spill loads/stores */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins; if (cfg->verbose_level > 2) printf ("\nSPILL BLOCK %d:\n", bb->block_num); /* Clear vreg_to_lvreg array */ for (i = 0; i < lvregs_len; i++) vreg_to_lvreg [lvregs [i]] = 0; lvregs_len = 0; cfg->cbb = bb; MONO_BB_FOR_EACH_INS (bb, ins) { const char *spec = INS_INFO (ins->opcode); int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs; gboolean store, no_lvreg; int sregs [MONO_MAX_SRC_REGS]; if (G_UNLIKELY (cfg->verbose_level > 2)) mono_print_ins (ins); if (ins->opcode == OP_NOP) continue; /* * We handle LDADDR here as well, since it can only be decomposed * when variable addresses are known. */ if (ins->opcode == OP_LDADDR) { MonoInst *var = (MonoInst *)ins->inst_p0; if (var->opcode == OP_VTARG_ADDR) { /* Happens on SPARC/S390 where vtypes are passed by reference */ MonoInst *vtaddr = var->inst_left; if (vtaddr->opcode == OP_REGVAR) { ins->opcode = OP_MOVE; ins->sreg1 = vtaddr->dreg; } else if (var->inst_left->opcode == OP_REGOFFSET) { ins->opcode = OP_LOAD_MEMBASE; ins->inst_basereg = vtaddr->inst_basereg; ins->inst_offset = vtaddr->inst_offset; } else NOT_IMPLEMENTED; } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) { /* gsharedvt arg passed by ref */ g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET); ins->opcode = OP_LOAD_MEMBASE; ins->inst_basereg = var->inst_basereg; ins->inst_offset = var->inst_offset; } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) { MonoInst *load, *load2, *load3; int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1; int reg1, reg2, reg3; MonoInst *info_var = cfg->gsharedvt_info_var; MonoInst *locals_var = cfg->gsharedvt_locals_var; /* * gsharedvt local. * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx]. 
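 *
 * In rough C terms (an illustrative paraphrase of the IR built below, not
 * part of the original source):
 *
 *   info   = gsharedvt_info_var;    // MonoGSharedVtMethodRuntimeInfo ptr
 *   offset = info->entries [idx];   // per-local offset in the locals area
 *   addr   = locals_area + offset;  // locals_area = gsharedvt_locals_var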
*/ g_assert (var->opcode == OP_GSHAREDVT_LOCAL); g_assert (info_var); g_assert (locals_var); /* Mark the instruction used to compute the locals var as used */ cfg->gsharedvt_locals_var_ins = NULL; /* Load the offset */ if (info_var->opcode == OP_REGOFFSET) { reg1 = alloc_ireg (cfg); NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset); } else if (info_var->opcode == OP_REGVAR) { load = NULL; reg1 = info_var->dreg; } else { g_assert_not_reached (); } reg2 = alloc_ireg (cfg); NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P)); /* Load the locals area address */ reg3 = alloc_ireg (cfg); if (locals_var->opcode == OP_REGOFFSET) { NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset); } else if (locals_var->opcode == OP_REGVAR) { NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg); } else { g_assert_not_reached (); } /* Compute the address */ ins->opcode = OP_PADD; ins->sreg1 = reg3; ins->sreg2 = reg2; mono_bblock_insert_before_ins (bb, ins, load3); mono_bblock_insert_before_ins (bb, load3, load2); if (load) mono_bblock_insert_before_ins (bb, load2, load); } else { g_assert (var->opcode == OP_REGOFFSET); ins->opcode = OP_ADD_IMM; ins->sreg1 = var->inst_basereg; ins->inst_imm = var->inst_offset; } *need_local_opts = TRUE; spec = INS_INFO (ins->opcode); } if (ins->opcode < MONO_CEE_LAST) { mono_print_ins (ins); g_assert_not_reached (); } /* * Store opcodes have destbasereg in the dreg, but in reality, it is an * src register. * FIXME: */ if (MONO_IS_STORE_MEMBASE (ins)) { tmp_reg = ins->dreg; ins->dreg = ins->sreg2; ins->sreg2 = tmp_reg; store = TRUE; spec2 [MONO_INST_DEST] = ' '; spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1]; spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST]; spec2 [MONO_INST_SRC3] = ' '; spec = spec2; } else if (MONO_IS_STORE_MEMINDEX (ins)) g_assert_not_reached (); else store = FALSE; no_lvreg = FALSE; if (G_UNLIKELY (cfg->verbose_level > 2)) { printf ("\t %.3s %d", spec, ins->dreg); num_sregs = mono_inst_get_src_registers (ins, sregs); for (srcindex = 0; srcindex < num_sregs; ++srcindex) printf (" %d", sregs [srcindex]); printf ("\n"); } /***************/ /* DREG */ /***************/ regtype = spec [MONO_INST_DEST]; g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' '))); prev_dreg = -1; int dreg_using_dest_to_membase_op = -1; if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) { MonoInst *var = get_vreg_to_inst (cfg, ins->dreg); MonoInst *store_ins; int store_opcode; MonoInst *def_ins = ins; int dreg = ins->dreg; /* The original vreg */ store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype); if (var->opcode == OP_REGVAR) { ins->dreg = var->dreg; } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) { /* * Instead of emitting a load+store, use a _membase opcode. 
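 *
 * E.g. (an illustrative sketch, not from the original source): with the
 * variable at [basereg + 0x10] and the pattern "R50 <- int_add R50, R51",
 * the separate load/add/store sequence collapses into a single
 *
 *   OP_X86_ADD_MEMBASE_REG  [basereg + 0x10], R51
 *
 * per the op_to_op_dest_membase () mapping defined earlier in this file.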
*/ g_assert (var->opcode == OP_REGOFFSET); if (ins->opcode == OP_MOVE) { NULLIFY_INS (ins); def_ins = NULL; } else { dreg_using_dest_to_membase_op = ins->dreg; ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode); ins->inst_basereg = var->inst_basereg; ins->inst_offset = var->inst_offset; ins->dreg = -1; } spec = INS_INFO (ins->opcode); } else { guint32 lvreg; g_assert (var->opcode == OP_REGOFFSET); prev_dreg = ins->dreg; /* Invalidate any previous lvreg for this vreg */ vreg_to_lvreg [ins->dreg] = 0; lvreg = 0; if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) { regtype = 'l'; store_opcode = OP_STOREI8_MEMBASE_REG; } ins->dreg = alloc_dreg (cfg, stacktypes [regtype]); #if SIZEOF_REGISTER != 8 if (regtype == 'l') { NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg)); mono_bblock_insert_after_ins (bb, ins, store_ins); NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg)); mono_bblock_insert_after_ins (bb, ins, store_ins); def_ins = store_ins; } else #endif { g_assert (store_opcode != OP_STOREV_MEMBASE); /* Try to fuse the store into the instruction itself */ /* FIXME: Add more instructions */ if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) { ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode); ins->inst_imm = ins->inst_c0; ins->inst_destbasereg = var->inst_basereg; ins->inst_offset = var->inst_offset; spec = INS_INFO (ins->opcode); } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) { ins->opcode = store_opcode; ins->inst_destbasereg = var->inst_basereg; ins->inst_offset = var->inst_offset; no_lvreg = TRUE; tmp_reg = ins->dreg; ins->dreg = ins->sreg2; ins->sreg2 = tmp_reg; store = TRUE; spec2 [MONO_INST_DEST] = ' '; spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1]; spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST]; spec2 [MONO_INST_SRC3] = ' '; spec = spec2; } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) { // FIXME: The backends expect the base reg to be in inst_basereg ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode); ins->dreg = -1; ins->inst_basereg = var->inst_basereg; ins->inst_offset = var->inst_offset; spec = INS_INFO (ins->opcode); } else { /* printf ("INS: "); mono_print_ins (ins); */ /* Create a store instruction */ NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg); /* Insert it after the instruction */ mono_bblock_insert_after_ins (bb, ins, store_ins); def_ins = store_ins; /* * We can't assign ins->dreg to var->dreg here, since the * sregs could use it. So set a flag, and do it after * the sregs. 
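 *
 * (Illustrative hazard, not in the original: for "R60 <- int_add R60, R61"
 * the dreg aliases sreg1, so recording the dreg's lvreg before the sregs
 * are processed would make sreg1 pick up the not-yet-stored value.)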
*/ if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) dest_has_lvreg = TRUE; } } } if (def_ins && !live_range_start [dreg]) { live_range_start [dreg] = def_ins; live_range_start_bb [dreg] = bb; } if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) { MonoInst *tmp; MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF); tmp->inst_c1 = dreg; mono_bblock_insert_after_ins (bb, def_ins, tmp); } } /************/ /* SREGS */ /************/ num_sregs = mono_inst_get_src_registers (ins, sregs); for (srcindex = 0; srcindex < 3; ++srcindex) { regtype = spec [MONO_INST_SRC1 + srcindex]; sreg = sregs [srcindex]; g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' '))); if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) { MonoInst *var = get_vreg_to_inst (cfg, sreg); MonoInst *use_ins = ins; MonoInst *load_ins; guint32 load_opcode; if (var->opcode == OP_REGVAR) { sregs [srcindex] = var->dreg; //mono_inst_set_src_registers (ins, sregs); live_range_end [sreg] = use_ins; live_range_end_bb [sreg] = bb; if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) { MonoInst *tmp; MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE); /* var->dreg is a hreg */ tmp->inst_c1 = sreg; mono_bblock_insert_after_ins (bb, ins, tmp); } continue; } g_assert (var->opcode == OP_REGOFFSET); load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype); g_assert (load_opcode != OP_LOADV_MEMBASE); if (vreg_to_lvreg [sreg]) { g_assert (vreg_to_lvreg [sreg] != -1); /* The variable is already loaded to an lvreg */ if (G_UNLIKELY (cfg->verbose_level > 2)) printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg); sregs [srcindex] = vreg_to_lvreg [sreg]; //mono_inst_set_src_registers (ins, sregs); continue; } /* Try to fuse the load into the instruction */ if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) { ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode); sregs [0] = var->inst_basereg; //mono_inst_set_src_registers (ins, sregs); ins->inst_offset = var->inst_offset; } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) { ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode); sregs [1] = var->inst_basereg; //mono_inst_set_src_registers (ins, sregs); ins->inst_offset = var->inst_offset; } else { if (MONO_IS_REAL_MOVE (ins)) { ins->opcode = OP_NOP; sreg = ins->dreg; } else { //printf ("%d ", srcindex); mono_print_ins (ins); sreg = alloc_dreg (cfg, stacktypes [regtype]); if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) { if (var->dreg == prev_dreg) { /* * sreg refers to the value loaded by the load * emitted below, but we need to use ins->dreg * since it refers to the store emitted earlier. 
*/ sreg = ins->dreg; } g_assert (sreg != -1); if (var->dreg == dreg_using_dest_to_membase_op) { if (cfg->verbose_level > 2) printf ("\tCan't cache R%d because it's part of a dreg dest_membase optimization\n", var->dreg); } else { vreg_to_lvreg [var->dreg] = sreg; } if (lvregs_len >= lvregs_size) { guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2); memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size); lvregs = new_lvregs; lvregs_size *= 2; } lvregs [lvregs_len ++] = var->dreg; } } sregs [srcindex] = sreg; //mono_inst_set_src_registers (ins, sregs); #if SIZEOF_REGISTER != 8 if (regtype == 'l') { NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET); mono_bblock_insert_before_ins (bb, ins, load_ins); NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET); mono_bblock_insert_before_ins (bb, ins, load_ins); use_ins = load_ins; } else #endif { #if SIZEOF_REGISTER == 4 g_assert (load_opcode != OP_LOADI8_MEMBASE); #endif NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset); mono_bblock_insert_before_ins (bb, ins, load_ins); use_ins = load_ins; } if (cfg->verbose_level > 2) mono_print_ins_index (0, use_ins); } if (var->dreg < orig_next_vreg) { live_range_end [var->dreg] = use_ins; live_range_end_bb [var->dreg] = bb; } if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) { MonoInst *tmp; MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE); tmp->inst_c1 = var->dreg; mono_bblock_insert_after_ins (bb, ins, tmp); } } } mono_inst_set_src_registers (ins, sregs); if (dest_has_lvreg) { g_assert (ins->dreg != -1); vreg_to_lvreg [prev_dreg] = ins->dreg; if (lvregs_len >= lvregs_size) { guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2); memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size); lvregs = new_lvregs; lvregs_size *= 2; } lvregs [lvregs_len ++] = prev_dreg; dest_has_lvreg = FALSE; } if (store) { tmp_reg = ins->dreg; ins->dreg = ins->sreg2; ins->sreg2 = tmp_reg; } if (MONO_IS_CALL (ins)) { /* Clear vreg_to_lvreg array */ for (i = 0; i < lvregs_len; i++) vreg_to_lvreg [lvregs [i]] = 0; lvregs_len = 0; } else if (ins->opcode == OP_NOP) { ins->dreg = -1; MONO_INST_NULLIFY_SREGS (ins); } if (cfg->verbose_level > 2) mono_print_ins_index (1, ins); } /* Extend the live range based on the liveness info */ if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) { for (i = 0; i < cfg->num_varinfo; i ++) { MonoMethodVar *vi = MONO_VARINFO (cfg, i); if (vreg_is_volatile (cfg, vi->vreg)) /* The liveness info is incomplete */ continue; if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) { /* Live from at least the first ins of this bb */ live_range_start [vi->vreg] = bb->code; live_range_start_bb [vi->vreg] = bb; } if (mono_bitset_test_fast (bb->live_out_set, i)) { /* Live at least until the last ins of this bb */ live_range_end [vi->vreg] = bb->last_ins; live_range_end_bb [vi->vreg] = bb; } } } } /* * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them * by storing the current native offset into MonoMethodVar->live_range_start/end. 
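 *
 * Sketch of the emitted markers (illustrative only, not from the original):
 *
 *   OP_LIVERANGE_START  inst_c0 = <var index>, inst_c1 = <vreg>
 *   ...                 ; the variable's location holds a valid value here
 *   OP_LIVERANGE_END    inst_c0 = <var index>, inst_c1 = <vreg>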
*/ if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) { for (i = 0; i < cfg->num_varinfo; ++i) { int vreg = MONO_VARINFO (cfg, i)->vreg; MonoInst *ins; if (live_range_start [vreg]) { MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START); ins->inst_c0 = i; ins->inst_c1 = vreg; mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins); } if (live_range_end [vreg]) { MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END); ins->inst_c0 = i; ins->inst_c1 = vreg; if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins) mono_add_ins_to_end (live_range_end_bb [vreg], ins); else mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins); } } } if (cfg->gsharedvt_locals_var_ins) { /* Nullify if unused */ cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST; cfg->gsharedvt_locals_var_ins->inst_imm = 0; } g_free (live_range_start); g_free (live_range_end); g_free (live_range_start_bb); g_free (live_range_end_bb); } /** * FIXME: * - use 'iadd' instead of 'int_add' * - handling ovf opcodes: decompose in method_to_ir. * - unify iregs/fregs * -> partly done, the missing parts are: * - a more complete unification would involve unifying the hregs as well, so * code wouldn't need if (fp) all over the place. but that would mean the hregs * would no longer map to the machine hregs, so the code generators would need to * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks * wouldn't work any more. Duplicating the code in mono_local_regalloc () into * fp/non-fp branches speeds it up by about 15%. * - use sext/zext opcodes instead of shifts * - add OP_ICALL * - get rid of TEMPLOADs if possible and use vregs instead * - clean up usage of OP_P/OP_ opcodes * - cleanup usage of DUMMY_USE * - cleanup the setting of ins->type for MonoInst's which are pushed on the * stack * - set the stack type and allocate a dreg in the EMIT_NEW macros * - get rid of all the <foo>2 stuff when the new JIT is ready. * - make sure handle_stack_args () is called before the branch is emitted * - when the new IR is done, get rid of all unused stuff * - COMPARE/BEQ as separate instructions or unify them ? * - keeping them separate allows specialized compare instructions like * compare_imm, compare_membase * - most back ends unify fp compare+branch, fp compare+ceq * - integrate mono_save_args into inline_method * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2 * - handle long shift opts on 32 bit platforms somehow: they require * 3 sregs (2 for arg1 and 1 for arg2) * - make byref a 'normal' type. * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a * variable if needed. * - do not start a new IL level bblock when cfg->cbb is changed by a function call * like inline_method. * - remove inlining restrictions * - fix LNEG and enable cfold of INEG * - generalize x86 optimizations like ldelema as a peephole optimization * - add store_mem_imm for amd64 * - optimize the loading of the interruption flag in the managed->native wrappers * - avoid special handling of OP_NOP in passes * - move code inserting instructions into one function/macro. * - try a coalescing phase after liveness analysis * - add float -> vreg conversion + local optimizations on !x86 * - figure out how to handle decomposed branches during optimizations, ie. * compare+branch, op_jump_table+op_br etc. 
* - promote RuntimeXHandles to vregs * - vtype cleanups: * - add a NEW_VARLOADA_VREG macro * - the vtype optimizations are blocked by the LDADDR opcodes generated for * accessing vtype fields. * - get rid of I8CONST on 64 bit platforms * - dealing with the increase in code size due to branches created during opcode * decomposition: * - use extended basic blocks * - all parts of the JIT * - handle_global_vregs () && local regalloc * - avoid introducing global vregs during decomposition, like 'vtable' in isinst * - sources of increase in code size: * - vtypes * - long compares * - isinst and castclass * - lvregs not allocated to global registers even if used multiple times * - call cctors outside the JIT, to make -v output more readable and JIT timings more * meaningful. * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization) * - add all micro optimizations from the old JIT * - put tree optimizations into the deadce pass * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch * specific function. * - unify the float comparison opcodes with the other comparison opcodes, i.e. * fcompare + branchCC. * - create a helper function for allocating a stack slot, taking into account * MONO_CFG_HAS_SPILLUP. * - merge r68207. * - optimize mono_regstate2_alloc_int/float. * - fix the pessimistic handling of variables accessed in exception handler blocks. * - need to write a tree optimization pass, but the creation of trees is difficult, i.e. * parts of the tree could be separated by other instructions, killing the tree * arguments, or stores killing loads etc. Also, should we fold loads into other * instructions if the result of the load is used multiple times ? * - make the REM_IMM optimization in mini-x86.c arch-independent. * - LAST MERGE: 108395. * - when returning vtypes in registers, generate IR and append it to the end of the * last bb instead of doing it in the epilog. * - change the store opcodes so they use sreg1 instead of dreg to store the base register. */ /* NOTES ----- - When to decompose opcodes: - earlier: this makes some optimizations hard to implement, since the low level IR no longer contains the necessary information. But it is easier to do. - later: harder to implement, enables more optimizations. - Branches inside bblocks: - created when decomposing complex opcodes. - branches to another bblock: harmless, but not tracked by the branch optimizations, so need to branch to a label at the start of the bblock. - branches to inside the same bblock: very problematic, trips up the local reg allocator. Can be fixed by splitting the current bblock, but that is a complex operation, since some local vregs can become global vregs etc. - Local/global vregs: - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the local register allocator. - global vregs: used in more than one bblock. Have an associated MonoMethodVar structure, created by mono_create_var (). Assigned to hregs or the stack by the global register allocator. - When to do optimizations like alu->alu_imm: - earlier -> saves work later on since the IR will be smaller/simpler - later -> can work on more instructions - Handling of valuetypes: - When a vtype is pushed on the stack, a new temporary is created, an instruction computing its address (LDADDR) is emitted and pushed on the stack. Need to optimize cases when the vtype is used immediately as in argument passing, stloc etc. An illustrative sketch follows. 
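  An illustrative sketch (not part of the original notes): when a vtype value
  is pushed,

    V_tmp <- (new temporary of the vtype)
    R70   <- OP_LDADDR V_tmp      ; the address is what gets pushed

  and consumers either go through R70 or, in the optimized cases mentioned
  above, use V_tmp directly.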
- Instead of the to_end stuff in the old JIT, simply call the function handling the values on the stack before emitting the last instruction of the bb. */ #else /* !DISABLE_JIT */ MONO_EMPTY_SOURCE_FILE (method_to_ir); #endif /* !DISABLE_JIT */
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/mini-amd64.c
/** * \file * AMD64 backend for the Mono code generator * * Based on mini-x86.c. * * Authors: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * Patrik Torstensson * Zoltan Varga ([email protected]) * Johan Lorensson ([email protected]) * * (C) 2003 Ximian, Inc. * Copyright 2003-2011 Novell, Inc (http://www.novell.com) * Copyright 2011 Xamarin, Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include "mini.h" #include <string.h> #include <math.h> #include <assert.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <mono/metadata/abi-details.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/threads.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/gc-internals.h> #include <mono/metadata/tokentype.h> #include <mono/utils/mono-math.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-memory-model.h> #include <mono/utils/mono-tls.h> #include <mono/utils/mono-hwcap.h> #include <mono/utils/mono-threads.h> #include <mono/utils/unlocked.h> #include "interp/interp.h" #include "ir-emit.h" #include "mini-amd64.h" #include "cpu-amd64.h" #include "mini-gc.h" #include "mini-runtime.h" #include "aot-runtime.h" #ifdef MONO_XEN_OPT static gboolean optimize_for_xen = TRUE; #else #define optimize_for_xen 0 #endif static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math") #define IS_IMM32(val) ((((guint64)val) >> 32) == 0) #define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f)) /* The single step trampoline */ static gpointer ss_trampoline; /* The breakpoint trampoline */ static gpointer bp_trampoline; /* Offset between fp and the first argument in the callee */ #define ARGS_OFFSET 16 #define GP_SCRATCH_REG AMD64_R11 /* Max number of bblocks before we bail from using more advanced branch placement code */ #define MAX_BBLOCKS_FOR_BRANCH_OPTS 800 /* * AMD64 register usage: * - callee saved registers are used for global register allocation * - %r11 is used for materializing 64 bit constants in opcodes * - the rest is used for local allocation */ /* * Floating point comparison results: * ZF PF CF * A > B 0 0 0 * A < B 0 0 1 * A = B 1 0 0 * A > B 0 0 0 * UNORDERED 1 1 1 */ const char* mono_arch_regname (int reg) { switch (reg) { case AMD64_RAX: return "%rax"; case AMD64_RBX: return "%rbx"; case AMD64_RCX: return "%rcx"; case AMD64_RDX: return "%rdx"; case AMD64_RSP: return "%rsp"; case AMD64_RBP: return "%rbp"; case AMD64_RDI: return "%rdi"; case AMD64_RSI: return "%rsi"; case AMD64_R8: return "%r8"; case AMD64_R9: return "%r9"; case AMD64_R10: return "%r10"; case AMD64_R11: return "%r11"; case AMD64_R12: return "%r12"; case AMD64_R13: return "%r13"; case AMD64_R14: return "%r14"; case AMD64_R15: return "%r15"; } return "unknown"; } static const char * const packed_xmmregs [] = { "p:xmm0", "p:xmm1", "p:xmm2", "p:xmm3", "p:xmm4", "p:xmm5", "p:xmm6", "p:xmm7", "p:xmm8", "p:xmm9", "p:xmm10", "p:xmm11", "p:xmm12", "p:xmm13", "p:xmm14", "p:xmm15" }; static const char * const single_xmmregs [] = { "s:xmm0", "s:xmm1", "s:xmm2", "s:xmm3", "s:xmm4", "s:xmm5", "s:xmm6", "s:xmm7", "s:xmm8", "s:xmm9", "s:xmm10", "s:xmm11", "s:xmm12", "s:xmm13", "s:xmm14", "s:xmm15" }; const char* mono_arch_fregname (int reg) { if (reg < AMD64_XMM_NREG) return single_xmmregs [reg]; else return "unknown"; } const char * mono_arch_xregname (int reg) { if (reg < AMD64_XMM_NREG) return packed_xmmregs 
[reg]; else return "unknown"; } static gboolean debug_omit_fp (void) { #if 0 return mono_debug_count (); #else return TRUE; #endif } static gboolean amd64_is_near_call (guint8 *code) { /* Skip REX */ if ((code [0] >= 0x40) && (code [0] <= 0x4f)) code += 1; return code [0] == 0xe8; } static gboolean amd64_use_imm32 (gint64 val) { if (mini_debug_options.single_imm_size) return FALSE; return amd64_is_imm32 (val); } void mono_x86_patch (unsigned char* code, gpointer target) { mono_x86_patch_inline (code, target); } static void amd64_patch (unsigned char* code, gpointer target) { // NOTE: Sometimes code has just been generated, is not running yet, // and has no alignment requirements. Sometimes it could be running while we patch it, // and there are alignment requirements. // FIXME Assert alignment. guint8 rex = 0; /* Skip REX */ if ((code [0] >= 0x40) && (code [0] <= 0x4f)) { rex = code [0]; code += 1; } if ((code [0] & 0xf8) == 0xb8) { /* amd64_set_reg_template */ *(guint64*)(code + 1) = (guint64)target; } else if ((code [0] == 0x8b) && rex && x86_modrm_mod (code [1]) == 0 && x86_modrm_rm (code [1]) == 5) { /* mov 0(%rip), %dreg */ g_assert (!1); // Historical code was incorrect. ptrdiff_t const offset = (guchar*)target - (code + 6); g_assert (offset == (gint32)offset); *(gint32*)(code + 2) = (gint32)offset; } else if (code [0] == 0xff && (code [1] == 0x15 || code [1] == 0x25)) { /* call or jmp *<OFFSET>(%rip) */ // Patch the data, not the code. g_assert (!2); // For possible use later. *(void**)(code + 6 + *(gint32*)(code + 2)) = target; } else x86_patch (code, target); } void mono_amd64_patch (unsigned char* code, gpointer target) { amd64_patch (code, target); } #define DEBUG(a) if (cfg->verbose_level > 1) a static void inline add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo) { ainfo->offset = *stack_size; if (*gr >= PARAM_REGS) { ainfo->storage = ArgOnStack; ainfo->arg_size = sizeof (target_mgreg_t); /* Since the same stack slot size is used for all arg */ /* types, it needs to be big enough to hold them all */ (*stack_size) += sizeof (target_mgreg_t); } else { ainfo->storage = ArgInIReg; ainfo->reg = param_regs [*gr]; (*gr) ++; } } static void inline add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double) { ainfo->offset = *stack_size; if (*gr >= FLOAT_PARAM_REGS) { ainfo->storage = ArgOnStack; ainfo->arg_size = sizeof (target_mgreg_t); /* Since the same stack slot size is used for both float */ /* types, it needs to be big enough to hold them both */ (*stack_size) += sizeof (target_mgreg_t); } else { /* A double register */ if (is_double) ainfo->storage = ArgInDoubleSSEReg; else ainfo->storage = ArgInFloatSSEReg; ainfo->reg = *gr; (*gr) += 1; } } typedef enum ArgumentClass { ARG_CLASS_NO_CLASS, ARG_CLASS_MEMORY, ARG_CLASS_INTEGER, ARG_CLASS_SSE } ArgumentClass; static ArgumentClass merge_argument_class_from_type (MonoType *type, ArgumentClass class1) { ArgumentClass class2 = ARG_CLASS_NO_CLASS; MonoType *ptype; ptype = mini_get_underlying_type (type); switch (ptype->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_OBJECT: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_I8: case MONO_TYPE_U8: class2 = ARG_CLASS_INTEGER; break; case MONO_TYPE_R4: case MONO_TYPE_R8: #ifdef TARGET_WIN32 class2 = ARG_CLASS_INTEGER; #else class2 = ARG_CLASS_SSE; #endif break; case MONO_TYPE_TYPEDBYREF: g_assert_not_reached (); case 
MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ptype)) { class2 = ARG_CLASS_INTEGER; break; } /* fall through */ case MONO_TYPE_VALUETYPE: { MonoMarshalType *info = mono_marshal_load_type_info (ptype->data.klass); int i; for (i = 0; i < info->num_fields; ++i) { class2 = class1; class2 = merge_argument_class_from_type (info->fields [i].field->type, class2); } break; } default: g_assert_not_reached (); } /* Merge */ if (class1 == class2) ; else if (class1 == ARG_CLASS_NO_CLASS) class1 = class2; else if ((class1 == ARG_CLASS_MEMORY) || (class2 == ARG_CLASS_MEMORY)) class1 = ARG_CLASS_MEMORY; else if ((class1 == ARG_CLASS_INTEGER) || (class2 == ARG_CLASS_INTEGER)) class1 = ARG_CLASS_INTEGER; else class1 = ARG_CLASS_SSE; return class1; } typedef struct { MonoType *type; int size, offset; } StructFieldInfo; /* * collect_field_info_nested: * * Collect field info from KLASS recursively into FIELDS. */ static void collect_field_info_nested (MonoClass *klass, GArray *fields_array, int offset, gboolean pinvoke, gboolean unicode) { MonoMarshalType *info; int i; if (pinvoke) { info = mono_marshal_load_type_info (klass); g_assert(info); for (i = 0; i < info->num_fields; ++i) { if (MONO_TYPE_ISSTRUCT (info->fields [i].field->type)) { collect_field_info_nested (mono_class_from_mono_type_internal (info->fields [i].field->type), fields_array, info->fields [i].offset, pinvoke, unicode); } else { guint32 align; StructFieldInfo f; f.type = info->fields [i].field->type; f.size = mono_marshal_type_size (info->fields [i].field->type, info->fields [i].mspec, &align, TRUE, unicode); f.offset = offset + info->fields [i].offset; if (i == info->num_fields - 1 && f.size + f.offset < info->native_size) { /* This can happen with .pack directives eg. 'fixed' arrays */ if (MONO_TYPE_IS_PRIMITIVE (f.type)) { /* Replicate the last field to fill out the remaining place, since the code in add_valuetype () needs type information */ g_array_append_val (fields_array, f); while (f.size + f.offset < info->native_size) { f.offset += f.size; g_array_append_val (fields_array, f); } } else { f.size = info->native_size - f.offset; g_array_append_val (fields_array, f); } } else { g_array_append_val (fields_array, f); } } } } else { gpointer iter; MonoClassField *field; iter = NULL; while ((field = mono_class_get_fields_internal (klass, &iter))) { if (field->type->attrs & FIELD_ATTRIBUTE_STATIC) continue; if (MONO_TYPE_ISSTRUCT (field->type)) { collect_field_info_nested (mono_class_from_mono_type_internal (field->type), fields_array, field->offset - MONO_ABI_SIZEOF (MonoObject), pinvoke, unicode); } else { int align; StructFieldInfo f; f.type = field->type; f.size = mono_type_size (field->type, &align); f.offset = field->offset - MONO_ABI_SIZEOF (MonoObject) + offset; g_array_append_val (fields_array, f); } } } } #ifdef TARGET_WIN32 /* Windows x64 ABI can pass/return value types in register of size 1,2,4,8 bytes. 
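 *
 * A hedged illustration (not from the original source): under this rule
 *
 *   typedef struct { guint8 a, b, c; } Size3;  // sizeof == 3 -> no register
 *   typedef struct { guint32 a; } Size4;       // sizeof == 4 -> register
 *
 * so Size4 can travel in a single integer register while Size3 falls back
 * to memory / pass-by-address handling.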
*/ #define MONO_WIN64_VALUE_TYPE_FITS_REG(arg_size) (arg_size <= SIZEOF_REGISTER && (arg_size == 1 || arg_size == 2 || arg_size == 4 || arg_size == 8)) static gboolean allocate_register_for_valuetype_win64 (ArgInfo *arg_info, ArgumentClass arg_class, guint32 arg_size, const AMD64_Reg_No int_regs [], int int_reg_count, const AMD64_XMM_Reg_No float_regs [], int float_reg_count, guint32 *current_int_reg, guint32 *current_float_reg) { gboolean result = FALSE; assert (arg_info != NULL && int_regs != NULL && float_regs != NULL && current_int_reg != NULL && current_float_reg != NULL); assert (arg_info->storage == ArgValuetypeInReg || arg_info->storage == ArgValuetypeAddrInIReg); arg_info->pair_storage [0] = arg_info->pair_storage [1] = ArgNone; arg_info->pair_regs [0] = arg_info->pair_regs [1] = ArgNone; arg_info->pair_size [0] = 0; arg_info->pair_size [1] = 0; arg_info->nregs = 0; if (arg_class == ARG_CLASS_INTEGER && *current_int_reg < int_reg_count) { /* Pass parameter in integer register. */ arg_info->pair_storage [0] = ArgInIReg; arg_info->pair_regs [0] = int_regs [*current_int_reg]; (*current_int_reg) ++; result = TRUE; } else if (arg_class == ARG_CLASS_SSE && *current_float_reg < float_reg_count) { /* Pass parameter in float register. */ arg_info->pair_storage [0] = (arg_size <= sizeof (gfloat)) ? ArgInFloatSSEReg : ArgInDoubleSSEReg; arg_info->pair_regs [0] = float_regs [*current_float_reg]; (*current_float_reg) ++; result = TRUE; } if (result == TRUE) { arg_info->pair_size [0] = arg_size; arg_info->nregs = 1; } return result; } static gboolean allocate_parameter_register_for_valuetype_win64 (ArgInfo *arg_info, ArgumentClass arg_class, guint32 arg_size, guint32 *current_int_reg, guint32 *current_float_reg) { return allocate_register_for_valuetype_win64 (arg_info, arg_class, arg_size, param_regs, PARAM_REGS, float_param_regs, FLOAT_PARAM_REGS, current_int_reg, current_float_reg); } static gboolean allocate_return_register_for_valuetype_win64 (ArgInfo *arg_info, ArgumentClass arg_class, guint32 arg_size, guint32 *current_int_reg, guint32 *current_float_reg) { return allocate_register_for_valuetype_win64 (arg_info, arg_class, arg_size, return_regs, RETURN_REGS, float_return_regs, FLOAT_RETURN_REGS, current_int_reg, current_float_reg); } static void allocate_storage_for_valuetype_win64 (ArgInfo *arg_info, MonoType *type, gboolean is_return, ArgumentClass arg_class, guint32 arg_size, guint32 *current_int_reg, guint32 *current_float_reg, guint32 *stack_size) { /* Windows x64 value type ABI. * * Parameters: https://msdn.microsoft.com/en-us/library/zthk2dkh.aspx * * Integer/Float types smaller than or equal to 8 bytes or properly sized struct/union (1,2,4,8) * Try to pass in register using ArgValuetypeInReg/(ArgInIReg|ArgInFloatSSEReg|ArgInDoubleSSEReg) as storage and size of parameter(1,2,4,8), if no more registers, pass on stack using ArgOnStack as storage and size of parameter(1,2,4,8). * Integer/Float types bigger than 8 bytes or struct/unions larger than 8 bytes or (3,5,6,7). * Try to pass pointer in register using ArgValuetypeAddrInIReg, if no more registers, pass pointer on stack using ArgValuetypeAddrOnStack as storage and parameter size of register (8 bytes). * * Return values: https://msdn.microsoft.com/en-us/library/7572ztz4.aspx. * * Integers/Float types smaller than or equal to 8 bytes * Return in corresponding register RAX/XMM0 using ArgValuetypeInReg/(ArgInIReg|ArgInFloatSSEReg|ArgInDoubleSSEReg) as storage and size of parameter(1,2,4,8). 
* Properly sized struct/unions (1,2,4,8) * Return in register RAX using ArgValuetypeInReg as storage and size of parameter(1,2,4,8). * Types bigger than 8 bytes or struct/unions larger than 8 bytes or (3,5,6,7). * Return pointer to allocated stack space (allocated by caller) using ArgValuetypeAddrInIReg as storage and parameter size. */ assert (arg_info != NULL && type != NULL && current_int_reg != NULL && current_float_reg != NULL && stack_size != NULL); if (!is_return) { /* Parameter cases. */ if (arg_class != ARG_CLASS_MEMORY && MONO_WIN64_VALUE_TYPE_FITS_REG (arg_size)) { assert (arg_size == 1 || arg_size == 2 || arg_size == 4 || arg_size == 8); /* First, try to use registers for parameter. If type is struct it can only be passed by value in integer register. */ arg_info->storage = ArgValuetypeInReg; if (!allocate_parameter_register_for_valuetype_win64 (arg_info, !MONO_TYPE_ISSTRUCT (type) ? arg_class : ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg)) { /* No more registers, fallback passing parameter on stack as value. */ assert (arg_info->pair_storage [0] == ArgNone && arg_info->pair_storage [1] == ArgNone && arg_info->pair_size [0] == 0 && arg_info->pair_size [1] == 0 && arg_info->nregs == 0); /* Passing value directly on stack, so use size of value. */ arg_info->storage = ArgOnStack; arg_size = ALIGN_TO (arg_size, sizeof (target_mgreg_t)); arg_info->offset = *stack_size; arg_info->arg_size = arg_size; *stack_size += arg_size; } } else { /* Fallback to stack, try to pass address to parameter in register. Always use integer register to represent stack address. */ arg_info->storage = ArgValuetypeAddrInIReg; if (!allocate_parameter_register_for_valuetype_win64 (arg_info, ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg)) { /* No more registers, fallback passing address to parameter on stack. */ assert (arg_info->pair_storage [0] == ArgNone && arg_info->pair_storage [1] == ArgNone && arg_info->pair_size [0] == 0 && arg_info->pair_size [1] == 0 && arg_info->nregs == 0); /* Passing an address to value on stack, so use size of register as argument size. */ arg_info->storage = ArgValuetypeAddrOnStack; arg_size = sizeof (target_mgreg_t); arg_info->offset = *stack_size; arg_info->arg_size = arg_size; *stack_size += arg_size; } } } else { /* Return value cases. */ if (arg_class != ARG_CLASS_MEMORY && MONO_WIN64_VALUE_TYPE_FITS_REG (arg_size)) { assert (arg_size == 1 || arg_size == 2 || arg_size == 4 || arg_size == 8); /* Return value fits into return registers. If type is struct it can only be returned by value in integer register. */ arg_info->storage = ArgValuetypeInReg; allocate_return_register_for_valuetype_win64 (arg_info, !MONO_TYPE_ISSTRUCT (type) ? arg_class : ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg); /* Only RAX/XMM0 should be used to return valuetype. */ assert ((arg_info->pair_regs[0] == AMD64_RAX && arg_info->pair_regs[1] == ArgNone) || (arg_info->pair_regs[0] == AMD64_XMM0 && arg_info->pair_regs[1] == ArgNone)); } else { /* Return value doesn't fit into return register, return address to allocated stack space (allocated by caller and passed as input). */ arg_info->storage = ArgValuetypeAddrInIReg; allocate_return_register_for_valuetype_win64 (arg_info, ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg); /* Only RAX should be used to return valuetype address. 
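 * (An illustrative case, not in the original: a 16-byte
 * "typedef struct { guint64 a, b; } Big;" return value is written into
 * caller-allocated stack space, and only its address comes back in RAX.)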
*/ assert (arg_info->pair_regs[0] == AMD64_RAX && arg_info->pair_regs[1] == ArgNone); arg_size = ALIGN_TO (arg_size, sizeof (target_mgreg_t)); arg_info->offset = *stack_size; *stack_size += arg_size; } } } static void get_valuetype_size_win64 (MonoClass *klass, gboolean pinvoke, ArgInfo *arg_info, MonoType *type, ArgumentClass *arg_class, guint32 *arg_size) { *arg_size = 0; *arg_class = ARG_CLASS_NO_CLASS; assert (klass != NULL && arg_info != NULL && type != NULL && arg_class != NULL && arg_size != NULL); if (pinvoke) { /* Calculate argument class type and size of marshalled type. */ MonoMarshalType *info = mono_marshal_load_type_info (klass); *arg_size = info->native_size; } else { /* Calculate argument class type and size of managed type. */ *arg_size = mono_class_value_size (klass, NULL); } /* The Windows ABI only handles value types on the stack or passed in an integer register (if they fit the register size). */ *arg_class = MONO_WIN64_VALUE_TYPE_FITS_REG (*arg_size) ? ARG_CLASS_INTEGER : ARG_CLASS_MEMORY; if (*arg_class == ARG_CLASS_MEMORY) { /* Value type has a size that doesn't fit a register according to the ABI. Try to use the full stack size of the type. */ *arg_size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, pinvoke); } /* * Standard C and C++ don't allow empty structs; empty structs will always have a size of 1 byte. * GCC has an extension to allow empty structs, https://gcc.gnu.org/onlinedocs/gcc/Empty-Structures.html. * This causes a dilemma, since a runtime built with a non-GCC compiler will not be compatible with * GCC-built C libraries and the other way around. On platforms where empty structs have a size of 1 byte * they must be represented in the call and cannot be dropped. */ if (*arg_size == 0 && MONO_TYPE_ISSTRUCT (type)) { arg_info->pass_empty_struct = TRUE; *arg_size = SIZEOF_REGISTER; *arg_class = ARG_CLASS_INTEGER; } assert (*arg_class != ARG_CLASS_NO_CLASS); } static void add_valuetype_win64 (MonoMethodSignature *signature, ArgInfo *arg_info, MonoType *type, gboolean is_return, guint32 *current_int_reg, guint32 *current_float_reg, guint32 *stack_size) { guint32 arg_size = SIZEOF_REGISTER; MonoClass *klass = NULL; ArgumentClass arg_class; assert (signature != NULL && arg_info != NULL && type != NULL && current_int_reg != NULL && current_float_reg != NULL && stack_size != NULL); klass = mono_class_from_mono_type_internal (type); get_valuetype_size_win64 (klass, signature->pinvoke && !signature->marshalling_disabled, arg_info, type, &arg_class, &arg_size); /* Only drop the value type if it's not an empty struct as input that must be represented in the call */ if ((arg_size == 0 && !arg_info->pass_empty_struct) || (arg_info->pass_empty_struct && is_return)) { arg_info->storage = ArgValuetypeInReg; arg_info->pair_storage [0] = arg_info->pair_storage [1] = ArgNone; } else { /* Allocate storage for the value type. */ allocate_storage_for_valuetype_win64 (arg_info, type, is_return, arg_class, arg_size, current_int_reg, current_float_reg, stack_size); } } #endif /* TARGET_WIN32 */ static void add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type, gboolean is_return, guint32 *gr, guint32 *fr, guint32 *stack_size) { #ifdef TARGET_WIN32 add_valuetype_win64 (sig, ainfo, type, is_return, gr, fr, stack_size); #else guint32 size, quad, nquads, i, nfields; /* Keep track of the size used in each quad so we can */ /* use the right size when copying args/return vars. 
*/ guint32 quadsize [2] = {8, 8}; ArgumentClass args [2]; StructFieldInfo *fields = NULL; GArray *fields_array; MonoClass *klass; gboolean pass_on_stack = FALSE; int struct_size; klass = mono_class_from_mono_type_internal (type); size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, sig->pinvoke && !sig->marshalling_disabled); if (!sig->pinvoke && ((is_return && (size == 8)) || (!is_return && (size <= 16)))) { /* We pass and return vtypes of size 8 in a register */ } else if (!sig->pinvoke || (size == 0) || (size > 16)) { pass_on_stack = TRUE; } /* If this struct can't be split up naturally into 8-byte */ /* chunks (registers), pass it on the stack. */ if (sig->pinvoke && !sig->marshalling_disabled) { MonoMarshalType *info = mono_marshal_load_type_info (klass); g_assert (info); struct_size = info->native_size; } else { struct_size = mono_class_value_size (klass, NULL); } /* * Collect field information recursively to be able to * handle nested structures. */ fields_array = g_array_new (FALSE, TRUE, sizeof (StructFieldInfo)); collect_field_info_nested (klass, fields_array, 0, sig->pinvoke && !sig->marshalling_disabled, m_class_is_unicode (klass)); fields = (StructFieldInfo*)fields_array->data; nfields = fields_array->len; for (i = 0; i < nfields; ++i) { if ((fields [i].offset < 8) && (fields [i].offset + fields [i].size) > 8) { pass_on_stack = TRUE; break; } } if (size == 0) { ainfo->storage = ArgValuetypeInReg; ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone; return; } if (pass_on_stack) { /* Always pass in memory */ ainfo->offset = *stack_size; *stack_size += ALIGN_TO (size, 8); ainfo->storage = is_return ? ArgValuetypeAddrInIReg : ArgOnStack; if (!is_return) ainfo->arg_size = ALIGN_TO (size, 8); g_array_free (fields_array, TRUE); return; } if (size > 8) nquads = 2; else nquads = 1; if (!sig->pinvoke) { int n = mono_class_value_size (klass, NULL); quadsize [0] = n >= 8 ? 8 : n; quadsize [1] = n >= 8 ? MAX (n - 8, 8) : 0; /* Always pass in 1 or 2 integer registers */ args [0] = ARG_CLASS_INTEGER; args [1] = ARG_CLASS_INTEGER; /* Only the simplest cases are supported */ if (is_return && nquads != 1) { args [0] = ARG_CLASS_MEMORY; args [1] = ARG_CLASS_MEMORY; } } else { /* * Implement the algorithm from section 3.2.3 of the X86_64 ABI. * The X87 and SSEUP stuff is left out since there are no such types in * the CLR. */ if (!nfields) { ainfo->storage = ArgValuetypeInReg; ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone; return; } if (struct_size > 16) { ainfo->offset = *stack_size; *stack_size += ALIGN_TO (struct_size, 8); ainfo->storage = is_return ? 
ArgValuetypeAddrInIReg : ArgOnStack; if (!is_return) ainfo->arg_size = ALIGN_TO (struct_size, 8); g_array_free (fields_array, TRUE); return; } args [0] = ARG_CLASS_NO_CLASS; args [1] = ARG_CLASS_NO_CLASS; for (quad = 0; quad < nquads; ++quad) { ArgumentClass class1; if (nfields == 0) class1 = ARG_CLASS_MEMORY; else class1 = ARG_CLASS_NO_CLASS; for (i = 0; i < nfields; ++i) { if ((fields [i].offset < 8) && (fields [i].offset + fields [i].size) > 8) { /* Unaligned field */ NOT_IMPLEMENTED; } /* Skip fields in other quad */ if ((quad == 0) && (fields [i].offset >= 8)) continue; if ((quad == 1) && (fields [i].offset < 8)) continue; /* How far into this quad this data extends.*/ /* (8 is size of quad) */ quadsize [quad] = fields [i].offset + fields [i].size - (quad * 8); class1 = merge_argument_class_from_type (fields [i].type, class1); } /* Empty structs have a nonzero size, causing this assert to be hit */ if (sig->pinvoke) g_assert (class1 != ARG_CLASS_NO_CLASS); args [quad] = class1; } } g_array_free (fields_array, TRUE); /* Post merger cleanup */ if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY)) args [0] = args [1] = ARG_CLASS_MEMORY; /* Allocate registers */ { int orig_gr = *gr; int orig_fr = *fr; while (quadsize [0] != 1 && quadsize [0] != 2 && quadsize [0] != 4 && quadsize [0] != 8) quadsize [0] ++; while (quadsize [1] != 0 && quadsize [1] != 1 && quadsize [1] != 2 && quadsize [1] != 4 && quadsize [1] != 8) quadsize [1] ++; ainfo->storage = ArgValuetypeInReg; ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone; g_assert (quadsize [0] <= 8); g_assert (quadsize [1] <= 8); ainfo->pair_size [0] = quadsize [0]; ainfo->pair_size [1] = quadsize [1]; ainfo->nregs = nquads; for (quad = 0; quad < nquads; ++quad) { switch (args [quad]) { case ARG_CLASS_INTEGER: if (*gr >= PARAM_REGS) args [quad] = ARG_CLASS_MEMORY; else { ainfo->pair_storage [quad] = ArgInIReg; if (is_return) ainfo->pair_regs [quad] = return_regs [*gr]; else ainfo->pair_regs [quad] = param_regs [*gr]; (*gr) ++; } break; case ARG_CLASS_SSE: if (*fr >= FLOAT_PARAM_REGS) args [quad] = ARG_CLASS_MEMORY; else { if (quadsize[quad] <= 4) ainfo->pair_storage [quad] = ArgInFloatSSEReg; else ainfo->pair_storage [quad] = ArgInDoubleSSEReg; ainfo->pair_regs [quad] = *fr; (*fr) ++; } break; case ARG_CLASS_MEMORY: break; case ARG_CLASS_NO_CLASS: break; default: g_assert_not_reached (); } } if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY)) { int arg_size; /* Revert possible register assignments */ *gr = orig_gr; *fr = orig_fr; ainfo->offset = *stack_size; if (sig->pinvoke) arg_size = ALIGN_TO (struct_size, 8); else arg_size = nquads * sizeof (target_mgreg_t); *stack_size += arg_size; ainfo->storage = is_return ? ArgValuetypeAddrInIReg : ArgOnStack; if (!is_return) ainfo->arg_size = arg_size; } } #endif /* !TARGET_WIN32 */ } /* * get_call_info: * * Obtain information about a call according to the calling convention. * For AMD64 System V, see the "System V ABI, x86-64 Architecture Processor Supplement * Draft Version 0.23" document for more information. 
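 * (In brief, for System V: the first six integer/pointer arguments go in
 * RDI, RSI, RDX, RCX, R8 and R9, the first eight floating point arguments
 * in XMM0-XMM7, and the rest on the stack; integer results come back in
 * RAX (RAX:RDX for two-quad value types) and float results in XMM0.)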
* For AMD64 Windows, see "Overview of x64 Calling Conventions", * https://msdn.microsoft.com/en-us/library/ms235286.aspx */ static CallInfo* get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { guint32 i, gr, fr, pstart; MonoType *ret_type; int n = sig->hasthis + sig->param_count; guint32 stack_size = 0; CallInfo *cinfo; gboolean is_pinvoke = sig->pinvoke; if (mp) cinfo = (CallInfo *)mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n)); else cinfo = (CallInfo *)g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n)); cinfo->nargs = n; cinfo->gsharedvt = mini_is_gsharedvt_variable_signature (sig); gr = 0; fr = 0; #ifdef TARGET_WIN32 /* Reserve space where the callee can save the argument registers */ stack_size = 4 * sizeof (target_mgreg_t); #endif /* return value */ ret_type = mini_get_underlying_type (sig->ret); switch (ret_type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = AMD64_RAX; break; case MONO_TYPE_U8: case MONO_TYPE_I8: cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = AMD64_RAX; break; case MONO_TYPE_R4: cinfo->ret.storage = ArgInFloatSSEReg; cinfo->ret.reg = AMD64_XMM0; break; case MONO_TYPE_R8: cinfo->ret.storage = ArgInDoubleSSEReg; cinfo->ret.reg = AMD64_XMM0; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ret_type)) { cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = AMD64_RAX; break; } if (mini_is_gsharedvt_type (ret_type)) { cinfo->ret.storage = ArgGsharedvtVariableInReg; break; } /* fall through */ case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: { guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0; add_valuetype (sig, &cinfo->ret, ret_type, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize); g_assert (cinfo->ret.storage != ArgInIReg); break; } case MONO_TYPE_VAR: case MONO_TYPE_MVAR: g_assert (mini_is_gsharedvt_type (ret_type)); cinfo->ret.storage = ArgGsharedvtVariableInReg; break; case MONO_TYPE_VOID: break; default: g_error ("Can't handle as return value 0x%x", ret_type->type); } pstart = 0; /* * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after * the first argument, allowing 'this' to be always passed in the first arg reg. * Also do this if the first argument is a reference type, since virtual calls * are sometimes made using calli without sig->hasthis set, like in the delegate * invoke wrappers. 
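 * E.g. (illustrative): for an instance method returning a large value type,
 * the outgoing order becomes (this, vret, args...), so 'this' still lands
 * in the first parameter register and the vret address in the second.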
*/ ArgStorage ret_storage = cinfo->ret.storage; if ((ret_storage == ArgValuetypeAddrInIReg || ret_storage == ArgGsharedvtVariableInReg) && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) { if (sig->hasthis) { add_general (&gr, &stack_size, cinfo->args + 0); } else { add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0]); pstart = 1; } add_general (&gr, &stack_size, &cinfo->ret); cinfo->ret.storage = ret_storage; cinfo->vret_arg_index = 1; } else { /* this */ if (sig->hasthis) add_general (&gr, &stack_size, cinfo->args + 0); if (ret_storage == ArgValuetypeAddrInIReg || ret_storage == ArgGsharedvtVariableInReg) { add_general (&gr, &stack_size, &cinfo->ret); cinfo->ret.storage = ret_storage; } } if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) { gr = PARAM_REGS; fr = FLOAT_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie); } for (i = pstart; i < sig->param_count; ++i) { ArgInfo *ainfo = &cinfo->args [sig->hasthis + i]; MonoType *ptype; #ifdef TARGET_WIN32 /* The float param registers and other param registers must be the same index on Windows x64.*/ if (gr > fr) fr = gr; else if (fr > gr) gr = fr; #endif if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* We allways pass the sig cookie on the stack for simplicity */ /* * Prevent implicit arguments + the sig cookie from being passed * in registers. */ gr = PARAM_REGS; fr = FLOAT_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie); } ptype = mini_get_underlying_type (sig->params [i]); switch (ptype->type) { case MONO_TYPE_I1: ainfo->is_signed = 1; case MONO_TYPE_U1: add_general (&gr, &stack_size, ainfo); ainfo->byte_arg_size = 1; break; case MONO_TYPE_I2: ainfo->is_signed = 1; case MONO_TYPE_U2: add_general (&gr, &stack_size, ainfo); ainfo->byte_arg_size = 2; break; case MONO_TYPE_I4: ainfo->is_signed = 1; case MONO_TYPE_U4: add_general (&gr, &stack_size, ainfo); ainfo->byte_arg_size = 4; break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: add_general (&gr, &stack_size, ainfo); break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ptype)) { add_general (&gr, &stack_size, ainfo); break; } if (mini_is_gsharedvt_variable_type (ptype)) { /* gsharedvt arguments are passed by ref */ add_general (&gr, &stack_size, ainfo); if (ainfo->storage == ArgInIReg) ainfo->storage = ArgGSharedVtInReg; else ainfo->storage = ArgGSharedVtOnStack; break; } /* fall through */ case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: add_valuetype (sig, ainfo, ptype, FALSE, &gr, &fr, &stack_size); break; case MONO_TYPE_U8: case MONO_TYPE_I8: add_general (&gr, &stack_size, ainfo); break; case MONO_TYPE_R4: add_float (&fr, &stack_size, ainfo, FALSE); break; case MONO_TYPE_R8: add_float (&fr, &stack_size, ainfo, TRUE); break; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: /* gsharedvt arguments are passed by ref */ g_assert (mini_is_gsharedvt_type (ptype)); add_general (&gr, &stack_size, ainfo); if (ainfo->storage == ArgInIReg) ainfo->storage = ArgGSharedVtInReg; else ainfo->storage = ArgGSharedVtOnStack; break; default: g_assert_not_reached (); } } if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) { gr = PARAM_REGS; fr = 
FLOAT_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie); } cinfo->stack_usage = stack_size; cinfo->reg_usage = gr; cinfo->freg_usage = fr; return cinfo; } static int arg_need_temp (ArgInfo *ainfo) { // Value types using one register doesn't need temp. if (ainfo->storage == ArgValuetypeInReg && ainfo->nregs > 1) return ainfo->nregs * sizeof (host_mgreg_t); return 0; } static gpointer arg_get_storage (CallContext *ccontext, ArgInfo *ainfo) { switch (ainfo->storage) { case ArgInIReg: return &ccontext->gregs [ainfo->reg]; case ArgInFloatSSEReg: case ArgInDoubleSSEReg: return &ccontext->fregs [ainfo->reg]; case ArgOnStack: case ArgValuetypeAddrOnStack: return ccontext->stack + ainfo->offset; case ArgValuetypeInReg: // Empty struct if (ainfo->nregs == 0) return NULL; // Value type using one register can be stored // directly in its context gregs/fregs slot. g_assert (ainfo->nregs == 1); switch (ainfo->pair_storage [0]) { case ArgInIReg: return &ccontext->gregs [ainfo->pair_regs [0]]; case ArgInFloatSSEReg: case ArgInDoubleSSEReg: return &ccontext->fregs [ainfo->pair_regs [0]]; default: g_assert_not_reached (); } case ArgValuetypeAddrInIReg: g_assert (ainfo->pair_storage [0] == ArgInIReg && ainfo->pair_storage [1] == ArgNone); return &ccontext->gregs [ainfo->pair_regs [0]]; default: g_error ("Arg storage type not yet supported"); } } static void arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest) { g_assert (arg_need_temp (ainfo)); host_mgreg_t *dest_cast = (host_mgreg_t*)dest; /* Reconstruct the value type */ for (int k = 0; k < ainfo->nregs; k++) { int storage_type = ainfo->pair_storage [k]; int reg_storage = ainfo->pair_regs [k]; switch (storage_type) { case ArgInIReg: *dest_cast = ccontext->gregs [reg_storage]; break; case ArgInFloatSSEReg: case ArgInDoubleSSEReg: *(double*)dest_cast = ccontext->fregs [reg_storage]; break; default: g_assert_not_reached (); } dest_cast++; } } static void arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src) { g_assert (arg_need_temp (ainfo)); host_mgreg_t *src_cast = (host_mgreg_t*)src; for (int k = 0; k < ainfo->nregs; k++) { int storage_type = ainfo->pair_storage [k]; int reg_storage = ainfo->pair_regs [k]; switch (storage_type) { case ArgInIReg: ccontext->gregs [reg_storage] = *src_cast; break; case ArgInFloatSSEReg: case ArgInDoubleSSEReg: ccontext->fregs [reg_storage] = *(double*)src_cast; break; default: g_assert_not_reached (); } src_cast++; } } void mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { CallInfo *cinfo = get_call_info (NULL, sig); const MonoEECallbacks *interp_cb = mini_get_interp_callbacks (); gpointer storage; ArgInfo *ainfo; memset (ccontext, 0, sizeof (CallContext)); ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT); if (ccontext->stack_size) ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size); if (sig->ret->type != MONO_TYPE_VOID) { ainfo = &cinfo->ret; if (ainfo->storage == ArgValuetypeAddrInIReg) { storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1); ccontext->gregs [cinfo->ret.reg] = (host_mgreg_t)storage; } } g_assert (!sig->hasthis); for (int i = 0; i < sig->param_count; i++) { ainfo = &cinfo->args [i]; if (ainfo->storage == ArgValuetypeAddrInIReg || ainfo->storage == ArgValuetypeAddrOnStack) { storage = arg_get_storage (ccontext, ainfo); *(gpointer *)storage = interp_cb->frame_arg_to_storage (frame, 
sig, i); continue; } int temp_size = arg_need_temp (ainfo); if (temp_size) storage = alloca (temp_size); // FIXME? alloca in a loop else storage = arg_get_storage (ccontext, ainfo); interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage); if (temp_size) arg_set_val (ccontext, ainfo, storage); } g_free (cinfo); } void mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp) { const MonoEECallbacks *interp_cb; CallInfo *cinfo; gpointer storage; ArgInfo *ainfo; if (sig->ret->type == MONO_TYPE_VOID) return; interp_cb = mini_get_interp_callbacks (); cinfo = get_call_info (NULL, sig); ainfo = &cinfo->ret; if (retp) { g_assert (cinfo->ret.storage == ArgValuetypeAddrInIReg); interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, retp); #ifdef TARGET_WIN32 // Windows x64 ABI ainfo implementation includes info on how to return value type address. // back to caller. storage = arg_get_storage (ccontext, ainfo); *(gpointer *)storage = retp; #endif } else { g_assert (cinfo->ret.storage != ArgValuetypeAddrInIReg); int temp_size = arg_need_temp (ainfo); if (temp_size) storage = alloca (temp_size); else storage = arg_get_storage (ccontext, ainfo); memset (ccontext, 0, sizeof (CallContext)); // FIXME interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage); if (temp_size) arg_set_val (ccontext, ainfo, storage); } g_free (cinfo); } gpointer mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { const MonoEECallbacks *interp_cb = mini_get_interp_callbacks (); CallInfo *cinfo = get_call_info (NULL, sig); gpointer storage; ArgInfo *ainfo; for (int i = 0; i < sig->param_count + sig->hasthis; i++) { ainfo = &cinfo->args [i]; if (ainfo->storage == ArgValuetypeAddrInIReg || ainfo->storage == ArgValuetypeAddrOnStack) { storage = arg_get_storage (ccontext, ainfo); interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, *(gpointer *)storage); continue; } int temp_size = arg_need_temp (ainfo); if (temp_size) { storage = alloca (temp_size); // FIXME? alloca in a loop arg_get_val (ccontext, ainfo, storage); } else { storage = arg_get_storage (ccontext, ainfo); } interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage); } storage = NULL; if (sig->ret->type != MONO_TYPE_VOID) { ainfo = &cinfo->ret; if (ainfo->storage == ArgValuetypeAddrInIReg) storage = (gpointer) ccontext->gregs [cinfo->ret.reg]; } g_free (cinfo); return storage; } void mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { const MonoEECallbacks *interp_cb; CallInfo *cinfo; ArgInfo *ainfo; gpointer storage; /* No return value */ if (sig->ret->type == MONO_TYPE_VOID) return; interp_cb = mini_get_interp_callbacks (); cinfo = get_call_info (NULL, sig); ainfo = &cinfo->ret; /* The return values were stored directly at address passed in reg */ if (cinfo->ret.storage != ArgValuetypeAddrInIReg) { int temp_size = arg_need_temp (ainfo); if (temp_size) { storage = alloca (temp_size); arg_get_val (ccontext, ainfo, storage); } else { storage = arg_get_storage (ccontext, ainfo); } interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage); } g_free (cinfo); } /* * mono_arch_get_argument_info: * @csig: a method signature * @param_count: the number of parameters to consider * @arg_info: an array to store the result infos * * Gathers information on parameters such as size, alignment and * padding. 
arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the argument area on the stack.
 */
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
	int k;
	CallInfo *cinfo = get_call_info (NULL, csig);
	guint32 args_size = cinfo->stack_usage;

	/* The arguments are saved to a stack area in mono_arch_instrument_prolog */
	if (csig->hasthis) {
		arg_info [0].offset = 0;
	}

	for (k = 0; k < param_count; k++) {
		arg_info [k + 1].offset = ((k + csig->hasthis) * 8);
		/* FIXME: */
		arg_info [k + 1].size = 0;
	}

	g_free (cinfo);

	return args_size;
}

#ifndef DISABLE_JIT
gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
	CallInfo *caller_info = get_call_info (NULL, caller_sig);
	CallInfo *callee_info = get_call_info (NULL, callee_sig);

	gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage)
		&& IS_SUPPORTED_TAILCALL (callee_info->ret.storage == caller_info->ret.storage);

	// Limit stack_usage to 1G. Assume 32bit limits when we move parameters.
	res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (1 << 30));
	res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (1 << 30));

	// valuetype parameters are address of local
	const ArgInfo *ainfo;
	ainfo = callee_info->args + callee_sig->hasthis;
	for (int i = 0; res && i < callee_sig->param_count; ++i) {
		res = IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgValuetypeAddrInIReg)
			&& IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgValuetypeAddrOnStack);
	}

	g_free (caller_info);
	g_free (callee_info);

	return res;
}
#endif /* DISABLE_JIT */

/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
#ifndef _MSC_VER
	guint16 fpcw;

	/* spec compliance requires running with double precision */
	__asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
	fpcw &= ~X86_FPCW_PRECC_MASK;
	fpcw |= X86_FPCW_PREC_DOUBLE;
	__asm__  __volatile__ ("fldcw %0\n": : "m" (fpcw));
	__asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
#else
	/* TODO: This is crashing on Win64 right now.
	* _control87 (_PC_53, MCW_PC);
	*/
#endif
}

/*
 * Initialize architecture specific code.
 */
void
mono_arch_init (void)
{
#ifndef DISABLE_JIT
	if (!mono_aot_only)
		bp_trampoline = mini_get_breakpoint_trampoline ();
#endif
}

/*
 * Cleanup architecture specific code.
 */
void
mono_arch_cleanup (void)
{
}

/*
 * This function returns the optimizations supported on this cpu.
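 * E.g. MONO_OPT_CMOV (and MONO_OPT_FCMOV) is only enabled when the hardware
 * capability detection below reports CMOV (and FCMOV) support.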
*/ guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask) { guint32 opts = 0; *exclude_mask = 0; if (mono_hwcap_x86_has_cmov) { opts |= MONO_OPT_CMOV; if (mono_hwcap_x86_has_fcmov) opts |= MONO_OPT_FCMOV; else *exclude_mask |= MONO_OPT_FCMOV; } else { *exclude_mask |= MONO_OPT_CMOV; } return opts; } MonoCPUFeatures mono_arch_get_cpu_features (void) { guint64 features = MONO_CPU_INITED; if (mono_hwcap_x86_has_sse1) features |= MONO_CPU_X86_SSE; if (mono_hwcap_x86_has_sse2) features |= MONO_CPU_X86_SSE2; if (mono_hwcap_x86_has_sse3) features |= MONO_CPU_X86_SSE3; if (mono_hwcap_x86_has_ssse3) features |= MONO_CPU_X86_SSSE3; if (mono_hwcap_x86_has_sse41) features |= MONO_CPU_X86_SSE41; if (mono_hwcap_x86_has_sse42) features |= MONO_CPU_X86_SSE42; if (mono_hwcap_x86_has_popcnt) features |= MONO_CPU_X86_POPCNT; if (mono_hwcap_x86_has_lzcnt) features |= MONO_CPU_X86_LZCNT; return (MonoCPUFeatures)features; } #ifndef DISABLE_JIT GList * mono_arch_get_allocatable_int_vars (MonoCompile *cfg) { GList *vars = NULL; int i; for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); /* unused vars */ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos) continue; if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG)) continue; if (mono_is_regsize_var (ins->inst_vtype)) { g_assert (MONO_VARINFO (cfg, i)->reg == -1); g_assert (i == vmv->idx); vars = g_list_prepend (vars, vmv); } } vars = mono_varlist_sort (cfg, vars, 0); return vars; } /** * mono_arch_compute_omit_fp: * Determine whether the frame pointer can be eliminated. */ static void mono_arch_compute_omit_fp (MonoCompile *cfg) { MonoMethodSignature *sig; MonoMethodHeader *header; int i, locals_size; CallInfo *cinfo; if (cfg->arch.omit_fp_computed) return; header = cfg->header; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; /* * FIXME: Remove some of the restrictions. */ cfg->arch.omit_fp = TRUE; cfg->arch.omit_fp_computed = TRUE; if (cfg->disable_omit_fp) cfg->arch.omit_fp = FALSE; if (!debug_omit_fp ()) cfg->arch.omit_fp = FALSE; /* if (cfg->method->save_lmf) cfg->arch.omit_fp = FALSE; */ if (cfg->flags & MONO_CFG_HAS_ALLOCA) cfg->arch.omit_fp = FALSE; if (header->num_clauses) cfg->arch.omit_fp = FALSE; if (cfg->param_area) cfg->arch.omit_fp = FALSE; if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) cfg->arch.omit_fp = FALSE; for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = &cinfo->args [i]; if (ainfo->storage == ArgOnStack || ainfo->storage == ArgValuetypeAddrInIReg || ainfo->storage == ArgValuetypeAddrOnStack) { /* * The stack offset can only be determined when the frame * size is known. 
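 * (With frame pointer omission the argument lives at an RSP-relative
 * offset, and that offset shifts whenever the final frame size changes,
 * so the decision has to be made before offsets are assigned.)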
 */
			cfg->arch.omit_fp = FALSE;
		}
	}

	locals_size = 0;
	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		int ialign;

		locals_size += mono_type_size (ins->inst_vtype, &ialign);
	}
}

GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
	GList *regs = NULL;

	mono_arch_compute_omit_fp (cfg);

	if (cfg->arch.omit_fp)
		regs = g_list_prepend (regs, (gpointer)AMD64_RBP);

	/* We use the callee saved registers for global allocation */
	regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
	regs = g_list_prepend (regs, (gpointer)AMD64_R12);
	regs = g_list_prepend (regs, (gpointer)AMD64_R13);
	regs = g_list_prepend (regs, (gpointer)AMD64_R14);
	regs = g_list_prepend (regs, (gpointer)AMD64_R15);
#ifdef TARGET_WIN32
	regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
	regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
#endif

	return regs;
}

/*
 * mono_arch_regalloc_cost:
 *
 * Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
	MonoInst *ins = cfg->varinfo [vmv->idx];

	if (cfg->method->save_lmf)
		/* The register is already saved */
		/* subtract 1 for the invisible store in the prolog */
		return (ins->opcode == OP_ARG) ? 0 : 1;
	else
		/* push+pop */
		return (ins->opcode == OP_ARG) ? 1 : 2;
}

/*
 * mono_arch_fill_argument_info:
 *
 * Populate cfg->args, cfg->ret and cfg->vret_addr with information about the arguments
 * of the method.
 */
void
mono_arch_fill_argument_info (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoInst *ins;
	int i;
	CallInfo *cinfo;

	sig = mono_method_signature_internal (cfg->method);

	cinfo = cfg->arch.cinfo;

	/*
	 * Contrary to mono_arch_allocate_vars (), the information should describe
	 * where the arguments are at the beginning of the method, not where they can be
	 * accessed during the execution of the method. The latter makes no sense for the
	 * global register allocator, since a variable can be in more than one location.
	 */
	switch (cinfo->ret.storage) {
	case ArgInIReg:
	case ArgInFloatSSEReg:
	case ArgInDoubleSSEReg:
		cfg->ret->opcode = OP_REGVAR;
		cfg->ret->inst_c0 = cinfo->ret.reg;
		break;
	case ArgValuetypeInReg:
		cfg->ret->opcode = OP_REGOFFSET;
		cfg->ret->inst_basereg = -1;
		cfg->ret->inst_offset = -1;
		break;
	case ArgNone:
		break;
	default:
		g_assert_not_reached ();
	}

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = &cinfo->args [i];

		ins = cfg->args [i];

		switch (ainfo->storage) {
		case ArgInIReg:
		case ArgInFloatSSEReg:
		case ArgInDoubleSSEReg:
			ins->opcode = OP_REGVAR;
			ins->inst_c0 = ainfo->reg;
			break;
		case ArgOnStack:
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = -1;
			ins->inst_offset = -1;
			break;
		case ArgValuetypeInReg:
			/* Dummy */
			ins->opcode = OP_NOP;
			break;
		default:
			g_assert_not_reached ();
		}
	}
}

void
mono_arch_allocate_vars (MonoCompile *cfg)
{
	MonoType *sig_ret;
	MonoMethodSignature *sig;
	MonoInst *ins;
	int i, offset;
	guint32 locals_stack_size, locals_stack_align;
	gint32 *offsets;
	CallInfo *cinfo;

	sig = mono_method_signature_internal (cfg->method);

	cinfo = cfg->arch.cinfo;
	sig_ret = mini_get_underlying_type (sig->ret);

	mono_arch_compute_omit_fp (cfg);

	/*
	 * We use the ABI calling conventions for managed code as well.
	 * Exception: valuetypes are only sometimes passed or returned in registers.
*/ /* * The stack looks like this: * <incoming arguments passed on the stack> * <return value> * <lmf/caller saved registers> * <locals> * <spill area> * <localloc area> -> grows dynamically * <params area> */ if (cfg->arch.omit_fp) { cfg->flags |= MONO_CFG_HAS_SPILLUP; cfg->frame_reg = AMD64_RSP; offset = 0; } else { /* Locals are allocated backwards from %fp */ cfg->frame_reg = AMD64_RBP; offset = 0; } cfg->arch.saved_iregs = cfg->used_int_regs; if (cfg->method->save_lmf) { /* Save all callee-saved registers normally (except RBP, if not already used), and restore them when unwinding through an LMF */ guint32 iregs_to_save = AMD64_CALLEE_SAVED_REGS & ~(1<<AMD64_RBP); cfg->arch.saved_iregs |= iregs_to_save; } if (cfg->arch.omit_fp) cfg->arch.reg_save_area_offset = offset; /* Reserve space for callee saved registers */ for (i = 0; i < AMD64_NREG; ++i) if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) { offset += sizeof (target_mgreg_t); } if (!cfg->arch.omit_fp) cfg->arch.reg_save_area_offset = -offset; if (sig_ret->type != MONO_TYPE_VOID) { switch (cinfo->ret.storage) { case ArgInIReg: case ArgInFloatSSEReg: case ArgInDoubleSSEReg: cfg->ret->opcode = OP_REGVAR; cfg->ret->inst_c0 = cinfo->ret.reg; cfg->ret->dreg = cinfo->ret.reg; break; case ArgValuetypeAddrInIReg: case ArgGsharedvtVariableInReg: /* The register is volatile */ cfg->vret_addr->opcode = OP_REGOFFSET; cfg->vret_addr->inst_basereg = cfg->frame_reg; if (cfg->arch.omit_fp) { cfg->vret_addr->inst_offset = offset; offset += 8; } else { offset += 8; cfg->vret_addr->inst_offset = -offset; } if (G_UNLIKELY (cfg->verbose_level > 1)) { printf ("vret_addr ="); mono_print_ins (cfg->vret_addr); } break; case ArgValuetypeInReg: /* Allocate a local to hold the result, the epilog will copy it to the correct place */ cfg->ret->opcode = OP_REGOFFSET; cfg->ret->inst_basereg = cfg->frame_reg; if (cfg->arch.omit_fp) { cfg->ret->inst_offset = offset; offset += cinfo->ret.pair_storage [1] == ArgNone ? 8 : 16; } else { offset += cinfo->ret.pair_storage [1] == ArgNone ? 8 : 16; cfg->ret->inst_offset = - offset; } break; default: g_assert_not_reached (); } } /* Allocate locals */ offsets = mono_allocate_stack_slots (cfg, cfg->arch.omit_fp ? 
FALSE: TRUE, &locals_stack_size, &locals_stack_align); if (locals_stack_align) { offset += (locals_stack_align - 1); offset &= ~(locals_stack_align - 1); } if (cfg->arch.omit_fp) { cfg->locals_min_stack_offset = offset; cfg->locals_max_stack_offset = offset + locals_stack_size; } else { cfg->locals_min_stack_offset = - (offset + locals_stack_size); cfg->locals_max_stack_offset = - offset; } for (i = cfg->locals_start; i < cfg->num_varinfo; i++) { if (offsets [i] != -1) { MonoInst *ins = cfg->varinfo [i]; ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; if (cfg->arch.omit_fp) ins->inst_offset = (offset + offsets [i]); else ins->inst_offset = - (offset + offsets [i]); //printf ("allocated local %d to ", i); mono_print_tree_nl (ins); } } offset += locals_stack_size; if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) { g_assert (!cfg->arch.omit_fp); g_assert (cinfo->sig_cookie.storage == ArgOnStack); cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET; } for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ins = cfg->args [i]; if (ins->opcode != OP_REGVAR) { ArgInfo *ainfo = &cinfo->args [i]; gboolean inreg = TRUE; /* FIXME: Allocate volatile arguments to registers */ if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) inreg = FALSE; /* * Under AMD64, all registers used to pass arguments to functions * are volatile across calls. * FIXME: Optimize this. */ if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg) || (ainfo->storage == ArgValuetypeInReg) || (ainfo->storage == ArgGSharedVtInReg)) inreg = FALSE; ins->opcode = OP_REGOFFSET; switch (ainfo->storage) { case ArgInIReg: case ArgInFloatSSEReg: case ArgInDoubleSSEReg: case ArgGSharedVtInReg: if (inreg) { ins->opcode = OP_REGVAR; ins->dreg = ainfo->reg; } break; case ArgOnStack: case ArgGSharedVtOnStack: g_assert (!cfg->arch.omit_fp); ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; ins->inst_offset = ainfo->offset + ARGS_OFFSET; break; case ArgValuetypeInReg: break; case ArgValuetypeAddrInIReg: case ArgValuetypeAddrOnStack: { MonoInst *indir; g_assert (!cfg->arch.omit_fp); g_assert (ainfo->storage == ArgValuetypeAddrInIReg || (ainfo->storage == ArgValuetypeAddrOnStack && ainfo->pair_storage [0] == ArgNone)); MONO_INST_NEW (cfg, indir, 0); indir->opcode = OP_REGOFFSET; if (ainfo->pair_storage [0] == ArgInIReg) { indir->inst_basereg = cfg->frame_reg; offset = ALIGN_TO (offset, sizeof (target_mgreg_t)); offset += sizeof (target_mgreg_t); indir->inst_offset = - offset; } else { indir->inst_basereg = cfg->frame_reg; indir->inst_offset = ainfo->offset + ARGS_OFFSET; } ins->opcode = OP_VTARG_ADDR; ins->inst_left = indir; break; } default: NOT_IMPLEMENTED; } if (!inreg && (ainfo->storage != ArgOnStack) && (ainfo->storage != ArgValuetypeAddrInIReg) && (ainfo->storage != ArgValuetypeAddrOnStack) && (ainfo->storage != ArgGSharedVtOnStack)) { ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; /* These arguments are saved to the stack in the prolog */ offset = ALIGN_TO (offset, sizeof (target_mgreg_t)); if (cfg->arch.omit_fp) { ins->inst_offset = offset; offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (target_mgreg_t) : sizeof (target_mgreg_t); // Arguments are yet supported by the stack map creation code //cfg->locals_max_stack_offset = MAX (cfg->locals_max_stack_offset, offset); } else { offset += (ainfo->storage == ArgValuetypeInReg) ? 
ainfo->nregs * sizeof (target_mgreg_t) : sizeof (target_mgreg_t); ins->inst_offset = - offset; //cfg->locals_min_stack_offset = MIN (cfg->locals_min_stack_offset, offset); } } } } cfg->stack_offset = offset; } void mono_arch_create_vars (MonoCompile *cfg) { MonoMethodSignature *sig; CallInfo *cinfo; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; if (cinfo->ret.storage == ArgValuetypeInReg) cfg->ret_var_is_local = TRUE; if (cinfo->ret.storage == ArgValuetypeAddrInIReg || cinfo->ret.storage == ArgGsharedvtVariableInReg) { cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG); if (G_UNLIKELY (cfg->verbose_level > 1)) { printf ("vret_addr = "); mono_print_ins (cfg->vret_addr); } } if (cfg->gen_sdb_seq_points) { MonoInst *ins; if (cfg->compile_aot) { MonoInst *ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.seq_point_info_var = ins; } ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.ss_tramp_var = ins; ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.bp_tramp_var = ins; } if (cfg->method->save_lmf) cfg->create_lmf_var = TRUE; if (cfg->method->save_lmf) { cfg->lmf_ir = TRUE; } } static void add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree) { MonoInst *ins; switch (storage) { case ArgInIReg: MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg_copy (cfg, tree->dreg); ins->sreg1 = tree->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE); break; case ArgInFloatSSEReg: MONO_INST_NEW (cfg, ins, OP_AMD64_SET_XMMREG_R4); ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = tree->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE); break; case ArgInDoubleSSEReg: MONO_INST_NEW (cfg, ins, OP_FMOVE); ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = tree->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE); break; default: g_assert_not_reached (); } } static int arg_storage_to_load_membase (ArgStorage storage) { switch (storage) { case ArgInIReg: #if defined(MONO_ARCH_ILP32) return OP_LOADI8_MEMBASE; #else return OP_LOAD_MEMBASE; #endif case ArgInDoubleSSEReg: return OP_LOADR8_MEMBASE; case ArgInFloatSSEReg: return OP_LOADR4_MEMBASE; default: g_assert_not_reached (); } return -1; } static void emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo) { MonoMethodSignature *tmp_sig; int sig_reg; if (call->tailcall) // FIXME tailcall is not always yet initialized. NOT_IMPLEMENTED; g_assert (cinfo->sig_cookie.storage == ArgOnStack); /* * mono_ArgIterator_Setup assumes the signature cookie is * passed first and all the arguments which were before it are * passed on the stack after the signature. So compensate by * passing a different signature. 
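 * Illustrative example: for a call foo (a, b, __arglist (x, y)) with
 * sentinelpos == 2, the tmp_sig built below describes only (x, y), and the
 * cookie itself ends up on the stack at sig_cookie.offset.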
*/ tmp_sig = mono_metadata_signature_dup_full (m_class_get_image (cfg->method->klass), call->signature); tmp_sig->param_count -= call->signature->sentinelpos; tmp_sig->sentinelpos = 0; memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*)); sig_reg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, cinfo->sig_cookie.offset, sig_reg); } #ifdef ENABLE_LLVM static LLVMArgStorage arg_storage_to_llvm_arg_storage (MonoCompile *cfg, ArgStorage storage) { switch (storage) { case ArgInIReg: return LLVMArgInIReg; case ArgNone: return LLVMArgNone; case ArgGSharedVtInReg: case ArgGSharedVtOnStack: return LLVMArgGSharedVt; default: g_assert_not_reached (); return LLVMArgNone; } } LLVMCallInfo* mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig) { int i, n; CallInfo *cinfo; ArgInfo *ainfo; int j; LLVMCallInfo *linfo; MonoType *t, *sig_ret; n = sig->param_count + sig->hasthis; sig_ret = mini_get_underlying_type (sig->ret); cinfo = get_call_info (cfg->mempool, sig); linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n)); /* * LLVM always uses the native ABI while we use our own ABI, the * only difference is the handling of vtypes: * - we only pass/receive them in registers in some cases, and only * in 1 or 2 integer registers. */ switch (cinfo->ret.storage) { case ArgNone: linfo->ret.storage = LLVMArgNone; break; case ArgInIReg: case ArgInFloatSSEReg: case ArgInDoubleSSEReg: linfo->ret.storage = LLVMArgNormal; break; case ArgValuetypeInReg: { ainfo = &cinfo->ret; if (sig->pinvoke && (ainfo->pair_storage [0] == ArgInFloatSSEReg || ainfo->pair_storage [0] == ArgInDoubleSSEReg || ainfo->pair_storage [1] == ArgInFloatSSEReg || ainfo->pair_storage [1] == ArgInDoubleSSEReg)) { cfg->exception_message = g_strdup ("pinvoke + vtype ret"); cfg->disable_llvm = TRUE; return linfo; } linfo->ret.storage = LLVMArgVtypeInReg; for (j = 0; j < 2; ++j) linfo->ret.pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, ainfo->pair_storage [j]); break; } case ArgValuetypeAddrInIReg: case ArgGsharedvtVariableInReg: /* Vtype returned using a hidden argument */ linfo->ret.storage = LLVMArgVtypeRetAddr; linfo->vret_arg_index = cinfo->vret_arg_index; break; default: g_assert_not_reached (); break; } for (i = 0; i < n; ++i) { ainfo = cinfo->args + i; if (i >= sig->hasthis) t = sig->params [i - sig->hasthis]; else t = mono_get_int_type (); t = mini_type_get_underlying_type (t); linfo->args [i].storage = LLVMArgNone; switch (ainfo->storage) { case ArgInIReg: linfo->args [i].storage = LLVMArgNormal; break; case ArgInDoubleSSEReg: case ArgInFloatSSEReg: linfo->args [i].storage = LLVMArgNormal; break; case ArgOnStack: if (MONO_TYPE_ISSTRUCT (t)) linfo->args [i].storage = LLVMArgVtypeByVal; else linfo->args [i].storage = LLVMArgNormal; break; case ArgValuetypeInReg: if (sig->pinvoke && (ainfo->pair_storage [0] == ArgInFloatSSEReg || ainfo->pair_storage [0] == ArgInDoubleSSEReg || ainfo->pair_storage [1] == ArgInFloatSSEReg || ainfo->pair_storage [1] == ArgInDoubleSSEReg)) { cfg->exception_message = g_strdup ("pinvoke + vtypes"); cfg->disable_llvm = TRUE; return linfo; } linfo->args [i].storage = LLVMArgVtypeInReg; for (j = 0; j < 2; ++j) linfo->args [i].pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, ainfo->pair_storage [j]); break; case ArgGSharedVtInReg: case ArgGSharedVtOnStack: linfo->args [i].storage = 
LLVMArgGSharedVt; break; case ArgValuetypeAddrInIReg: case ArgValuetypeAddrOnStack: linfo->args [i].storage = LLVMArgVtypeAddr; break; default: cfg->exception_message = g_strdup ("ainfo->storage"); cfg->disable_llvm = TRUE; break; } } return linfo; } #endif void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call) { MonoInst *arg, *in; MonoMethodSignature *sig; int i, n; CallInfo *cinfo; ArgInfo *ainfo; sig = call->signature; n = sig->param_count + sig->hasthis; cinfo = get_call_info (cfg->mempool, sig); if (COMPILE_LLVM (cfg)) { /* We shouldn't be called in the llvm case */ cfg->disable_llvm = TRUE; return; } /* * Emit all arguments which are passed on the stack to prevent register * allocation problems. */ for (i = 0; i < n; ++i) { MonoType *t; ainfo = cinfo->args + i; in = call->args [i]; if (sig->hasthis && i == 0) t = mono_get_object_type (); else t = sig->params [i - sig->hasthis]; t = mini_get_underlying_type (t); //XXX what about ArgGSharedVtOnStack here? if (ainfo->storage == ArgOnStack && !MONO_TYPE_ISSTRUCT (t)) { if (!m_type_is_byref (t)) { if (t->type == MONO_TYPE_R4) MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg); else if (t->type == MONO_TYPE_R8) MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg); else MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg); } else { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg); } if (cfg->compute_gc_maps) { MonoInst *def; EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, t); } } } /* * Emit all parameters passed in registers in non-reverse order for better readability * and to help the optimization in emit_prolog (). */ for (i = 0; i < n; ++i) { ainfo = cinfo->args + i; in = call->args [i]; if (ainfo->storage == ArgInIReg) add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in); } for (i = n - 1; i >= 0; --i) { MonoType *t; ainfo = cinfo->args + i; in = call->args [i]; if (sig->hasthis && i == 0) t = mono_get_object_type (); else t = sig->params [i - sig->hasthis]; t = mini_get_underlying_type (t); switch (ainfo->storage) { case ArgInIReg: /* Already done */ break; case ArgInFloatSSEReg: case ArgInDoubleSSEReg: add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in); break; case ArgOnStack: case ArgValuetypeInReg: case ArgValuetypeAddrInIReg: case ArgValuetypeAddrOnStack: case ArgGSharedVtInReg: case ArgGSharedVtOnStack: { if (ainfo->storage == ArgOnStack && !MONO_TYPE_ISSTRUCT (t)) /* Already emitted above */ break; guint32 align; guint32 size; if (sig->pinvoke && !sig->marshalling_disabled) size = mono_type_native_stack_size (t, &align); else { /* * Other backends use mono_type_stack_size (), but that * aligns the size to 8, which is larger than the size of * the source, leading to reads of invalid memory if the * source is at the end of address space. 
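 * (E.g. a 12 byte struct whose storage ends at the last byte of a mapped
 * page: rounding the copy up to 16 bytes would read past the mapping.)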
*/ size = mono_class_value_size (mono_class_from_mono_type_internal (t), &align); } if (size >= 10000) { /* Avoid asserts in emit_memcpy () */ mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Passing an argument of size '%d'.", size)); /* Continue normally */ } if (size > 0 || ainfo->pass_empty_struct) { MONO_INST_NEW (cfg, arg, OP_OUTARG_VT); arg->sreg1 = in->dreg; arg->klass = mono_class_from_mono_type_internal (t); arg->backend.size = size; arg->inst_p0 = call; arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, arg); } break; } default: g_assert_not_reached (); } if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) /* Emit the signature cookie just before the implicit arguments */ emit_sig_cookie (cfg, call, cinfo); } /* Handle the case where there are no implicit arguments */ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) emit_sig_cookie (cfg, call, cinfo); switch (cinfo->ret.storage) { case ArgValuetypeInReg: if (cinfo->ret.pair_storage [0] == ArgInIReg && cinfo->ret.pair_storage [1] == ArgNone) { /* * Tell the JIT to use a more efficient calling convention: call using * OP_CALL, compute the result location after the call, and save the * result there. */ call->vret_in_reg = TRUE; /* * Nullify the instruction computing the vret addr to enable * future optimizations. */ if (call->vret_var) NULLIFY_INS (call->vret_var); } else { if (call->tailcall) NOT_IMPLEMENTED; /* * The valuetype is in RAX:RDX after the call, need to be copied to * the stack. Push the address here, so the call instruction can * access it. */ if (!cfg->arch.vret_addr_loc) { cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); /* Prevent it from being register allocated or optimized away */ cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE; } MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg); } break; case ArgValuetypeAddrInIReg: case ArgGsharedvtVariableInReg: { MonoInst *vtarg; MONO_INST_NEW (cfg, vtarg, OP_MOVE); vtarg->sreg1 = call->vret_var->dreg; vtarg->dreg = mono_alloc_preg (cfg); MONO_ADD_INS (cfg->cbb, vtarg); mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE); break; } default: break; } if (cfg->method->save_lmf) { MONO_INST_NEW (cfg, arg, OP_AMD64_SAVE_SP_TO_LMF); MONO_ADD_INS (cfg->cbb, arg); } call->stack_usage = cinfo->stack_usage; } void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src) { MonoInst *arg; MonoCallInst *call = (MonoCallInst*)ins->inst_p0; ArgInfo *ainfo = (ArgInfo*)ins->inst_p1; int size = ins->backend.size; switch (ainfo->storage) { case ArgValuetypeInReg: { MonoInst *load; int part; for (part = 0; part < 2; ++part) { if (ainfo->pair_storage [part] == ArgNone) continue; if (ainfo->pass_empty_struct) { //Pass empty struct value as 0 on platforms representing empty structs as 1 byte. 
NEW_ICONST (cfg, load, 0); } else { MONO_INST_NEW (cfg, load, arg_storage_to_load_membase (ainfo->pair_storage [part])); load->inst_basereg = src->dreg; load->inst_offset = part * sizeof (target_mgreg_t); switch (ainfo->pair_storage [part]) { case ArgInIReg: load->dreg = mono_alloc_ireg (cfg); break; case ArgInDoubleSSEReg: case ArgInFloatSSEReg: load->dreg = mono_alloc_freg (cfg); break; default: g_assert_not_reached (); } } MONO_ADD_INS (cfg->cbb, load); add_outarg_reg (cfg, call, ainfo->pair_storage [part], ainfo->pair_regs [part], load); } break; } case ArgValuetypeAddrInIReg: case ArgValuetypeAddrOnStack: { MonoInst *vtaddr, *load; g_assert (ainfo->storage == ArgValuetypeAddrInIReg || (ainfo->storage == ArgValuetypeAddrOnStack && ainfo->pair_storage [0] == ArgNone)); vtaddr = mono_compile_create_var (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL); vtaddr->backend.is_pinvoke = call->signature->pinvoke && !call->signature->marshalling_disabled; MONO_INST_NEW (cfg, load, OP_LDADDR); cfg->has_indirection = TRUE; load->inst_p0 = vtaddr; vtaddr->flags |= MONO_INST_INDIRECT; load->type = STACK_MP; load->klass = vtaddr->klass; load->dreg = mono_alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, load); mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, TARGET_SIZEOF_VOID_P); if (ainfo->pair_storage [0] == ArgInIReg) { MONO_INST_NEW (cfg, arg, OP_AMD64_LEA_MEMBASE); arg->dreg = mono_alloc_ireg (cfg); arg->sreg1 = load->dreg; arg->inst_imm = 0; MONO_ADD_INS (cfg->cbb, arg); mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, ainfo->pair_regs [0], FALSE); } else { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, load->dreg); } break; } case ArgGSharedVtInReg: /* Pass by addr */ mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE); break; case ArgGSharedVtOnStack: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, src->dreg); break; default: if (size == 8) { int dreg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 0); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, dreg); } else if (size <= 40) { mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, TARGET_SIZEOF_VOID_P); } else { // FIXME: Code growth mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, TARGET_SIZEOF_VOID_P); } if (cfg->compute_gc_maps) { MonoInst *def; EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, m_class_get_byval_arg (ins->klass)); } } } void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val) { MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret); if (ret->type == MONO_TYPE_R4) { if (COMPILE_LLVM (cfg)) MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg); else MONO_EMIT_NEW_UNALU (cfg, OP_AMD64_SET_XMMREG_R4, cfg->ret->dreg, val->dreg); return; } else if (ret->type == MONO_TYPE_R8) { MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg); return; } MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg); } #endif /* DISABLE_JIT */ #define EMIT_COND_BRANCH(ins,cond,sign) \ if (ins->inst_true_bb->native_offset) { \ x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \ } else { \ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \ if (optimize_branch_pred && \ x86_is_imm8 (ins->inst_true_bb->max_offset - offset)) \ x86_branch8 (code, cond, 0, sign); \ else \ x86_branch32 (code, cond, 
0, sign); \ } typedef struct { MonoMethodSignature *sig; CallInfo *cinfo; int nstack_args, nullable_area; } ArchDynCallInfo; static gboolean dyn_call_supported (MonoMethodSignature *sig, CallInfo *cinfo) { int i; switch (cinfo->ret.storage) { case ArgNone: case ArgInIReg: case ArgInFloatSSEReg: case ArgInDoubleSSEReg: case ArgValuetypeAddrInIReg: case ArgValuetypeInReg: break; default: return FALSE; } for (i = 0; i < cinfo->nargs; ++i) { ArgInfo *ainfo = &cinfo->args [i]; switch (ainfo->storage) { case ArgInIReg: case ArgInFloatSSEReg: case ArgInDoubleSSEReg: case ArgValuetypeInReg: case ArgValuetypeAddrInIReg: case ArgValuetypeAddrOnStack: case ArgOnStack: break; default: return FALSE; } } return TRUE; } /* * mono_arch_dyn_call_prepare: * * Return a pointer to an arch-specific structure which contains information * needed by mono_arch_get_dyn_call_args (). Return NULL if OP_DYN_CALL is not * supported for SIG. * This function is equivalent to ffi_prep_cif in libffi. */ MonoDynCallInfo* mono_arch_dyn_call_prepare (MonoMethodSignature *sig) { ArchDynCallInfo *info; CallInfo *cinfo; int i, aindex; cinfo = get_call_info (NULL, sig); if (!dyn_call_supported (sig, cinfo)) { g_free (cinfo); return NULL; } info = g_new0 (ArchDynCallInfo, 1); // FIXME: Preprocess the info to speed up get_dyn_call_args (). info->sig = sig; info->cinfo = cinfo; info->nstack_args = 0; for (i = 0; i < cinfo->nargs; ++i) { ArgInfo *ainfo = &cinfo->args [i]; switch (ainfo->storage) { case ArgOnStack: case ArgValuetypeAddrOnStack: info->nstack_args = MAX (info->nstack_args, (ainfo->offset / sizeof (target_mgreg_t)) + (ainfo->arg_size / sizeof (target_mgreg_t))); break; default: break; } } for (aindex = 0; aindex < sig->param_count; aindex++) { MonoType *t = sig->params [aindex]; ArgInfo *ainfo = &cinfo->args [aindex + sig->hasthis]; if (m_type_is_byref (t)) continue; switch (t->type) { case MONO_TYPE_GENERICINST: if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) { MonoClass *klass = mono_class_from_mono_type_internal (t); int size; if (!(ainfo->storage == ArgValuetypeInReg || ainfo->storage == ArgOnStack)) { /* Nullables need a temporary buffer, its stored at the end of DynCallArgs.regs after the stack args */ size = mono_class_value_size (klass, NULL); info->nullable_area += size; } } break; default: break; } } info->nullable_area = ALIGN_TO (info->nullable_area, 16); /* Align to 16 bytes */ if (info->nstack_args & 1) info->nstack_args ++; return (MonoDynCallInfo*)info; } /* * mono_arch_dyn_call_free: * * Free a MonoDynCallInfo structure. */ void mono_arch_dyn_call_free (MonoDynCallInfo *info) { ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info; g_free (ainfo->cinfo); g_free (ainfo); } int mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info) { ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info; /* Extend the 'regs' field dynamically */ return sizeof (DynCallArgs) + (ainfo->nstack_args * sizeof (target_mgreg_t)) + ainfo->nullable_area; } #define PTR_TO_GREG(ptr) ((host_mgreg_t)(ptr)) #define GREG_TO_PTR(greg) ((gpointer)(greg)) /* * mono_arch_get_start_dyn_call: * * Convert the arguments ARGS to a format which can be passed to OP_DYN_CALL, and * store the result into BUF. * ARGS should be an array of pointers pointing to the arguments. * RET should point to a memory buffer large enought to hold the result of the * call. 
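 * BUF should be at least mono_arch_dyn_call_get_buf_size (INFO) bytes,
 * since the trailing DynCallArgs 'regs' storage is sized dynamically.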
* This function should be as fast as possible, any work which does not depend * on the actual values of the arguments should be done in * mono_arch_dyn_call_prepare (). * start_dyn_call + OP_DYN_CALL + finish_dyn_call is equivalent to ffi_call in * libffi. */ void mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf) { ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info; DynCallArgs *p = (DynCallArgs*)buf; int arg_index, greg, i, pindex; MonoMethodSignature *sig = dinfo->sig; int buffer_offset = 0; guint8 *nullable_buffer; static int general_param_reg_to_index [MONO_MAX_IREGS]; static int float_param_reg_to_index [MONO_MAX_FREGS]; static gboolean param_reg_to_index_inited; if (!param_reg_to_index_inited) { for (i = 0; i < PARAM_REGS; ++i) general_param_reg_to_index [param_regs[i]] = i; for (i = 0; i < FLOAT_PARAM_REGS; ++i) float_param_reg_to_index [float_param_regs[i]] = i; mono_memory_barrier (); param_reg_to_index_inited = 1; } else { mono_memory_barrier (); } p->res = 0; p->ret = ret; p->nstack_args = dinfo->nstack_args; arg_index = 0; greg = 0; pindex = 0; /* Stored after the stack arguments */ nullable_buffer = (guint8*)&(p->regs [PARAM_REGS + dinfo->nstack_args]); if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) { p->regs [greg ++] = PTR_TO_GREG(*(args [arg_index ++])); if (!sig->hasthis) pindex = 1; } if (dinfo->cinfo->ret.storage == ArgValuetypeAddrInIReg || dinfo->cinfo->ret.storage == ArgGsharedvtVariableInReg) p->regs [greg ++] = PTR_TO_GREG (ret); for (; pindex < sig->param_count; pindex++) { MonoType *t = mini_get_underlying_type (sig->params [pindex]); gpointer *arg = args [arg_index ++]; ArgInfo *ainfo = &dinfo->cinfo->args [pindex + sig->hasthis]; int slot; if (ainfo->storage == ArgOnStack || ainfo->storage == ArgValuetypeAddrOnStack) { slot = PARAM_REGS + (ainfo->offset / sizeof (target_mgreg_t)); } else if (ainfo->storage == ArgValuetypeAddrInIReg) { g_assert (ainfo->pair_storage [0] == ArgInIReg && ainfo->pair_storage [1] == ArgNone); slot = general_param_reg_to_index [ainfo->pair_regs [0]]; } else if (ainfo->storage == ArgInFloatSSEReg || ainfo->storage == ArgInDoubleSSEReg) { slot = float_param_reg_to_index [ainfo->reg]; } else { slot = general_param_reg_to_index [ainfo->reg]; } if (m_type_is_byref (t)) { p->regs [slot] = PTR_TO_GREG (*(arg)); continue; } switch (t->type) { case MONO_TYPE_OBJECT: case MONO_TYPE_PTR: case MONO_TYPE_I: case MONO_TYPE_U: #if !defined(MONO_ARCH_ILP32) case MONO_TYPE_I8: case MONO_TYPE_U8: #endif p->regs [slot] = PTR_TO_GREG (*(arg)); break; #if defined(MONO_ARCH_ILP32) case MONO_TYPE_I8: case MONO_TYPE_U8: p->regs [slot] = *(guint64*)(arg); break; #endif case MONO_TYPE_U1: p->regs [slot] = *(guint8*)(arg); break; case MONO_TYPE_I1: p->regs [slot] = *(gint8*)(arg); break; case MONO_TYPE_I2: p->regs [slot] = *(gint16*)(arg); break; case MONO_TYPE_U2: p->regs [slot] = *(guint16*)(arg); break; case MONO_TYPE_I4: p->regs [slot] = *(gint32*)(arg); break; case MONO_TYPE_U4: p->regs [slot] = *(guint32*)(arg); break; case MONO_TYPE_R4: { double d; *(float*)&d = *(float*)(arg); if (ainfo->storage == ArgOnStack) { *(double *)(p->regs + slot) = d; } else { p->has_fp = 1; p->fregs [slot] = d; } break; } case MONO_TYPE_R8: if (ainfo->storage == ArgOnStack) { *(double *)(p->regs + slot) = *(double*)(arg); } else { p->has_fp = 1; p->fregs [slot] = *(double*)(arg); } break; case MONO_TYPE_GENERICINST: if (MONO_TYPE_IS_REFERENCE (t)) { p->regs [slot] = PTR_TO_GREG (*(arg)); break; } else if (t->type == 
MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) { MonoClass *klass = mono_class_from_mono_type_internal (t); guint8 *nullable_buf; int size; size = mono_class_value_size (klass, NULL); if (ainfo->storage == ArgValuetypeInReg || ainfo->storage == ArgOnStack) { nullable_buf = g_alloca (size); } else { nullable_buf = nullable_buffer + buffer_offset; buffer_offset += size; g_assert (buffer_offset <= dinfo->nullable_area); } /* The argument pointed to by arg is either a boxed vtype or null */ mono_nullable_init (nullable_buf, (MonoObject*)arg, klass); arg = (gpointer*)nullable_buf; /* Fall though */ } else { /* Fall through */ } case MONO_TYPE_VALUETYPE: { switch (ainfo->storage) { case ArgValuetypeInReg: for (i = 0; i < 2; ++i) { switch (ainfo->pair_storage [i]) { case ArgNone: break; case ArgInIReg: slot = general_param_reg_to_index [ainfo->pair_regs [i]]; p->regs [slot] = ((target_mgreg_t*)(arg))[i]; break; case ArgInFloatSSEReg: { double d; p->has_fp = 1; slot = float_param_reg_to_index [ainfo->pair_regs [i]]; *(float*)&d = ((float*)(arg))[i]; p->fregs [slot] = d; break; } case ArgInDoubleSSEReg: p->has_fp = 1; slot = float_param_reg_to_index [ainfo->pair_regs [i]]; p->fregs [slot] = ((double*)(arg))[i]; break; default: g_assert_not_reached (); break; } } break; case ArgValuetypeAddrInIReg: case ArgValuetypeAddrOnStack: // In DYNCALL use case value types are already copied when included in parameter array. // Currently no need to make an extra temporary value type on stack for this use case. p->regs [slot] = (target_mgreg_t)arg; break; case ArgOnStack: for (i = 0; i < ainfo->arg_size / 8; ++i) p->regs [slot + i] = ((target_mgreg_t*)(arg))[i]; break; default: g_assert_not_reached (); break; } break; } default: g_assert_not_reached (); } } } /* * mono_arch_finish_dyn_call: * * Store the result of a dyn call into the return value buffer passed to * start_dyn_call (). * This function should be as fast as possible, any work which does not depend * on the actual values of the arguments should be done in * mono_arch_dyn_call_prepare (). 
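 * A typical caller (illustrative sketch):
 *   info = mono_arch_dyn_call_prepare (sig);
 *   buf = g_alloca (mono_arch_dyn_call_get_buf_size (info));
 *   mono_arch_start_dyn_call (info, args, ret, buf);
 *   <execute OP_DYN_CALL on buf>
 *   mono_arch_finish_dyn_call (info, buf);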
*/ void mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf) { ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info; MonoMethodSignature *sig = dinfo->sig; DynCallArgs *dargs = (DynCallArgs*)buf; guint8 *ret = dargs->ret; host_mgreg_t res = dargs->res; MonoType *sig_ret = mini_get_underlying_type (sig->ret); int i; switch (sig_ret->type) { case MONO_TYPE_VOID: *(gpointer*)ret = NULL; break; case MONO_TYPE_OBJECT: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: *(gpointer*)ret = GREG_TO_PTR (res); break; case MONO_TYPE_I1: *(gint8*)ret = res; break; case MONO_TYPE_U1: *(guint8*)ret = res; break; case MONO_TYPE_I2: *(gint16*)ret = res; break; case MONO_TYPE_U2: *(guint16*)ret = res; break; case MONO_TYPE_I4: *(gint32*)ret = res; break; case MONO_TYPE_U4: *(guint32*)ret = res; break; case MONO_TYPE_I8: *(gint64*)ret = res; break; case MONO_TYPE_U8: *(guint64*)ret = res; break; case MONO_TYPE_R4: *(float*)ret = *(float*)&(dargs->fregs [0]); break; case MONO_TYPE_R8: *(double*)ret = dargs->fregs [0]; break; case MONO_TYPE_GENERICINST: if (MONO_TYPE_IS_REFERENCE (sig_ret)) { *(gpointer*)ret = GREG_TO_PTR(res); break; } else { /* Fall through */ } case MONO_TYPE_VALUETYPE: if (dinfo->cinfo->ret.storage == ArgValuetypeAddrInIReg || dinfo->cinfo->ret.storage == ArgGsharedvtVariableInReg) { /* Nothing to do */ } else { ArgInfo *ainfo = &dinfo->cinfo->ret; g_assert (ainfo->storage == ArgValuetypeInReg); for (i = 0; i < 2; ++i) { switch (ainfo->pair_storage [0]) { case ArgInIReg: ((host_mgreg_t*)ret)[i] = res; break; case ArgInDoubleSSEReg: ((double*)ret)[i] = dargs->fregs [i]; break; case ArgNone: break; default: g_assert_not_reached (); break; } } } break; default: g_assert_not_reached (); } } #undef PTR_TO_GREG #undef GREG_TO_PTR /* emit an exception if condition is fail */ #define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \ do { \ MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \ if (tins == NULL) { \ mono_add_patch_info (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_EXC, exc_name); \ x86_branch32 (code, cond, 0, signed); \ } else { \ EMIT_COND_BRANCH (tins, cond, signed); \ } \ } while (0); #define EMIT_SSE2_FPFUNC(code, op, dreg, sreg1) do { \ amd64_movsd_membase_reg (code, AMD64_RSP, -8, (sreg1)); \ amd64_fld_membase (code, AMD64_RSP, -8, TRUE); \ amd64_ ##op (code); \ amd64_fst_membase (code, AMD64_RSP, -8, TRUE, TRUE); \ amd64_movsd_reg_membase (code, (dreg), AMD64_RSP, -8); \ } while (0); #ifndef DISABLE_JIT static guint8* emit_call (MonoCompile *cfg, MonoCallInst *call, guint8 *code, MonoJitICallId jit_icall_id) { gboolean no_patch = FALSE; MonoJumpInfoTarget patch; // FIXME? This is similar to mono_call_to_patch, except it favors MONO_PATCH_INFO_ABS over call->jit_icall_id. if (jit_icall_id) { g_assert (!call); patch.type = MONO_PATCH_INFO_JIT_ICALL_ID; patch.target = GUINT_TO_POINTER (jit_icall_id); } else if (call->inst.flags & MONO_INST_HAS_METHOD) { patch.type = MONO_PATCH_INFO_METHOD; patch.target = call->method; } else { patch.type = MONO_PATCH_INFO_ABS; patch.target = call->fptr; } /* * FIXME: Add support for thunks */ { gboolean near_call = FALSE; /* * Indirect calls are expensive so try to make a near call if possible. * The caller memory is allocated by the code manager so it is * guaranteed to be at a 32 bit offset. 
*/ if (patch.type != MONO_PATCH_INFO_ABS) { /* The target is in memory allocated using the code manager */ near_call = TRUE; if (patch.type == MONO_PATCH_INFO_METHOD) { MonoMethod* const method = call->method; if (m_class_get_image (method->klass)->aot_module) /* The callee might be an AOT method */ near_call = FALSE; if (method->dynamic) /* The target is in malloc-ed memory */ near_call = FALSE; } else { /* * The call might go directly to a native function without * the wrapper. */ MonoJitICallInfo * const mi = mono_find_jit_icall_info (jit_icall_id); gconstpointer target = mono_icall_get_wrapper (mi); if ((((guint64)target) >> 32) != 0) near_call = FALSE; } } else { MonoJumpInfo *jinfo = NULL; if (cfg->abs_patches) jinfo = (MonoJumpInfo *)g_hash_table_lookup (cfg->abs_patches, call->fptr); if (jinfo) { if (jinfo->type == MONO_PATCH_INFO_JIT_ICALL_ADDR) { MonoJitICallInfo *mi = mono_find_jit_icall_info (jinfo->data.jit_icall_id); if (mi && (((guint64)mi->func) >> 32) == 0) near_call = TRUE; no_patch = TRUE; } else { /* * This is not really an optimization, but required because the * generic class init trampolines use R11 to pass the vtable. */ near_call = TRUE; } } else { jit_icall_id = call->jit_icall_id; if (jit_icall_id) { MonoJitICallInfo const *info = mono_find_jit_icall_info (jit_icall_id); // Change patch from MONO_PATCH_INFO_ABS to MONO_PATCH_INFO_JIT_ICALL_ID. patch.type = MONO_PATCH_INFO_JIT_ICALL_ID; patch.target = GUINT_TO_POINTER (jit_icall_id); if (info->func == info->wrapper) { /* No wrapper */ if ((((guint64)info->func) >> 32) == 0) near_call = TRUE; } else { /* ?See the comment in mono_codegen ()? */ near_call = TRUE; } } else if ((((guint64)patch.target) >> 32) == 0) { near_call = TRUE; no_patch = TRUE; } } } if (cfg->method->dynamic) /* These methods are allocated using malloc */ near_call = FALSE; #ifdef MONO_ARCH_NOMAP32BIT near_call = FALSE; #endif /* The 64bit XEN kernel does not honour the MAP_32BIT flag. (#522894) */ if (optimize_for_xen) near_call = FALSE; if (cfg->compile_aot) { near_call = TRUE; no_patch = TRUE; } if (near_call) { /* * Align the call displacement to an address divisible by 4 so it does * not span cache lines. This is required for code patching to work on SMP * systems. 
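 * The call opcode is 1 byte, so code + 1 is where the rel32
 * displacement starts; padding until that address is divisible by 4
 * keeps the whole displacement inside a single aligned dword, which
 * can then be patched atomically.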
*/ if (!no_patch && ((guint32)(code + 1 - cfg->native_code) % 4) != 0) { guint32 pad_size = 4 - ((guint32)(code + 1 - cfg->native_code) % 4); amd64_padding (code, pad_size); } mono_add_patch_info (cfg, code - cfg->native_code, patch.type, patch.target); amd64_call_code (code, 0); } else { if (!no_patch && ((guint32)(code + 2 - cfg->native_code) % 8) != 0) { guint32 pad_size = 8 - ((guint32)(code + 2 - cfg->native_code) % 8); amd64_padding (code, pad_size); g_assert ((guint64)(code + 2 - cfg->native_code) % 8 == 0); } mono_add_patch_info (cfg, code - cfg->native_code, patch.type, patch.target); amd64_set_reg_template (code, GP_SCRATCH_REG); amd64_call_reg (code, GP_SCRATCH_REG); } } set_code_cursor (cfg, code); return code; } static int store_membase_imm_to_store_membase_reg (int opcode) { switch (opcode) { case OP_STORE_MEMBASE_IMM: return OP_STORE_MEMBASE_REG; case OP_STOREI4_MEMBASE_IMM: return OP_STOREI4_MEMBASE_REG; case OP_STOREI8_MEMBASE_IMM: return OP_STOREI8_MEMBASE_REG; } return -1; } #define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB) || ((opcode) == OP_ISBB_IMM))) /* * mono_arch_peephole_pass_1: * * Perform peephole opts which should/can be performed before local regalloc */ void mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *n; MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) { MonoInst *last_ins = mono_inst_prev (ins, FILTER_IL_SEQ_POINT); switch (ins->opcode) { case OP_ADD_IMM: case OP_IADD_IMM: case OP_LADD_IMM: if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS) && (ins->inst_imm > 0)) { /* * X86_LEA is like ADD, but doesn't have the * sreg1==dreg restriction. inst_imm > 0 is needed since LEA sign-extends * its operand to 64 bit. */ ins->opcode = ins->opcode == OP_IADD_IMM ? OP_X86_LEA_MEMBASE : OP_AMD64_LEA_MEMBASE; ins->inst_basereg = ins->sreg1; } break; case OP_LXOR: case OP_IXOR: if ((ins->sreg1 == ins->sreg2) && (ins->sreg1 == ins->dreg)) { MonoInst *ins2; /* * Replace STORE_MEMBASE_IMM 0 with STORE_MEMBASE_REG since * the latter has length 2-3 instead of 6 (reverse constant * propagation). These instruction sequences are very common * in the initlocals bblock. 
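 * For example (sketch):
 *   xor %rax, %rax                                 ; this ins
 *   movq $0, 0x10(%rbp)  ->  movq %rax, 0x10(%rbp) ; reuse the zero
 * storing the already zeroed register instead of a 4 byte immediate.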
*/ for (ins2 = ins->next; ins2; ins2 = ins2->next) { if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM) || (ins2->opcode == OP_STORE_MEMBASE_IMM)) && (ins2->inst_imm == 0)) { ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode); ins2->sreg1 = ins->dreg; } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG)) { /* Continue */ } else if (((ins2->opcode == OP_ICONST) || (ins2->opcode == OP_I8CONST)) && (ins2->dreg == ins->dreg) && (ins2->inst_c0 == 0)) { NULLIFY_INS (ins2); /* Continue */ } else if (ins2->opcode == OP_IL_SEQ_POINT) { /* Continue */ } else { break; } } } break; case OP_COMPARE_IMM: case OP_LCOMPARE_IMM: /* OP_COMPARE_IMM (reg, 0) * --> * OP_AMD64_TEST_NULL (reg) */ if (!ins->inst_imm) ins->opcode = OP_AMD64_TEST_NULL; break; case OP_ICOMPARE_IMM: if (!ins->inst_imm) ins->opcode = OP_X86_TEST_NULL; break; case OP_AMD64_ICOMPARE_MEMBASE_IMM: /* * OP_STORE_MEMBASE_REG reg, offset(basereg) * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm * --> * OP_STORE_MEMBASE_REG reg, offset(basereg) * OP_COMPARE_IMM reg, imm * * Note: if imm = 0 then OP_COMPARE_IMM replaced with OP_X86_TEST_NULL */ if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { ins->opcode = OP_ICOMPARE_IMM; ins->sreg1 = last_ins->sreg1; /* check if we can remove cmp reg,0 with test null */ if (!ins->inst_imm) ins->opcode = OP_X86_TEST_NULL; } break; } mono_peephole_ins (bb, ins); } } void mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *n; MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) { switch (ins->opcode) { case OP_ICONST: case OP_I8CONST: { MonoInst *next = mono_inst_next (ins, FILTER_IL_SEQ_POINT); /* reg = 0 -> XOR (reg, reg) */ /* XOR sets cflags on x86, so we cant do it always */ if (ins->inst_c0 == 0 && (!next || (next && INST_IGNORES_CFLAGS (next->opcode)))) { ins->opcode = OP_LXOR; ins->sreg1 = ins->dreg; ins->sreg2 = ins->dreg; /* Fall through */ } else { break; } } case OP_LXOR: /* * Use IXOR to avoid a rex prefix if possible. The cpu will sign extend the * 0 result into 64 bits. */ if ((ins->sreg1 == ins->sreg2) && (ins->sreg1 == ins->dreg)) { ins->opcode = OP_IXOR; } /* Fall through */ case OP_IXOR: if ((ins->sreg1 == ins->sreg2) && (ins->sreg1 == ins->dreg)) { MonoInst *ins2; /* * Replace STORE_MEMBASE_IMM 0 with STORE_MEMBASE_REG since * the latter has length 2-3 instead of 6 (reverse constant * propagation). These instruction sequences are very common * in the initlocals bblock. 
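 * The scan below is conservative: it only walks past stores, liveness
 * markers and redundant zero constants, and stops at the first
 * instruction it does not recognize, since that one could clobber the
 * zeroed dreg.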
*/ for (ins2 = ins->next; ins2; ins2 = ins2->next) { if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM) || (ins2->opcode == OP_STORE_MEMBASE_IMM)) && (ins2->inst_imm == 0)) { ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode); ins2->sreg1 = ins->dreg; } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_REG) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG) || (ins2->opcode == OP_LIVERANGE_START) || (ins2->opcode == OP_GC_LIVENESS_DEF) || (ins2->opcode == OP_GC_LIVENESS_USE)) { /* Continue */ } else if (((ins2->opcode == OP_ICONST) || (ins2->opcode == OP_I8CONST)) && (ins2->dreg == ins->dreg) && (ins2->inst_c0 == 0)) { NULLIFY_INS (ins2); /* Continue */ } else if (ins2->opcode == OP_IL_SEQ_POINT) { /* Continue */ } else { break; } } } break; case OP_IADD_IMM: if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1)) ins->opcode = OP_X86_INC_REG; break; case OP_ISUB_IMM: if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1)) ins->opcode = OP_X86_DEC_REG; break; } mono_peephole_ins (bb, ins); } } #define NEW_INS(cfg,ins,dest,op) do { \ MONO_INST_NEW ((cfg), (dest), (op)); \ (dest)->cil_code = (ins)->cil_code; \ mono_bblock_insert_before_ins (bb, ins, (dest)); \ } while (0) #define NEW_SIMD_INS(cfg,ins,dest,op,d,s1,s2) do { \ MONO_INST_NEW ((cfg), (dest), (op)); \ (dest)->cil_code = (ins)->cil_code; \ (dest)->dreg = d; \ (dest)->sreg1 = s1; \ (dest)->sreg2 = s2; \ (dest)->type = STACK_VTYPE; \ (dest)->klass = ins->klass; \ mono_bblock_insert_before_ins (bb, ins, (dest)); \ } while (0) static int simd_type_to_comp_op (int t) { switch (t) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_PCMPEQB; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_PCMPEQW; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_PCMPEQD; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_PCMPEQQ; // SSE 4.1 case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_PCMPEQQ; // SSE 4.1 #else return OP_PCMPEQD; #endif default: g_assert_not_reached (); return -1; } } static int simd_type_to_sub_op (int t) { switch (t) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_PSUBB; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_PSUBW; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_PSUBD; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_PSUBQ; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_PSUBQ; #else return OP_PSUBD; #endif default: g_assert_not_reached (); return -1; } } static int simd_type_to_shl_op (int t) { switch (t) { case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_PSHLW; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_PSHLD; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_PSHLQ; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_PSHLQ; #else return OP_PSHLD; #endif default: g_assert_not_reached (); return -1; } } static int simd_type_to_gt_op (int t) { switch (t) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_PCMPGTB; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_PCMPGTW; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_PCMPGTD; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_PCMPGTQ; // SSE 4.2 case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_PCMPGTQ; // SSE 4.2 #else return OP_PCMPGTD; #endif default: g_assert_not_reached (); return -1; } } static int simd_type_to_max_un_op (int t) { switch (t) { case MONO_TYPE_U1:
return OP_PMAXB_UN; case MONO_TYPE_U2: return OP_PMAXW_UN; // SSE 4.1 case MONO_TYPE_U4: return OP_PMAXD_UN; // SSE 4.1 //case MONO_TYPE_U8: // return OP_PMAXQ_UN; // AVX #if TARGET_SIZEOF_VOID_P == 8 //case MONO_TYPE_U: // return OP_PMAXQ_UN; // AVX #else case MONO_TYPE_U: return OP_PMAXD_UN; // SSE 4.1 #endif default: g_assert_not_reached (); return -1; } } static int simd_type_to_add_op (int t) { switch (t) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_PADDB; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_PADDW; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_PADDD; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_PADDQ; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_PADDQ; #else return OP_PADDD; #endif default: g_assert_not_reached (); return -1; } } static int simd_type_to_min_op (int t) { switch (t) { case MONO_TYPE_I1: return OP_PMINB; // SSE 4.1 case MONO_TYPE_U1: return OP_PMINB_UN; // SSE 4.1 case MONO_TYPE_I2: return OP_PMINW; case MONO_TYPE_U2: return OP_PMINW_UN; case MONO_TYPE_I4: return OP_PMIND; // SSE 4.1 case MONO_TYPE_U4: return OP_PMIND_UN; // SSE 4.1 // case MONO_TYPE_I8: // AVX // case MONO_TYPE_U8: #if TARGET_SIZEOF_VOID_P == 8 //case MONO_TYPE_I: // AVX //case MONO_TYPE_U: #else case MONO_TYPE_I: return OP_PMIND; // SSE 4.1 case MONO_TYPE_U: return OP_PMIND_UN; // SSE 4.1 #endif default: g_assert_not_reached (); return -1; } } static int simd_type_to_max_op (int t) { switch (t) { case MONO_TYPE_I1: return OP_PMAXB; // SSE 4.1 case MONO_TYPE_U1: return OP_PMAXB_UN; // SSE 4.1 case MONO_TYPE_I2: return OP_PMAXW; case MONO_TYPE_U2: return OP_PMAXW_UN; case MONO_TYPE_I4: return OP_PMAXD; // SSE 4.1 case MONO_TYPE_U4: return OP_PMAXD_UN; // SSE 4.1 // case MONO_TYPE_I8: // AVX // case MONO_TYPE_U8: #if TARGET_SIZEOF_VOID_P == 8 //case MONO_TYPE_I: // AVX //case MONO_TYPE_U: #else case MONO_TYPE_I: return OP_PMAXD; // SSE 4.1 case MONO_TYPE_U: return OP_PMAXD_UN; // SSE 4.1 #endif default: g_assert_not_reached (); return -1; } } static void emit_simd_comp_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2) { MonoInst *temp; gboolean is64BitNativeInt = FALSE; #if TARGET_SIZEOF_VOID_P == 8 is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U; #endif if (!mono_hwcap_x86_has_sse42 && (ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8 || is64BitNativeInt)) { int temp_reg1 = mono_alloc_ireg (cfg); int temp_reg2 = mono_alloc_ireg (cfg); NEW_SIMD_INS (cfg, ins, temp, OP_PCMPEQD, temp_reg1, sreg1, sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_reg2, temp_reg1, -1); temp->inst_c0 = 0xB1; NEW_SIMD_INS (cfg, ins, temp, OP_ANDPD, dreg, temp_reg1, temp_reg2); } else { NEW_SIMD_INS (cfg, ins, temp, simd_type_to_comp_op (type), dreg, sreg1, sreg2); } } static void emit_simd_gt_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2); static void emit_simd_gt_un_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2) { MonoInst *temp; switch (type) { case MONO_TYPE_U2: case MONO_TYPE_U4: if (mono_hwcap_x86_has_sse41) goto USE_MAX; goto USE_SIGNED_GT; case MONO_TYPE_U1: USE_MAX: { // dreg = max(sreg1, sreg2) != sreg2 int temp_reg1 = mono_alloc_ireg (cfg); int temp_reg2 = mono_alloc_ireg (cfg); int temp_reg3 = mono_alloc_ireg (cfg); NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_un_op (type), temp_reg1, sreg1, sreg2); emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg2, 
temp_reg1, ins->sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_XONES, temp_reg3, -1, -1); NEW_SIMD_INS (cfg, ins, temp, OP_XORPD, dreg, temp_reg2, temp_reg3); break; } case MONO_TYPE_U8: USE_SIGNED_GT: { // convert to signed integer by subtracting (1 << (size - 1)) from each operand // and then use signed comparison int temp_c0 = mono_alloc_ireg (cfg); int temp_c80 = mono_alloc_ireg (cfg); int temp_s1 = mono_alloc_ireg (cfg); int temp_s2 = mono_alloc_ireg (cfg); NEW_SIMD_INS (cfg, ins, temp, OP_XONES, temp_c0, -1, -1); NEW_SIMD_INS (cfg, ins, temp, simd_type_to_shl_op (type), temp_c80, temp_c0, -1); temp->inst_imm = type == MONO_TYPE_U2 ? 15 : (type == MONO_TYPE_U4 ? 31 : 63); NEW_SIMD_INS (cfg, ins, temp, simd_type_to_sub_op (type), temp_s1, sreg1, temp_c80); NEW_SIMD_INS (cfg, ins, temp, simd_type_to_sub_op (type), temp_s2, sreg2, temp_c80); emit_simd_gt_op (cfg, bb, ins, type, dreg, temp_s1, temp_s2); break; case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 goto USE_SIGNED_GT; #else if (mono_hwcap_x86_has_sse41) goto USE_MAX; goto USE_SIGNED_GT; #endif } } } static void emit_simd_gt_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2) { MonoInst *temp; gboolean is64BitNativeInt = FALSE; #if TARGET_SIZEOF_VOID_P == 8 is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U; #endif if (!mono_hwcap_x86_has_sse42 && (type == MONO_TYPE_I8 || type == MONO_TYPE_U8 || is64BitNativeInt)) { // Decompose 64-bit greater than to 32-bit // // t = (v1 > v2) // u = (v1 == v2) // v = (v1 > v2) unsigned // // z = shuffle(t, (3, 3, 1, 1)) // t1 = shuffle(v, (2, 2, 0, 0)) // u1 = shuffle(u, (3, 3, 1, 1)) // w = and(t1, u1) // result = bitwise_or(z, w) int temp_t = mono_alloc_ireg (cfg); int temp_u = mono_alloc_ireg (cfg); int temp_v = mono_alloc_ireg (cfg); int temp_z = temp_t; int temp_t1 = temp_v; int temp_u1 = temp_u; int temp_w = temp_t1; NEW_SIMD_INS (cfg, ins, temp, OP_PCMPGTD, temp_t, sreg1, sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_PCMPEQD, temp_u, sreg1, sreg2); emit_simd_gt_un_op (cfg, bb, ins, MONO_TYPE_U4, temp_v, sreg1, sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_z, temp_t, -1); temp->inst_c0 = 0xF5; NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_t1, temp_v, -1); temp->inst_c0 = 0xA0; NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_u1, temp_u, -1); temp->inst_c0 = 0xF5; NEW_SIMD_INS (cfg, ins, temp, OP_ANDPD, temp_w, temp_t1, temp_u1); NEW_SIMD_INS (cfg, ins, temp, OP_ORPD, dreg, temp_z, temp_w); } else { NEW_SIMD_INS (cfg, ins, temp, simd_type_to_gt_op (type), dreg, sreg1, sreg2); } } static void emit_simd_min_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2) { MonoInst *temp; gboolean is64BitNativeInt = FALSE; #if TARGET_SIZEOF_VOID_P == 8 is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U; #endif if (type == MONO_TYPE_I2 || type == MONO_TYPE_U2) { // SSE2, so always available NEW_SIMD_INS (cfg, ins, temp, simd_type_to_min_op (type), dreg, sreg1, sreg2); } else if (!mono_hwcap_x86_has_sse41 || type == MONO_TYPE_I8 || type == MONO_TYPE_U8 || is64BitNativeInt) { // Decompose to t = (s1 > s2), d = (s1 & !t) | (s2 & t) int temp_t = mono_alloc_ireg (cfg); int temp_d1 = mono_alloc_ireg (cfg); int temp_d2 = mono_alloc_ireg (cfg); if (type == MONO_TYPE_U8 || type == MONO_TYPE_U4 || type == MONO_TYPE_U1) emit_simd_gt_un_op (cfg, bb, ins, type, temp_t, sreg1, sreg2); else emit_simd_gt_op (cfg, bb, ins, type, temp_t, sreg1, sreg2); NEW_SIMD_INS (cfg, ins, 
temp, OP_PANDN, temp_d1, temp_t, sreg1); NEW_SIMD_INS (cfg, ins, temp, OP_PAND, temp_d2, temp_t, sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_POR, dreg, temp_d1, temp_d2); } else { // SSE 4.1 has byte- and dword- operations NEW_SIMD_INS (cfg, ins, temp, simd_type_to_min_op (type), dreg, sreg1, sreg2); } } static void emit_simd_max_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2) { MonoInst *temp; gboolean is64BitNativeInt = FALSE; #if TARGET_SIZEOF_VOID_P == 8 is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U; #endif if (type == MONO_TYPE_I2 || type == MONO_TYPE_U2) { // SSE2, so always available NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_op (type), dreg, sreg1, sreg2); } else if (!mono_hwcap_x86_has_sse41 || type == MONO_TYPE_I8 || type == MONO_TYPE_U8 || is64BitNativeInt) { // Decompose to t = (s1 > s2), d = (s1 & t) | (s2 & !t) int temp_t = mono_alloc_ireg (cfg); int temp_d1 = mono_alloc_ireg (cfg); int temp_d2 = mono_alloc_ireg (cfg); if (type == MONO_TYPE_U8 || type == MONO_TYPE_U4 || type == MONO_TYPE_U1) emit_simd_gt_un_op (cfg, bb, ins, type, temp_t, sreg1, sreg2); else emit_simd_gt_op (cfg, bb, ins, type, temp_t, sreg1, sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_PAND, temp_d1, temp_t, sreg1); NEW_SIMD_INS (cfg, ins, temp, OP_PANDN, temp_d2, temp_t, sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_POR, dreg, temp_d1, temp_d2); } else { // SSE 4.1 has byte- and dword- operations NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_op (type), dreg, sreg1, sreg2); } } /* * mono_arch_lowering_pass: * * Converts complex opcodes into simpler ones so that each IR instruction * corresponds to one machine instruction. */ void mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *n, *temp; /* * FIXME: Need to add more instructions, but the current machine * description can't model some parts of the composite instructions like * cdq. 
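 * For example, an OP_COMPARE_IMM whose immediate does not fit in 32
 * bits is decomposed below into an OP_I8CONST that materializes the
 * immediate in a fresh vreg, followed by a register-register
 * OP_COMPARE.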
*/ MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) { switch (ins->opcode) { case OP_DIV_IMM: case OP_REM_IMM: case OP_IDIV_IMM: case OP_IDIV_UN_IMM: case OP_IREM_UN_IMM: case OP_LREM_IMM: case OP_IREM_IMM: mono_decompose_op_imm (cfg, bb, ins); break; case OP_COMPARE_IMM: case OP_LCOMPARE_IMM: if (!amd64_use_imm32 (ins->inst_imm)) { NEW_INS (cfg, ins, temp, OP_I8CONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->opcode = OP_COMPARE; ins->sreg2 = temp->dreg; } break; #ifndef MONO_ARCH_ILP32 case OP_LOAD_MEMBASE: #endif case OP_LOADI8_MEMBASE: /* Don't generate memindex opcodes (to simplify */ /* read sandboxing) */ if (!amd64_use_imm32 (ins->inst_offset)) { NEW_INS (cfg, ins, temp, OP_I8CONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ins->opcode = OP_AMD64_LOADI8_MEMINDEX; ins->inst_indexreg = temp->dreg; } break; #ifndef MONO_ARCH_ILP32 case OP_STORE_MEMBASE_IMM: #endif case OP_STOREI8_MEMBASE_IMM: if (!amd64_use_imm32 (ins->inst_imm)) { NEW_INS (cfg, ins, temp, OP_I8CONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->opcode = OP_STOREI8_MEMBASE_REG; ins->sreg1 = temp->dreg; } break; #ifdef MONO_ARCH_SIMD_INTRINSICS case OP_EXPAND_I1: { int temp_reg1 = mono_alloc_ireg (cfg); int temp_reg2 = mono_alloc_ireg (cfg); int original_reg = ins->sreg1; NEW_INS (cfg, ins, temp, OP_ICONV_TO_U1); temp->sreg1 = original_reg; temp->dreg = temp_reg1; NEW_INS (cfg, ins, temp, OP_SHL_IMM); temp->sreg1 = temp_reg1; temp->dreg = temp_reg2; temp->inst_imm = 8; NEW_INS (cfg, ins, temp, OP_LOR); temp->sreg1 = temp->dreg = temp_reg2; temp->sreg2 = temp_reg1; ins->opcode = OP_EXPAND_I2; ins->sreg1 = temp_reg2; break; } case OP_XEQUAL: { int temp_reg1 = mono_alloc_ireg (cfg); int temp_reg2 = mono_alloc_ireg (cfg); NEW_SIMD_INS (cfg, ins, temp, OP_PCMPEQD, temp_reg1, ins->sreg1, ins->sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_EXTRACT_MASK, temp_reg2, temp_reg1, -1); temp->type = STACK_I4; NEW_INS (cfg, ins, temp, OP_COMPARE_IMM); temp->sreg1 = temp_reg2; temp->inst_imm = 0xFFFF; temp->klass = ins->klass; ins->opcode = OP_CEQ; ins->sreg1 = -1; ins->sreg2 = -1; break; } case OP_XCOMPARE: { int temp_reg; gboolean is64BitNativeInt = FALSE; switch (ins->inst_c0) { case CMP_EQ: emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2); NULLIFY_INS (ins); break; case CMP_NE: { int temp_reg1 = mono_alloc_ireg (cfg); int temp_reg2 = mono_alloc_ireg (cfg); emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg1, ins->sreg1, ins->sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_XONES, temp_reg2, -1, -1); ins->opcode = OP_XORPD; ins->sreg1 = temp_reg1; ins->sreg2 = temp_reg2; break; } case CMP_LT: temp_reg = ins->sreg1; ins->sreg1 = ins->sreg2; ins->sreg2 = temp_reg; case CMP_GT: emit_simd_gt_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2); NULLIFY_INS (ins); break; case CMP_LE: temp_reg = ins->sreg1; ins->sreg1 = ins->sreg2; ins->sreg2 = temp_reg; case CMP_GE: { int temp_reg1 = mono_alloc_ireg (cfg); int temp_reg2 = mono_alloc_ireg (cfg); emit_simd_gt_op (cfg, bb, ins, ins->inst_c1, temp_reg1, ins->sreg1, ins->sreg2); emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg2, ins->sreg1, ins->sreg2); ins->opcode = OP_POR; ins->sreg1 = temp_reg1; ins->sreg2 = temp_reg2; break; } case CMP_LE_UN: temp_reg = ins->sreg1; ins->sreg1 = ins->sreg2; ins->sreg2 = temp_reg; case CMP_GE_UN: #if TARGET_SIZEOF_VOID_P == 8 is64BitNativeInt = ins->inst_c1 == MONO_TYPE_U; #endif if (mono_hwcap_x86_has_sse41 && ins->inst_c1 !=
MONO_TYPE_U8 && !is64BitNativeInt) { int temp_reg1 = mono_alloc_ireg (cfg); NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_un_op (ins->inst_c1), temp_reg1, ins->sreg1, ins->sreg2); emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, ins->dreg, temp_reg1, ins->sreg1); NULLIFY_INS (ins); } else { int temp_reg1 = mono_alloc_ireg (cfg); int temp_reg2 = mono_alloc_ireg (cfg); emit_simd_gt_un_op (cfg, bb, ins, ins->inst_c1, temp_reg1, ins->sreg1, ins->sreg2); emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg2, ins->sreg1, ins->sreg2); ins->opcode = OP_POR; ins->sreg1 = temp_reg1; ins->sreg2 = temp_reg2; } break; case CMP_LT_UN: temp_reg = ins->sreg1; ins->sreg1 = ins->sreg2; ins->sreg2 = temp_reg; case CMP_GT_UN: { emit_simd_gt_un_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2); NULLIFY_INS (ins); break; } default: g_assert_not_reached(); break; } ins->type = STACK_VTYPE; ins->inst_c0 = 0; break; } case OP_XCOMPARE_FP: { ins->opcode = ins->inst_c1 == MONO_TYPE_R4 ? OP_COMPPS : OP_COMPPD; switch (ins->inst_c0) { case CMP_EQ: ins->inst_c0 = 0; break; case CMP_NE: ins->inst_c0 = 4; break; case CMP_LT: ins->inst_c0 = 1; break; case CMP_LE: ins->inst_c0 = 2; break; case CMP_GT: ins->inst_c0 = 6; break; case CMP_GE: ins->inst_c0 = 5; break; default: g_assert_not_reached(); break; } break; } case OP_XCAST: { ins->opcode = OP_XMOVE; break; } case OP_XBINOP: { switch (ins->inst_c0) { case OP_ISUB: ins->opcode = simd_type_to_sub_op (ins->inst_c1); break; case OP_IADD: ins->opcode = simd_type_to_add_op (ins->inst_c1); break; case OP_IAND: ins->opcode = OP_ANDPD; break; case OP_IXOR: ins->opcode = OP_XORPD; break; case OP_IOR: ins->opcode = OP_ORPD; break; case OP_IMIN: emit_simd_min_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2); NULLIFY_INS (ins); break; case OP_IMAX: emit_simd_max_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2); NULLIFY_INS (ins); break; case OP_FSUB: ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_SUBPD : OP_SUBPS; break; case OP_FADD: ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_ADDPD : OP_ADDPS; break; case OP_FDIV: ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_DIVPD : OP_DIVPS; break; case OP_FMUL: ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_MULPD : OP_MULPS; break; case OP_FMIN: ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_MINPD : OP_MINPS; break; case OP_FMAX: ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_MAXPD : OP_MAXPS; break; default: g_assert_not_reached(); break; } break; } case OP_XEXTRACT_R4: case OP_XEXTRACT_R8: case OP_XEXTRACT_I4: case OP_XEXTRACT_I8: { // TODO g_assert_not_reached(); break; } #endif default: break; } } bb->max_vreg = cfg->next_vreg; } static const int branch_cc_table [] = { X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT, X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT, X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC }; /* Maps CMP_... constants to X86_CC_... 
constants */ static const int cc_table [] = { X86_CC_EQ, X86_CC_NE, X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT, X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT }; static const int cc_signed_table [] = { TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE }; /*#include "cprop.c"*/ static unsigned char* emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed) { // Use 8 as register size to get Nan/Inf conversion to uint result truncated to 0 if (size == 8 || (!is_signed && size == 4)) amd64_sse_cvttsd2si_reg_reg (code, dreg, sreg); else amd64_sse_cvttsd2si_reg_reg_size (code, dreg, sreg, 4); if (size == 1) amd64_widen_reg (code, dreg, dreg, is_signed, FALSE); else if (size == 2) amd64_widen_reg (code, dreg, dreg, is_signed, TRUE); return code; } static unsigned char* mono_emit_stack_alloc (MonoCompile *cfg, guchar *code, MonoInst* tree) { int sreg = tree->sreg1; int need_touch = FALSE; #if defined(TARGET_WIN32) need_touch = TRUE; #elif defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK) if (!(tree->flags & MONO_INST_INIT)) need_touch = TRUE; #endif if (need_touch) { guint8* br[5]; /* * Under Windows: * If requested stack size is larger than one page, * perform stack-touch operation */ /* * Generate stack probe code. * Under Windows, it is necessary to allocate one page at a time, * "touching" stack after each successful sub-allocation. This is * because of the way stack growth is implemented - there is a * guard page before the lowest stack page that is currently commited. * Stack normally grows sequentially so OS traps access to the * guard page and commits more pages when needed. */ amd64_test_reg_imm (code, sreg, ~0xFFF); br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE); br[2] = code; /* loop */ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000); amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP); amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000); amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000); br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE); amd64_patch (br[3], br[2]); amd64_test_reg_reg (code, sreg, sreg); br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE); amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg); br[1] = code; x86_jump8 (code, 0); amd64_patch (br[0], code); amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg); amd64_patch (br[1], code); amd64_patch (br[4], code); } else amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1); if (tree->flags & MONO_INST_INIT) { int offset = 0; if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) { amd64_push_reg (code, AMD64_RAX); offset += 8; } if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) { amd64_push_reg (code, AMD64_RCX); offset += 8; } if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) { amd64_push_reg (code, AMD64_RDI); offset += 8; } amd64_shift_reg_imm (code, X86_SHR, sreg, 3); if (sreg != AMD64_RCX) amd64_mov_reg_reg (code, AMD64_RCX, sreg, 8); amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX); amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset); if (cfg->param_area) amd64_alu_reg_imm (code, X86_ADD, AMD64_RDI, cfg->param_area); amd64_cld (code); amd64_prefix (code, X86_REP_PREFIX); amd64_stosl (code); if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) amd64_pop_reg (code, AMD64_RDI); if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) amd64_pop_reg (code, AMD64_RCX); if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) amd64_pop_reg (code, AMD64_RAX); } return code; } static guint8* emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code) { CallInfo *cinfo; 
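/* cinfo is only computed for the OP_VCALL* cases below, where a value type returned in registers is spread over up to two quads (cinfo->ret.pair_storage) and has to be stored through the saved vret address. */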
guint32 quad; /* Move return value to the target register */ /* FIXME: do this in the local reg allocator */ switch (ins->opcode) { case OP_CALL: case OP_CALL_REG: case OP_CALL_MEMBASE: case OP_LCALL: case OP_LCALL_REG: case OP_LCALL_MEMBASE: g_assert (ins->dreg == AMD64_RAX); break; case OP_FCALL: case OP_FCALL_REG: case OP_FCALL_MEMBASE: { MonoType *rtype = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret); if (rtype->type == MONO_TYPE_R4) { amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, AMD64_XMM0); } else { if (ins->dreg != AMD64_XMM0) amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM0); } break; } case OP_RCALL: case OP_RCALL_REG: case OP_RCALL_MEMBASE: if (ins->dreg != AMD64_XMM0) amd64_sse_movss_reg_reg (code, ins->dreg, AMD64_XMM0); break; case OP_VCALL: case OP_VCALL_REG: case OP_VCALL_MEMBASE: case OP_VCALL2: case OP_VCALL2_REG: case OP_VCALL2_MEMBASE: cinfo = get_call_info (cfg->mempool, ((MonoCallInst*)ins)->signature); if (cinfo->ret.storage == ArgValuetypeInReg) { MonoInst *loc = cfg->arch.vret_addr_loc; /* Load the destination address */ g_assert (loc->opcode == OP_REGOFFSET); amd64_mov_reg_membase (code, AMD64_RCX, loc->inst_basereg, loc->inst_offset, sizeof(gpointer)); for (quad = 0; quad < 2; quad ++) { switch (cinfo->ret.pair_storage [quad]) { case ArgInIReg: amd64_mov_membase_reg (code, AMD64_RCX, (quad * sizeof (target_mgreg_t)), cinfo->ret.pair_regs [quad], sizeof (target_mgreg_t)); break; case ArgInFloatSSEReg: amd64_movss_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]); break; case ArgInDoubleSSEReg: amd64_movsd_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]); break; case ArgNone: break; default: NOT_IMPLEMENTED; } } } break; } return code; } #endif /* DISABLE_JIT */ #ifdef TARGET_MACH static int tls_gs_offset; #endif gboolean mono_arch_have_fast_tls (void) { #ifdef TARGET_MACH static gboolean have_fast_tls = FALSE; static gboolean inited = FALSE; guint8 *ins; if (mini_debug_options.use_fallback_tls) return FALSE; if (inited) return have_fast_tls; ins = (guint8*)pthread_getspecific; /* * We're looking for these two instructions: * * mov %gs:[offset](,%rdi,8),%rax * retq */ have_fast_tls = ins [0] == 0x65 && ins [1] == 0x48 && ins [2] == 0x8b && ins [3] == 0x04 && ins [4] == 0xfd && ins [6] == 0x00 && ins [7] == 0x00 && ins [8] == 0x00 && ins [9] == 0xc3; tls_gs_offset = ins[5]; /* * Apple now loads a different version of pthread_getspecific when launched from Xcode * For that version we're looking for these instructions: * * pushq %rbp * movq %rsp, %rbp * mov %gs:[offset](,%rdi,8),%rax * popq %rbp * retq */ if (!have_fast_tls) { have_fast_tls = ins [0] == 0x55 && ins [1] == 0x48 && ins [2] == 0x89 && ins [3] == 0xe5 && ins [4] == 0x65 && ins [5] == 0x48 && ins [6] == 0x8b && ins [7] == 0x04 && ins [8] == 0xfd && ins [10] == 0x00 && ins [11] == 0x00 && ins [12] == 0x00 && ins [13] == 0x5d && ins [14] == 0xc3; tls_gs_offset = ins[9]; } inited = TRUE; return have_fast_tls; #elif defined(TARGET_ANDROID) return FALSE; #else if (mini_debug_options.use_fallback_tls) return FALSE; return TRUE; #endif } int mono_amd64_get_tls_gs_offset (void) { #ifdef TARGET_OSX return tls_gs_offset; #else g_assert_not_reached (); return -1; #endif } /* * \param code buffer to store code to * \param dreg hard register where to place the result * \param tls_offset offset info * \return a pointer to the end of the stored code * * mono_amd64_emit_tls_get emits in \p code the native code that puts in * the dreg register the item in the 
thread local storage identified * by tls_offset. */ static guint8* mono_amd64_emit_tls_get (guint8* code, int dreg, int tls_offset) { #ifdef TARGET_WIN32 if (tls_offset < 64) { x86_prefix (code, X86_GS_PREFIX); amd64_mov_reg_mem (code, dreg, (tls_offset * 8) + 0x1480, 8); } else { guint8 *buf [16]; g_assert (tls_offset < 0x440); /* Load TEB->TlsExpansionSlots */ x86_prefix (code, X86_GS_PREFIX); amd64_mov_reg_mem (code, dreg, 0x1780, 8); amd64_test_reg_reg (code, dreg, dreg); buf [0] = code; amd64_branch (code, X86_CC_EQ, code, TRUE); amd64_mov_reg_membase (code, dreg, dreg, (tls_offset * 8) - 0x200, 8); amd64_patch (buf [0], code); } #elif defined(TARGET_MACH) x86_prefix (code, X86_GS_PREFIX); amd64_mov_reg_mem (code, dreg, tls_gs_offset + (tls_offset * 8), 8); #else if (optimize_for_xen) { x86_prefix (code, X86_FS_PREFIX); amd64_mov_reg_mem (code, dreg, 0, 8); amd64_mov_reg_membase (code, dreg, dreg, tls_offset, 8); } else { x86_prefix (code, X86_FS_PREFIX); amd64_mov_reg_mem (code, dreg, tls_offset, 8); } #endif return code; } static guint8* mono_amd64_emit_tls_set (guint8 *code, int sreg, int tls_offset) { #ifdef TARGET_WIN32 g_assert_not_reached (); #elif defined(TARGET_MACH) x86_prefix (code, X86_GS_PREFIX); amd64_mov_mem_reg (code, tls_gs_offset + (tls_offset * 8), sreg, 8); #else g_assert (!optimize_for_xen); x86_prefix (code, X86_FS_PREFIX); amd64_mov_mem_reg (code, tls_offset, sreg, 8); #endif return code; } /* * emit_setup_lmf: * * Emit code to initialize an LMF structure at LMF_OFFSET. */ static guint8* emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset) { /* * The ip field is not set, the exception handling code will obtain it from the stack location pointed to by the sp field. */ /* * sp is saved right before calls but we need to save it here too so * async stack walks would work. */ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8); /* Save rbp */ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), AMD64_RBP, 8); if (cfg->arch.omit_fp && cfa_offset != -1) mono_emit_unwind_op_offset (cfg, code, AMD64_RBP, - (cfa_offset - (lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp)))); /* These can't contain refs */ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF); mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), SLOT_NOREF); /* These are handled automatically by the stack marking code */ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), SLOT_NOREF); return code; } #ifdef TARGET_WIN32 #define TEB_LAST_ERROR_OFFSET 0x68 static guint8* emit_get_last_error (guint8* code, int dreg) { /* Threads last error value is located in TEB_LAST_ERROR_OFFSET. 
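   The load below is gs-relative: on Win64 the GS segment base points at the current thread's TEB, so a single GS-prefixed mov reads TEB->LastErrorValue without calling GetLastError.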
*/ x86_prefix (code, X86_GS_PREFIX); amd64_mov_reg_mem (code, dreg, TEB_LAST_ERROR_OFFSET, sizeof (guint32)); return code; } #else static guint8* emit_get_last_error (guint8* code, int dreg) { g_assert_not_reached (); } #endif /* benchmark and set based on cpu */ #define LOOP_ALIGNMENT 8 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting) #ifndef DISABLE_JIT static guint8* amd64_handle_varargs_nregs (guint8 *code, guint32 nregs) { #ifndef TARGET_WIN32 if (nregs) amd64_mov_reg_imm (code, AMD64_RAX, nregs); else amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX); #endif return code; } static guint8* amd64_handle_varargs_call (MonoCompile *cfg, guint8 *code, MonoCallInst *call, gboolean free_rax) { #ifdef TARGET_WIN32 return code; #else /* * The AMD64 ABI forces callers to know about varargs. */ guint32 nregs = 0; if (call->signature->call_convention == MONO_CALL_VARARG && call->signature->pinvoke) { // deliberately nothing -- but nregs = 0 and do not return } else if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE && m_class_get_image (cfg->method->klass) != mono_defaults.corlib) { /* * Since the unmanaged calling convention doesn't contain a * 'vararg' entry, we have to treat every pinvoke call as a * potential vararg call. */ for (guint32 i = 0; i < AMD64_XMM_NREG; ++i) nregs += (call->used_fregs & (1 << i)) != 0; } else { return code; } MonoInst *ins = (MonoInst*)call; if (free_rax && ins->sreg1 == AMD64_RAX) { amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8); ins->sreg1 = AMD64_R11; } return amd64_handle_varargs_nregs (code, nregs); #endif } void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins; MonoCallInst *call; guint8 *code = cfg->native_code + cfg->code_len; /* Fix max_offset estimate for each successor bb */ gboolean optimize_branch_pred = (cfg->opt & MONO_OPT_BRANCH) && (cfg->max_block_num < MAX_BBLOCKS_FOR_BRANCH_OPTS); if (optimize_branch_pred) { int current_offset = cfg->code_len; MonoBasicBlock *current_bb; for (current_bb = bb; current_bb != NULL; current_bb = current_bb->next_bb) { current_bb->max_offset = current_offset; current_offset += current_bb->max_length; } } if (cfg->opt & MONO_OPT_LOOP) { int pad, align = LOOP_ALIGNMENT; /* set alignment depending on cpu */ if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) { pad = align - pad; /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/ amd64_padding (code, pad); cfg->code_len += pad; bb->native_offset = cfg->code_len; } } if (cfg->verbose_level > 2) g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset); set_code_cursor (cfg, code); mono_debug_open_block (cfg, bb, code - cfg->native_code); if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) x86_breakpoint (code); MONO_BB_FOR_EACH_INS (bb, ins) { const guint offset = code - cfg->native_code; set_code_cursor (cfg, code); int max_len = ins_get_size (ins->opcode); code = realloc_code (cfg, max_len); if (cfg->debug_info) mono_debug_record_line_number (cfg, ins, offset); switch (ins->opcode) { case OP_BIGMUL: amd64_mul_reg (code, ins->sreg2, TRUE); break; case OP_BIGMUL_UN: amd64_mul_reg (code, ins->sreg2, FALSE); break; case OP_X86_SETEQ_MEMBASE: amd64_set_membase (code, X86_CC_EQ, ins->inst_basereg, ins->inst_offset, TRUE); break; case OP_STOREI1_MEMBASE_IMM: amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset,
ins->inst_imm, 1); break; case OP_STOREI2_MEMBASE_IMM: amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2); break; case OP_STOREI4_MEMBASE_IMM: amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_STOREI1_MEMBASE_REG: amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1); break; case OP_STOREI2_MEMBASE_REG: amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2); break; /* In AMD64 NaCl, pointers are 4 bytes, */ /* so STORE_* != STOREI8_*. Likewise below. */ case OP_STORE_MEMBASE_REG: amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, sizeof(gpointer)); break; case OP_STOREI8_MEMBASE_REG: amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 8); break; case OP_STOREI4_MEMBASE_REG: amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4); break; case OP_STORE_MEMBASE_IMM: /* In NaCl, this could be a PCONST type, which could */ /* mean a pointer type was copied directly into the */ /* lower 32-bits of inst_imm, so for InvalidPtr==-1 */ /* the value would be 0x00000000FFFFFFFF which is */ /* not proper for an imm32 unless you cast it. */ g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, (gint32)ins->inst_imm, sizeof(gpointer)); break; case OP_STOREI8_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8); break; case OP_LOAD_MEM: #ifdef MONO_ARCH_ILP32 /* In ILP32, pointers are 4 bytes, so separate these */ /* cases, use literal 8 below where we really want 8 */ amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm); amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, sizeof(gpointer)); break; #endif case OP_LOADI8_MEM: // FIXME: Decompose this earlier if (amd64_use_imm32 (ins->inst_imm)) amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 8); else { amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof(gpointer)); amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 8); } break; case OP_LOADI4_MEM: amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm); amd64_movsxd_reg_membase (code, ins->dreg, ins->dreg, 0); break; case OP_LOADU4_MEM: // FIXME: Decompose this earlier if (amd64_use_imm32 (ins->inst_imm)) amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4); else { amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof(gpointer)); amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4); } break; case OP_LOADU1_MEM: amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm); amd64_widen_membase (code, ins->dreg, ins->dreg, 0, FALSE, FALSE); break; case OP_LOADU2_MEM: /* For NaCl, pointers are 4 bytes, so separate these */ /* cases, use literal 8 below where we really want 8 */ amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm); amd64_widen_membase (code, ins->dreg, ins->dreg, 0, FALSE, TRUE); break; case OP_LOAD_MEMBASE: g_assert (amd64_is_imm32 (ins->inst_offset)); amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof(gpointer)); break; case OP_LOADI8_MEMBASE: /* Use literal 8 instead of sizeof pointer or */ /* register, we really want 8 for this opcode */ g_assert (amd64_is_imm32 (ins->inst_offset)); amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 8); break; case OP_LOADI4_MEMBASE: amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, 
ins->inst_offset); break; case OP_LOADU4_MEMBASE: amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4); break; case OP_LOADU1_MEMBASE: /* The cpu zero extends the result into 64 bits */ amd64_widen_membase_size (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE, 4); break; case OP_LOADI1_MEMBASE: amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE); break; case OP_LOADU2_MEMBASE: /* The cpu zero extends the result into 64 bits */ amd64_widen_membase_size (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE, 4); break; case OP_LOADI2_MEMBASE: amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE); break; case OP_AMD64_LOADI8_MEMINDEX: amd64_mov_reg_memindex_size (code, ins->dreg, ins->inst_basereg, 0, ins->inst_indexreg, 0, 8); break; case OP_LCONV_TO_I1: case OP_ICONV_TO_I1: case OP_SEXT_I1: amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE); break; case OP_LCONV_TO_I2: case OP_ICONV_TO_I2: case OP_SEXT_I2: amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE); break; case OP_LCONV_TO_U1: case OP_ICONV_TO_U1: amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE); break; case OP_LCONV_TO_U2: case OP_ICONV_TO_U2: amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE); break; case OP_ZEXT_I4: /* Clean out the upper word */ amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 4); break; case OP_SEXT_I4: amd64_movsxd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_COMPARE: case OP_LCOMPARE: amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); break; case OP_COMPARE_IMM: #if defined(MONO_ARCH_ILP32) /* Comparison of pointer immediates should be 4 bytes to avoid sign-extend problems */ g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4); break; #endif case OP_LCOMPARE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm); break; case OP_X86_COMPARE_REG_MEMBASE: amd64_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_X86_TEST_NULL: amd64_test_reg_reg_size (code, ins->sreg1, ins->sreg1, 4); break; case OP_AMD64_TEST_NULL: amd64_test_reg_reg (code, ins->sreg1, ins->sreg1); break; case OP_X86_ADD_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset, 4); break; case OP_X86_SUB_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset, 4); break; case OP_X86_AND_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset, 4); break; case OP_X86_OR_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset, 4); break; case OP_X86_XOR_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset, 4); break; case OP_X86_ADD_MEMBASE_IMM: /* FIXME: Make a 64 version too */ amd64_alu_membase_imm_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_X86_SUB_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_X86_AND_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_X86_OR_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); 
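/* The reg/imm and membase/imm ALU forms emitted in the cases around here encode at most an imm32, which the cpu sign-extends to 64 bits; hence the amd64_is_imm32 asserts. */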
amd64_alu_membase_imm_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_X86_XOR_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_X86_ADD_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4); break; case OP_X86_SUB_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4); break; case OP_X86_AND_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4); break; case OP_X86_OR_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4); break; case OP_X86_XOR_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4); break; case OP_X86_INC_MEMBASE: amd64_inc_membase_size (code, ins->inst_basereg, ins->inst_offset, 4); break; case OP_X86_INC_REG: amd64_inc_reg_size (code, ins->dreg, 4); break; case OP_X86_DEC_MEMBASE: amd64_dec_membase_size (code, ins->inst_basereg, ins->inst_offset, 4); break; case OP_X86_DEC_REG: amd64_dec_reg_size (code, ins->dreg, 4); break; case OP_X86_MUL_REG_MEMBASE: case OP_X86_MUL_MEMBASE_REG: amd64_imul_reg_membase_size (code, ins->sreg1, ins->sreg2, ins->inst_offset, 4); break; case OP_AMD64_ICOMPARE_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4); break; case OP_AMD64_ICOMPARE_MEMBASE_IMM: amd64_alu_membase_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_AMD64_COMPARE_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8); break; case OP_AMD64_COMPARE_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8); break; case OP_X86_COMPARE_MEMBASE8_IMM: amd64_alu_membase8_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_AMD64_ICOMPARE_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset, 4); break; case OP_AMD64_COMPARE_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset, 8); break; case OP_AMD64_ADD_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset, 8); break; case OP_AMD64_SUB_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset, 8); break; case OP_AMD64_AND_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset, 8); break; case OP_AMD64_OR_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset, 8); break; case OP_AMD64_XOR_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset, 8); break; case OP_AMD64_ADD_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8); break; case OP_AMD64_SUB_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8); break; case OP_AMD64_AND_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8); break; case OP_AMD64_OR_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_OR, 
ins->inst_basereg, ins->inst_offset, ins->sreg2, 8); break; case OP_AMD64_XOR_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8); break; case OP_AMD64_ADD_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8); break; case OP_AMD64_SUB_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8); break; case OP_AMD64_AND_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8); break; case OP_AMD64_OR_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8); break; case OP_AMD64_XOR_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8); break; case OP_BREAK: amd64_breakpoint (code); break; case OP_RELAXED_NOP: x86_prefix (code, X86_REP_PREFIX); x86_nop (code); break; case OP_HARD_NOP: x86_nop (code); break; case OP_NOP: case OP_DUMMY_USE: case OP_DUMMY_ICONST: case OP_DUMMY_I8CONST: case OP_DUMMY_R8CONST: case OP_DUMMY_R4CONST: case OP_NOT_REACHED: case OP_NOT_NULL: break; case OP_IL_SEQ_POINT: mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); break; case OP_SEQ_POINT: { if (ins->flags & MONO_INST_SINGLE_STEP_LOC) { MonoInst *var = cfg->arch.ss_tramp_var; guint8 *label; /* Load ss_tramp_var */ /* This is equal to &ss_trampoline */ amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8); /* Load the trampoline address */ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8); /* Call it if it is non-null */ amd64_test_reg_reg (code, AMD64_R11, AMD64_R11); label = code; amd64_branch8 (code, X86_CC_Z, 0, FALSE); amd64_call_reg (code, AMD64_R11); amd64_patch (label, code); } /* * This is the address which is saved in seq points, */ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); if (cfg->compile_aot) { const guint32 offset = code - cfg->native_code; guint32 val; MonoInst *info_var = cfg->arch.seq_point_info_var; guint8 *label; /* Load info var */ amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8); val = ((offset) * sizeof (target_mgreg_t)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs); /* Load the info->bp_addrs [offset], which is either NULL or the address of the breakpoint trampoline */ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, val, 8); amd64_test_reg_reg (code, AMD64_R11, AMD64_R11); label = code; amd64_branch8 (code, X86_CC_Z, 0, FALSE); /* Call the trampoline */ amd64_call_reg (code, AMD64_R11); amd64_patch (label, code); } else { MonoInst *var = cfg->arch.bp_tramp_var; guint8 *label; /* * Emit a test+branch against a constant, the constant will be overwritten * by mono_arch_set_breakpoint () to cause the test to fail. 
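 * While no breakpoint is set the immediate stays 0, the test sets ZF
 * and the branch skips the trampoline call; once a non-zero value is
 * patched into the mov, the test fails and execution falls through
 * into the call to the breakpoint trampoline.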
*/ amd64_mov_reg_imm (code, AMD64_R11, 0); amd64_test_reg_reg (code, AMD64_R11, AMD64_R11); label = code; amd64_branch8 (code, X86_CC_Z, 0, FALSE); g_assert (var); g_assert (var->opcode == OP_REGOFFSET); /* Load bp_tramp_var */ /* This is equal to &bp_trampoline */ amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8); /* Call the trampoline */ amd64_call_membase (code, AMD64_R11, 0); amd64_patch (label, code); } /* * Add an additional nop so skipping the bp doesn't cause the ip to point * to another IL offset. */ x86_nop (code); break; } case OP_ADDCC: case OP_LADDCC: case OP_LADD: amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2); break; case OP_ADC: amd64_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2); break; case OP_ADD_IMM: case OP_LADD_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm); break; case OP_ADC_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm); break; case OP_SUBCC: case OP_LSUBCC: case OP_LSUB: amd64_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2); break; case OP_SBB: amd64_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2); break; case OP_SUB_IMM: case OP_LSUB_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm); break; case OP_SBB_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm); break; case OP_LAND: amd64_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2); break; case OP_AND_IMM: case OP_LAND_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm); break; case OP_LMUL: amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MUL_IMM: case OP_LMUL_IMM: case OP_IMUL_IMM: { guint32 size = (ins->opcode == OP_IMUL_IMM) ? 
4 : 8; switch (ins->inst_imm) { case 2: /* MOV r1, r2 */ /* ADD r1, r1 */ if (ins->dreg != ins->sreg1) amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, size); amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg); break; case 3: /* LEA r1, [r2 + r2*2] */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1); break; case 5: /* LEA r1, [r2 + r2*4] */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); break; case 6: /* LEA r1, [r2 + r2*2] */ /* ADD r1, r1 */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1); amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg); break; case 9: /* LEA r1, [r2 + r2*8] */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 3); break; case 10: /* LEA r1, [r2 + r2*4] */ /* ADD r1, r1 */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg); break; case 12: /* LEA r1, [r2 + r2*2] */ /* SHL r1, 2 */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1); amd64_shift_reg_imm (code, X86_SHL, ins->dreg, 2); break; case 25: /* LEA r1, [r2 + r2*4] */ /* LEA r1, [r1 + r1*4] */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); amd64_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2); break; case 100: /* LEA r1, [r2 + r2*4] */ /* SHL r1, 2 */ /* LEA r1, [r1 + r1*4] */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); amd64_shift_reg_imm (code, X86_SHL, ins->dreg, 2); amd64_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2); break; default: amd64_imul_reg_reg_imm_size (code, ins->dreg, ins->sreg1, ins->inst_imm, size); break; } break; } case OP_LDIV: case OP_LREM: /* Regalloc magic makes the div/rem cases the same */ if (ins->sreg2 == AMD64_RDX) { amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8); amd64_cdq (code); amd64_div_membase (code, AMD64_RSP, -8, TRUE); } else { amd64_cdq (code); amd64_div_reg (code, ins->sreg2, TRUE); } break; case OP_LDIV_UN: case OP_LREM_UN: if (ins->sreg2 == AMD64_RDX) { amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8); amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX); amd64_div_membase (code, AMD64_RSP, -8, FALSE); } else { amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX); amd64_div_reg (code, ins->sreg2, FALSE); } break; case OP_IDIV: case OP_IREM: if (ins->sreg2 == AMD64_RDX) { amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8); amd64_cdq_size (code, 4); amd64_div_membase_size (code, AMD64_RSP, -8, TRUE, 4); } else { amd64_cdq_size (code, 4); amd64_div_reg_size (code, ins->sreg2, TRUE, 4); } break; case OP_IDIV_UN: case OP_IREM_UN: if (ins->sreg2 == AMD64_RDX) { amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8); amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX); amd64_div_membase_size (code, AMD64_RSP, -8, FALSE, 4); } else { amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX); amd64_div_reg_size (code, ins->sreg2, FALSE, 4); } break; case OP_LMUL_OVF: amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException"); break; case OP_LOR: amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2); break; case OP_OR_IMM: case OP_LOR_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm); break; case OP_LXOR: amd64_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2); break; case OP_XOR_IMM: case OP_LXOR_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_XOR, ins->sreg1, 
ins->inst_imm); break; case OP_LSHL: g_assert (ins->sreg2 == AMD64_RCX); amd64_shift_reg (code, X86_SHL, ins->dreg); break; case OP_LSHR: g_assert (ins->sreg2 == AMD64_RCX); amd64_shift_reg (code, X86_SAR, ins->dreg); break; case OP_SHR_IMM: case OP_LSHR_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm); break; case OP_SHR_UN_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4); break; case OP_LSHR_UN_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm); break; case OP_LSHR_UN: g_assert (ins->sreg2 == AMD64_RCX); amd64_shift_reg (code, X86_SHR, ins->dreg); break; case OP_SHL_IMM: case OP_LSHL_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm); break; case OP_IADDCC: case OP_IADD: amd64_alu_reg_reg_size (code, X86_ADD, ins->sreg1, ins->sreg2, 4); break; case OP_IADC: amd64_alu_reg_reg_size (code, X86_ADC, ins->sreg1, ins->sreg2, 4); break; case OP_IADD_IMM: amd64_alu_reg_imm_size (code, X86_ADD, ins->dreg, ins->inst_imm, 4); break; case OP_IADC_IMM: amd64_alu_reg_imm_size (code, X86_ADC, ins->dreg, ins->inst_imm, 4); break; case OP_ISUBCC: case OP_ISUB: amd64_alu_reg_reg_size (code, X86_SUB, ins->sreg1, ins->sreg2, 4); break; case OP_ISBB: amd64_alu_reg_reg_size (code, X86_SBB, ins->sreg1, ins->sreg2, 4); break; case OP_ISUB_IMM: amd64_alu_reg_imm_size (code, X86_SUB, ins->dreg, ins->inst_imm, 4); break; case OP_ISBB_IMM: amd64_alu_reg_imm_size (code, X86_SBB, ins->dreg, ins->inst_imm, 4); break; case OP_IAND: amd64_alu_reg_reg_size (code, X86_AND, ins->sreg1, ins->sreg2, 4); break; case OP_IAND_IMM: amd64_alu_reg_imm_size (code, X86_AND, ins->sreg1, ins->inst_imm, 4); break; case OP_IOR: amd64_alu_reg_reg_size (code, X86_OR, ins->sreg1, ins->sreg2, 4); break; case OP_IOR_IMM: amd64_alu_reg_imm_size (code, X86_OR, ins->sreg1, ins->inst_imm, 4); break; case OP_IXOR: amd64_alu_reg_reg_size (code, X86_XOR, ins->sreg1, ins->sreg2, 4); break; case OP_IXOR_IMM: amd64_alu_reg_imm_size (code, X86_XOR, ins->sreg1, ins->inst_imm, 4); break; case OP_INEG: amd64_neg_reg_size (code, ins->sreg1, 4); break; case OP_INOT: amd64_not_reg_size (code, ins->sreg1, 4); break; case OP_ISHL: g_assert (ins->sreg2 == AMD64_RCX); amd64_shift_reg_size (code, X86_SHL, ins->dreg, 4); break; case OP_ISHR: g_assert (ins->sreg2 == AMD64_RCX); amd64_shift_reg_size (code, X86_SAR, ins->dreg, 4); break; case OP_ISHR_IMM: amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4); break; case OP_ISHR_UN_IMM: amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4); break; case OP_ISHR_UN: g_assert (ins->sreg2 == AMD64_RCX); amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4); break; case OP_ISHL_IMM: amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4); break; case OP_IMUL: amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4); break; case OP_IMUL_OVF: amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException"); break; case OP_IMUL_OVF_UN: case OP_LMUL_OVF_UN: { /* the mul operation and the exception check should most likely be split */ int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE; int size = (ins->opcode == OP_IMUL_OVF_UN) ? 
4 : 8; /*g_assert (ins->sreg2 == X86_EAX); g_assert (ins->dreg == X86_EAX);*/ if (ins->sreg2 == X86_EAX) { non_eax_reg = ins->sreg1; } else if (ins->sreg1 == X86_EAX) { non_eax_reg = ins->sreg2; } else { /* no need to save since we're going to store to it anyway */ if (ins->dreg != X86_EAX) { saved_eax = TRUE; amd64_push_reg (code, X86_EAX); } amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, size); non_eax_reg = ins->sreg2; } if (ins->dreg == X86_EDX) { if (!saved_eax) { saved_eax = TRUE; amd64_push_reg (code, X86_EAX); } } else { saved_edx = TRUE; amd64_push_reg (code, X86_EDX); } amd64_mul_reg_size (code, non_eax_reg, FALSE, size); /* save before the check since pop and mov don't change the flags */ if (ins->dreg != X86_EAX) amd64_mov_reg_reg (code, ins->dreg, X86_EAX, size); if (saved_edx) amd64_pop_reg (code, X86_EDX); if (saved_eax) amd64_pop_reg (code, X86_EAX); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException"); break; } case OP_ICOMPARE: amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4); break; case OP_ICOMPARE_IMM: amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4); break; case OP_IBEQ: case OP_IBLT: case OP_IBGT: case OP_IBGE: case OP_IBLE: case OP_LBEQ: case OP_LBLT: case OP_LBGT: case OP_LBGE: case OP_LBLE: case OP_IBNE_UN: case OP_IBLT_UN: case OP_IBGT_UN: case OP_IBGE_UN: case OP_IBLE_UN: case OP_LBNE_UN: case OP_LBLT_UN: case OP_LBGT_UN: case OP_LBGE_UN: case OP_LBLE_UN: EMIT_COND_BRANCH (ins, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)]); break; case OP_CMOV_IEQ: case OP_CMOV_IGE: case OP_CMOV_IGT: case OP_CMOV_ILE: case OP_CMOV_ILT: case OP_CMOV_INE_UN: case OP_CMOV_IGE_UN: case OP_CMOV_IGT_UN: case OP_CMOV_ILE_UN: case OP_CMOV_ILT_UN: case OP_CMOV_LEQ: case OP_CMOV_LGE: case OP_CMOV_LGT: case OP_CMOV_LLE: case OP_CMOV_LLT: case OP_CMOV_LNE_UN: case OP_CMOV_LGE_UN: case OP_CMOV_LGT_UN: case OP_CMOV_LLE_UN: case OP_CMOV_LLT_UN: g_assert (ins->dreg == ins->sreg1); /* This needs to operate on 64 bit values */ amd64_cmov_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, ins->sreg2); break; case OP_LNOT: amd64_not_reg (code, ins->sreg1); break; case OP_LNEG: amd64_neg_reg (code, ins->sreg1); break; case OP_ICONST: case OP_I8CONST: if ((((guint64)ins->inst_c0) >> 32) == 0 && !mini_debug_options.single_imm_size) amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 4); else amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8); break; case OP_AOTCONST: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0); amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, sizeof(gpointer)); break; case OP_JUMP_TABLE: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0); amd64_mov_reg_imm_size (code, ins->dreg, 0, 8); break; case OP_MOVE: if (ins->dreg != ins->sreg1) amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (target_mgreg_t)); break; case OP_AMD64_SET_XMMREG_R4: { if (cfg->r4fp) { if (ins->dreg != ins->sreg1) amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg1); } else { amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1); } break; } case OP_AMD64_SET_XMMREG_R8: { if (ins->dreg != ins->sreg1) amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1); break; } case OP_TAILCALL_PARAMETER: // This opcode helps compute sizes, i.e. // of the subsequent OP_TAILCALL, but contributes no code. 
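// The size marker must be immediately followed by the tailcall it belongs to, hence the assert below.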
g_assert (ins->next); break; case OP_TAILCALL: case OP_TAILCALL_REG: case OP_TAILCALL_MEMBASE: { call = (MonoCallInst*)ins; int i, save_area_offset; gboolean tailcall_membase = (ins->opcode == OP_TAILCALL_MEMBASE); gboolean tailcall_reg = (ins->opcode == OP_TAILCALL_REG); g_assert (!cfg->method->save_lmf); max_len += AMD64_NREG * 4; max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER); code = realloc_code (cfg, max_len); // FIXME hardcoding RAX here is not ideal. if (tailcall_reg) { int const reg = ins->sreg1; g_assert (reg > -1); if (reg != AMD64_RAX) amd64_mov_reg_reg (code, AMD64_RAX, reg, 8); } else if (tailcall_membase) { int const reg = ins->sreg1; g_assert (reg > -1); amd64_mov_reg_membase (code, AMD64_RAX, reg, ins->inst_offset, 8); } else { if (cfg->compile_aot) { mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method); amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RIP, 0, 8); } else { // FIXME Patch data instead of code. guint32 pad_size = (guint32)((code + 2 - cfg->native_code) % 8); if (pad_size) amd64_padding (code, 8 - pad_size); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method); amd64_set_reg_template (code, AMD64_RAX); } } /* Restore callee saved registers */ save_area_offset = cfg->arch.reg_save_area_offset; for (i = 0; i < AMD64_NREG; ++i) if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & ((regmask_t)1 << i))) { amd64_mov_reg_membase (code, i, cfg->frame_reg, save_area_offset, 8); save_area_offset += 8; } if (cfg->arch.omit_fp) { if (cfg->arch.stack_alloc_size) amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size); // FIXME: if (call->stack_usage) NOT_IMPLEMENTED; } else { amd64_push_reg (code, AMD64_RAX); /* Copy arguments on the stack to our argument area */ // FIXME use rep mov for constant code size, before nonvolatiles // restored, first saving rsi, rdi into volatiles for (i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) { amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, i + 8, sizeof (target_mgreg_t)); amd64_mov_membase_reg (code, AMD64_RBP, ARGS_OFFSET + i, AMD64_RAX, sizeof (target_mgreg_t)); } amd64_pop_reg (code, AMD64_RAX); #ifdef TARGET_WIN32 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0); amd64_pop_reg (code, AMD64_RBP); mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP); #else amd64_leave (code); #endif } #ifdef TARGET_WIN32 // Redundant REX byte indicates a tailcall to the native unwinder. It means nothing to the processor. // https://github.com/dotnet/coreclr/blob/966dabb5bb3c4bf1ea885e1e8dc6528e8c64dc4f/src/unwinder/amd64/unwinder_amd64.cpp#L1394 // FIXME This should be jmp rip+32 for AOT direct to same assembly. // FIXME This should be jmp [rip+32] for AOT direct to not-same assembly (through data). // FIXME This should be jmp [rip+32] for JIT direct -- patch data instead of code. // This is only close to ideal for tailcall_membase, and even then it should // have a more dynamic register allocation. x86_imm_emit8 (code, 0x48); amd64_jump_reg (code, AMD64_RAX); #else // NT does not have varargs rax use, and NT ABI does not have red zone. // Use red-zone mov/jmp instead of push/ret to preserve call/ret speculation stack. // FIXME Just like NT the direct cases are not ideal.
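// The sequence below spills the target from RAX into the red zone at RSP-8, sets up the varargs state (AL), then tail-jumps indirectly through the spilled slot.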
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RAX, 8); code = amd64_handle_varargs_call (cfg, code, call, FALSE); amd64_jump_membase (code, AMD64_RSP, -8); #endif ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; break; } case OP_CHECK_THIS: /* ensure ins->sreg1 is not NULL */ amd64_alu_membase_imm_size (code, X86_CMP, ins->sreg1, 0, 0, 4); break; case OP_ARGLIST: { amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, cfg->sig_cookie); amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, sizeof(gpointer)); break; } case OP_CALL: case OP_FCALL: case OP_RCALL: case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: call = (MonoCallInst*)ins; code = amd64_handle_varargs_call (cfg, code, call, FALSE); code = emit_call (cfg, call, code, MONO_JIT_ICALL_ZeroIsReserved); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; code = emit_move_return_value (cfg, ins, code); break; case OP_FCALL_REG: case OP_RCALL_REG: case OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: call = (MonoCallInst*)ins; if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) { amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8); ins->sreg1 = AMD64_R11; } code = amd64_handle_varargs_call (cfg, code, call, TRUE); amd64_call_reg (code, ins->sreg1); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; code = emit_move_return_value (cfg, ins, code); break; case OP_FCALL_MEMBASE: case OP_RCALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: call = (MonoCallInst*)ins; amd64_call_membase (code, ins->sreg1, ins->inst_offset); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; code = emit_move_return_value (cfg, ins, code); break; case OP_DYN_CALL: { int i, limit_reg, index_reg, src_reg, dst_reg; MonoInst *var = cfg->dyn_call_var; guint8 *label; guint8 *buf [16]; g_assert (var->opcode == OP_REGOFFSET); /* r11 = args buffer filled by mono_arch_get_dyn_call_args () */ amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8); /* r10 = ftn */ amd64_mov_reg_reg (code, AMD64_R10, ins->sreg2, 8); /* Save args buffer */ amd64_mov_membase_reg (code, var->inst_basereg, var->inst_offset, AMD64_R11, 8); /* Set fp arg regs */ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, has_fp), sizeof (target_mgreg_t)); amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX); label = code; amd64_branch8 (code, X86_CC_Z, -1, 1); for (i = 0; i < FLOAT_PARAM_REGS; ++i) amd64_sse_movsd_reg_membase (code, i, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, fregs) + (i * sizeof (double))); amd64_patch (label, code); /* Allocate param area */ /* This doesn't need to be freed since OP_DYN_CALL is never called in a loop */ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, nstack_args), 8); amd64_shift_reg_imm (code, X86_SHL, AMD64_RAX, 3); amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, AMD64_RAX); /* Set stack args */ /* rax/rcx/rdx/r8/r9 is scratch */ limit_reg = AMD64_RAX; index_reg = AMD64_RCX; src_reg = AMD64_R8; dst_reg = AMD64_R9; amd64_mov_reg_membase (code, limit_reg, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, nstack_args), 8); amd64_mov_reg_imm (code, index_reg, 0); amd64_lea_membase (code, src_reg, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, regs) + ((PARAM_REGS) * sizeof (target_mgreg_t))); amd64_mov_reg_reg (code, dst_reg, AMD64_RSP, 8); buf [0] = 
code; x86_jump8 (code, 0); buf [1] = code; amd64_mov_reg_membase (code, AMD64_RDX, src_reg, 0, 8); amd64_mov_membase_reg (code, dst_reg, 0, AMD64_RDX, 8); amd64_alu_reg_imm (code, X86_ADD, index_reg, 1); amd64_alu_reg_imm (code, X86_ADD, src_reg, 8); amd64_alu_reg_imm (code, X86_ADD, dst_reg, 8); amd64_patch (buf [0], code); amd64_alu_reg_reg (code, X86_CMP, index_reg, limit_reg); buf [2] = code; x86_branch8 (code, X86_CC_LT, 0, FALSE); amd64_patch (buf [2], buf [1]); /* Set argument registers */ for (i = 0; i < PARAM_REGS; ++i) amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (i * sizeof (target_mgreg_t)), sizeof (target_mgreg_t)); /* Make the call */ amd64_call_reg (code, AMD64_R10); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; /* Save result */ amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8); amd64_mov_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, res), AMD64_RAX, 8); amd64_sse_movsd_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, fregs), AMD64_XMM0); amd64_sse_movsd_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, fregs) + sizeof (double), AMD64_XMM1); break; } case OP_AMD64_SAVE_SP_TO_LMF: { MonoInst *lmf_var = cfg->lmf_var; amd64_mov_membase_reg (code, lmf_var->inst_basereg, lmf_var->inst_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8); break; } case OP_X86_PUSH: g_assert_not_reached (); amd64_push_reg (code, ins->sreg1); break; case OP_X86_PUSH_IMM: g_assert_not_reached (); g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_push_imm (code, ins->inst_imm); break; case OP_X86_PUSH_MEMBASE: g_assert_not_reached (); amd64_push_membase (code, ins->inst_basereg, ins->inst_offset); break; case OP_X86_PUSH_OBJ: { int size = ALIGN_TO (ins->inst_imm, 8); g_assert_not_reached (); amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size); amd64_push_reg (code, AMD64_RDI); amd64_push_reg (code, AMD64_RSI); amd64_push_reg (code, AMD64_RCX); if (ins->inst_offset) amd64_lea_membase (code, AMD64_RSI, ins->inst_basereg, ins->inst_offset); else amd64_mov_reg_reg (code, AMD64_RSI, ins->inst_basereg, 8); amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, (3 * 8)); amd64_mov_reg_imm (code, AMD64_RCX, (size >> 3)); amd64_cld (code); amd64_prefix (code, X86_REP_PREFIX); amd64_movsd (code); amd64_pop_reg (code, AMD64_RCX); amd64_pop_reg (code, AMD64_RSI); amd64_pop_reg (code, AMD64_RDI); break; } case OP_GENERIC_CLASS_INIT: { guint8 *jump; g_assert (ins->sreg1 == MONO_AMD64_ARG_REG1); amd64_test_membase_imm_size (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoVTable, initialized), 1, 1); jump = code; amd64_branch8 (code, X86_CC_NZ, -1, 1); code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_generic_class_init); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; x86_patch (jump, code); break; } case OP_X86_LEA: amd64_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount); break; case OP_X86_LEA_MEMBASE: amd64_lea4_membase (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_AMD64_LEA_MEMBASE: amd64_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_X86_XCHG: amd64_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4); break; case OP_LOCALLOC: /* keep alignment */ amd64_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1); amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1)); code = mono_emit_stack_alloc 
(cfg, code, ins); amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8); if (cfg->param_area) amd64_alu_reg_imm (code, X86_ADD, ins->dreg, cfg->param_area); break; case OP_LOCALLOC_IMM: { guint32 size = ins->inst_imm; size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1); if (ins->flags & MONO_INST_INIT) { if (size < 64) { int i; amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size); amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); for (i = 0; i < size; i += 8) amd64_mov_membase_reg (code, AMD64_RSP, i, ins->dreg, 8); amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8); } else { amd64_mov_reg_imm (code, ins->dreg, size); ins->sreg1 = ins->dreg; code = mono_emit_stack_alloc (cfg, code, ins); amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8); } } else { amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size); amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8); } if (cfg->param_area) amd64_alu_reg_imm (code, X86_ADD, ins->dreg, cfg->param_area); break; } case OP_THROW: { amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8); code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_arch_throw_exception); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; break; } case OP_RETHROW: { amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8); code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_arch_rethrow_exception); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; break; } case OP_CALL_HANDLER: /* Align stack */ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb); amd64_call_imm (code, 0); /* * ins->inst_eh_blocks and bb->clause_holes are part of same GList. * Holes from bb->clause_holes will be added separately for the entire * basic block. Add only the rest of them. 
*/ for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev) mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb); /* Restore stack alignment */ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8); break; case OP_START_HANDLER: { /* Even though we're saving RSP, use sizeof */ /* gpointer because spvar is of type IntPtr */ /* see: mono_create_spvar_for_region */ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); amd64_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, AMD64_RSP, sizeof(gpointer)); if ((MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FINALLY) || MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FILTER) || MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FAULT)) && cfg->param_area) { amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT)); } break; } case OP_ENDFINALLY: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer)); amd64_ret (code); break; } case OP_ENDFILTER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer)); /* The local allocator will put the result into RAX */ amd64_ret (code); break; } case OP_GET_EX_OBJ: if (ins->dreg != AMD64_RAX) amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, sizeof (target_mgreg_t)); break; case OP_LABEL: ins->inst_c0 = code - cfg->native_code; break; case OP_BR: //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins); //if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins) //break; if (ins->inst_target_bb->native_offset) { amd64_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb); if (optimize_branch_pred && x86_is_imm8 (ins->inst_target_bb->max_offset - offset)) x86_jump8 (code, 0); else x86_jump32 (code, 0); } break; case OP_BR_REG: amd64_jump_reg (code, ins->sreg1); break; case OP_ICNEQ: case OP_ICGE: case OP_ICLE: case OP_ICGE_UN: case OP_ICLE_UN: case OP_CEQ: case OP_LCEQ: case OP_ICEQ: case OP_CLT: case OP_LCLT: case OP_ICLT: case OP_CGT: case OP_ICGT: case OP_LCGT: case OP_CLT_UN: case OP_LCLT_UN: case OP_ICLT_UN: case OP_CGT_UN: case OP_LCGT_UN: case OP_ICGT_UN: amd64_set_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, cc_signed_table [mono_opcode_to_cond (ins->opcode)]); amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); break; case OP_COND_EXC_EQ: case OP_COND_EXC_NE_UN: case OP_COND_EXC_LT: case OP_COND_EXC_LT_UN: case OP_COND_EXC_GT: case OP_COND_EXC_GT_UN: case OP_COND_EXC_GE: case OP_COND_EXC_GE_UN: case OP_COND_EXC_LE: case OP_COND_EXC_LE_UN: case OP_COND_EXC_IEQ: case OP_COND_EXC_INE_UN: case OP_COND_EXC_ILT: case OP_COND_EXC_ILT_UN: case OP_COND_EXC_IGT: case OP_COND_EXC_IGT_UN: case OP_COND_EXC_IGE: case OP_COND_EXC_IGE_UN: case OP_COND_EXC_ILE: case OP_COND_EXC_ILE_UN: EMIT_COND_SYSTEM_EXCEPTION (cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], (const char *)ins->inst_p1); break; case OP_COND_EXC_OV: case OP_COND_EXC_NO: case OP_COND_EXC_C: case OP_COND_EXC_NC: EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], (ins->opcode < OP_COND_EXC_NE_UN), (const char *)ins->inst_p1); break; case OP_COND_EXC_IOV: case OP_COND_EXC_INO: case 
OP_COND_EXC_IC: case OP_COND_EXC_INC: EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_IEQ], (ins->opcode < OP_COND_EXC_INE_UN), (const char *)ins->inst_p1); break; /* floating point opcodes */ case OP_R8CONST: { double d = *(double *)ins->inst_p0; if ((d == 0.0) && (mono_signbit (d) == 0)) { amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg); } else if (cfg->compile_aot && cfg->code_exec_only) { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8_GOT, ins->inst_p0); amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof(gpointer)); amd64_sse_movsd_reg_membase (code, ins->dreg, AMD64_R11, 0); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0); amd64_sse_movsd_reg_membase (code, ins->dreg, AMD64_RIP, 0); } break; } case OP_R4CONST: { float f = *(float *)ins->inst_p0; if ((f == 0.0) && (mono_signbit (f) == 0)) { if (cfg->r4fp) amd64_sse_xorps_reg_reg (code, ins->dreg, ins->dreg); else amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg); } else { if (cfg->compile_aot && cfg->code_exec_only) { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4_GOT, ins->inst_p0); amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof(gpointer)); amd64_sse_movss_reg_membase (code, ins->dreg, AMD64_R11, 0); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0); amd64_sse_movss_reg_membase (code, ins->dreg, AMD64_RIP, 0); } if (!cfg->r4fp) amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); } break; } case OP_STORER8_MEMBASE_REG: amd64_sse_movsd_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1); break; case OP_LOADR8_MEMBASE: amd64_sse_movsd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset); break; case OP_STORER4_MEMBASE_REG: if (cfg->r4fp) { amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1); } else { /* This requires a double->single conversion */ amd64_sse_cvtsd2ss_reg_reg (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1); amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, MONO_ARCH_FP_SCRATCH_REG); } break; case OP_LOADR4_MEMBASE: if (cfg->r4fp) { amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset); amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); } break; case OP_ICONV_TO_R4: if (cfg->r4fp) { amd64_sse_cvtsi2ss_reg_reg_size (code, ins->dreg, ins->sreg1, 4); } else { amd64_sse_cvtsi2ss_reg_reg_size (code, ins->dreg, ins->sreg1, 4); amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); } break; case OP_ICONV_TO_R8: amd64_sse_cvtsi2sd_reg_reg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_LCONV_TO_R4: if (cfg->r4fp) { amd64_sse_cvtsi2ss_reg_reg (code, ins->dreg, ins->sreg1); } else { amd64_sse_cvtsi2ss_reg_reg (code, ins->dreg, ins->sreg1); amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); } break; case OP_LCONV_TO_R8: amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_FCONV_TO_R4: if (cfg->r4fp) { amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1); } else { amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1); amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); } break; case OP_FCONV_TO_I1: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE); break; case OP_FCONV_TO_U1: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE); break; case OP_FCONV_TO_I2: code = emit_float_to_int (cfg, code, ins->dreg, 
ins->sreg1, 2, TRUE); break; case OP_FCONV_TO_U2: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE); break; case OP_FCONV_TO_U4: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE); break; case OP_FCONV_TO_I4: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE); break; case OP_FCONV_TO_I: case OP_FCONV_TO_I8: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE); break; case OP_RCONV_TO_I1: amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4); amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE); break; case OP_RCONV_TO_U1: amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4); amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); break; case OP_RCONV_TO_I2: amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4); amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE); break; case OP_RCONV_TO_U2: amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4); amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE); break; case OP_RCONV_TO_I4: amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_RCONV_TO_U4: // Use 8 as register size to get NaN/Inf conversion result truncated to 0 amd64_sse_cvtss2si_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_RCONV_TO_I8: case OP_RCONV_TO_I: amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 8); break; case OP_RCONV_TO_R8: amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_RCONV_TO_R4: if (ins->dreg != ins->sreg1) amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_LCONV_TO_R_UN: { guint8 *br [2]; /* Based on gcc code */ amd64_test_reg_reg (code, ins->sreg1, ins->sreg1); br [0] = code; x86_branch8 (code, X86_CC_S, 0, TRUE); /* Positive case */ amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1); br [1] = code; x86_jump8 (code, 0); amd64_patch (br [0], code); /* Negative case */ /* Save to the red zone */ amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RAX, 8); amd64_mov_membase_reg (code, AMD64_RSP, -16, AMD64_RCX, 8); amd64_mov_reg_reg (code, AMD64_RCX, ins->sreg1, 8); amd64_mov_reg_reg (code, AMD64_RAX, ins->sreg1, 8); amd64_alu_reg_imm (code, X86_AND, AMD64_RCX, 1); amd64_shift_reg_imm (code, X86_SHR, AMD64_RAX, 1); amd64_alu_reg_imm (code, X86_OR, AMD64_RAX, AMD64_RCX); amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, AMD64_RAX); amd64_sse_addsd_reg_reg (code, ins->dreg, ins->dreg); /* Restore */ amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, -16, 8); amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, -8, 8); amd64_patch (br [1], code); break; } case OP_LCONV_TO_OVF_U4: amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, TRUE, "OverflowException"); amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8); break; case OP_LCONV_TO_OVF_I4_UN: amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0x7fffffff); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, FALSE, "OverflowException"); amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8); break; case OP_FMOVE: if (ins->dreg != ins->sreg1) amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_RMOVE: if (ins->dreg != ins->sreg1) amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_MOVE_F_TO_I4: if (cfg->r4fp) { amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8); } else { amd64_sse_cvtsd2ss_reg_reg (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1); amd64_movd_reg_xreg_size (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG, 8); } break; case OP_MOVE_I4_TO_F:
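/* Copy the GPR bit pattern into the low lane of the xmm destination; when R4 values are modeled as doubles (!cfg->r4fp), widen to double afterwards. */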
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8); if (!cfg->r4fp) amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); break; case OP_MOVE_F_TO_I8: amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8); break; case OP_MOVE_I8_TO_F: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8); break; case OP_FADD: amd64_sse_addsd_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_FSUB: amd64_sse_subsd_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_FMUL: amd64_sse_mulsd_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_FDIV: amd64_sse_divsd_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_FNEG: { static double r8_0 = -0.0; g_assert (ins->sreg1 == ins->dreg); if (cfg->compile_aot && cfg->code_exec_only) { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8_GOT, &r8_0); amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof (target_mgreg_t)); amd64_sse_movsd_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_R11, 0); amd64_sse_xorpd_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, &r8_0); amd64_sse_xorpd_reg_membase (code, ins->dreg, AMD64_RIP, 0); } break; } case OP_ABS: { static guint64 d = 0x7fffffffffffffffUL; g_assert (ins->sreg1 == ins->dreg); if (cfg->compile_aot && cfg->code_exec_only) { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8_GOT, &d); amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof (target_mgreg_t)); amd64_sse_movsd_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_R11, 0); amd64_sse_andpd_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, &d); amd64_sse_andpd_reg_membase (code, ins->dreg, AMD64_RIP, 0); } break; } case OP_SQRT: EMIT_SSE2_FPFUNC (code, fsqrt, ins->dreg, ins->sreg1); break; case OP_RADD: amd64_sse_addss_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_RSUB: amd64_sse_subss_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_RMUL: amd64_sse_mulss_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_RDIV: amd64_sse_divss_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_RNEG: { static float r4_0 = -0.0; g_assert (ins->sreg1 == ins->dreg); if (cfg->compile_aot && cfg->code_exec_only) { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4_GOT, &r4_0); amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof (target_mgreg_t)); amd64_sse_movss_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_R11, 0); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, &r4_0); amd64_sse_movss_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_RIP, 0); } amd64_sse_xorps_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG); break; } case OP_IMIN: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4); amd64_cmov_reg_size (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2, 4); break; case OP_IMIN_UN: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4); amd64_cmov_reg_size (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2, 4); break; case OP_IMAX: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4); amd64_cmov_reg_size (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2, 4); break; case OP_IMAX_UN: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 
4); amd64_cmov_reg_size (code, X86_CC_LT, FALSE, ins->dreg, ins->sreg2, 4); break; case OP_LMIN: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); amd64_cmov_reg (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2); break; case OP_LMIN_UN: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); amd64_cmov_reg (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2); break; case OP_LMAX: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); amd64_cmov_reg (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2); break; case OP_LMAX_UN: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); amd64_cmov_reg (code, X86_CC_LT, FALSE, ins->dreg, ins->sreg2); break; case OP_X86_FPOP: break; case OP_FCOMPARE: /* * The two arguments are swapped because the fbranch instructions * depend on this for the non-sse case to work. */ amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1); break; case OP_RCOMPARE: /* * FIXME: Get rid of this. * The two arguments are swapped because the fbranch instructions * depend on this for the non-sse case to work. */ amd64_sse_comiss_reg_reg (code, ins->sreg2, ins->sreg1); break; case OP_FCNEQ: case OP_FCEQ: { /* zeroing the register at the start results in * shorter and faster code (we can also remove the widening op) */ guchar *unordered_check; amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2); unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); if (ins->opcode == OP_FCEQ) { amd64_set_reg (code, X86_CC_EQ, ins->dreg, FALSE); amd64_patch (unordered_check, code); } else { guchar *jump_to_end; amd64_set_reg (code, X86_CC_NE, ins->dreg, FALSE); jump_to_end = code; x86_jump8 (code, 0); amd64_patch (unordered_check, code); amd64_inc_reg (code, ins->dreg); amd64_patch (jump_to_end, code); } break; } case OP_FCLT: case OP_FCLT_UN: { /* zeroing the register at the start results in * shorter and faster code (we can also remove the widening op) */ amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1); if (ins->opcode == OP_FCLT_UN) { guchar *unordered_check = code; guchar *jump_to_end; x86_branch8 (code, X86_CC_P, 0, FALSE); amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE); jump_to_end = code; x86_jump8 (code, 0); amd64_patch (unordered_check, code); amd64_inc_reg (code, ins->dreg); amd64_patch (jump_to_end, code); } else { amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE); } break; } case OP_FCLE: { guchar *unordered_check; amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1); unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); amd64_set_reg (code, X86_CC_NB, ins->dreg, FALSE); amd64_patch (unordered_check, code); break; } case OP_FCGT: case OP_FCGT_UN: { /* zeroing the register at the start results in * shorter and faster code (we can also remove the widening op) */ guchar *unordered_check; amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1); if (ins->opcode == OP_FCGT) { unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE); amd64_patch (unordered_check, code); } else { 
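/* No parity branch is needed for the unordered variant: comisd sets CF on NaN, so the unsigned condition below is already true. */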
amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE); } break; } case OP_FCGE: { guchar *unordered_check; amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1); unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); amd64_set_reg (code, X86_CC_NA, ins->dreg, FALSE); amd64_patch (unordered_check, code); break; } case OP_RCEQ: case OP_RCGT: case OP_RCLT: case OP_RCLT_UN: case OP_RCGT_UN: { int x86_cond; amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); amd64_sse_comiss_reg_reg (code, ins->sreg2, ins->sreg1); switch (ins->opcode) { case OP_RCEQ: x86_cond = X86_CC_EQ; break; case OP_RCGT: x86_cond = X86_CC_LT; break; case OP_RCLT: x86_cond = X86_CC_GT; break; case OP_RCLT_UN: x86_cond = X86_CC_GT; break; case OP_RCGT_UN: x86_cond = X86_CC_LT; break; default: g_assert_not_reached (); break; } guchar *unordered_check; switch (ins->opcode) { case OP_RCEQ: case OP_RCGT: unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); amd64_set_reg (code, x86_cond, ins->dreg, FALSE); amd64_patch (unordered_check, code); break; case OP_RCLT_UN: case OP_RCGT_UN: { guchar *jump_to_end; unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); amd64_set_reg (code, x86_cond, ins->dreg, FALSE); jump_to_end = code; x86_jump8 (code, 0); amd64_patch (unordered_check, code); amd64_inc_reg (code, ins->dreg); amd64_patch (jump_to_end, code); break; } case OP_RCLT: amd64_set_reg (code, x86_cond, ins->dreg, FALSE); break; default: g_assert_not_reached (); break; } break; } case OP_FCLT_MEMBASE: case OP_FCGT_MEMBASE: case OP_FCLT_UN_MEMBASE: case OP_FCGT_UN_MEMBASE: case OP_FCEQ_MEMBASE: { guchar *unordered_check, *jump_to_end; int x86_cond; amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); amd64_sse_comisd_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset); switch (ins->opcode) { case OP_FCEQ_MEMBASE: x86_cond = X86_CC_EQ; break; case OP_FCLT_MEMBASE: case OP_FCLT_UN_MEMBASE: x86_cond = X86_CC_LT; break; case OP_FCGT_MEMBASE: case OP_FCGT_UN_MEMBASE: x86_cond = X86_CC_GT; break; default: g_assert_not_reached (); } unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); amd64_set_reg (code, x86_cond, ins->dreg, FALSE); switch (ins->opcode) { case OP_FCEQ_MEMBASE: case OP_FCLT_MEMBASE: case OP_FCGT_MEMBASE: amd64_patch (unordered_check, code); break; case OP_FCLT_UN_MEMBASE: case OP_FCGT_UN_MEMBASE: jump_to_end = code; x86_jump8 (code, 0); amd64_patch (unordered_check, code); amd64_inc_reg (code, ins->dreg); amd64_patch (jump_to_end, code); break; default: break; } break; } case OP_FBEQ: { guchar *jump = code; x86_branch8 (code, X86_CC_P, 0, TRUE); EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE); amd64_patch (jump, code); break; } case OP_FBNE_UN: /* Branch if C013 != 100 */ /* branch if !ZF or (PF|CF) */ EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE); EMIT_COND_BRANCH (ins, X86_CC_P, FALSE); EMIT_COND_BRANCH (ins, X86_CC_B, FALSE); break; case OP_FBLT: EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE); break; case OP_FBLT_UN: EMIT_COND_BRANCH (ins, X86_CC_P, FALSE); EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE); break; case OP_FBGT: case OP_FBGT_UN: if (ins->opcode == OP_FBGT) { guchar *br1; /* skip branch if C1=1 */ br1 = code; x86_branch8 (code, X86_CC_P, 0, FALSE); /* branch if (C0 | C3) = 1 */ EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE); amd64_patch (br1, code); break; } else { EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE); } break; case OP_FBGE: { /* Branch if C013 == 100 or 001 */ guchar *br1; /* skip branch if C1=1 */ br1 = code; 
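/* Remember where the parity branch starts; amd64_patch redirects it past the real branch so unordered (NaN) compares fall through. */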
x86_branch8 (code, X86_CC_P, 0, FALSE); /* branch if (C0 | C3) = 1 */ EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE); amd64_patch (br1, code); break; } case OP_FBGE_UN: /* Branch if C013 == 000 */ EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE); break; case OP_FBLE: { /* Branch if C013=000 or 100 */ guchar *br1; /* skip branch if C1=1 */ br1 = code; x86_branch8 (code, X86_CC_P, 0, FALSE); /* branch if C0=0 */ EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE); amd64_patch (br1, code); break; } case OP_FBLE_UN: /* Branch if C013 != 001 */ EMIT_COND_BRANCH (ins, X86_CC_P, FALSE); EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE); break; case OP_CKFINITE: /* Transfer value to the fp stack */ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 16); amd64_movsd_membase_reg (code, AMD64_RSP, 0, ins->sreg1); amd64_fld_membase (code, AMD64_RSP, 0, TRUE); amd64_push_reg (code, AMD64_RAX); amd64_fxam (code); amd64_fnstsw (code); amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0x4100); amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0); amd64_pop_reg (code, AMD64_RAX); amd64_fstp (code, 0); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException"); amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16); break; case OP_TLS_GET: { code = mono_amd64_emit_tls_get (code, ins->dreg, ins->inst_offset); break; } case OP_TLS_SET: { code = mono_amd64_emit_tls_set (code, ins->sreg1, ins->inst_offset); break; } case OP_MEMORY_BARRIER: { if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) x86_mfence (code); break; } case OP_ATOMIC_ADD_I4: case OP_ATOMIC_ADD_I8: { int dreg = ins->dreg; guint32 size = (ins->opcode == OP_ATOMIC_ADD_I4) ? 4 : 8; if ((dreg == ins->sreg2) || (dreg == ins->inst_basereg)) dreg = AMD64_R11; amd64_mov_reg_reg (code, dreg, ins->sreg2, size); amd64_prefix (code, X86_LOCK_PREFIX); amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size); /* dreg contains the old value, add with sreg2 value */ amd64_alu_reg_reg_size (code, X86_ADD, dreg, ins->sreg2, size); if (ins->dreg != dreg) amd64_mov_reg_reg (code, ins->dreg, dreg, size); break; } case OP_ATOMIC_EXCHANGE_I4: case OP_ATOMIC_EXCHANGE_I8: { guint32 size = ins->opcode == OP_ATOMIC_EXCHANGE_I4 ? 4 : 8; /* LOCK prefix is implied. */ amd64_mov_reg_reg (code, GP_SCRATCH_REG, ins->sreg2, size); amd64_xchg_membase_reg_size (code, ins->sreg1, ins->inst_offset, GP_SCRATCH_REG, size); amd64_mov_reg_reg (code, ins->dreg, GP_SCRATCH_REG, size); break; } case OP_ATOMIC_CAS_I4: case OP_ATOMIC_CAS_I8: { guint32 size; if (ins->opcode == OP_ATOMIC_CAS_I8) size = 8; else size = 4; /* * See http://msdn.microsoft.com/en-us/magazine/cc302329.aspx for * an explanation of how this works. 
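	 * In short: RAX holds the comparand; lock cmpxchg stores sreg2 into the memory word only if it still equals RAX, and the old value ends up in RAX either way.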
*/ g_assert (ins->sreg3 == AMD64_RAX); g_assert (ins->sreg1 != AMD64_RAX); g_assert (ins->sreg1 != ins->sreg2); amd64_prefix (code, X86_LOCK_PREFIX); amd64_cmpxchg_membase_reg_size (code, ins->sreg1, ins->inst_offset, ins->sreg2, size); if (ins->dreg != AMD64_RAX) amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, size); break; } case OP_ATOMIC_LOAD_I1: { amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE); break; } case OP_ATOMIC_LOAD_U1: { amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE); break; } case OP_ATOMIC_LOAD_I2: { amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE); break; } case OP_ATOMIC_LOAD_U2: { amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE); break; } case OP_ATOMIC_LOAD_I4: { amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset); break; } case OP_ATOMIC_LOAD_U4: case OP_ATOMIC_LOAD_I8: case OP_ATOMIC_LOAD_U8: { amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, ins->opcode == OP_ATOMIC_LOAD_U4 ? 4 : 8); break; } case OP_ATOMIC_LOAD_R4: { if (cfg->r4fp) { amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset); amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); } break; } case OP_ATOMIC_LOAD_R8: { amd64_sse_movsd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset); break; } case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_U1: case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_U2: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U4: case OP_ATOMIC_STORE_I8: case OP_ATOMIC_STORE_U8: { int size; switch (ins->opcode) { case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_U1: size = 1; break; case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_U2: size = 2; break; case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U4: size = 4; break; case OP_ATOMIC_STORE_I8: case OP_ATOMIC_STORE_U8: size = 8; break; } amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, size); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) x86_mfence (code); break; } case OP_ATOMIC_STORE_R4: { if (cfg->r4fp) { amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1); } else { amd64_sse_cvtsd2ss_reg_reg (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1); amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, MONO_ARCH_FP_SCRATCH_REG); } if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) x86_mfence (code); break; } case OP_ATOMIC_STORE_R8: { x86_nop (code); x86_nop (code); amd64_sse_movsd_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1); x86_nop (code); x86_nop (code); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) x86_mfence (code); break; } case OP_CARD_TABLE_WBARRIER: { int ptr = ins->sreg1; int value = ins->sreg2; guchar *br = 0; int nursery_shift, card_table_shift; gpointer card_table_mask; size_t nursery_size; gpointer card_table = mono_gc_get_card_table (&card_table_shift, &card_table_mask); guint64 nursery_start = (guint64)mono_gc_get_nursery (&nursery_shift, &nursery_size); guint64 shifted_nursery_start = nursery_start >> nursery_shift; /* If either points to the stack we can simply avoid the WB. This happens due to * optimizations revealing a stack store that was not visible when op_cardtable was emitted.
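	 * Stack locations are scanned as GC roots on every collection, so stores to them never need card marking.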
*/ if (ins->sreg1 == AMD64_RSP || ins->sreg2 == AMD64_RSP) continue; /* * We need one register we can clobber; we choose EDX and make sreg1 * fixed EAX to work around limitations in the local register allocator. * sreg2 might get allocated to EDX, but that is not a problem since * we use it before clobbering EDX. */ g_assert (ins->sreg1 == AMD64_RAX); /* * This is the code we produce: * * edx = value * edx >>= nursery_shift * cmp edx, (nursery_start >> nursery_shift) * jne done * edx = ptr * edx >>= card_table_shift * edx += cardtable * [edx] = 1 * done: */ if (mono_gc_card_table_nursery_check ()) { if (value != AMD64_RDX) amd64_mov_reg_reg (code, AMD64_RDX, value, 8); amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, nursery_shift); if (shifted_nursery_start >> 31) { /* * The value we need to compare against is 64 bits, so we need * another spare register. We use RBX, which we save and * restore. */ amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RBX, 8); amd64_mov_reg_imm (code, AMD64_RBX, shifted_nursery_start); amd64_alu_reg_reg (code, X86_CMP, AMD64_RDX, AMD64_RBX); amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RSP, -8, 8); } else { amd64_alu_reg_imm (code, X86_CMP, AMD64_RDX, shifted_nursery_start); } br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE); } amd64_mov_reg_reg (code, AMD64_RDX, ptr, 8); amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, card_table_shift); if (card_table_mask) amd64_alu_reg_imm (code, X86_AND, AMD64_RDX, (guint32)(guint64)card_table_mask); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, card_table); amd64_alu_reg_membase (code, X86_ADD, AMD64_RDX, AMD64_RIP, 0); amd64_mov_membase_imm (code, AMD64_RDX, 0, 1, 1); if (mono_gc_card_table_nursery_check ()) x86_patch (br, code); break; } #ifdef MONO_ARCH_SIMD_INTRINSICS /* TODO: Some of these IR opcodes are marked as no clobber when they indeed do clobber.
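 * (Many of the two-operand forms below write their result into sreg1, which doubles as the destination.)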
*/ case OP_ADDPS: amd64_sse_addps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_DIVPS: amd64_sse_divps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MULPS: amd64_sse_mulps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_SUBPS: amd64_sse_subps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MAXPS: amd64_sse_maxps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MINPS: amd64_sse_minps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_COMPPS: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7); amd64_sse_cmpps_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ANDPS: amd64_sse_andps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_ANDNPS: amd64_sse_andnps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_ORPS: amd64_sse_orps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_XORPS: amd64_sse_xorps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_SQRTPS: amd64_sse_sqrtps_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_RSQRTPS: amd64_sse_rsqrtps_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_RCPPS: amd64_sse_rcpps_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_ADDSUBPS: amd64_sse_addsubps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_HADDPS: amd64_sse_haddps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_HSUBPS: amd64_sse_hsubps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_DUPPS_HIGH: amd64_sse_movshdup_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_DUPPS_LOW: amd64_sse_movsldup_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_PSHUFLEW_HIGH: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); amd64_sse_pshufhw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_PSHUFLEW_LOW: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); amd64_sse_pshuflw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_PSHUFLED: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_SHUFPS: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); amd64_sse_shufps_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_SHUFPD: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0x3); amd64_sse_shufpd_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ADDPD: amd64_sse_addpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_DIVPD: amd64_sse_divpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MULPD: amd64_sse_mulpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_SUBPD: amd64_sse_subpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MAXPD: amd64_sse_maxpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MINPD: amd64_sse_minpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_COMPPD: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7); amd64_sse_cmppd_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ANDPD: amd64_sse_andpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_ANDNPD: amd64_sse_andnpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_ORPD: amd64_sse_orpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_XORPD: amd64_sse_xorpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_SQRTPD: amd64_sse_sqrtpd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_ADDSUBPD: amd64_sse_addsubpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_HADDPD: amd64_sse_haddpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_HSUBPD: 
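/* SSE3 horizontal subtract of packed doubles. */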
amd64_sse_hsubpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_DUPPD: amd64_sse_movddup_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_EXTRACT_MASK: amd64_sse_pmovmskb_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_PAND: amd64_sse_pand_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PANDN: amd64_sse_pandn_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_POR: amd64_sse_por_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PXOR: amd64_sse_pxor_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDB: amd64_sse_paddb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDW: amd64_sse_paddw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDD: amd64_sse_paddd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDQ: amd64_sse_paddq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBB: amd64_sse_psubb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBW: amd64_sse_psubw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBD: amd64_sse_psubd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBQ: amd64_sse_psubq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMAXB_UN: amd64_sse_pmaxub_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMAXW_UN: amd64_sse_pmaxuw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMAXD_UN: amd64_sse_pmaxud_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMAXB: amd64_sse_pmaxsb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMAXW: amd64_sse_pmaxsw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMAXD: amd64_sse_pmaxsd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PAVGB_UN: amd64_sse_pavgb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PAVGW_UN: amd64_sse_pavgw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMINB_UN: amd64_sse_pminub_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMINW_UN: amd64_sse_pminuw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMIND_UN: amd64_sse_pminud_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMINB: amd64_sse_pminsb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMINW: amd64_sse_pminsw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMIND: amd64_sse_pminsd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQB: amd64_sse_pcmpeqb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQW: amd64_sse_pcmpeqw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQD: amd64_sse_pcmpeqd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQQ: amd64_sse_pcmpeqq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTB: amd64_sse_pcmpgtb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTW: amd64_sse_pcmpgtw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTD: amd64_sse_pcmpgtd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTQ: amd64_sse_pcmpgtq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUM_ABS_DIFF: amd64_sse_psadbw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWB: amd64_sse_punpcklbw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWW: amd64_sse_punpcklwd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWD: amd64_sse_punpckldq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWQ: amd64_sse_punpcklqdq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWPS: amd64_sse_unpcklps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWPD: amd64_sse_unpcklpd_reg_reg (code, ins->sreg1, ins->sreg2); 
break; case OP_UNPACK_HIGHB: amd64_sse_punpckhbw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHW: amd64_sse_punpckhwd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHD: amd64_sse_punpckhdq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHQ: amd64_sse_punpckhqdq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHPS: amd64_sse_unpckhps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHPD: amd64_sse_unpckhpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PACKW: amd64_sse_packsswb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PACKD: amd64_sse_packssdw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PACKW_UN: amd64_sse_packuswb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PACKD_UN: amd64_sse_packusdw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDB_SAT_UN: amd64_sse_paddusb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBB_SAT_UN: amd64_sse_psubusb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDW_SAT_UN: amd64_sse_paddusw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBW_SAT_UN: amd64_sse_psubusw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDB_SAT: amd64_sse_paddsb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBB_SAT: amd64_sse_psubsb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDW_SAT: amd64_sse_paddsw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBW_SAT: amd64_sse_psubsw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMULW: amd64_sse_pmullw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMULD: amd64_sse_pmulld_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMULQ: amd64_sse_pmuludq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMULW_HIGH_UN: amd64_sse_pmulhuw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMULW_HIGH: amd64_sse_pmulhw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSHRW: amd64_sse_psrlw_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHRW_REG: amd64_sse_psrlw_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_PSARW: amd64_sse_psraw_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSARW_REG: amd64_sse_psraw_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_PSHLW: amd64_sse_psllw_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHLW_REG: amd64_sse_psllw_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_PSHRD: amd64_sse_psrld_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHRD_REG: amd64_sse_psrld_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_PSARD: amd64_sse_psrad_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSARD_REG: amd64_sse_psrad_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_PSHLD: amd64_sse_pslld_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHLD_REG: amd64_sse_pslld_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_PSHRQ: amd64_sse_psrlq_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHRQ_REG: amd64_sse_psrlq_reg_reg (code, ins->dreg, ins->sreg2); break; /*TODO: This is a part of the sse spec but not added case OP_PSARQ: amd64_sse_psraq_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSARQ_REG: amd64_sse_psraq_reg_reg (code, ins->dreg, ins->sreg2); break; */ case OP_PSHLQ: amd64_sse_psllq_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHLQ_REG: amd64_sse_psllq_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_CVTDQ2PD: amd64_sse_cvtdq2pd_reg_reg (code, ins->dreg, ins->sreg1); break; case
OP_CVTDQ2PS: amd64_sse_cvtdq2ps_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_CVTPD2DQ: amd64_sse_cvtpd2dq_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_CVTPD2PS: amd64_sse_cvtpd2ps_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_CVTPS2DQ: amd64_sse_cvtps2dq_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_CVTPS2PD: amd64_sse_cvtps2pd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_CVTTPD2DQ: amd64_sse_cvttpd2dq_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_CVTTPS2DQ: amd64_sse_cvttps2dq_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_ICONV_TO_X: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_EXTRACT_I4: amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_EXTRACT_I8: if (ins->inst_c0) { amd64_movhlps_reg_reg (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1); amd64_movd_reg_xreg_size (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG, 8); } else { amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8); } break; case OP_EXTRACT_I1: amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4); if (ins->inst_c0) amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_c0 * 8); amd64_widen_reg (code, ins->dreg, ins->dreg, ins->inst_c1 == MONO_TYPE_I1, FALSE); break; case OP_EXTRACT_I2: /*amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4); if (ins->inst_c0) amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, 16, 4);*/ amd64_sse_pextrw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); amd64_widen_reg_size (code, ins->dreg, ins->dreg, ins->inst_c1 == MONO_TYPE_I2, TRUE, 4); break; case OP_EXTRACT_R8: if (ins->inst_c0) amd64_movhlps_reg_reg (code, ins->dreg, ins->sreg1); else amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_INSERT_I2: amd64_sse_pinsrw_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_EXTRACTX_U2: amd64_sse_pextrw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_INSERTX_U1_SLOW: /*sreg1 is the extracted ireg (scratch) /sreg2 is the to be inserted ireg (scratch) /dreg is the xreg to receive the value*/ /*clear the bits from the extracted word*/ amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_c0 & 1 ? 
0x00FF : 0xFF00); /*shift the value to insert if needed*/ if (ins->inst_c0 & 1) amd64_shift_reg_imm_size (code, X86_SHL, ins->sreg2, 8, 4); /*join them together*/ amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2); amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0 / 2); break; case OP_INSERTX_I4_SLOW: amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2); amd64_shift_reg_imm (code, X86_SHR, ins->sreg2, 16); amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2 + 1); break; case OP_INSERTX_I8_SLOW: amd64_movd_xreg_reg_size(code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg2, 8); if (ins->inst_c0) amd64_movlhps_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG); else amd64_sse_movsd_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG); break; case OP_INSERTX_R4_SLOW: switch (ins->inst_c0) { case 0: if (cfg->r4fp) amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2); else amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2); break; case 1: amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3)); if (cfg->r4fp) amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2); else amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3)); break; case 2: amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3)); if (cfg->r4fp) amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2); else amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3)); break; case 3: amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0)); if (cfg->r4fp) amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2); else amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0)); break; } break; case OP_INSERTX_R8_SLOW: if (ins->inst_c0) amd64_movlhps_reg_reg (code, ins->dreg, ins->sreg2); else amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_STOREX_MEMBASE_REG: case OP_STOREX_MEMBASE: amd64_sse_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1); break; case OP_LOADX_MEMBASE: amd64_sse_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_LOADX_ALIGNED_MEMBASE: amd64_sse_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_STOREX_ALIGNED_MEMBASE_REG: amd64_sse_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1); break; case OP_STOREX_NTA_MEMBASE_REG: amd64_sse_movntps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_PREFETCH_MEMBASE: amd64_sse_prefetch_reg_membase (code, ins->backend.arg_info, ins->sreg1, ins->inst_offset); break; case OP_XMOVE: /*FIXME the peephole pass should have killed this*/ if (ins->dreg != ins->sreg1) amd64_sse_movaps_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_XZERO: amd64_sse_pxor_reg_reg (code, ins->dreg, ins->dreg); break; case OP_XONES: amd64_sse_pcmpeqb_reg_reg (code, ins->dreg, ins->dreg); break; case OP_ICONV_TO_R4_RAW: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4); if (!cfg->r4fp) amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); break; case OP_FCONV_TO_R8_X: amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_XCONV_R8_TO_I4: amd64_sse_cvttsd2si_reg_xreg_size (code, 
ins->dreg, ins->sreg1, 4); switch (ins->backend.source_opcode) { case OP_FCONV_TO_I1: amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE); break; case OP_FCONV_TO_U1: amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); break; case OP_FCONV_TO_I2: amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE); break; case OP_FCONV_TO_U2: amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE); break; } break; case OP_EXPAND_I2: amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg1, 0); amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg1, 1); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0); break; case OP_EXPAND_I4: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0); break; case OP_EXPAND_I8: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0x44); break; case OP_EXPAND_R4: if (cfg->r4fp) { amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1); } else { amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1); amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->dreg); } amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0); break; case OP_EXPAND_R8: amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0x44); break; case OP_SSE41_ROUNDP: { if (ins->inst_c1 == MONO_TYPE_R8) amd64_sse_roundpd_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); else g_assert_not_reached (); // roundps, but it's not used anywhere for non-llvm back-end yet. break; } #endif case OP_LZCNT32: amd64_sse_lzcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_LZCNT64: amd64_sse_lzcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 8); break; case OP_POPCNT32: amd64_sse_popcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_POPCNT64: amd64_sse_popcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 8); break; case OP_LIVERANGE_START: { if (cfg->verbose_level > 1) printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code; break; } case OP_LIVERANGE_END: { if (cfg->verbose_level > 1) printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code; break; } case OP_GC_SAFE_POINT: { guint8 *br [1]; amd64_test_membase_imm_size (code, ins->sreg1, 0, 1, 4); br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE); code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_threads_state_poll); amd64_patch (br[0], code); break; } case OP_GC_LIVENESS_DEF: case OP_GC_LIVENESS_USE: case OP_GC_PARAM_SLOT_LIVENESS_DEF: ins->backend.pc_offset = code - cfg->native_code; break; case OP_GC_SPILL_SLOT_LIVENESS_DEF: ins->backend.pc_offset = code - cfg->native_code; bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins); break; case OP_GET_LAST_ERROR: code = emit_get_last_error(code, ins->dreg); break; case OP_FILL_PROF_CALL_CTX: for (int i = 0; i < AMD64_NREG; i++) if (AMD64_IS_CALLEE_SAVED_REG (i) || i == AMD64_RSP) amd64_mov_membase_reg (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, gregs) + i * sizeof (target_mgreg_t), i, sizeof (target_mgreg_t)); break; default: g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__); g_assert_not_reached (); } g_assertf ((code - cfg->native_code - offset) <= max_len, 
"wrong maximal instruction length of instruction %s (expected %d, got %d)", mono_inst_name (ins->opcode), max_len, (int)(code - cfg->native_code - offset)); } set_code_cursor (cfg, code); } #endif /* DISABLE_JIT */ G_BEGIN_DECLS void __chkstk (void); void ___chkstk_ms (void); G_END_DECLS void mono_arch_register_lowlevel_calls (void) { /* The signature doesn't matter */ mono_register_jit_icall (mono_amd64_throw_exception, mono_icall_sig_void, TRUE); #if defined(TARGET_WIN32) || defined(HOST_WIN32) #if _MSC_VER mono_register_jit_icall_info (&mono_get_jit_icall_info ()->mono_chkstk_win64, __chkstk, "mono_chkstk_win64", NULL, TRUE, "__chkstk"); #else mono_register_jit_icall_info (&mono_get_jit_icall_info ()->mono_chkstk_win64, ___chkstk_ms, "mono_chkstk_win64", NULL, TRUE, "___chkstk_ms"); #endif #endif } void mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target) { unsigned char *ip = ji->ip.i + code; /* * Debug code to help track down problems where the target of a near call is * is not valid. */ if (amd64_is_near_call (ip)) { gint64 disp = (guint8*)target - (guint8*)ip; if (!amd64_is_imm32 (disp)) { printf ("TYPE: %d\n", ji->type); switch (ji->type) { case MONO_PATCH_INFO_JIT_ICALL_ID: printf ("V: %s\n", mono_find_jit_icall_info (ji->data.jit_icall_id)->name); break; case MONO_PATCH_INFO_METHOD_JUMP: case MONO_PATCH_INFO_METHOD: printf ("V: %s\n", ji->data.method->name); break; default: break; } } } amd64_patch (ip, (gpointer)target); } #ifndef DISABLE_JIT static int get_max_epilog_size (MonoCompile *cfg) { int max_epilog_size = 16; if (cfg->method->save_lmf) max_epilog_size += 256; max_epilog_size += (AMD64_NREG * 2); return max_epilog_size; } /* * This macro is used for testing whenever the unwinder works correctly at every point * where an async exception can happen. */ /* This will generate a SIGSEGV at the given point in the code */ #define async_exc_point(code) do { \ if (mono_inject_async_exc_method && mono_method_desc_full_match (mono_inject_async_exc_method, cfg->method)) { \ if (cfg->arch.async_point_count == mono_inject_async_exc_pos) \ amd64_mov_reg_mem (code, AMD64_RAX, 0, 4); \ cfg->arch.async_point_count ++; \ } \ } while (0) #ifdef TARGET_WIN32 static guint8 * emit_prolog_setup_sp_win64 (MonoCompile *cfg, guint8 *code, int alloc_size, int *cfa_offset_input) { int cfa_offset = *cfa_offset_input; /* Allocate windows stack frame using stack probing method */ if (alloc_size) { if (alloc_size >= 0x1000) { amd64_mov_reg_imm (code, AMD64_RAX, alloc_size); code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_chkstk_win64); } amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size); if (cfg->arch.omit_fp) { cfa_offset += alloc_size; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); async_exc_point (code); } // NOTE, in a standard win64 prolog the alloc unwind info is always emitted, but since mono // uses a frame pointer with negative offsets and a standard win64 prolog assumes positive offsets, we can't // emit sp alloc unwind metadata since the native OS unwinder will incorrectly restore sp. Excluding the alloc // metadata on the other hand won't give the OS the information so it can just restore the frame pointer to sp and // that will retrieve the expected results. 
if (cfg->arch.omit_fp) mono_emit_unwind_op_sp_alloc (cfg, code, alloc_size); } *cfa_offset_input = cfa_offset; set_code_cursor (cfg, code); return code; } #endif /* TARGET_WIN32 */ guint8 * mono_arch_emit_prolog (MonoCompile *cfg) { MonoMethod *method = cfg->method; MonoBasicBlock *bb; MonoMethodSignature *sig; MonoInst *ins; int alloc_size, pos, i, cfa_offset, quad, max_epilog_size, save_area_offset; guint8 *code; CallInfo *cinfo; MonoInst *lmf_var = cfg->lmf_var; gboolean args_clobbered = FALSE; cfg->code_size = MAX (cfg->header->code_size * 4, 1024); code = cfg->native_code = (unsigned char *)g_malloc (cfg->code_size); /* Amount of stack space allocated by register saving code */ pos = 0; /* Offset between RSP and the CFA */ cfa_offset = 0; /* * The prolog consists of the following parts: * FP present: * - push rbp * - mov rbp, rsp * - save callee saved regs using moves * - allocate frame * - save rgctx if needed * - save lmf if needed * FP not present: * - allocate frame * - save rgctx if needed * - save lmf if needed * - save callee saved regs using moves */ // CFA = sp + 8 cfa_offset = 8; mono_emit_unwind_op_def_cfa (cfg, code, AMD64_RSP, 8); // IP saved at CFA - 8 mono_emit_unwind_op_offset (cfg, code, AMD64_RIP, -cfa_offset); async_exc_point (code); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF); if (!cfg->arch.omit_fp) { amd64_push_reg (code, AMD64_RBP); cfa_offset += 8; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); mono_emit_unwind_op_offset (cfg, code, AMD64_RBP, - cfa_offset); async_exc_point (code); /* These are handled automatically by the stack marking code */ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF); amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t)); mono_emit_unwind_op_def_cfa_reg (cfg, code, AMD64_RBP); mono_emit_unwind_op_fp_alloc (cfg, code, AMD64_RBP, 0); async_exc_point (code); } /* The param area is always at offset 0 from sp */ /* This needs to be allocated here, since it has to come after the spill area */ if (cfg->param_area) { if (cfg->arch.omit_fp) // FIXME: g_assert_not_reached (); cfg->stack_offset += ALIGN_TO (cfg->param_area, sizeof (target_mgreg_t)); } if (cfg->arch.omit_fp) { /* * On enter, the stack is misaligned by the pushing of the return * address. It is either made aligned by the pushing of %rbp, or by * this. 
*/ alloc_size = ALIGN_TO (cfg->stack_offset, 8); if ((alloc_size % 16) == 0) { alloc_size += 8; /* Mark the padding slot as NOREF */ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset - sizeof (target_mgreg_t), SLOT_NOREF); } } else { alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT); if (cfg->stack_offset != alloc_size) { /* Mark the padding slot as NOREF */ mini_gc_set_slot_type_from_fp (cfg, -alloc_size + cfg->param_area, SLOT_NOREF); } cfg->arch.sp_fp_offset = alloc_size; alloc_size -= pos; } cfg->arch.stack_alloc_size = alloc_size; set_code_cursor (cfg, code); /* Allocate stack frame */ #ifdef TARGET_WIN32 code = emit_prolog_setup_sp_win64 (cfg, code, alloc_size, &cfa_offset); #else if (alloc_size) { /* See mono_emit_stack_alloc */ #if defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK) guint32 remaining_size = alloc_size; /* Use a loop for large sizes */ if (remaining_size > 10 * 0x1000) { amd64_mov_reg_imm (code, X86_EAX, remaining_size / 0x1000); guint8 *label = code; amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000); amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP); amd64_alu_reg_imm (code, X86_SUB, AMD64_RAX, 1); amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0); guint8 *label2 = code; x86_branch8 (code, X86_CC_NE, 0, FALSE); amd64_patch (label2, label); if (cfg->arch.omit_fp) { cfa_offset += (remaining_size / 0x1000) * 0x1000; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); } remaining_size = remaining_size % 0x1000; set_code_cursor (cfg, code); } guint32 required_code_size = ((remaining_size / 0x1000) + 1) * 11; /*11 is the max size of amd64_alu_reg_imm + amd64_test_membase_reg*/ code = realloc_code (cfg, required_code_size); while (remaining_size >= 0x1000) { amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000); if (cfg->arch.omit_fp) { cfa_offset += 0x1000; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); } async_exc_point (code); amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP); remaining_size -= 0x1000; } if (remaining_size) { amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, remaining_size); if (cfg->arch.omit_fp) { cfa_offset += remaining_size; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); async_exc_point (code); } } #else amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size); if (cfg->arch.omit_fp) { cfa_offset += alloc_size; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); async_exc_point (code); } #endif } #endif /* Stack alignment check */ #if 0 { guint8 *buf; amd64_mov_reg_reg (code, AMD64_RAX, AMD64_RSP, 8); amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0xf); amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0); buf = code; x86_branch8 (code, X86_CC_EQ, 1, FALSE); amd64_breakpoint (code); amd64_patch (buf, code); } #endif if (mini_debug_options.init_stacks) { /* Fill the stack frame with a dummy value to force deterministic behavior */ /* Save registers to the red zone */ amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDI, 8); amd64_mov_membase_reg (code, AMD64_RSP, -16, AMD64_RCX, 8); MONO_DISABLE_WARNING (4310) // cast truncates constant value amd64_mov_reg_imm (code, AMD64_RAX, 0x2a2a2a2a2a2a2a2a); MONO_RESTORE_WARNING amd64_mov_reg_imm (code, AMD64_RCX, alloc_size / 8); amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RSP, 8); amd64_cld (code); amd64_prefix (code, X86_REP_PREFIX); amd64_stosl (code); amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RSP, -8, 8); amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, -16, 8); } /* Save LMF */ if (method->save_lmf) code = emit_setup_lmf (cfg, code, lmf_var->inst_offset, 
cfa_offset); /* Save callee saved registers */ if (cfg->arch.omit_fp) { save_area_offset = cfg->arch.reg_save_area_offset; /* Save caller saved registers after sp is adjusted */ /* The registers are saved at the bottom of the frame */ /* FIXME: Optimize this so the regs are saved at the end of the frame in increasing order */ } else { /* The registers are saved just below the saved rbp */ save_area_offset = cfg->arch.reg_save_area_offset; } for (i = 0; i < AMD64_NREG; ++i) { if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) { amd64_mov_membase_reg (code, cfg->frame_reg, save_area_offset, i, 8); if (cfg->arch.omit_fp) { mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - save_area_offset)); /* These are handled automatically by the stack marking code */ mini_gc_set_slot_type_from_cfa (cfg, - (cfa_offset - save_area_offset), SLOT_NOREF); } else { mono_emit_unwind_op_offset (cfg, code, i, - (-save_area_offset + (2 * 8))); // FIXME: GC } save_area_offset += 8; async_exc_point (code); } } /* store runtime generic context */ if (cfg->rgctx_var) { g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET && (cfg->rgctx_var->inst_basereg == AMD64_RBP || cfg->rgctx_var->inst_basereg == AMD64_RSP)); amd64_mov_membase_reg (code, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, sizeof(gpointer)); mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code); mono_add_var_location (cfg, cfg->rgctx_var, FALSE, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, code - cfg->native_code, 0); } /* compute max_length in order to use short forward jumps */ max_epilog_size = get_max_epilog_size (cfg); if (cfg->opt & MONO_OPT_BRANCH && cfg->max_block_num < MAX_BBLOCKS_FOR_BRANCH_OPTS) { for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins; int max_length = 0; /* max alignment for loops */ if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb)) max_length += LOOP_ALIGNMENT; MONO_BB_FOR_EACH_INS (bb, ins) { max_length += ins_get_size (ins->opcode); } /* Take prolog and epilog instrumentation into account */ if (bb == cfg->bb_entry || bb == cfg->bb_exit) max_length += max_epilog_size; bb->max_length = max_length; } } sig = mono_method_signature_internal (method); pos = 0; cinfo = cfg->arch.cinfo; if (sig->ret->type != MONO_TYPE_VOID) { /* Save volatile arguments to the stack */ if (cfg->vret_addr && (cfg->vret_addr->opcode != OP_REGVAR)) amd64_mov_membase_reg (code, cfg->vret_addr->inst_basereg, cfg->vret_addr->inst_offset, cinfo->ret.reg, 8); } /* Keep this in sync with emit_load_volatile_arguments */ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; ins = cfg->args [i]; if (ins->flags & MONO_INST_IS_DEAD && !MONO_CFG_PROFILE (cfg, ENTER_CONTEXT)) /* Unused arguments */ continue; /* Save volatile arguments to the stack */ if (ins->opcode != OP_REGVAR) { switch (ainfo->storage) { case ArgInIReg: { guint32 size = 8; /* FIXME: I1 etc */ /* if (stack_offset & 0x1) size = 1; else if (stack_offset & 0x2) size = 2; else if (stack_offset & 0x4) size = 4; else size = 8; */ amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg, size); /* * Save the original location of 'this', * mono_get_generic_info_from_stack_frame () needs this to properly look up * the argument value during the handling of async exceptions. 
*/ if (i == 0 && sig->hasthis) { mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code); mono_add_var_location (cfg, ins, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0); } break; } case ArgInFloatSSEReg: amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg); break; case ArgInDoubleSSEReg: amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg); break; case ArgValuetypeInReg: for (quad = 0; quad < 2; quad ++) { switch (ainfo->pair_storage [quad]) { case ArgInIReg: amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_regs [quad], sizeof (target_mgreg_t)); break; case ArgInFloatSSEReg: amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_regs [quad]); break; case ArgInDoubleSSEReg: amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_regs [quad]); break; case ArgNone: break; default: g_assert_not_reached (); } } break; case ArgValuetypeAddrInIReg: if (ainfo->pair_storage [0] == ArgInIReg) amd64_mov_membase_reg (code, ins->inst_left->inst_basereg, ins->inst_left->inst_offset, ainfo->pair_regs [0], sizeof (target_mgreg_t)); break; case ArgValuetypeAddrOnStack: break; case ArgGSharedVtInReg: amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg, 8); break; default: break; } } else { /* Argument allocated to (non-volatile) register */ switch (ainfo->storage) { case ArgInIReg: amd64_mov_reg_reg (code, ins->dreg, ainfo->reg, 8); break; case ArgOnStack: amd64_mov_reg_membase (code, ins->dreg, AMD64_RBP, ARGS_OFFSET + ainfo->offset, 8); break; default: g_assert_not_reached (); } if (i == 0 && sig->hasthis) { g_assert (ainfo->storage == ArgInIReg); mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code); mono_add_var_location (cfg, ins, TRUE, ins->dreg, 0, code - cfg->native_code, 0); } } } if (cfg->method->save_lmf) args_clobbered = TRUE; /* * Optimize the common case of the first bblock making a call with the same * arguments as the method. This works because the arguments are still in their * original argument registers. 
* FIXME: Generalize this */ if (!args_clobbered) { MonoBasicBlock *first_bb = cfg->bb_entry; MonoInst *next; int filter = FILTER_IL_SEQ_POINT; next = mono_bb_first_inst (first_bb, filter); if (!next && first_bb->next_bb) { first_bb = first_bb->next_bb; next = mono_bb_first_inst (first_bb, filter); } if (first_bb->in_count > 1) next = NULL; for (i = 0; next && i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; gboolean match = FALSE; ins = cfg->args [i]; if (ins->opcode != OP_REGVAR) { switch (ainfo->storage) { case ArgInIReg: { if (((next->opcode == OP_LOAD_MEMBASE) || (next->opcode == OP_LOADI4_MEMBASE)) && next->inst_basereg == ins->inst_basereg && next->inst_offset == ins->inst_offset) { if (next->dreg == ainfo->reg) { NULLIFY_INS (next); match = TRUE; } else { next->opcode = OP_MOVE; next->sreg1 = ainfo->reg; /* Only continue if the instruction doesn't change argument regs */ if (next->dreg == ainfo->reg || next->dreg == AMD64_RAX) match = TRUE; } } break; } default: break; } } else { /* Argument allocated to (non-volatile) register */ switch (ainfo->storage) { case ArgInIReg: if (next->opcode == OP_MOVE && next->sreg1 == ins->dreg && next->dreg == ainfo->reg) { NULLIFY_INS (next); match = TRUE; } break; default: break; } } if (match) { next = mono_inst_next (next, filter); //next = mono_inst_list_next (&next->node, &first_bb->ins_list); if (!next) break; } } } if (cfg->gen_sdb_seq_points) { MonoInst *info_var = cfg->arch.seq_point_info_var; /* Initialize seq_point_info_var */ if (cfg->compile_aot) { /* Initialize the variable from a GOT slot */ /* Same as OP_AOTCONST */ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method); amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof(gpointer)); g_assert (info_var->opcode == OP_REGOFFSET); amd64_mov_membase_reg (code, info_var->inst_basereg, info_var->inst_offset, AMD64_R11, 8); } if (cfg->compile_aot) { /* Initialize ss_tramp_var */ ins = cfg->arch.ss_tramp_var; g_assert (ins->opcode == OP_REGOFFSET); amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8); amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr), 8); amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8); } else { /* Initialize ss_tramp_var */ ins = cfg->arch.ss_tramp_var; g_assert (ins->opcode == OP_REGOFFSET); amd64_mov_reg_imm (code, AMD64_R11, (guint64)&ss_trampoline); amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8); /* Initialize bp_tramp_var */ ins = cfg->arch.bp_tramp_var; g_assert (ins->opcode == OP_REGOFFSET); amd64_mov_reg_imm (code, AMD64_R11, (guint64)&bp_trampoline); amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8); } } set_code_cursor (cfg, code); return code; } void mono_arch_emit_epilog (MonoCompile *cfg) { MonoMethod *method = cfg->method; int quad, i; guint8 *code; int max_epilog_size; CallInfo *cinfo; gint32 lmf_offset = cfg->lmf_var ? 
cfg->lmf_var->inst_offset : -1; gint32 save_area_offset = cfg->arch.reg_save_area_offset; max_epilog_size = get_max_epilog_size (cfg); code = realloc_code (cfg, max_epilog_size); cfg->has_unwind_info_for_epilog = TRUE; /* Mark the start of the epilog */ mono_emit_unwind_op_mark_loc (cfg, code, 0); /* Save the uwind state which is needed by the out-of-line code */ mono_emit_unwind_op_remember_state (cfg, code); /* the code restoring the registers must be kept in sync with OP_TAILCALL */ if (method->save_lmf) { if (cfg->used_int_regs & (1 << AMD64_RBP)) amd64_mov_reg_membase (code, AMD64_RBP, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), 8); if (cfg->arch.omit_fp) /* * emit_setup_lmf () marks RBP as saved, we have to mark it as same value here before clearing up the stack * since its stack slot will become invalid. */ mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP); } /* Restore callee saved regs */ for (i = 0; i < AMD64_NREG; ++i) { if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) { /* Restore only used_int_regs, not arch.saved_iregs */ #if defined(MONO_SUPPORT_TASKLETS) int restore_reg = 1; #else int restore_reg = (cfg->used_int_regs & (1 << i)); #endif if (restore_reg) { amd64_mov_reg_membase (code, i, cfg->frame_reg, save_area_offset, 8); mono_emit_unwind_op_same_value (cfg, code, i); async_exc_point (code); } save_area_offset += 8; } } /* Load returned vtypes into registers if needed */ cinfo = cfg->arch.cinfo; if (cinfo->ret.storage == ArgValuetypeInReg) { ArgInfo *ainfo = &cinfo->ret; MonoInst *inst = cfg->ret; for (quad = 0; quad < 2; quad ++) { switch (ainfo->pair_storage [quad]) { case ArgInIReg: amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_size [quad]); break; case ArgInFloatSSEReg: amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (target_mgreg_t))); break; case ArgInDoubleSSEReg: amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (target_mgreg_t))); break; case ArgNone: break; default: g_assert_not_reached (); } } } if (cfg->arch.omit_fp) { if (cfg->arch.stack_alloc_size) { amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size); } } else { #ifdef TARGET_WIN32 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0); amd64_pop_reg (code, AMD64_RBP); mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP); #else amd64_leave (code); mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP); #endif } mono_emit_unwind_op_def_cfa (cfg, code, AMD64_RSP, 8); async_exc_point (code); amd64_ret (code); /* Restore the unwind state to be the same as before the epilog */ mono_emit_unwind_op_restore_state (cfg, code); set_code_cursor (cfg, code); } void mono_arch_emit_exceptions (MonoCompile *cfg) { MonoJumpInfo *patch_info; int nthrows, i; guint8 *code; MonoClass *exc_classes [16]; guint8 *exc_throw_start [16], *exc_throw_end [16]; guint32 code_size = 0; /* Compute needed space */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { if (patch_info->type == MONO_PATCH_INFO_EXC) code_size += 40; if (patch_info->type == MONO_PATCH_INFO_R8) code_size += 8 + 15; /* sizeof (double) + alignment */ if (patch_info->type == MONO_PATCH_INFO_R4) code_size += 4 + 15; /* sizeof (float) + alignment */ if (patch_info->type == MONO_PATCH_INFO_GC_CARD_TABLE_ADDR) code_size += 8 + 7; /*sizeof (void*) + alignment */ } 
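	/* Grow the native code buffer up front so the out-of-line exception throw
	 * sequences and the RIP-relative float/GC-card-table data emitted below will fit. */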
	code = realloc_code (cfg, code_size);

	/* add code to raise exceptions */
	nthrows = 0;
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_EXC: {
			MonoClass *exc_class;
			guint8 *buf, *buf2;
			guint32 throw_ip;

			amd64_patch (patch_info->ip.i + cfg->native_code, code);

			exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);
			throw_ip = patch_info->ip.i;

			//x86_breakpoint (code);
			/* Find a throw sequence for the same exception class */
			for (i = 0; i < nthrows; ++i)
				if (exc_classes [i] == exc_class)
					break;
			if (i < nthrows) {
				amd64_mov_reg_imm (code, AMD64_ARG_REG2, (exc_throw_end [i] - cfg->native_code) - throw_ip);
				x86_jump_code (code, exc_throw_start [i]);
				patch_info->type = MONO_PATCH_INFO_NONE;
			} else {
				buf = code;
				amd64_mov_reg_imm_size (code, AMD64_ARG_REG2, 0xf0f0f0f0, 4);
				buf2 = code;

				if (nthrows < 16) {
					exc_classes [nthrows] = exc_class;
					exc_throw_start [nthrows] = code;
				}
				amd64_mov_reg_imm (code, AMD64_ARG_REG1, m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF);

				patch_info->type = MONO_PATCH_INFO_NONE;

				code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_arch_throw_corlib_exception);

				amd64_mov_reg_imm (buf, AMD64_ARG_REG2, (code - cfg->native_code) - throw_ip);
				while (buf < buf2)
					x86_nop (buf);

				if (nthrows < 16) {
					exc_throw_end [nthrows] = code;
					nthrows ++;
				}
			}
			break;
		}
		default:
			/* do nothing */
			break;
		}
		set_code_cursor (cfg, code);
	}

	/* Handle relocations with RIP relative addressing */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		gboolean remove = FALSE;
		guint8 *orig_code = code;

		switch (patch_info->type) {
		case MONO_PATCH_INFO_R8:
		case MONO_PATCH_INFO_R4: {
			guint8 *pos, *patch_pos;
			guint32 target_pos;

			/* The SSE opcodes require a 16 byte alignment */
			code = (guint8*)ALIGN_TO (code, 16);

			pos = cfg->native_code + patch_info->ip.i;
			if (IS_REX (pos [1])) {
				patch_pos = pos + 5;
				target_pos = code - pos - 9;
			} else {
				patch_pos = pos + 4;
				target_pos = code - pos - 8;
			}

			if (patch_info->type == MONO_PATCH_INFO_R8) {
				*(double*)code = *(double*)patch_info->data.target;
				code += sizeof (double);
			} else {
				*(float*)code = *(float*)patch_info->data.target;
				code += sizeof (float);
			}
			*(guint32*)(patch_pos) = target_pos;

			remove = TRUE;
			break;
		}
		case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR: {
			guint8 *pos;

			if (cfg->compile_aot)
				continue;

			/*loading is faster against aligned addresses.*/
			code = (guint8*)ALIGN_TO (code, 8);
			memset (orig_code, 0, code - orig_code);

			pos = cfg->native_code + patch_info->ip.i;

			/*alu_op [rex] modr/m imm32 - 7 or 8 bytes */
			if (IS_REX (pos [1]))
				*(guint32*)(pos + 4) = (guint8*)code - pos - 8;
			else
				*(guint32*)(pos + 3) = (guint8*)code - pos - 7;

			*(gpointer*)code = (gpointer)patch_info->data.target;
			code += sizeof (gpointer);

			remove = TRUE;
			break;
		}
		default:
			break;
		}

		if (remove) {
			if (patch_info == cfg->patch_info)
				cfg->patch_info = patch_info->next;
			else {
				MonoJumpInfo *tmp;

				for (tmp = cfg->patch_info; tmp->next != patch_info; tmp = tmp->next)
					;
				tmp->next = patch_info->next;
			}
		}
		set_code_cursor (cfg, code);
	}

	set_code_cursor (cfg, code);
}

#endif /* DISABLE_JIT */

MONO_NEVER_INLINE
void
mono_arch_flush_icache (guint8 *code, gint size)
{
	/* call/ret required (or likely other control transfer) */
}

void
mono_arch_flush_register_windows (void)
{
}

gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
	return amd64_use_imm32 (imm);
}

/*
 * Determine whether the trap whose info is in SIGINFO is caused by
 * integer
overflow. */ gboolean mono_arch_is_int_overflow (void *sigctx, void *info) { MonoContext ctx; guint8* rip; int reg; gint64 value; mono_sigctx_to_monoctx (sigctx, &ctx); rip = (guint8*)ctx.gregs [AMD64_RIP]; if (IS_REX (rip [0])) { reg = amd64_rex_b (rip [0]); rip ++; } else reg = 0; if ((rip [0] == 0xf7) && (x86_modrm_mod (rip [1]) == 0x3) && (x86_modrm_reg (rip [1]) == 0x7)) { /* idiv REG */ reg += x86_modrm_rm (rip [1]); value = ctx.gregs [reg]; if (value == -1) return TRUE; } return FALSE; } guint32 mono_arch_get_patch_offset (guint8 *code) { return 3; } /** * \return TRUE if no sw breakpoint was present (always). * * Copy \p size bytes from \p code - \p offset to the buffer \p buf. If the debugger inserted software * breakpoints in the original code, they are removed in the copy. */ gboolean mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size) { /* * If method_start is non-NULL we need to perform bound checks, since we access memory * at code - offset we could go before the start of the method and end up in a different * page of memory that is not mapped or read incorrect data anyway. We zero-fill the bytes * instead. */ if (!method_start || code - offset >= method_start) { memcpy (buf, code - offset, size); } else { int diff = code - method_start; memset (buf, 0, size); memcpy (buf + offset - diff, method_start, diff + size - offset); } return TRUE; } int mono_arch_get_this_arg_reg (guint8 *code) { return AMD64_ARG_REG1; } gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code) { return (gpointer)regs [mono_arch_get_this_arg_reg (code)]; } #define MAX_ARCH_DELEGATE_PARAMS 10 static gpointer get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, guint32 param_count) { guint8 *code, *start; GSList *unwind_ops = NULL; int i; unwind_ops = mono_arch_get_cie_program (); const int size = 64; start = code = (guint8 *)mono_global_codeman_reserve (size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)); if (has_target) { /* Replace the this argument with the target */ amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8); amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8); amd64_jump_membase (code, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); } else { if (param_count == 0) { amd64_jump_membase (code, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); } else { /* We have to shift the arguments left */ amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8); for (i = 0; i < param_count; ++i) { #ifdef TARGET_WIN32 if (i < 3) amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8); else amd64_mov_reg_membase (code, param_regs [i], AMD64_RSP, 0x28, 8); #else amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8); #endif } amd64_jump_membase (code, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); } } g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size); g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0))); mono_arch_flush_icache (start, code - start); if (has_target) { *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops); } else { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count); *info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops); g_free (name); } if (mono_jit_map_is_enabled ()) { char *buff; if (has_target) buff = (char*)"delegate_invoke_has_target"; else 
buff = g_strdup_printf ("delegate_invoke_no_target_%d", param_count); mono_emit_jit_tramp (start, code - start, buff); if (!has_target) g_free (buff); } MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); return start; } #define MAX_VIRTUAL_DELEGATE_OFFSET 32 static gpointer get_delegate_virtual_invoke_impl (MonoTrampInfo **info, gboolean load_imt_reg, int offset) { guint8 *code, *start; const int size = 20; char *tramp_name; GSList *unwind_ops; if (offset / (int)sizeof (target_mgreg_t) > MAX_VIRTUAL_DELEGATE_OFFSET) return NULL; start = code = (guint8 *)mono_global_codeman_reserve (size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)); unwind_ops = mono_arch_get_cie_program (); /* Replace the this argument with the target */ amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8); amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8); if (load_imt_reg) { /* Load the IMT reg */ amd64_mov_reg_membase (code, MONO_ARCH_IMT_REG, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method), 8); } /* Load the vtable */ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoObject, vtable), 8); amd64_jump_membase (code, AMD64_RAX, offset); g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); tramp_name = mono_get_delegate_virtual_invoke_impl_name (load_imt_reg, offset); *info = mono_tramp_info_create (tramp_name, start, code - start, NULL, unwind_ops); g_free (tramp_name); return start; } /* * mono_arch_get_delegate_invoke_impls: * * Return a list of MonoTrampInfo structures for the delegate invoke impl * trampolines. 
*/ GSList* mono_arch_get_delegate_invoke_impls (void) { GSList *res = NULL; MonoTrampInfo *info; int i; get_delegate_invoke_impl (&info, TRUE, 0); res = g_slist_prepend (res, info); for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) { get_delegate_invoke_impl (&info, FALSE, i); res = g_slist_prepend (res, info); } for (i = 1; i <= MONO_IMT_SIZE; ++i) { get_delegate_virtual_invoke_impl (&info, TRUE, - i * TARGET_SIZEOF_VOID_P); res = g_slist_prepend (res, info); } for (i = 0; i <= MAX_VIRTUAL_DELEGATE_OFFSET; ++i) { get_delegate_virtual_invoke_impl (&info, FALSE, i * TARGET_SIZEOF_VOID_P); res = g_slist_prepend (res, info); get_delegate_virtual_invoke_impl (&info, TRUE, i * TARGET_SIZEOF_VOID_P); res = g_slist_prepend (res, info); } return res; } gpointer mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target) { guint8 *code, *start; int i; if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS) return NULL; /* FIXME: Support more cases */ if (MONO_TYPE_ISSTRUCT (mini_get_underlying_type (sig->ret))) return NULL; if (has_target) { static guint8* cached = NULL; if (cached) return cached; if (mono_ee_features.use_aot_trampolines) { start = (guint8 *)mono_aot_get_trampoline ("delegate_invoke_impl_has_target"); } else { MonoTrampInfo *info; start = (guint8 *)get_delegate_invoke_impl (&info, TRUE, 0); mono_tramp_info_register (info, NULL); } mono_memory_barrier (); cached = start; } else { static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL}; for (i = 0; i < sig->param_count; ++i) if (!mono_is_regsize_var (sig->params [i])) return NULL; if (sig->param_count > 4) return NULL; code = cache [sig->param_count]; if (code) return code; if (mono_ee_features.use_aot_trampolines) { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count); start = (guint8 *)mono_aot_get_trampoline (name); g_free (name); } else { MonoTrampInfo *info; start = (guint8 *)get_delegate_invoke_impl (&info, FALSE, sig->param_count); mono_tramp_info_register (info, NULL); } mono_memory_barrier (); cache [sig->param_count] = start; } return start; } gpointer mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg) { MonoTrampInfo *info; gpointer code; code = get_delegate_virtual_invoke_impl (&info, load_imt_reg, offset); if (code) mono_tramp_info_register (info, NULL); return code; } void mono_arch_finish_init (void) { #if !defined(HOST_WIN32) && defined(MONO_XEN_OPT) optimize_for_xen = access ("/proc/xen", F_OK) == 0; #endif } #define CMP_SIZE (6 + 1) #define CMP_REG_REG_SIZE (4 + 1) #define BR_SMALL_SIZE 2 #define BR_LARGE_SIZE 6 #define MOV_REG_IMM_SIZE 10 #define MOV_REG_IMM_32BIT_SIZE 6 #define JUMP_REG_SIZE (2 + 1) static int imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target) { int i, distance = 0; for (i = start; i < target; ++i) distance += imt_entries [i]->chunk_size; return distance; } /* * LOCKING: called with the domain lock held */ gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp) { int i; int size = 0; guint8 *code, *start; gboolean vtable_is_32bit = ((gsize)(vtable) == (gsize)(int)(gsize)(vtable)); GSList *unwind_ops; MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->is_equals) { if (item->check_target_idx) { if (!item->compare_done) { if (amd64_use_imm32 ((gint64)item->key)) item->chunk_size += 
CMP_SIZE; else item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE; } if (item->has_target_code) { item->chunk_size += MOV_REG_IMM_SIZE; } else { if (vtable_is_32bit) item->chunk_size += MOV_REG_IMM_32BIT_SIZE; else item->chunk_size += MOV_REG_IMM_SIZE; } item->chunk_size += BR_SMALL_SIZE + JUMP_REG_SIZE; } else { if (fail_tramp) { item->chunk_size += MOV_REG_IMM_SIZE * 3 + CMP_REG_REG_SIZE + BR_SMALL_SIZE + JUMP_REG_SIZE * 2; } else { if (vtable_is_32bit) item->chunk_size += MOV_REG_IMM_32BIT_SIZE; else item->chunk_size += MOV_REG_IMM_SIZE; item->chunk_size += JUMP_REG_SIZE; /* with assert below: * item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1; */ } } } else { if (amd64_use_imm32 ((gint64)item->key)) item->chunk_size += CMP_SIZE; else item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE; item->chunk_size += BR_LARGE_SIZE; imt_entries [item->check_target_idx]->compare_done = TRUE; } size += item->chunk_size; } if (fail_tramp) { code = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)); } else { code = (guint8 *)mono_mem_manager_code_reserve (mem_manager, size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)); } start = code; unwind_ops = mono_arch_get_cie_program (); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; item->code_target = code; if (item->is_equals) { gboolean fail_case = !item->check_target_idx && fail_tramp; if (item->check_target_idx || fail_case) { if (!item->compare_done || fail_case) { if (amd64_use_imm32 ((gint64)item->key)) amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer)); else { amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof(gpointer)); amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG); } } item->jmp_code = code; amd64_branch8 (code, X86_CC_NE, 0, FALSE); if (item->has_target_code) { amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->value.target_code); amd64_jump_reg (code, MONO_ARCH_IMT_SCRATCH_REG); } else { amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot])); amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0); } if (fail_case) { amd64_patch (item->jmp_code, code); amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, fail_tramp); amd64_jump_reg (code, MONO_ARCH_IMT_SCRATCH_REG); item->jmp_code = NULL; } } else { /* enable the commented code to assert on wrong method */ #if 0 if (amd64_is_imm32 (item->key)) amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer)); else { amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key); amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG); } item->jmp_code = code; amd64_branch8 (code, X86_CC_NE, 0, FALSE); /* See the comment below about R10 */ amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot])); amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0); amd64_patch (item->jmp_code, code); amd64_breakpoint (code); item->jmp_code = NULL; #else /* We're using R10 (MONO_ARCH_IMT_SCRATCH_REG) here because R11 (MONO_ARCH_IMT_REG) needs to be preserved. R10 needs to be preserved for calls which require a runtime generic context, but interface calls don't. 
*/ amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot])); amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0); #endif } } else { if (amd64_use_imm32 ((gint64)item->key)) amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof (target_mgreg_t)); else { amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof (target_mgreg_t)); amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG); } item->jmp_code = code; if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx))) x86_branch8 (code, X86_CC_GE, 0, FALSE); else x86_branch32 (code, X86_CC_GE, 0, FALSE); } g_assertf (code - item->code_target <= item->chunk_size, "%X %X", (guint)(code - item->code_target), (guint)item->chunk_size); } /* patch the branches to get to the target items */ for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->jmp_code) { if (item->check_target_idx) { amd64_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target); } } } if (!fail_tramp) UnlockedAdd (&mono_stats.imt_trampolines_size, code - start); g_assert (code - start <= size); g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0))); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL)); mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), mem_manager); return start; } MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code) { return (MonoMethod*)regs [MONO_ARCH_IMT_REG]; } MonoVTable* mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code) { return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG]; } GSList* mono_arch_get_cie_program (void) { GSList *l = NULL; mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, AMD64_RSP, 8); mono_add_unwind_op_offset (l, (guint8*)NULL, (guint8*)NULL, AMD64_RIP, -8); return l; } #ifndef DISABLE_JIT MonoInst* mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins = NULL; int opcode = 0; if (cmethod->klass == mono_class_try_get_math_class ()) { if (strcmp (cmethod->name, "Sqrt") == 0) { opcode = OP_SQRT; } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) { opcode = OP_ABS; } if (opcode && fsig->param_count == 1) { MONO_INST_NEW (cfg, ins, opcode); ins->type = STACK_R8; ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = args [0]->dreg; MONO_ADD_INS (cfg->cbb, ins); } opcode = 0; if (cfg->opt & MONO_OPT_CMOV) { if (strcmp (cmethod->name, "Min") == 0) { if (fsig->params [0]->type == MONO_TYPE_I4) opcode = OP_IMIN; if (fsig->params [0]->type == MONO_TYPE_U4) opcode = OP_IMIN_UN; else if (fsig->params [0]->type == MONO_TYPE_I8) opcode = OP_LMIN; else if (fsig->params [0]->type == MONO_TYPE_U8) opcode = OP_LMIN_UN; } else if (strcmp (cmethod->name, "Max") == 0) { if (fsig->params [0]->type == MONO_TYPE_I4) opcode = OP_IMAX; if (fsig->params [0]->type == MONO_TYPE_U4) opcode = OP_IMAX_UN; else if (fsig->params [0]->type == MONO_TYPE_I8) opcode = OP_LMAX; else if (fsig->params [0]->type == MONO_TYPE_U8) opcode = OP_LMAX_UN; } } if (opcode && fsig->param_count == 2) { MONO_INST_NEW (cfg, ins, opcode); ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? 
STACK_I4 : STACK_I8; ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->sreg2 = args [1]->dreg; MONO_ADD_INS (cfg->cbb, ins); } #if 0 /* OP_FREM is not IEEE compatible */ else if (strcmp (cmethod->name, "IEEERemainder") == 0 && fsig->param_count == 2) { MONO_INST_NEW (cfg, ins, OP_FREM); ins->inst_i0 = args [0]; ins->inst_i1 = args [1]; } #endif if ((mini_get_cpu_features (cfg) & MONO_CPU_X86_SSE41) != 0 && fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) { int mode = -1; if (!strcmp (cmethod->name, "Round")) mode = 0; else if (!strcmp (cmethod->name, "Floor")) mode = 1; else if (!strcmp (cmethod->name, "Ceiling")) mode = 2; if (mode != -1) { int xreg = alloc_xreg (cfg); EMIT_NEW_UNALU (cfg, ins, OP_FCONV_TO_R8_X, xreg, args [0]->dreg); EMIT_NEW_UNALU (cfg, ins, OP_SSE41_ROUNDP, xreg, xreg); ins->inst_c0 = mode; ins->inst_c1 = MONO_TYPE_R8; int dreg = alloc_freg (cfg); EMIT_NEW_UNALU (cfg, ins, OP_EXTRACT_R8, dreg, xreg); ins->inst_c0 = 0; ins->inst_c1 = MONO_TYPE_R8; return ins; } } } return ins; } #endif host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg) { return ctx->gregs [reg]; } host_mgreg_t * mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg) { return &ctx->gregs [reg]; } void mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val) { ctx->gregs [reg] = val; } /* * mono_arch_emit_load_aotconst: * * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and * TARGET from the mscorlib GOT in full-aot code. * On AMD64, the result is placed into R11. */ guint8* mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, MonoJumpInfoType tramp_type, gconstpointer target) { *ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target); amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); return code; } /* * mono_arch_get_trampolines: * * Return a list of MonoTrampInfo structures describing arch specific trampolines * for AOT. */ GSList * mono_arch_get_trampolines (gboolean aot) { return mono_amd64_get_exception_trampolines (aot); } /* Soft Debug support */ #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED /* * mono_arch_set_breakpoint: * * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET. * The location should contain code emitted by OP_SEQ_POINT. */ void mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip; if (ji->from_aot) { guint32 native_offset = ip - (guint8*)ji->code_start; SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8 *)ji->code_start); g_assert (info->bp_addrs [native_offset] == 0); info->bp_addrs [native_offset] = mini_get_breakpoint_trampoline (); } else { /* ip points to a mov r11, 0 */ g_assert (code [0] == 0x41); g_assert (code [1] == 0xbb); amd64_mov_reg_imm (code, AMD64_R11, 1); } } /* * mono_arch_clear_breakpoint: * * Clear the breakpoint at IP. */ void mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip; if (ji->from_aot) { guint32 native_offset = ip - (guint8*)ji->code_start; SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8 *)ji->code_start); info->bp_addrs [native_offset] = NULL; } else { amd64_mov_reg_imm (code, AMD64_R11, 0); } } gboolean mono_arch_is_breakpoint_event (void *info, void *sigctx) { /* We use soft breakpoints on amd64 */ return FALSE; } /* * mono_arch_skip_breakpoint: * * Modify CTX so the ip is placed after the breakpoint instruction, so when * we resume, the instruction is not executed again. 
 */
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
	g_assert_not_reached ();
}

/*
 * mono_arch_start_single_stepping:
 *
 *   Start single stepping.
 */
void
mono_arch_start_single_stepping (void)
{
	ss_trampoline = mini_get_single_step_trampoline ();
}

/*
 * mono_arch_stop_single_stepping:
 *
 *   Stop single stepping.
 */
void
mono_arch_stop_single_stepping (void)
{
	ss_trampoline = NULL;
}

/*
 * mono_arch_is_single_step_event:
 *
 *   Return whether the machine state in SIGCTX corresponds to a single
 * step event.
 */
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
	/* We use soft breakpoints on amd64 */
	return FALSE;
}

/*
 * mono_arch_skip_single_step:
 *
 *   Modify CTX so the ip is placed after the single step trigger instruction, so when
 * we resume, the instruction is not executed again.
 */
void
mono_arch_skip_single_step (MonoContext *ctx)
{
	g_assert_not_reached ();
}

/*
 * mono_arch_get_seq_point_info:
 *
 *   Return a pointer to a data structure which is used by the sequence
 * point implementation in AOTed code.
 */
SeqPointInfo*
mono_arch_get_seq_point_info (guint8 *code)
{
	SeqPointInfo *info;
	MonoJitInfo *ji;
	MonoJitMemoryManager *jit_mm;

	/*
	 * We don't have access to the method etc. so use the global
	 * memory manager for now.
	 */
	jit_mm = get_default_jit_mm ();

	// FIXME: Add a free function

	jit_mm_lock (jit_mm);
	info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code);
	jit_mm_unlock (jit_mm);

	if (!info) {
		ji = mini_jit_info_table_find (code);
		g_assert (ji);

		// FIXME: Optimize the size
		info = (SeqPointInfo *)g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size * sizeof (gpointer)));

		info->ss_tramp_addr = &ss_trampoline;

		jit_mm_lock (jit_mm);
		g_hash_table_insert (jit_mm->arch_seq_points, code, info);
		jit_mm_unlock (jit_mm);
	}

	return info;
}

#endif

gboolean
mono_arch_opcode_supported (int opcode)
{
	switch (opcode) {
	case OP_ATOMIC_ADD_I4:
	case OP_ATOMIC_ADD_I8:
	case OP_ATOMIC_EXCHANGE_I4:
	case OP_ATOMIC_EXCHANGE_I8:
	case OP_ATOMIC_CAS_I4:
	case OP_ATOMIC_CAS_I8:
	case OP_ATOMIC_LOAD_I1:
	case OP_ATOMIC_LOAD_I2:
	case OP_ATOMIC_LOAD_I4:
	case OP_ATOMIC_LOAD_I8:
	case OP_ATOMIC_LOAD_U1:
	case OP_ATOMIC_LOAD_U2:
	case OP_ATOMIC_LOAD_U4:
	case OP_ATOMIC_LOAD_U8:
	case OP_ATOMIC_LOAD_R4:
	case OP_ATOMIC_LOAD_R8:
	case OP_ATOMIC_STORE_I1:
	case OP_ATOMIC_STORE_I2:
	case OP_ATOMIC_STORE_I4:
	case OP_ATOMIC_STORE_I8:
	case OP_ATOMIC_STORE_U1:
	case OP_ATOMIC_STORE_U2:
	case OP_ATOMIC_STORE_U4:
	case OP_ATOMIC_STORE_U8:
	case OP_ATOMIC_STORE_R4:
	case OP_ATOMIC_STORE_R8:
		return TRUE;
	default:
		return FALSE;
	}
}

CallInfo*
mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
	return get_call_info (mp, sig);
}

gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
	gpointer target = NULL;
	switch (jit_icall_id) {
#undef MONO_AOT_ICALL
#define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
	MONO_AOT_ICALL (mono_amd64_resume_unwind)
	MONO_AOT_ICALL (mono_amd64_start_gsharedvt_call)
	MONO_AOT_ICALL (mono_amd64_throw_corlib_exception)
	MONO_AOT_ICALL (mono_amd64_throw_exception)
	default:
		break;
	}
	return target;
}
/** * \file * AMD64 backend for the Mono code generator * * Based on mini-x86.c. * * Authors: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * Patrik Torstensson * Zoltan Varga ([email protected]) * Johan Lorensson ([email protected]) * * (C) 2003 Ximian, Inc. * Copyright 2003-2011 Novell, Inc (http://www.novell.com) * Copyright 2011 Xamarin, Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include "mini.h" #include <string.h> #include <math.h> #include <assert.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <mono/metadata/abi-details.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/threads.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/gc-internals.h> #include <mono/metadata/tokentype.h> #include <mono/utils/mono-math.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-memory-model.h> #include <mono/utils/mono-tls.h> #include <mono/utils/mono-hwcap.h> #include <mono/utils/mono-threads.h> #include <mono/utils/unlocked.h> #include "interp/interp.h" #include "ir-emit.h" #include "mini-amd64.h" #include "cpu-amd64.h" #include "mini-gc.h" #include "mini-runtime.h" #include "aot-runtime.h" #ifdef MONO_XEN_OPT static gboolean optimize_for_xen = TRUE; #else #define optimize_for_xen 0 #endif static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math") #define IS_IMM32(val) ((((guint64)val) >> 32) == 0) #define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f)) /* The single step trampoline */ static gpointer ss_trampoline; /* The breakpoint trampoline */ static gpointer bp_trampoline; /* Offset between fp and the first argument in the callee */ #define ARGS_OFFSET 16 #define GP_SCRATCH_REG AMD64_R11 /* Max number of bblocks before we bail from using more advanced branch placement code */ #define MAX_BBLOCKS_FOR_BRANCH_OPTS 800 /* * AMD64 register usage: * - callee saved registers are used for global register allocation * - %r11 is used for materializing 64 bit constants in opcodes * - the rest is used for local allocation */ /* * Floating point comparison results: * ZF PF CF * A > B 0 0 0 * A < B 0 0 1 * A = B 1 0 0 * A > B 0 0 0 * UNORDERED 1 1 1 */ const char* mono_arch_regname (int reg) { switch (reg) { case AMD64_RAX: return "%rax"; case AMD64_RBX: return "%rbx"; case AMD64_RCX: return "%rcx"; case AMD64_RDX: return "%rdx"; case AMD64_RSP: return "%rsp"; case AMD64_RBP: return "%rbp"; case AMD64_RDI: return "%rdi"; case AMD64_RSI: return "%rsi"; case AMD64_R8: return "%r8"; case AMD64_R9: return "%r9"; case AMD64_R10: return "%r10"; case AMD64_R11: return "%r11"; case AMD64_R12: return "%r12"; case AMD64_R13: return "%r13"; case AMD64_R14: return "%r14"; case AMD64_R15: return "%r15"; } return "unknown"; } static const char * const packed_xmmregs [] = { "p:xmm0", "p:xmm1", "p:xmm2", "p:xmm3", "p:xmm4", "p:xmm5", "p:xmm6", "p:xmm7", "p:xmm8", "p:xmm9", "p:xmm10", "p:xmm11", "p:xmm12", "p:xmm13", "p:xmm14", "p:xmm15" }; static const char * const single_xmmregs [] = { "s:xmm0", "s:xmm1", "s:xmm2", "s:xmm3", "s:xmm4", "s:xmm5", "s:xmm6", "s:xmm7", "s:xmm8", "s:xmm9", "s:xmm10", "s:xmm11", "s:xmm12", "s:xmm13", "s:xmm14", "s:xmm15" }; const char* mono_arch_fregname (int reg) { if (reg < AMD64_XMM_NREG) return single_xmmregs [reg]; else return "unknown"; } const char * mono_arch_xregname (int reg) { if (reg < AMD64_XMM_NREG) return packed_xmmregs 
[reg]; else return "unknown"; } static gboolean debug_omit_fp (void) { #if 0 return mono_debug_count (); #else return TRUE; #endif } static gboolean amd64_is_near_call (guint8 *code) { /* Skip REX */ if ((code [0] >= 0x40) && (code [0] <= 0x4f)) code += 1; return code [0] == 0xe8; } static gboolean amd64_use_imm32 (gint64 val) { if (mini_debug_options.single_imm_size) return FALSE; return amd64_is_imm32 (val); } void mono_x86_patch (unsigned char* code, gpointer target) { mono_x86_patch_inline (code, target); } static void amd64_patch (unsigned char* code, gpointer target) { // NOTE: Sometimes code has just been generated, is not running yet, // and has no alignment requirements. Sometimes it could be running while we patch it, // and there are alignment requirements. // FIXME Assert alignment. guint8 rex = 0; /* Skip REX */ if ((code [0] >= 0x40) && (code [0] <= 0x4f)) { rex = code [0]; code += 1; } if ((code [0] & 0xf8) == 0xb8) { /* amd64_set_reg_template */ *(guint64*)(code + 1) = (guint64)target; } else if ((code [0] == 0x8b) && rex && x86_modrm_mod (code [1]) == 0 && x86_modrm_rm (code [1]) == 5) { /* mov 0(%rip), %dreg */ g_assert (!1); // Historical code was incorrect. ptrdiff_t const offset = (guchar*)target - (code + 6); g_assert (offset == (gint32)offset); *(gint32*)(code + 2) = (gint32)offset; } else if (code [0] == 0xff && (code [1] == 0x15 || code [1] == 0x25)) { /* call or jmp *<OFFSET>(%rip) */ // Patch the data, not the code. g_assert (!2); // For possible use later. *(void**)(code + 6 + *(gint32*)(code + 2)) = target; } else x86_patch (code, target); } void mono_amd64_patch (unsigned char* code, gpointer target) { amd64_patch (code, target); } #define DEBUG(a) if (cfg->verbose_level > 1) a static void inline add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo) { ainfo->offset = *stack_size; if (*gr >= PARAM_REGS) { ainfo->storage = ArgOnStack; ainfo->arg_size = sizeof (target_mgreg_t); /* Since the same stack slot size is used for all arg */ /* types, it needs to be big enough to hold them all */ (*stack_size) += sizeof (target_mgreg_t); } else { ainfo->storage = ArgInIReg; ainfo->reg = param_regs [*gr]; (*gr) ++; } } static void inline add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double) { ainfo->offset = *stack_size; if (*gr >= FLOAT_PARAM_REGS) { ainfo->storage = ArgOnStack; ainfo->arg_size = sizeof (target_mgreg_t); /* Since the same stack slot size is used for both float */ /* types, it needs to be big enough to hold them both */ (*stack_size) += sizeof (target_mgreg_t); } else { /* A double register */ if (is_double) ainfo->storage = ArgInDoubleSSEReg; else ainfo->storage = ArgInFloatSSEReg; ainfo->reg = *gr; (*gr) += 1; } } typedef enum ArgumentClass { ARG_CLASS_NO_CLASS, ARG_CLASS_MEMORY, ARG_CLASS_INTEGER, ARG_CLASS_SSE } ArgumentClass; static ArgumentClass merge_argument_class_from_type (MonoType *type, ArgumentClass class1) { ArgumentClass class2 = ARG_CLASS_NO_CLASS; MonoType *ptype; ptype = mini_get_underlying_type (type); switch (ptype->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_OBJECT: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_I8: case MONO_TYPE_U8: class2 = ARG_CLASS_INTEGER; break; case MONO_TYPE_R4: case MONO_TYPE_R8: #ifdef TARGET_WIN32 class2 = ARG_CLASS_INTEGER; #else class2 = ARG_CLASS_SSE; #endif break; case MONO_TYPE_TYPEDBYREF: g_assert_not_reached (); case 
MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ptype)) { class2 = ARG_CLASS_INTEGER; break; } /* fall through */ case MONO_TYPE_VALUETYPE: { MonoMarshalType *info = mono_marshal_load_type_info (ptype->data.klass); int i; for (i = 0; i < info->num_fields; ++i) { class2 = class1; class2 = merge_argument_class_from_type (info->fields [i].field->type, class2); } break; } default: g_assert_not_reached (); } /* Merge */ if (class1 == class2) ; else if (class1 == ARG_CLASS_NO_CLASS) class1 = class2; else if ((class1 == ARG_CLASS_MEMORY) || (class2 == ARG_CLASS_MEMORY)) class1 = ARG_CLASS_MEMORY; else if ((class1 == ARG_CLASS_INTEGER) || (class2 == ARG_CLASS_INTEGER)) class1 = ARG_CLASS_INTEGER; else class1 = ARG_CLASS_SSE; return class1; } typedef struct { MonoType *type; int size, offset; } StructFieldInfo; /* * collect_field_info_nested: * * Collect field info from KLASS recursively into FIELDS. */ static void collect_field_info_nested (MonoClass *klass, GArray *fields_array, int offset, gboolean pinvoke, gboolean unicode) { MonoMarshalType *info; int i; if (pinvoke) { info = mono_marshal_load_type_info (klass); g_assert(info); for (i = 0; i < info->num_fields; ++i) { if (MONO_TYPE_ISSTRUCT (info->fields [i].field->type)) { collect_field_info_nested (mono_class_from_mono_type_internal (info->fields [i].field->type), fields_array, info->fields [i].offset, pinvoke, unicode); } else { guint32 align; StructFieldInfo f; f.type = info->fields [i].field->type; f.size = mono_marshal_type_size (info->fields [i].field->type, info->fields [i].mspec, &align, TRUE, unicode); f.offset = offset + info->fields [i].offset; if (i == info->num_fields - 1 && f.size + f.offset < info->native_size) { /* This can happen with .pack directives eg. 'fixed' arrays */ if (MONO_TYPE_IS_PRIMITIVE (f.type)) { /* Replicate the last field to fill out the remaining place, since the code in add_valuetype () needs type information */ g_array_append_val (fields_array, f); while (f.size + f.offset < info->native_size) { f.offset += f.size; g_array_append_val (fields_array, f); } } else { f.size = info->native_size - f.offset; g_array_append_val (fields_array, f); } } else { g_array_append_val (fields_array, f); } } } } else { gpointer iter; MonoClassField *field; iter = NULL; while ((field = mono_class_get_fields_internal (klass, &iter))) { if (field->type->attrs & FIELD_ATTRIBUTE_STATIC) continue; if (MONO_TYPE_ISSTRUCT (field->type)) { collect_field_info_nested (mono_class_from_mono_type_internal (field->type), fields_array, field->offset - MONO_ABI_SIZEOF (MonoObject), pinvoke, unicode); } else { int align; StructFieldInfo f; f.type = field->type; f.size = mono_type_size (field->type, &align); f.offset = field->offset - MONO_ABI_SIZEOF (MonoObject) + offset; g_array_append_val (fields_array, f); } } } } #ifdef TARGET_WIN32 /* Windows x64 ABI can pass/return value types in register of size 1,2,4,8 bytes. 
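   For illustration (added note, not from the original comment): struct { int a; } (size 4) and struct { gint64 a; } (size 8) fit a register this way, while struct { char a, b, c; } (size 3) does not and is passed by reference instead.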
*/ #define MONO_WIN64_VALUE_TYPE_FITS_REG(arg_size) (arg_size <= SIZEOF_REGISTER && (arg_size == 1 || arg_size == 2 || arg_size == 4 || arg_size == 8)) static gboolean allocate_register_for_valuetype_win64 (ArgInfo *arg_info, ArgumentClass arg_class, guint32 arg_size, const AMD64_Reg_No int_regs [], int int_reg_count, const AMD64_XMM_Reg_No float_regs [], int float_reg_count, guint32 *current_int_reg, guint32 *current_float_reg) { gboolean result = FALSE; assert (arg_info != NULL && int_regs != NULL && float_regs != NULL && current_int_reg != NULL && current_float_reg != NULL); assert (arg_info->storage == ArgValuetypeInReg || arg_info->storage == ArgValuetypeAddrInIReg); arg_info->pair_storage [0] = arg_info->pair_storage [1] = ArgNone; arg_info->pair_regs [0] = arg_info->pair_regs [1] = ArgNone; arg_info->pair_size [0] = 0; arg_info->pair_size [1] = 0; arg_info->nregs = 0; if (arg_class == ARG_CLASS_INTEGER && *current_int_reg < int_reg_count) { /* Pass parameter in integer register. */ arg_info->pair_storage [0] = ArgInIReg; arg_info->pair_regs [0] = int_regs [*current_int_reg]; (*current_int_reg) ++; result = TRUE; } else if (arg_class == ARG_CLASS_SSE && *current_float_reg < float_reg_count) { /* Pass parameter in float register. */ arg_info->pair_storage [0] = (arg_size <= sizeof (gfloat)) ? ArgInFloatSSEReg : ArgInDoubleSSEReg; arg_info->pair_regs [0] = float_regs [*current_float_reg]; (*current_float_reg) ++; result = TRUE; } if (result == TRUE) { arg_info->pair_size [0] = arg_size; arg_info->nregs = 1; } return result; } static gboolean allocate_parameter_register_for_valuetype_win64 (ArgInfo *arg_info, ArgumentClass arg_class, guint32 arg_size, guint32 *current_int_reg, guint32 *current_float_reg) { return allocate_register_for_valuetype_win64 (arg_info, arg_class, arg_size, param_regs, PARAM_REGS, float_param_regs, FLOAT_PARAM_REGS, current_int_reg, current_float_reg); } static gboolean allocate_return_register_for_valuetype_win64 (ArgInfo *arg_info, ArgumentClass arg_class, guint32 arg_size, guint32 *current_int_reg, guint32 *current_float_reg) { return allocate_register_for_valuetype_win64 (arg_info, arg_class, arg_size, return_regs, RETURN_REGS, float_return_regs, FLOAT_RETURN_REGS, current_int_reg, current_float_reg); } static void allocate_storage_for_valuetype_win64 (ArgInfo *arg_info, MonoType *type, gboolean is_return, ArgumentClass arg_class, guint32 arg_size, guint32 *current_int_reg, guint32 *current_float_reg, guint32 *stack_size) { /* Windows x64 value type ABI. * * Parameters: https://msdn.microsoft.com/en-us/library/zthk2dkh.aspx * * Integer/Float types smaller than or equal to 8 bytes or properly sized struct/union (1,2,4,8) * Try to pass in register using ArgValuetypeInReg/(ArgInIReg|ArgInFloatSSEReg|ArgInDoubleSSEReg) as storage and size of parameter(1,2,4,8), if no more registers, pass on stack using ArgOnStack as storage and size of parameter(1,2,4,8). * Integer/Float types bigger than 8 bytes or struct/unions larger than 8 bytes or of size (3,5,6,7). * Try to pass pointer in register using ArgValuetypeAddrInIReg, if no more registers, pass pointer on stack using ArgValuetypeAddrOnStack as storage and parameter size of register (8 bytes). * * Return values: https://msdn.microsoft.com/en-us/library/7572ztz4.aspx. * * Integer/Float types smaller than or equal to 8 bytes * Return in corresponding register RAX/XMM0 using ArgValuetypeInReg/(ArgInIReg|ArgInFloatSSEReg|ArgInDoubleSSEReg) as storage and size of parameter(1,2,4,8).
* Properly sized struct/unions (1,2,4,8) * Return in register RAX using ArgValuetypeInReg as storage and size of parameter(1,2,4,8). * Types bigger than 8 bytes or struct/unions larger than 8 bytes or (3,5,6,7). * Return pointer to allocated stack space (allocated by caller) using ArgValuetypeAddrInIReg as storage and parameter size. */ assert (arg_info != NULL && type != NULL && current_int_reg != NULL && current_float_reg != NULL && stack_size != NULL); if (!is_return) { /* Parameter cases. */ if (arg_class != ARG_CLASS_MEMORY && MONO_WIN64_VALUE_TYPE_FITS_REG (arg_size)) { assert (arg_size == 1 || arg_size == 2 || arg_size == 4 || arg_size == 8); /* First, try to use registers for parameter. If type is struct it can only be passed by value in integer register. */ arg_info->storage = ArgValuetypeInReg; if (!allocate_parameter_register_for_valuetype_win64 (arg_info, !MONO_TYPE_ISSTRUCT (type) ? arg_class : ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg)) { /* No more registers, fallback passing parameter on stack as value. */ assert (arg_info->pair_storage [0] == ArgNone && arg_info->pair_storage [1] == ArgNone && arg_info->pair_size [0] == 0 && arg_info->pair_size [1] == 0 && arg_info->nregs == 0); /* Passing value directly on stack, so use size of value. */ arg_info->storage = ArgOnStack; arg_size = ALIGN_TO (arg_size, sizeof (target_mgreg_t)); arg_info->offset = *stack_size; arg_info->arg_size = arg_size; *stack_size += arg_size; } } else { /* Fallback to stack, try to pass address to parameter in register. Always use integer register to represent stack address. */ arg_info->storage = ArgValuetypeAddrInIReg; if (!allocate_parameter_register_for_valuetype_win64 (arg_info, ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg)) { /* No more registers, fallback passing address to parameter on stack. */ assert (arg_info->pair_storage [0] == ArgNone && arg_info->pair_storage [1] == ArgNone && arg_info->pair_size [0] == 0 && arg_info->pair_size [1] == 0 && arg_info->nregs == 0); /* Passing an address to value on stack, so use size of register as argument size. */ arg_info->storage = ArgValuetypeAddrOnStack; arg_size = sizeof (target_mgreg_t); arg_info->offset = *stack_size; arg_info->arg_size = arg_size; *stack_size += arg_size; } } } else { /* Return value cases. */ if (arg_class != ARG_CLASS_MEMORY && MONO_WIN64_VALUE_TYPE_FITS_REG (arg_size)) { assert (arg_size == 1 || arg_size == 2 || arg_size == 4 || arg_size == 8); /* Return value fits into return registers. If type is struct it can only be returned by value in integer register. */ arg_info->storage = ArgValuetypeInReg; allocate_return_register_for_valuetype_win64 (arg_info, !MONO_TYPE_ISSTRUCT (type) ? arg_class : ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg); /* Only RAX/XMM0 should be used to return valuetype. */ assert ((arg_info->pair_regs[0] == AMD64_RAX && arg_info->pair_regs[1] == ArgNone) || (arg_info->pair_regs[0] == AMD64_XMM0 && arg_info->pair_regs[1] == ArgNone)); } else { /* Return value doesn't fit into return register, return address to allocated stack space (allocated by caller and passed as input). */ arg_info->storage = ArgValuetypeAddrInIReg; allocate_return_register_for_valuetype_win64 (arg_info, ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg); /* Only RAX should be used to return valuetype address. 
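   (Added illustrative sketch, hedged: for a 24-byte struct return the caller allocates a buffer and passes its address as a hidden argument; the callee copies the result there and hands the same address back in RAX.)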
*/ assert (arg_info->pair_regs[0] == AMD64_RAX && arg_info->pair_regs[1] == ArgNone); arg_size = ALIGN_TO (arg_size, sizeof (target_mgreg_t)); arg_info->offset = *stack_size; *stack_size += arg_size; } } } static void get_valuetype_size_win64 (MonoClass *klass, gboolean pinvoke, ArgInfo *arg_info, MonoType *type, ArgumentClass *arg_class, guint32 *arg_size) { *arg_size = 0; *arg_class = ARG_CLASS_NO_CLASS; assert (klass != NULL && arg_info != NULL && type != NULL && arg_class != NULL && arg_size != NULL); if (pinvoke) { /* Calculate argument class type and size of marshalled type. */ MonoMarshalType *info = mono_marshal_load_type_info (klass); *arg_size = info->native_size; } else { /* Calculate argument class type and size of managed type. */ *arg_size = mono_class_value_size (klass, NULL); } /* The Windows ABI only handles value types on the stack or passed in an integer register (if the type fits the register size). */ *arg_class = MONO_WIN64_VALUE_TYPE_FITS_REG (*arg_size) ? ARG_CLASS_INTEGER : ARG_CLASS_MEMORY; if (*arg_class == ARG_CLASS_MEMORY) { /* Value type has a size that doesn't fit a register according to the ABI. Use the full stack size of the type instead. */ *arg_size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, pinvoke); } /* * Standard C and C++ don't allow empty structs; empty structs will always have a size of 1 byte. * GCC has an extension to allow empty structs, https://gcc.gnu.org/onlinedocs/gcc/Empty-Structures.html. * This causes a dilemma, since a runtime built with a non-GCC compiler will not be compatible with * GCC-built C libraries, and vice versa. On platforms where empty structs have a size of 1 byte * they must be represented in the call and cannot be dropped. */ if (*arg_size == 0 && MONO_TYPE_ISSTRUCT (type)) { arg_info->pass_empty_struct = TRUE; *arg_size = SIZEOF_REGISTER; *arg_class = ARG_CLASS_INTEGER; } assert (*arg_class != ARG_CLASS_NO_CLASS); } static void add_valuetype_win64 (MonoMethodSignature *signature, ArgInfo *arg_info, MonoType *type, gboolean is_return, guint32 *current_int_reg, guint32 *current_float_reg, guint32 *stack_size) { guint32 arg_size = SIZEOF_REGISTER; MonoClass *klass = NULL; ArgumentClass arg_class; assert (signature != NULL && arg_info != NULL && type != NULL && current_int_reg != NULL && current_float_reg != NULL && stack_size != NULL); klass = mono_class_from_mono_type_internal (type); get_valuetype_size_win64 (klass, signature->pinvoke && !signature->marshalling_disabled, arg_info, type, &arg_class, &arg_size); /* Only drop the value type if it's not an empty struct input that must be represented in the call */ if ((arg_size == 0 && !arg_info->pass_empty_struct) || (arg_info->pass_empty_struct && is_return)) { arg_info->storage = ArgValuetypeInReg; arg_info->pair_storage [0] = arg_info->pair_storage [1] = ArgNone; } else { /* Allocate storage for the value type. */ allocate_storage_for_valuetype_win64 (arg_info, type, is_return, arg_class, arg_size, current_int_reg, current_float_reg, stack_size); } } #endif /* TARGET_WIN32 */ static void add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type, gboolean is_return, guint32 *gr, guint32 *fr, guint32 *stack_size) { #ifdef TARGET_WIN32 add_valuetype_win64 (sig, ainfo, type, is_return, gr, fr, stack_size); #else guint32 size, quad, nquads, i, nfields; /* Keep track of the size used in each quad so we can */ /* use the right size when copying args/return vars.
*/ guint32 quadsize [2] = {8, 8}; ArgumentClass args [2]; StructFieldInfo *fields = NULL; GArray *fields_array; MonoClass *klass; gboolean pass_on_stack = FALSE; int struct_size; klass = mono_class_from_mono_type_internal (type); size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, sig->pinvoke && !sig->marshalling_disabled); if (!sig->pinvoke && ((is_return && (size == 8)) || (!is_return && (size <= 16)))) { /* We pass and return vtypes of size 8 in a register */ } else if (!sig->pinvoke || (size == 0) || (size > 16)) { pass_on_stack = TRUE; } /* If this struct can't be split up naturally into 8-byte */ /* chunks (registers), pass it on the stack. */ if (sig->pinvoke && !sig->marshalling_disabled) { MonoMarshalType *info = mono_marshal_load_type_info (klass); g_assert (info); struct_size = info->native_size; } else { struct_size = mono_class_value_size (klass, NULL); } /* * Collect field information recursively to be able to * handle nested structures. */ fields_array = g_array_new (FALSE, TRUE, sizeof (StructFieldInfo)); collect_field_info_nested (klass, fields_array, 0, sig->pinvoke && !sig->marshalling_disabled, m_class_is_unicode (klass)); fields = (StructFieldInfo*)fields_array->data; nfields = fields_array->len; for (i = 0; i < nfields; ++i) { if ((fields [i].offset < 8) && (fields [i].offset + fields [i].size) > 8) { pass_on_stack = TRUE; break; } } if (size == 0) { ainfo->storage = ArgValuetypeInReg; ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone; return; } if (pass_on_stack) { /* Always pass in memory */ ainfo->offset = *stack_size; *stack_size += ALIGN_TO (size, 8); ainfo->storage = is_return ? ArgValuetypeAddrInIReg : ArgOnStack; if (!is_return) ainfo->arg_size = ALIGN_TO (size, 8); g_array_free (fields_array, TRUE); return; } if (size > 8) nquads = 2; else nquads = 1; if (!sig->pinvoke) { int n = mono_class_value_size (klass, NULL); quadsize [0] = n >= 8 ? 8 : n; quadsize [1] = n >= 8 ? MAX (n - 8, 8) : 0; /* Always pass in 1 or 2 integer registers */ args [0] = ARG_CLASS_INTEGER; args [1] = ARG_CLASS_INTEGER; /* Only the simplest cases are supported */ if (is_return && nquads != 1) { args [0] = ARG_CLASS_MEMORY; args [1] = ARG_CLASS_MEMORY; } } else { /* * Implement the algorithm from section 3.2.3 of the X86_64 ABI. * The X87 and SSEUP stuff is left out since there are no such types in * the CLR. */ if (!nfields) { ainfo->storage = ArgValuetypeInReg; ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone; return; } if (struct_size > 16) { ainfo->offset = *stack_size; *stack_size += ALIGN_TO (struct_size, 8); ainfo->storage = is_return ?
ArgValuetypeAddrInIReg : ArgOnStack; if (!is_return) ainfo->arg_size = ALIGN_TO (struct_size, 8); g_array_free (fields_array, TRUE); return; } args [0] = ARG_CLASS_NO_CLASS; args [1] = ARG_CLASS_NO_CLASS; for (quad = 0; quad < nquads; ++quad) { ArgumentClass class1; if (nfields == 0) class1 = ARG_CLASS_MEMORY; else class1 = ARG_CLASS_NO_CLASS; for (i = 0; i < nfields; ++i) { if ((fields [i].offset < 8) && (fields [i].offset + fields [i].size) > 8) { /* Unaligned field */ NOT_IMPLEMENTED; } /* Skip fields in other quad */ if ((quad == 0) && (fields [i].offset >= 8)) continue; if ((quad == 1) && (fields [i].offset < 8)) continue; /* How far into this quad this data extends.*/ /* (8 is size of quad) */ quadsize [quad] = fields [i].offset + fields [i].size - (quad * 8); class1 = merge_argument_class_from_type (fields [i].type, class1); } /* Empty structs have a nonzero size, causing this assert to be hit */ if (sig->pinvoke) g_assert (class1 != ARG_CLASS_NO_CLASS); args [quad] = class1; } } g_array_free (fields_array, TRUE); /* Post merger cleanup */ if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY)) args [0] = args [1] = ARG_CLASS_MEMORY; /* Allocate registers */ { int orig_gr = *gr; int orig_fr = *fr; while (quadsize [0] != 1 && quadsize [0] != 2 && quadsize [0] != 4 && quadsize [0] != 8) quadsize [0] ++; while (quadsize [1] != 0 && quadsize [1] != 1 && quadsize [1] != 2 && quadsize [1] != 4 && quadsize [1] != 8) quadsize [1] ++; ainfo->storage = ArgValuetypeInReg; ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone; g_assert (quadsize [0] <= 8); g_assert (quadsize [1] <= 8); ainfo->pair_size [0] = quadsize [0]; ainfo->pair_size [1] = quadsize [1]; ainfo->nregs = nquads; for (quad = 0; quad < nquads; ++quad) { switch (args [quad]) { case ARG_CLASS_INTEGER: if (*gr >= PARAM_REGS) args [quad] = ARG_CLASS_MEMORY; else { ainfo->pair_storage [quad] = ArgInIReg; if (is_return) ainfo->pair_regs [quad] = return_regs [*gr]; else ainfo->pair_regs [quad] = param_regs [*gr]; (*gr) ++; } break; case ARG_CLASS_SSE: if (*fr >= FLOAT_PARAM_REGS) args [quad] = ARG_CLASS_MEMORY; else { if (quadsize[quad] <= 4) ainfo->pair_storage [quad] = ArgInFloatSSEReg; else ainfo->pair_storage [quad] = ArgInDoubleSSEReg; ainfo->pair_regs [quad] = *fr; (*fr) ++; } break; case ARG_CLASS_MEMORY: break; case ARG_CLASS_NO_CLASS: break; default: g_assert_not_reached (); } } if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY)) { int arg_size; /* Revert possible register assignments */ *gr = orig_gr; *fr = orig_fr; ainfo->offset = *stack_size; if (sig->pinvoke) arg_size = ALIGN_TO (struct_size, 8); else arg_size = nquads * sizeof (target_mgreg_t); *stack_size += arg_size; ainfo->storage = is_return ? ArgValuetypeAddrInIReg : ArgOnStack; if (!is_return) ainfo->arg_size = arg_size; } } #endif /* !TARGET_WIN32 */ } /* * get_call_info: * * Obtain information about a call according to the calling convention. * For AMD64 System V, see the "System V ABI, x86-64 Architecture Processor Supplement * Draft Version 0.23" document for more information. 
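 * Added illustrative sketch (an assumption for exposition, not quoted from the ABI documents): under SysV, struct { double x; gint64 y; } classifies as SSE + INTEGER and travels in one XMM plus one general purpose register, while any struct larger than 16 bytes is passed in memory.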
* For AMD64 Windows, see "Overview of x64 Calling Conventions", * https://msdn.microsoft.com/en-us/library/ms235286.aspx */ static CallInfo* get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { guint32 i, gr, fr, pstart; MonoType *ret_type; int n = sig->hasthis + sig->param_count; guint32 stack_size = 0; CallInfo *cinfo; gboolean is_pinvoke = sig->pinvoke; if (mp) cinfo = (CallInfo *)mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n)); else cinfo = (CallInfo *)g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n)); cinfo->nargs = n; cinfo->gsharedvt = mini_is_gsharedvt_variable_signature (sig); gr = 0; fr = 0; #ifdef TARGET_WIN32 /* Reserve space where the callee can save the argument registers */ stack_size = 4 * sizeof (target_mgreg_t); #endif /* return value */ ret_type = mini_get_underlying_type (sig->ret); switch (ret_type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = AMD64_RAX; break; case MONO_TYPE_U8: case MONO_TYPE_I8: cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = AMD64_RAX; break; case MONO_TYPE_R4: cinfo->ret.storage = ArgInFloatSSEReg; cinfo->ret.reg = AMD64_XMM0; break; case MONO_TYPE_R8: cinfo->ret.storage = ArgInDoubleSSEReg; cinfo->ret.reg = AMD64_XMM0; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ret_type)) { cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = AMD64_RAX; break; } if (mini_is_gsharedvt_type (ret_type)) { cinfo->ret.storage = ArgGsharedvtVariableInReg; break; } /* fall through */ case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: { guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0; add_valuetype (sig, &cinfo->ret, ret_type, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize); g_assert (cinfo->ret.storage != ArgInIReg); break; } case MONO_TYPE_VAR: case MONO_TYPE_MVAR: g_assert (mini_is_gsharedvt_type (ret_type)); cinfo->ret.storage = ArgGsharedvtVariableInReg; break; case MONO_TYPE_VOID: break; default: g_error ("Can't handle as return value 0x%x", ret_type->type); } pstart = 0; /* * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after * the first argument, allowing 'this' to be always passed in the first arg reg. * Also do this if the first argument is a reference type, since virtual calls * are sometimes made using calli without sig->hasthis set, like in the delegate * invoke wrappers. 
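 * As a hedged example (added): for an instance method returning a large struct under SysV, 'this' stays in RDI and the hidden vret address moves to RSI, which is why vret_arg_index is set to 1 below.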
*/ ArgStorage ret_storage = cinfo->ret.storage; if ((ret_storage == ArgValuetypeAddrInIReg || ret_storage == ArgGsharedvtVariableInReg) && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) { if (sig->hasthis) { add_general (&gr, &stack_size, cinfo->args + 0); } else { add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0]); pstart = 1; } add_general (&gr, &stack_size, &cinfo->ret); cinfo->ret.storage = ret_storage; cinfo->vret_arg_index = 1; } else { /* this */ if (sig->hasthis) add_general (&gr, &stack_size, cinfo->args + 0); if (ret_storage == ArgValuetypeAddrInIReg || ret_storage == ArgGsharedvtVariableInReg) { add_general (&gr, &stack_size, &cinfo->ret); cinfo->ret.storage = ret_storage; } } if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) { gr = PARAM_REGS; fr = FLOAT_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie); } for (i = pstart; i < sig->param_count; ++i) { ArgInfo *ainfo = &cinfo->args [sig->hasthis + i]; MonoType *ptype; #ifdef TARGET_WIN32 /* The float param registers and other param registers must be the same index on Windows x64. */ if (gr > fr) fr = gr; else if (fr > gr) gr = fr; #endif if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* We always pass the sig cookie on the stack for simplicity */ /* * Prevent implicit arguments + the sig cookie from being passed * in registers. */ gr = PARAM_REGS; fr = FLOAT_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie); } ptype = mini_get_underlying_type (sig->params [i]); switch (ptype->type) { case MONO_TYPE_I1: ainfo->is_signed = 1; /* fall through */ case MONO_TYPE_U1: add_general (&gr, &stack_size, ainfo); ainfo->byte_arg_size = 1; break; case MONO_TYPE_I2: ainfo->is_signed = 1; /* fall through */ case MONO_TYPE_U2: add_general (&gr, &stack_size, ainfo); ainfo->byte_arg_size = 2; break; case MONO_TYPE_I4: ainfo->is_signed = 1; /* fall through */ case MONO_TYPE_U4: add_general (&gr, &stack_size, ainfo); ainfo->byte_arg_size = 4; break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: add_general (&gr, &stack_size, ainfo); break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ptype)) { add_general (&gr, &stack_size, ainfo); break; } if (mini_is_gsharedvt_variable_type (ptype)) { /* gsharedvt arguments are passed by ref */ add_general (&gr, &stack_size, ainfo); if (ainfo->storage == ArgInIReg) ainfo->storage = ArgGSharedVtInReg; else ainfo->storage = ArgGSharedVtOnStack; break; } /* fall through */ case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: add_valuetype (sig, ainfo, ptype, FALSE, &gr, &fr, &stack_size); break; case MONO_TYPE_U8: case MONO_TYPE_I8: add_general (&gr, &stack_size, ainfo); break; case MONO_TYPE_R4: add_float (&fr, &stack_size, ainfo, FALSE); break; case MONO_TYPE_R8: add_float (&fr, &stack_size, ainfo, TRUE); break; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: /* gsharedvt arguments are passed by ref */ g_assert (mini_is_gsharedvt_type (ptype)); add_general (&gr, &stack_size, ainfo); if (ainfo->storage == ArgInIReg) ainfo->storage = ArgGSharedVtInReg; else ainfo->storage = ArgGSharedVtOnStack; break; default: g_assert_not_reached (); } } if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) { gr = PARAM_REGS; fr =
FLOAT_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie); } cinfo->stack_usage = stack_size; cinfo->reg_usage = gr; cinfo->freg_usage = fr; return cinfo; } static int arg_need_temp (ArgInfo *ainfo) { // Value types using one register don't need a temp. if (ainfo->storage == ArgValuetypeInReg && ainfo->nregs > 1) return ainfo->nregs * sizeof (host_mgreg_t); return 0; } static gpointer arg_get_storage (CallContext *ccontext, ArgInfo *ainfo) { switch (ainfo->storage) { case ArgInIReg: return &ccontext->gregs [ainfo->reg]; case ArgInFloatSSEReg: case ArgInDoubleSSEReg: return &ccontext->fregs [ainfo->reg]; case ArgOnStack: case ArgValuetypeAddrOnStack: return ccontext->stack + ainfo->offset; case ArgValuetypeInReg: // Empty struct if (ainfo->nregs == 0) return NULL; // A value type using one register can be stored // directly in its context gregs/fregs slot. g_assert (ainfo->nregs == 1); switch (ainfo->pair_storage [0]) { case ArgInIReg: return &ccontext->gregs [ainfo->pair_regs [0]]; case ArgInFloatSSEReg: case ArgInDoubleSSEReg: return &ccontext->fregs [ainfo->pair_regs [0]]; default: g_assert_not_reached (); } case ArgValuetypeAddrInIReg: g_assert (ainfo->pair_storage [0] == ArgInIReg && ainfo->pair_storage [1] == ArgNone); return &ccontext->gregs [ainfo->pair_regs [0]]; default: g_error ("Arg storage type not yet supported"); } } static void arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest) { g_assert (arg_need_temp (ainfo)); host_mgreg_t *dest_cast = (host_mgreg_t*)dest; /* Reconstruct the value type */ for (int k = 0; k < ainfo->nregs; k++) { int storage_type = ainfo->pair_storage [k]; int reg_storage = ainfo->pair_regs [k]; switch (storage_type) { case ArgInIReg: *dest_cast = ccontext->gregs [reg_storage]; break; case ArgInFloatSSEReg: case ArgInDoubleSSEReg: *(double*)dest_cast = ccontext->fregs [reg_storage]; break; default: g_assert_not_reached (); } dest_cast++; } } static void arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src) { g_assert (arg_need_temp (ainfo)); host_mgreg_t *src_cast = (host_mgreg_t*)src; for (int k = 0; k < ainfo->nregs; k++) { int storage_type = ainfo->pair_storage [k]; int reg_storage = ainfo->pair_regs [k]; switch (storage_type) { case ArgInIReg: ccontext->gregs [reg_storage] = *src_cast; break; case ArgInFloatSSEReg: case ArgInDoubleSSEReg: ccontext->fregs [reg_storage] = *(double*)src_cast; break; default: g_assert_not_reached (); } src_cast++; } } void mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { CallInfo *cinfo = get_call_info (NULL, sig); const MonoEECallbacks *interp_cb = mini_get_interp_callbacks (); gpointer storage; ArgInfo *ainfo; memset (ccontext, 0, sizeof (CallContext)); ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT); if (ccontext->stack_size) ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size); if (sig->ret->type != MONO_TYPE_VOID) { ainfo = &cinfo->ret; if (ainfo->storage == ArgValuetypeAddrInIReg) { storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1); ccontext->gregs [cinfo->ret.reg] = (host_mgreg_t)storage; } } g_assert (!sig->hasthis); for (int i = 0; i < sig->param_count; i++) { ainfo = &cinfo->args [i]; if (ainfo->storage == ArgValuetypeAddrInIReg || ainfo->storage == ArgValuetypeAddrOnStack) { storage = arg_get_storage (ccontext, ainfo); *(gpointer *)storage = interp_cb->frame_arg_to_storage (frame,
sig, i); continue; } int temp_size = arg_need_temp (ainfo); if (temp_size) storage = alloca (temp_size); // FIXME? alloca in a loop else storage = arg_get_storage (ccontext, ainfo); interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage); if (temp_size) arg_set_val (ccontext, ainfo, storage); } g_free (cinfo); } void mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp) { const MonoEECallbacks *interp_cb; CallInfo *cinfo; gpointer storage; ArgInfo *ainfo; if (sig->ret->type == MONO_TYPE_VOID) return; interp_cb = mini_get_interp_callbacks (); cinfo = get_call_info (NULL, sig); ainfo = &cinfo->ret; if (retp) { g_assert (cinfo->ret.storage == ArgValuetypeAddrInIReg); interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, retp); #ifdef TARGET_WIN32 // The Windows x64 ABI ainfo implementation includes info on how to return the value type address back to the caller. storage = arg_get_storage (ccontext, ainfo); *(gpointer *)storage = retp; #endif } else { g_assert (cinfo->ret.storage != ArgValuetypeAddrInIReg); int temp_size = arg_need_temp (ainfo); if (temp_size) storage = alloca (temp_size); else storage = arg_get_storage (ccontext, ainfo); memset (ccontext, 0, sizeof (CallContext)); // FIXME interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage); if (temp_size) arg_set_val (ccontext, ainfo, storage); } g_free (cinfo); } gpointer mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { const MonoEECallbacks *interp_cb = mini_get_interp_callbacks (); CallInfo *cinfo = get_call_info (NULL, sig); gpointer storage; ArgInfo *ainfo; for (int i = 0; i < sig->param_count + sig->hasthis; i++) { ainfo = &cinfo->args [i]; if (ainfo->storage == ArgValuetypeAddrInIReg || ainfo->storage == ArgValuetypeAddrOnStack) { storage = arg_get_storage (ccontext, ainfo); interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, *(gpointer *)storage); continue; } int temp_size = arg_need_temp (ainfo); if (temp_size) { storage = alloca (temp_size); // FIXME? alloca in a loop arg_get_val (ccontext, ainfo, storage); } else { storage = arg_get_storage (ccontext, ainfo); } interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage); } storage = NULL; if (sig->ret->type != MONO_TYPE_VOID) { ainfo = &cinfo->ret; if (ainfo->storage == ArgValuetypeAddrInIReg) storage = (gpointer) ccontext->gregs [cinfo->ret.reg]; } g_free (cinfo); return storage; } void mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { const MonoEECallbacks *interp_cb; CallInfo *cinfo; ArgInfo *ainfo; gpointer storage; /* No return value */ if (sig->ret->type == MONO_TYPE_VOID) return; interp_cb = mini_get_interp_callbacks (); cinfo = get_call_info (NULL, sig); ainfo = &cinfo->ret; /* The return values were stored directly at the address passed in the reg */ if (cinfo->ret.storage != ArgValuetypeAddrInIReg) { int temp_size = arg_need_temp (ainfo); if (temp_size) { storage = alloca (temp_size); arg_get_val (ccontext, ainfo, storage); } else { storage = arg_get_storage (ccontext, ainfo); } interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage); } g_free (cinfo); } /* * mono_arch_get_argument_info: * @csig: a method signature * @param_count: the number of parameters to consider * @arg_info: an array to store the result infos * * Gathers information on parameters such as size, alignment and * padding.
arg_info should be large enough to hold param_count + 1 entries. * * Returns the size of the argument area on the stack. */ int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) { int k; CallInfo *cinfo = get_call_info (NULL, csig); guint32 args_size = cinfo->stack_usage; /* The arguments are saved to a stack area in mono_arch_instrument_prolog */ if (csig->hasthis) { arg_info [0].offset = 0; } for (k = 0; k < param_count; k++) { arg_info [k + 1].offset = ((k + csig->hasthis) * 8); /* FIXME: */ arg_info [k + 1].size = 0; } g_free (cinfo); return args_size; } #ifndef DISABLE_JIT gboolean mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_) { CallInfo *caller_info = get_call_info (NULL, caller_sig); CallInfo *callee_info = get_call_info (NULL, callee_sig); gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage) && IS_SUPPORTED_TAILCALL (callee_info->ret.storage == caller_info->ret.storage); // Limit stack_usage to 1G. Assume 32bit limits when we move parameters. res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (1 << 30)); res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (1 << 30)); // valuetype parameters are the address of a local const ArgInfo *ainfo; ainfo = callee_info->args + callee_sig->hasthis; for (int i = 0; res && i < callee_sig->param_count; ++i) { res = IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgValuetypeAddrInIReg) && IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgValuetypeAddrOnStack); } g_free (caller_info); g_free (callee_info); return res; } #endif /* DISABLE_JIT */ /* * Initialize the cpu to execute managed code. */ void mono_arch_cpu_init (void) { #ifndef _MSC_VER guint16 fpcw; /* spec compliance requires running with double precision */ __asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw)); fpcw &= ~X86_FPCW_PRECC_MASK; fpcw |= X86_FPCW_PREC_DOUBLE; __asm__ __volatile__ ("fldcw %0\n": : "m" (fpcw)); __asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw)); #else /* TODO: This is crashing on Win64 right now. * _control87 (_PC_53, MCW_PC); */ #endif } /* * Initialize architecture specific code. */ void mono_arch_init (void) { #ifndef DISABLE_JIT if (!mono_aot_only) bp_trampoline = mini_get_breakpoint_trampoline (); #endif } /* * Cleanup architecture specific code. */ void mono_arch_cleanup (void) { } /* * This function returns the optimizations supported on this cpu.
*/ guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask) { guint32 opts = 0; *exclude_mask = 0; if (mono_hwcap_x86_has_cmov) { opts |= MONO_OPT_CMOV; if (mono_hwcap_x86_has_fcmov) opts |= MONO_OPT_FCMOV; else *exclude_mask |= MONO_OPT_FCMOV; } else { *exclude_mask |= MONO_OPT_CMOV; } return opts; } MonoCPUFeatures mono_arch_get_cpu_features (void) { guint64 features = MONO_CPU_INITED; if (mono_hwcap_x86_has_sse1) features |= MONO_CPU_X86_SSE; if (mono_hwcap_x86_has_sse2) features |= MONO_CPU_X86_SSE2; if (mono_hwcap_x86_has_sse3) features |= MONO_CPU_X86_SSE3; if (mono_hwcap_x86_has_ssse3) features |= MONO_CPU_X86_SSSE3; if (mono_hwcap_x86_has_sse41) features |= MONO_CPU_X86_SSE41; if (mono_hwcap_x86_has_sse42) features |= MONO_CPU_X86_SSE42; if (mono_hwcap_x86_has_popcnt) features |= MONO_CPU_X86_POPCNT; if (mono_hwcap_x86_has_lzcnt) features |= MONO_CPU_X86_LZCNT; return (MonoCPUFeatures)features; } #ifndef DISABLE_JIT GList * mono_arch_get_allocatable_int_vars (MonoCompile *cfg) { GList *vars = NULL; int i; for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); /* unused vars */ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos) continue; if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG)) continue; if (mono_is_regsize_var (ins->inst_vtype)) { g_assert (MONO_VARINFO (cfg, i)->reg == -1); g_assert (i == vmv->idx); vars = g_list_prepend (vars, vmv); } } vars = mono_varlist_sort (cfg, vars, 0); return vars; } /** * mono_arch_compute_omit_fp: * Determine whether the frame pointer can be eliminated. */ static void mono_arch_compute_omit_fp (MonoCompile *cfg) { MonoMethodSignature *sig; MonoMethodHeader *header; int i, locals_size; CallInfo *cinfo; if (cfg->arch.omit_fp_computed) return; header = cfg->header; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; /* * FIXME: Remove some of the restrictions. */ cfg->arch.omit_fp = TRUE; cfg->arch.omit_fp_computed = TRUE; if (cfg->disable_omit_fp) cfg->arch.omit_fp = FALSE; if (!debug_omit_fp ()) cfg->arch.omit_fp = FALSE; /* if (cfg->method->save_lmf) cfg->arch.omit_fp = FALSE; */ if (cfg->flags & MONO_CFG_HAS_ALLOCA) cfg->arch.omit_fp = FALSE; if (header->num_clauses) cfg->arch.omit_fp = FALSE; if (cfg->param_area) cfg->arch.omit_fp = FALSE; if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) cfg->arch.omit_fp = FALSE; for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = &cinfo->args [i]; if (ainfo->storage == ArgOnStack || ainfo->storage == ArgValuetypeAddrInIReg || ainfo->storage == ArgValuetypeAddrOnStack) { /* * The stack offset can only be determined when the frame * size is known. 
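 * (Added reasoning note: with a frame pointer such arguments can be addressed at fixed offsets from RBP even before the final frame size is computed, so RBP is kept in that case.)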
*/ cfg->arch.omit_fp = FALSE; } } locals_size = 0; for (i = cfg->locals_start; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; int ialign; locals_size += mono_type_size (ins->inst_vtype, &ialign); } } GList * mono_arch_get_global_int_regs (MonoCompile *cfg) { GList *regs = NULL; mono_arch_compute_omit_fp (cfg); if (cfg->arch.omit_fp) regs = g_list_prepend (regs, (gpointer)AMD64_RBP); /* We use the callee saved registers for global allocation */ regs = g_list_prepend (regs, (gpointer)AMD64_RBX); regs = g_list_prepend (regs, (gpointer)AMD64_R12); regs = g_list_prepend (regs, (gpointer)AMD64_R13); regs = g_list_prepend (regs, (gpointer)AMD64_R14); regs = g_list_prepend (regs, (gpointer)AMD64_R15); #ifdef TARGET_WIN32 regs = g_list_prepend (regs, (gpointer)AMD64_RDI); regs = g_list_prepend (regs, (gpointer)AMD64_RSI); #endif return regs; } /* * mono_arch_regalloc_cost: * * Return the cost, in number of memory references, of the action of * allocating the variable VMV into a register during global register * allocation. */ guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv) { MonoInst *ins = cfg->varinfo [vmv->idx]; if (cfg->method->save_lmf) /* The register is already saved */ /* subtract 1 for the invisible store in the prolog */ return (ins->opcode == OP_ARG) ? 0 : 1; else /* push+pop */ return (ins->opcode == OP_ARG) ? 1 : 2; } /* * mono_arch_fill_argument_info: * * Populate cfg->args, cfg->ret and cfg->vret_addr with information about the arguments * of the method. */ void mono_arch_fill_argument_info (MonoCompile *cfg) { MonoMethodSignature *sig; MonoInst *ins; int i; CallInfo *cinfo; sig = mono_method_signature_internal (cfg->method); cinfo = cfg->arch.cinfo; /* * Contrary to mono_arch_allocate_vars (), the information should describe * where the arguments are at the beginning of the method, not where they can be * accessed during the execution of the method. The latter makes no sense for the * global register allocator, since a variable can be in more than one location. */ switch (cinfo->ret.storage) { case ArgInIReg: case ArgInFloatSSEReg: case ArgInDoubleSSEReg: cfg->ret->opcode = OP_REGVAR; cfg->ret->inst_c0 = cinfo->ret.reg; break; case ArgValuetypeInReg: cfg->ret->opcode = OP_REGOFFSET; cfg->ret->inst_basereg = -1; cfg->ret->inst_offset = -1; break; case ArgNone: break; default: g_assert_not_reached (); } for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = &cinfo->args [i]; ins = cfg->args [i]; switch (ainfo->storage) { case ArgInIReg: case ArgInFloatSSEReg: case ArgInDoubleSSEReg: ins->opcode = OP_REGVAR; ins->inst_c0 = ainfo->reg; break; case ArgOnStack: ins->opcode = OP_REGOFFSET; ins->inst_basereg = -1; ins->inst_offset = -1; break; case ArgValuetypeInReg: /* Dummy */ ins->opcode = OP_NOP; break; default: g_assert_not_reached (); } } } void mono_arch_allocate_vars (MonoCompile *cfg) { MonoType *sig_ret; MonoMethodSignature *sig; MonoInst *ins; int i, offset; guint32 locals_stack_size, locals_stack_align; gint32 *offsets; CallInfo *cinfo; sig = mono_method_signature_internal (cfg->method); cinfo = cfg->arch.cinfo; sig_ret = mini_get_underlying_type (sig->ret); mono_arch_compute_omit_fp (cfg); /* * We use the ABI calling conventions for managed code as well. * Exception: valuetypes are only sometimes passed or returned in registers.
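 * (Illustrative note, added: for instance, a 16-byte struct of two gint64 fields is passed in two general purpose registers under SysV but by hidden reference on Windows x64.)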
*/ /* * The stack looks like this: * <incoming arguments passed on the stack> * <return value> * <lmf/caller saved registers> * <locals> * <spill area> * <localloc area> -> grows dynamically * <params area> */ if (cfg->arch.omit_fp) { cfg->flags |= MONO_CFG_HAS_SPILLUP; cfg->frame_reg = AMD64_RSP; offset = 0; } else { /* Locals are allocated backwards from %fp */ cfg->frame_reg = AMD64_RBP; offset = 0; } cfg->arch.saved_iregs = cfg->used_int_regs; if (cfg->method->save_lmf) { /* Save all callee-saved registers normally (except RBP, if not already used), and restore them when unwinding through an LMF */ guint32 iregs_to_save = AMD64_CALLEE_SAVED_REGS & ~(1<<AMD64_RBP); cfg->arch.saved_iregs |= iregs_to_save; } if (cfg->arch.omit_fp) cfg->arch.reg_save_area_offset = offset; /* Reserve space for callee saved registers */ for (i = 0; i < AMD64_NREG; ++i) if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) { offset += sizeof (target_mgreg_t); } if (!cfg->arch.omit_fp) cfg->arch.reg_save_area_offset = -offset; if (sig_ret->type != MONO_TYPE_VOID) { switch (cinfo->ret.storage) { case ArgInIReg: case ArgInFloatSSEReg: case ArgInDoubleSSEReg: cfg->ret->opcode = OP_REGVAR; cfg->ret->inst_c0 = cinfo->ret.reg; cfg->ret->dreg = cinfo->ret.reg; break; case ArgValuetypeAddrInIReg: case ArgGsharedvtVariableInReg: /* The register is volatile */ cfg->vret_addr->opcode = OP_REGOFFSET; cfg->vret_addr->inst_basereg = cfg->frame_reg; if (cfg->arch.omit_fp) { cfg->vret_addr->inst_offset = offset; offset += 8; } else { offset += 8; cfg->vret_addr->inst_offset = -offset; } if (G_UNLIKELY (cfg->verbose_level > 1)) { printf ("vret_addr ="); mono_print_ins (cfg->vret_addr); } break; case ArgValuetypeInReg: /* Allocate a local to hold the result, the epilog will copy it to the correct place */ cfg->ret->opcode = OP_REGOFFSET; cfg->ret->inst_basereg = cfg->frame_reg; if (cfg->arch.omit_fp) { cfg->ret->inst_offset = offset; offset += cinfo->ret.pair_storage [1] == ArgNone ? 8 : 16; } else { offset += cinfo->ret.pair_storage [1] == ArgNone ? 8 : 16; cfg->ret->inst_offset = - offset; } break; default: g_assert_not_reached (); } } /* Allocate locals */ offsets = mono_allocate_stack_slots (cfg, cfg->arch.omit_fp ? 
FALSE: TRUE, &locals_stack_size, &locals_stack_align); if (locals_stack_align) { offset += (locals_stack_align - 1); offset &= ~(locals_stack_align - 1); } if (cfg->arch.omit_fp) { cfg->locals_min_stack_offset = offset; cfg->locals_max_stack_offset = offset + locals_stack_size; } else { cfg->locals_min_stack_offset = - (offset + locals_stack_size); cfg->locals_max_stack_offset = - offset; } for (i = cfg->locals_start; i < cfg->num_varinfo; i++) { if (offsets [i] != -1) { MonoInst *ins = cfg->varinfo [i]; ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; if (cfg->arch.omit_fp) ins->inst_offset = (offset + offsets [i]); else ins->inst_offset = - (offset + offsets [i]); //printf ("allocated local %d to ", i); mono_print_tree_nl (ins); } } offset += locals_stack_size; if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) { g_assert (!cfg->arch.omit_fp); g_assert (cinfo->sig_cookie.storage == ArgOnStack); cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET; } for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ins = cfg->args [i]; if (ins->opcode != OP_REGVAR) { ArgInfo *ainfo = &cinfo->args [i]; gboolean inreg = TRUE; /* FIXME: Allocate volatile arguments to registers */ if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) inreg = FALSE; /* * Under AMD64, all registers used to pass arguments to functions * are volatile across calls. * FIXME: Optimize this. */ if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg) || (ainfo->storage == ArgValuetypeInReg) || (ainfo->storage == ArgGSharedVtInReg)) inreg = FALSE; ins->opcode = OP_REGOFFSET; switch (ainfo->storage) { case ArgInIReg: case ArgInFloatSSEReg: case ArgInDoubleSSEReg: case ArgGSharedVtInReg: if (inreg) { ins->opcode = OP_REGVAR; ins->dreg = ainfo->reg; } break; case ArgOnStack: case ArgGSharedVtOnStack: g_assert (!cfg->arch.omit_fp); ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; ins->inst_offset = ainfo->offset + ARGS_OFFSET; break; case ArgValuetypeInReg: break; case ArgValuetypeAddrInIReg: case ArgValuetypeAddrOnStack: { MonoInst *indir; g_assert (!cfg->arch.omit_fp); g_assert (ainfo->storage == ArgValuetypeAddrInIReg || (ainfo->storage == ArgValuetypeAddrOnStack && ainfo->pair_storage [0] == ArgNone)); MONO_INST_NEW (cfg, indir, 0); indir->opcode = OP_REGOFFSET; if (ainfo->pair_storage [0] == ArgInIReg) { indir->inst_basereg = cfg->frame_reg; offset = ALIGN_TO (offset, sizeof (target_mgreg_t)); offset += sizeof (target_mgreg_t); indir->inst_offset = - offset; } else { indir->inst_basereg = cfg->frame_reg; indir->inst_offset = ainfo->offset + ARGS_OFFSET; } ins->opcode = OP_VTARG_ADDR; ins->inst_left = indir; break; } default: NOT_IMPLEMENTED; } if (!inreg && (ainfo->storage != ArgOnStack) && (ainfo->storage != ArgValuetypeAddrInIReg) && (ainfo->storage != ArgValuetypeAddrOnStack) && (ainfo->storage != ArgGSharedVtOnStack)) { ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; /* These arguments are saved to the stack in the prolog */ offset = ALIGN_TO (offset, sizeof (target_mgreg_t)); if (cfg->arch.omit_fp) { ins->inst_offset = offset; offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (target_mgreg_t) : sizeof (target_mgreg_t); // Arguments are not yet supported by the stack map creation code //cfg->locals_max_stack_offset = MAX (cfg->locals_max_stack_offset, offset); } else { offset += (ainfo->storage == ArgValuetypeInReg) ?
ainfo->nregs * sizeof (target_mgreg_t) : sizeof (target_mgreg_t); ins->inst_offset = - offset; //cfg->locals_min_stack_offset = MIN (cfg->locals_min_stack_offset, offset); } } } } cfg->stack_offset = offset; } void mono_arch_create_vars (MonoCompile *cfg) { MonoMethodSignature *sig; CallInfo *cinfo; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; if (cinfo->ret.storage == ArgValuetypeInReg) cfg->ret_var_is_local = TRUE; if (cinfo->ret.storage == ArgValuetypeAddrInIReg || cinfo->ret.storage == ArgGsharedvtVariableInReg) { cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG); if (G_UNLIKELY (cfg->verbose_level > 1)) { printf ("vret_addr = "); mono_print_ins (cfg->vret_addr); } } if (cfg->gen_sdb_seq_points) { MonoInst *ins; if (cfg->compile_aot) { MonoInst *ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.seq_point_info_var = ins; } ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.ss_tramp_var = ins; ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.bp_tramp_var = ins; } if (cfg->method->save_lmf) cfg->create_lmf_var = TRUE; if (cfg->method->save_lmf) { cfg->lmf_ir = TRUE; } } static void add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree) { MonoInst *ins; switch (storage) { case ArgInIReg: MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg_copy (cfg, tree->dreg); ins->sreg1 = tree->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE); break; case ArgInFloatSSEReg: MONO_INST_NEW (cfg, ins, OP_AMD64_SET_XMMREG_R4); ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = tree->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE); break; case ArgInDoubleSSEReg: MONO_INST_NEW (cfg, ins, OP_FMOVE); ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = tree->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE); break; default: g_assert_not_reached (); } } static int arg_storage_to_load_membase (ArgStorage storage) { switch (storage) { case ArgInIReg: #if defined(MONO_ARCH_ILP32) return OP_LOADI8_MEMBASE; #else return OP_LOAD_MEMBASE; #endif case ArgInDoubleSSEReg: return OP_LOADR8_MEMBASE; case ArgInFloatSSEReg: return OP_LOADR4_MEMBASE; default: g_assert_not_reached (); } return -1; } static void emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo) { MonoMethodSignature *tmp_sig; int sig_reg; if (call->tailcall) // FIXME tailcall is not always yet initialized. NOT_IMPLEMENTED; g_assert (cinfo->sig_cookie.storage == ArgOnStack); /* * mono_ArgIterator_Setup assumes the signature cookie is * passed first and all the arguments which were before it are * passed on the stack after the signature. So compensate by * passing a different signature. 
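 * Hedged example (added, call shape invented for exposition): for a vararg call M (1, __arglist (2.0, "s")), the cookie signature built below describes only the arguments at and after the sentinel position, i.e. (double, string).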
*/ tmp_sig = mono_metadata_signature_dup_full (m_class_get_image (cfg->method->klass), call->signature); tmp_sig->param_count -= call->signature->sentinelpos; tmp_sig->sentinelpos = 0; memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*)); sig_reg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, cinfo->sig_cookie.offset, sig_reg); } #ifdef ENABLE_LLVM static LLVMArgStorage arg_storage_to_llvm_arg_storage (MonoCompile *cfg, ArgStorage storage) { switch (storage) { case ArgInIReg: return LLVMArgInIReg; case ArgNone: return LLVMArgNone; case ArgGSharedVtInReg: case ArgGSharedVtOnStack: return LLVMArgGSharedVt; default: g_assert_not_reached (); return LLVMArgNone; } } LLVMCallInfo* mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig) { int i, n; CallInfo *cinfo; ArgInfo *ainfo; int j; LLVMCallInfo *linfo; MonoType *t, *sig_ret; n = sig->param_count + sig->hasthis; sig_ret = mini_get_underlying_type (sig->ret); cinfo = get_call_info (cfg->mempool, sig); linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n)); /* * LLVM always uses the native ABI while we use our own ABI, the * only difference is the handling of vtypes: * - we only pass/receive them in registers in some cases, and only * in 1 or 2 integer registers. */ switch (cinfo->ret.storage) { case ArgNone: linfo->ret.storage = LLVMArgNone; break; case ArgInIReg: case ArgInFloatSSEReg: case ArgInDoubleSSEReg: linfo->ret.storage = LLVMArgNormal; break; case ArgValuetypeInReg: { ainfo = &cinfo->ret; if (sig->pinvoke && (ainfo->pair_storage [0] == ArgInFloatSSEReg || ainfo->pair_storage [0] == ArgInDoubleSSEReg || ainfo->pair_storage [1] == ArgInFloatSSEReg || ainfo->pair_storage [1] == ArgInDoubleSSEReg)) { cfg->exception_message = g_strdup ("pinvoke + vtype ret"); cfg->disable_llvm = TRUE; return linfo; } linfo->ret.storage = LLVMArgVtypeInReg; for (j = 0; j < 2; ++j) linfo->ret.pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, ainfo->pair_storage [j]); break; } case ArgValuetypeAddrInIReg: case ArgGsharedvtVariableInReg: /* Vtype returned using a hidden argument */ linfo->ret.storage = LLVMArgVtypeRetAddr; linfo->vret_arg_index = cinfo->vret_arg_index; break; default: g_assert_not_reached (); break; } for (i = 0; i < n; ++i) { ainfo = cinfo->args + i; if (i >= sig->hasthis) t = sig->params [i - sig->hasthis]; else t = mono_get_int_type (); t = mini_type_get_underlying_type (t); linfo->args [i].storage = LLVMArgNone; switch (ainfo->storage) { case ArgInIReg: linfo->args [i].storage = LLVMArgNormal; break; case ArgInDoubleSSEReg: case ArgInFloatSSEReg: linfo->args [i].storage = LLVMArgNormal; break; case ArgOnStack: if (MONO_TYPE_ISSTRUCT (t)) linfo->args [i].storage = LLVMArgVtypeByVal; else linfo->args [i].storage = LLVMArgNormal; break; case ArgValuetypeInReg: if (sig->pinvoke && (ainfo->pair_storage [0] == ArgInFloatSSEReg || ainfo->pair_storage [0] == ArgInDoubleSSEReg || ainfo->pair_storage [1] == ArgInFloatSSEReg || ainfo->pair_storage [1] == ArgInDoubleSSEReg)) { cfg->exception_message = g_strdup ("pinvoke + vtypes"); cfg->disable_llvm = TRUE; return linfo; } linfo->args [i].storage = LLVMArgVtypeInReg; for (j = 0; j < 2; ++j) linfo->args [i].pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, ainfo->pair_storage [j]); break; case ArgGSharedVtInReg: case ArgGSharedVtOnStack: linfo->args [i].storage = 
LLVMArgGSharedVt; break; case ArgValuetypeAddrInIReg: case ArgValuetypeAddrOnStack: linfo->args [i].storage = LLVMArgVtypeAddr; break; default: cfg->exception_message = g_strdup ("ainfo->storage"); cfg->disable_llvm = TRUE; break; } } return linfo; } #endif void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call) { MonoInst *arg, *in; MonoMethodSignature *sig; int i, n; CallInfo *cinfo; ArgInfo *ainfo; sig = call->signature; n = sig->param_count + sig->hasthis; cinfo = get_call_info (cfg->mempool, sig); if (COMPILE_LLVM (cfg)) { /* We shouldn't be called in the llvm case */ cfg->disable_llvm = TRUE; return; } /* * Emit all arguments which are passed on the stack to prevent register * allocation problems. */ for (i = 0; i < n; ++i) { MonoType *t; ainfo = cinfo->args + i; in = call->args [i]; if (sig->hasthis && i == 0) t = mono_get_object_type (); else t = sig->params [i - sig->hasthis]; t = mini_get_underlying_type (t); //XXX what about ArgGSharedVtOnStack here? if (ainfo->storage == ArgOnStack && !MONO_TYPE_ISSTRUCT (t)) { if (!m_type_is_byref (t)) { if (t->type == MONO_TYPE_R4) MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg); else if (t->type == MONO_TYPE_R8) MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg); else MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg); } else { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg); } if (cfg->compute_gc_maps) { MonoInst *def; EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, t); } } } /* * Emit all parameters passed in registers in non-reverse order for better readability * and to help the optimization in emit_prolog (). */ for (i = 0; i < n; ++i) { ainfo = cinfo->args + i; in = call->args [i]; if (ainfo->storage == ArgInIReg) add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in); } for (i = n - 1; i >= 0; --i) { MonoType *t; ainfo = cinfo->args + i; in = call->args [i]; if (sig->hasthis && i == 0) t = mono_get_object_type (); else t = sig->params [i - sig->hasthis]; t = mini_get_underlying_type (t); switch (ainfo->storage) { case ArgInIReg: /* Already done */ break; case ArgInFloatSSEReg: case ArgInDoubleSSEReg: add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in); break; case ArgOnStack: case ArgValuetypeInReg: case ArgValuetypeAddrInIReg: case ArgValuetypeAddrOnStack: case ArgGSharedVtInReg: case ArgGSharedVtOnStack: { if (ainfo->storage == ArgOnStack && !MONO_TYPE_ISSTRUCT (t)) /* Already emitted above */ break; guint32 align; guint32 size; if (sig->pinvoke && !sig->marshalling_disabled) size = mono_type_native_stack_size (t, &align); else { /* * Other backends use mono_type_stack_size (), but that * aligns the size to 8, which is larger than the size of * the source, leading to reads of invalid memory if the * source is at the end of address space. 
*/ size = mono_class_value_size (mono_class_from_mono_type_internal (t), &align); } if (size >= 10000) { /* Avoid asserts in emit_memcpy () */ mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Passing an argument of size '%d'.", size)); /* Continue normally */ } if (size > 0 || ainfo->pass_empty_struct) { MONO_INST_NEW (cfg, arg, OP_OUTARG_VT); arg->sreg1 = in->dreg; arg->klass = mono_class_from_mono_type_internal (t); arg->backend.size = size; arg->inst_p0 = call; arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, arg); } break; } default: g_assert_not_reached (); } if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) /* Emit the signature cookie just before the implicit arguments */ emit_sig_cookie (cfg, call, cinfo); } /* Handle the case where there are no implicit arguments */ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) emit_sig_cookie (cfg, call, cinfo); switch (cinfo->ret.storage) { case ArgValuetypeInReg: if (cinfo->ret.pair_storage [0] == ArgInIReg && cinfo->ret.pair_storage [1] == ArgNone) { /* * Tell the JIT to use a more efficient calling convention: call using * OP_CALL, compute the result location after the call, and save the * result there. */ call->vret_in_reg = TRUE; /* * Nullify the instruction computing the vret addr to enable * future optimizations. */ if (call->vret_var) NULLIFY_INS (call->vret_var); } else { if (call->tailcall) NOT_IMPLEMENTED; /* * The valuetype is in RAX:RDX after the call, need to be copied to * the stack. Push the address here, so the call instruction can * access it. */ if (!cfg->arch.vret_addr_loc) { cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); /* Prevent it from being register allocated or optimized away */ cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE; } MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg); } break; case ArgValuetypeAddrInIReg: case ArgGsharedvtVariableInReg: { MonoInst *vtarg; MONO_INST_NEW (cfg, vtarg, OP_MOVE); vtarg->sreg1 = call->vret_var->dreg; vtarg->dreg = mono_alloc_preg (cfg); MONO_ADD_INS (cfg->cbb, vtarg); mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE); break; } default: break; } if (cfg->method->save_lmf) { MONO_INST_NEW (cfg, arg, OP_AMD64_SAVE_SP_TO_LMF); MONO_ADD_INS (cfg->cbb, arg); } call->stack_usage = cinfo->stack_usage; } void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src) { MonoInst *arg; MonoCallInst *call = (MonoCallInst*)ins->inst_p0; ArgInfo *ainfo = (ArgInfo*)ins->inst_p1; int size = ins->backend.size; switch (ainfo->storage) { case ArgValuetypeInReg: { MonoInst *load; int part; for (part = 0; part < 2; ++part) { if (ainfo->pair_storage [part] == ArgNone) continue; if (ainfo->pass_empty_struct) { //Pass empty struct value as 0 on platforms representing empty structs as 1 byte. 
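/*
 * A minimal sketch of what the branch below produces, assuming the empty
 * struct's register slot was assigned to an integer register by get_call_info:
 * since there is no real data to read, an 'iconst reg <- 0' is materialized
 * instead of a load from src->dreg, and add_outarg_reg then moves that zero
 * into the actual argument register.
 */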
NEW_ICONST (cfg, load, 0); } else { MONO_INST_NEW (cfg, load, arg_storage_to_load_membase (ainfo->pair_storage [part])); load->inst_basereg = src->dreg; load->inst_offset = part * sizeof (target_mgreg_t); switch (ainfo->pair_storage [part]) { case ArgInIReg: load->dreg = mono_alloc_ireg (cfg); break; case ArgInDoubleSSEReg: case ArgInFloatSSEReg: load->dreg = mono_alloc_freg (cfg); break; default: g_assert_not_reached (); } } MONO_ADD_INS (cfg->cbb, load); add_outarg_reg (cfg, call, ainfo->pair_storage [part], ainfo->pair_regs [part], load); } break; } case ArgValuetypeAddrInIReg: case ArgValuetypeAddrOnStack: { MonoInst *vtaddr, *load; g_assert (ainfo->storage == ArgValuetypeAddrInIReg || (ainfo->storage == ArgValuetypeAddrOnStack && ainfo->pair_storage [0] == ArgNone)); vtaddr = mono_compile_create_var (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL); vtaddr->backend.is_pinvoke = call->signature->pinvoke && !call->signature->marshalling_disabled; MONO_INST_NEW (cfg, load, OP_LDADDR); cfg->has_indirection = TRUE; load->inst_p0 = vtaddr; vtaddr->flags |= MONO_INST_INDIRECT; load->type = STACK_MP; load->klass = vtaddr->klass; load->dreg = mono_alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, load); mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, TARGET_SIZEOF_VOID_P); if (ainfo->pair_storage [0] == ArgInIReg) { MONO_INST_NEW (cfg, arg, OP_AMD64_LEA_MEMBASE); arg->dreg = mono_alloc_ireg (cfg); arg->sreg1 = load->dreg; arg->inst_imm = 0; MONO_ADD_INS (cfg->cbb, arg); mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, ainfo->pair_regs [0], FALSE); } else { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, load->dreg); } break; } case ArgGSharedVtInReg: /* Pass by addr */ mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE); break; case ArgGSharedVtOnStack: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, src->dreg); break; default: if (size == 8) { int dreg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 0); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, dreg); } else if (size <= 40) { mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, TARGET_SIZEOF_VOID_P); } else { // FIXME: Code growth mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, TARGET_SIZEOF_VOID_P); } if (cfg->compute_gc_maps) { MonoInst *def; EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, m_class_get_byval_arg (ins->klass)); } } } void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val) { MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret); if (ret->type == MONO_TYPE_R4) { if (COMPILE_LLVM (cfg)) MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg); else MONO_EMIT_NEW_UNALU (cfg, OP_AMD64_SET_XMMREG_R4, cfg->ret->dreg, val->dreg); return; } else if (ret->type == MONO_TYPE_R8) { MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg); return; } MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg); } #endif /* DISABLE_JIT */ #define EMIT_COND_BRANCH(ins,cond,sign) \ if (ins->inst_true_bb->native_offset) { \ x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \ } else { \ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \ if (optimize_branch_pred && \ x86_is_imm8 (ins->inst_true_bb->max_offset - offset)) \ x86_branch8 (code, cond, 0, sign); \ else \ x86_branch32 (code, cond, 
0, sign); \ } typedef struct { MonoMethodSignature *sig; CallInfo *cinfo; int nstack_args, nullable_area; } ArchDynCallInfo; static gboolean dyn_call_supported (MonoMethodSignature *sig, CallInfo *cinfo) { int i; switch (cinfo->ret.storage) { case ArgNone: case ArgInIReg: case ArgInFloatSSEReg: case ArgInDoubleSSEReg: case ArgValuetypeAddrInIReg: case ArgValuetypeInReg: break; default: return FALSE; } for (i = 0; i < cinfo->nargs; ++i) { ArgInfo *ainfo = &cinfo->args [i]; switch (ainfo->storage) { case ArgInIReg: case ArgInFloatSSEReg: case ArgInDoubleSSEReg: case ArgValuetypeInReg: case ArgValuetypeAddrInIReg: case ArgValuetypeAddrOnStack: case ArgOnStack: break; default: return FALSE; } } return TRUE; } /* * mono_arch_dyn_call_prepare: * * Return a pointer to an arch-specific structure which contains information * needed by mono_arch_start_dyn_call (). Return NULL if OP_DYN_CALL is not * supported for SIG. * This function is equivalent to ffi_prep_cif in libffi. */ MonoDynCallInfo* mono_arch_dyn_call_prepare (MonoMethodSignature *sig) { ArchDynCallInfo *info; CallInfo *cinfo; int i, aindex; cinfo = get_call_info (NULL, sig); if (!dyn_call_supported (sig, cinfo)) { g_free (cinfo); return NULL; } info = g_new0 (ArchDynCallInfo, 1); // FIXME: Preprocess the info to speed up get_dyn_call_args (). info->sig = sig; info->cinfo = cinfo; info->nstack_args = 0; for (i = 0; i < cinfo->nargs; ++i) { ArgInfo *ainfo = &cinfo->args [i]; switch (ainfo->storage) { case ArgOnStack: case ArgValuetypeAddrOnStack: info->nstack_args = MAX (info->nstack_args, (ainfo->offset / sizeof (target_mgreg_t)) + (ainfo->arg_size / sizeof (target_mgreg_t))); break; default: break; } } for (aindex = 0; aindex < sig->param_count; aindex++) { MonoType *t = sig->params [aindex]; ArgInfo *ainfo = &cinfo->args [aindex + sig->hasthis]; if (m_type_is_byref (t)) continue; switch (t->type) { case MONO_TYPE_GENERICINST: if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) { MonoClass *klass = mono_class_from_mono_type_internal (t); int size; if (!(ainfo->storage == ArgValuetypeInReg || ainfo->storage == ArgOnStack)) { /* Nullables need a temporary buffer, it's stored at the end of DynCallArgs.regs after the stack args */ size = mono_class_value_size (klass, NULL); info->nullable_area += size; } } break; default: break; } } info->nullable_area = ALIGN_TO (info->nullable_area, 16); /* Align to 16 bytes */ if (info->nstack_args & 1) info->nstack_args ++; return (MonoDynCallInfo*)info; } /* * mono_arch_dyn_call_free: * * Free a MonoDynCallInfo structure. */ void mono_arch_dyn_call_free (MonoDynCallInfo *info) { ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info; g_free (ainfo->cinfo); g_free (ainfo); } int mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info) { ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info; /* Extend the 'regs' field dynamically */ return sizeof (DynCallArgs) + (ainfo->nstack_args * sizeof (target_mgreg_t)) + ainfo->nullable_area; } #define PTR_TO_GREG(ptr) ((host_mgreg_t)(ptr)) #define GREG_TO_PTR(greg) ((gpointer)(greg)) /* * mono_arch_start_dyn_call: * * Convert the arguments ARGS to a format which can be passed to OP_DYN_CALL, and * store the result into BUF. * ARGS should be an array of pointers pointing to the arguments. * RET should point to a memory buffer large enough to hold the result of the * call.
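 * A minimal sketch of the expected calling sequence (hypothetical caller):
 *
 *   MonoDynCallInfo *info = mono_arch_dyn_call_prepare (sig);
 *   guint8 *buf = g_alloca (mono_arch_dyn_call_get_buf_size (info));
 *   mono_arch_start_dyn_call (info, args, ret, buf);
 *   ... invoke the OP_DYN_CALL trampoline on buf ...
 *   mono_arch_finish_dyn_call (info, buf);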
* This function should be as fast as possible, any work which does not depend * on the actual values of the arguments should be done in * mono_arch_dyn_call_prepare (). * start_dyn_call + OP_DYN_CALL + finish_dyn_call is equivalent to ffi_call in * libffi. */ void mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf) { ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info; DynCallArgs *p = (DynCallArgs*)buf; int arg_index, greg, i, pindex; MonoMethodSignature *sig = dinfo->sig; int buffer_offset = 0; guint8 *nullable_buffer; static int general_param_reg_to_index [MONO_MAX_IREGS]; static int float_param_reg_to_index [MONO_MAX_FREGS]; static gboolean param_reg_to_index_inited; if (!param_reg_to_index_inited) { for (i = 0; i < PARAM_REGS; ++i) general_param_reg_to_index [param_regs[i]] = i; for (i = 0; i < FLOAT_PARAM_REGS; ++i) float_param_reg_to_index [float_param_regs[i]] = i; mono_memory_barrier (); param_reg_to_index_inited = 1; } else { mono_memory_barrier (); } p->res = 0; p->ret = ret; p->nstack_args = dinfo->nstack_args; arg_index = 0; greg = 0; pindex = 0; /* Stored after the stack arguments */ nullable_buffer = (guint8*)&(p->regs [PARAM_REGS + dinfo->nstack_args]); if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) { p->regs [greg ++] = PTR_TO_GREG(*(args [arg_index ++])); if (!sig->hasthis) pindex = 1; } if (dinfo->cinfo->ret.storage == ArgValuetypeAddrInIReg || dinfo->cinfo->ret.storage == ArgGsharedvtVariableInReg) p->regs [greg ++] = PTR_TO_GREG (ret); for (; pindex < sig->param_count; pindex++) { MonoType *t = mini_get_underlying_type (sig->params [pindex]); gpointer *arg = args [arg_index ++]; ArgInfo *ainfo = &dinfo->cinfo->args [pindex + sig->hasthis]; int slot; if (ainfo->storage == ArgOnStack || ainfo->storage == ArgValuetypeAddrOnStack) { slot = PARAM_REGS + (ainfo->offset / sizeof (target_mgreg_t)); } else if (ainfo->storage == ArgValuetypeAddrInIReg) { g_assert (ainfo->pair_storage [0] == ArgInIReg && ainfo->pair_storage [1] == ArgNone); slot = general_param_reg_to_index [ainfo->pair_regs [0]]; } else if (ainfo->storage == ArgInFloatSSEReg || ainfo->storage == ArgInDoubleSSEReg) { slot = float_param_reg_to_index [ainfo->reg]; } else { slot = general_param_reg_to_index [ainfo->reg]; } if (m_type_is_byref (t)) { p->regs [slot] = PTR_TO_GREG (*(arg)); continue; } switch (t->type) { case MONO_TYPE_OBJECT: case MONO_TYPE_PTR: case MONO_TYPE_I: case MONO_TYPE_U: #if !defined(MONO_ARCH_ILP32) case MONO_TYPE_I8: case MONO_TYPE_U8: #endif p->regs [slot] = PTR_TO_GREG (*(arg)); break; #if defined(MONO_ARCH_ILP32) case MONO_TYPE_I8: case MONO_TYPE_U8: p->regs [slot] = *(guint64*)(arg); break; #endif case MONO_TYPE_U1: p->regs [slot] = *(guint8*)(arg); break; case MONO_TYPE_I1: p->regs [slot] = *(gint8*)(arg); break; case MONO_TYPE_I2: p->regs [slot] = *(gint16*)(arg); break; case MONO_TYPE_U2: p->regs [slot] = *(guint16*)(arg); break; case MONO_TYPE_I4: p->regs [slot] = *(gint32*)(arg); break; case MONO_TYPE_U4: p->regs [slot] = *(guint32*)(arg); break; case MONO_TYPE_R4: { double d; *(float*)&d = *(float*)(arg); if (ainfo->storage == ArgOnStack) { *(double *)(p->regs + slot) = d; } else { p->has_fp = 1; p->fregs [slot] = d; } break; } case MONO_TYPE_R8: if (ainfo->storage == ArgOnStack) { *(double *)(p->regs + slot) = *(double*)(arg); } else { p->has_fp = 1; p->fregs [slot] = *(double*)(arg); } break; case MONO_TYPE_GENERICINST: if (MONO_TYPE_IS_REFERENCE (t)) { p->regs [slot] = PTR_TO_GREG (*(arg)); break; } else if (t->type == 
MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) { MonoClass *klass = mono_class_from_mono_type_internal (t); guint8 *nullable_buf; int size; size = mono_class_value_size (klass, NULL); if (ainfo->storage == ArgValuetypeInReg || ainfo->storage == ArgOnStack) { nullable_buf = g_alloca (size); } else { nullable_buf = nullable_buffer + buffer_offset; buffer_offset += size; g_assert (buffer_offset <= dinfo->nullable_area); } /* The argument pointed to by arg is either a boxed vtype or null */ mono_nullable_init (nullable_buf, (MonoObject*)arg, klass); arg = (gpointer*)nullable_buf; /* Fall through */ } else { /* Fall through */ } case MONO_TYPE_VALUETYPE: { switch (ainfo->storage) { case ArgValuetypeInReg: for (i = 0; i < 2; ++i) { switch (ainfo->pair_storage [i]) { case ArgNone: break; case ArgInIReg: slot = general_param_reg_to_index [ainfo->pair_regs [i]]; p->regs [slot] = ((target_mgreg_t*)(arg))[i]; break; case ArgInFloatSSEReg: { double d; p->has_fp = 1; slot = float_param_reg_to_index [ainfo->pair_regs [i]]; *(float*)&d = ((float*)(arg))[i]; p->fregs [slot] = d; break; } case ArgInDoubleSSEReg: p->has_fp = 1; slot = float_param_reg_to_index [ainfo->pair_regs [i]]; p->fregs [slot] = ((double*)(arg))[i]; break; default: g_assert_not_reached (); break; } } break; case ArgValuetypeAddrInIReg: case ArgValuetypeAddrOnStack: // In DYNCALL use case value types are already copied when included in parameter array. // Currently no need to make an extra temporary value type on stack for this use case. p->regs [slot] = (target_mgreg_t)arg; break; case ArgOnStack: for (i = 0; i < ainfo->arg_size / 8; ++i) p->regs [slot + i] = ((target_mgreg_t*)(arg))[i]; break; default: g_assert_not_reached (); break; } break; } default: g_assert_not_reached (); } } } /* * mono_arch_finish_dyn_call: * * Store the result of a dyn call into the return value buffer passed to * start_dyn_call (). * This function should be as fast as possible, any work which does not depend * on the actual values of the arguments should be done in * mono_arch_dyn_call_prepare ().
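 * (Sketch of the buffer layout, as implied by mono_arch_dyn_call_get_buf_size ():
 * a DynCallArgs header, then nstack_args stack slots, then the nullable area.)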
*/ void mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf) { ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info; MonoMethodSignature *sig = dinfo->sig; DynCallArgs *dargs = (DynCallArgs*)buf; guint8 *ret = dargs->ret; host_mgreg_t res = dargs->res; MonoType *sig_ret = mini_get_underlying_type (sig->ret); int i; switch (sig_ret->type) { case MONO_TYPE_VOID: *(gpointer*)ret = NULL; break; case MONO_TYPE_OBJECT: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: *(gpointer*)ret = GREG_TO_PTR (res); break; case MONO_TYPE_I1: *(gint8*)ret = res; break; case MONO_TYPE_U1: *(guint8*)ret = res; break; case MONO_TYPE_I2: *(gint16*)ret = res; break; case MONO_TYPE_U2: *(guint16*)ret = res; break; case MONO_TYPE_I4: *(gint32*)ret = res; break; case MONO_TYPE_U4: *(guint32*)ret = res; break; case MONO_TYPE_I8: *(gint64*)ret = res; break; case MONO_TYPE_U8: *(guint64*)ret = res; break; case MONO_TYPE_R4: *(float*)ret = *(float*)&(dargs->fregs [0]); break; case MONO_TYPE_R8: *(double*)ret = dargs->fregs [0]; break; case MONO_TYPE_GENERICINST: if (MONO_TYPE_IS_REFERENCE (sig_ret)) { *(gpointer*)ret = GREG_TO_PTR(res); break; } else { /* Fall through */ } case MONO_TYPE_VALUETYPE: if (dinfo->cinfo->ret.storage == ArgValuetypeAddrInIReg || dinfo->cinfo->ret.storage == ArgGsharedvtVariableInReg) { /* Nothing to do */ } else { ArgInfo *ainfo = &dinfo->cinfo->ret; g_assert (ainfo->storage == ArgValuetypeInReg); for (i = 0; i < 2; ++i) { switch (ainfo->pair_storage [0]) { case ArgInIReg: ((host_mgreg_t*)ret)[i] = res; break; case ArgInDoubleSSEReg: ((double*)ret)[i] = dargs->fregs [i]; break; case ArgNone: break; default: g_assert_not_reached (); break; } } } break; default: g_assert_not_reached (); } } #undef PTR_TO_GREG #undef GREG_TO_PTR /* emit an exception if condition is fail */ #define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \ do { \ MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \ if (tins == NULL) { \ mono_add_patch_info (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_EXC, exc_name); \ x86_branch32 (code, cond, 0, signed); \ } else { \ EMIT_COND_BRANCH (tins, cond, signed); \ } \ } while (0); #define EMIT_SSE2_FPFUNC(code, op, dreg, sreg1) do { \ amd64_movsd_membase_reg (code, AMD64_RSP, -8, (sreg1)); \ amd64_fld_membase (code, AMD64_RSP, -8, TRUE); \ amd64_ ##op (code); \ amd64_fst_membase (code, AMD64_RSP, -8, TRUE, TRUE); \ amd64_movsd_reg_membase (code, (dreg), AMD64_RSP, -8); \ } while (0); #ifndef DISABLE_JIT static guint8* emit_call (MonoCompile *cfg, MonoCallInst *call, guint8 *code, MonoJitICallId jit_icall_id) { gboolean no_patch = FALSE; MonoJumpInfoTarget patch; // FIXME? This is similar to mono_call_to_patch, except it favors MONO_PATCH_INFO_ABS over call->jit_icall_id. if (jit_icall_id) { g_assert (!call); patch.type = MONO_PATCH_INFO_JIT_ICALL_ID; patch.target = GUINT_TO_POINTER (jit_icall_id); } else if (call->inst.flags & MONO_INST_HAS_METHOD) { patch.type = MONO_PATCH_INFO_METHOD; patch.target = call->method; } else { patch.type = MONO_PATCH_INFO_ABS; patch.target = call->fptr; } /* * FIXME: Add support for thunks */ { gboolean near_call = FALSE; /* * Indirect calls are expensive so try to make a near call if possible. * The caller memory is allocated by the code manager so it is * guaranteed to be at a 32 bit offset. 
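 * A near call is the 5 byte 'call rel32' form, which can only reach targets
 * within a signed 32 bit displacement (roughly +/-2GB) of the call site; the
 * far path below instead materializes the target in GP_SCRATCH_REG and emits
 * an indirect 'call reg'.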
*/ if (patch.type != MONO_PATCH_INFO_ABS) { /* The target is in memory allocated using the code manager */ near_call = TRUE; if (patch.type == MONO_PATCH_INFO_METHOD) { MonoMethod* const method = call->method; if (m_class_get_image (method->klass)->aot_module) /* The callee might be an AOT method */ near_call = FALSE; if (method->dynamic) /* The target is in malloc-ed memory */ near_call = FALSE; } else { /* * The call might go directly to a native function without * the wrapper. */ MonoJitICallInfo * const mi = mono_find_jit_icall_info (jit_icall_id); gconstpointer target = mono_icall_get_wrapper (mi); if ((((guint64)target) >> 32) != 0) near_call = FALSE; } } else { MonoJumpInfo *jinfo = NULL; if (cfg->abs_patches) jinfo = (MonoJumpInfo *)g_hash_table_lookup (cfg->abs_patches, call->fptr); if (jinfo) { if (jinfo->type == MONO_PATCH_INFO_JIT_ICALL_ADDR) { MonoJitICallInfo *mi = mono_find_jit_icall_info (jinfo->data.jit_icall_id); if (mi && (((guint64)mi->func) >> 32) == 0) near_call = TRUE; no_patch = TRUE; } else { /* * This is not really an optimization, but required because the * generic class init trampolines use R11 to pass the vtable. */ near_call = TRUE; } } else { jit_icall_id = call->jit_icall_id; if (jit_icall_id) { MonoJitICallInfo const *info = mono_find_jit_icall_info (jit_icall_id); // Change patch from MONO_PATCH_INFO_ABS to MONO_PATCH_INFO_JIT_ICALL_ID. patch.type = MONO_PATCH_INFO_JIT_ICALL_ID; patch.target = GUINT_TO_POINTER (jit_icall_id); if (info->func == info->wrapper) { /* No wrapper */ if ((((guint64)info->func) >> 32) == 0) near_call = TRUE; } else { /* ?See the comment in mono_codegen ()? */ near_call = TRUE; } } else if ((((guint64)patch.target) >> 32) == 0) { near_call = TRUE; no_patch = TRUE; } } } if (cfg->method->dynamic) /* These methods are allocated using malloc */ near_call = FALSE; #ifdef MONO_ARCH_NOMAP32BIT near_call = FALSE; #endif /* The 64bit XEN kernel does not honour the MAP_32BIT flag. (#522894) */ if (optimize_for_xen) near_call = FALSE; if (cfg->compile_aot) { near_call = TRUE; no_patch = TRUE; } if (near_call) { /* * Align the call displacement to an address divisible by 4 so it does * not span cache lines. This is required for code patching to work on SMP * systems. 
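 * Worked example: the rel32 displacement starts 1 byte after the call opcode,
 * which is why (code + 1) is aligned below rather than code itself. If
 * code + 1 would land at offset 0x13 (0x13 % 4 == 3), pad_size is 4 - 3 = 1,
 * and one padding byte moves the displacement to the aligned offset 0x14.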
*/ if (!no_patch && ((guint32)(code + 1 - cfg->native_code) % 4) != 0) { guint32 pad_size = 4 - ((guint32)(code + 1 - cfg->native_code) % 4); amd64_padding (code, pad_size); } mono_add_patch_info (cfg, code - cfg->native_code, patch.type, patch.target); amd64_call_code (code, 0); } else { if (!no_patch && ((guint32)(code + 2 - cfg->native_code) % 8) != 0) { guint32 pad_size = 8 - ((guint32)(code + 2 - cfg->native_code) % 8); amd64_padding (code, pad_size); g_assert ((guint64)(code + 2 - cfg->native_code) % 8 == 0); } mono_add_patch_info (cfg, code - cfg->native_code, patch.type, patch.target); amd64_set_reg_template (code, GP_SCRATCH_REG); amd64_call_reg (code, GP_SCRATCH_REG); } } set_code_cursor (cfg, code); return code; } static int store_membase_imm_to_store_membase_reg (int opcode) { switch (opcode) { case OP_STORE_MEMBASE_IMM: return OP_STORE_MEMBASE_REG; case OP_STOREI4_MEMBASE_IMM: return OP_STOREI4_MEMBASE_REG; case OP_STOREI8_MEMBASE_IMM: return OP_STOREI8_MEMBASE_REG; } return -1; } #define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB) || ((opcode) == OP_ISBB_IMM))) /* * mono_arch_peephole_pass_1: * * Perform peephole opts which should/can be performed before local regalloc */ void mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *n; MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) { MonoInst *last_ins = mono_inst_prev (ins, FILTER_IL_SEQ_POINT); switch (ins->opcode) { case OP_ADD_IMM: case OP_IADD_IMM: case OP_LADD_IMM: if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS) && (ins->inst_imm > 0)) { /* * X86_LEA is like ADD, but doesn't have the * sreg1==dreg restriction. inst_imm > 0 is needed since LEA sign-extends * its operand to 64 bit. */ ins->opcode = ins->opcode == OP_IADD_IMM ? OP_X86_LEA_MEMBASE : OP_AMD64_LEA_MEMBASE; ins->inst_basereg = ins->sreg1; } break; case OP_LXOR: case OP_IXOR: if ((ins->sreg1 == ins->sreg2) && (ins->sreg1 == ins->dreg)) { MonoInst *ins2; /* * Replace STORE_MEMBASE_IMM 0 with STORE_MEMBASE_REG since * the latter has length 2-3 instead of 6 (reverse constant * propagation). These instruction sequences are very common * in the initlocals bblock. 
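 * One illustrative 64 bit encoding of the rewrite (assuming a [rbp+0x10]
 * operand and that rax was just zeroed by the xor):
 *   48 C7 45 10 00 00 00 00   mov qword [rbp+0x10], 0    (8 bytes)
 *   48 89 45 10               mov [rbp+0x10], rax        (4 bytes)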
*/ for (ins2 = ins->next; ins2; ins2 = ins2->next) { if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM)) && (ins2->inst_imm == 0)) { ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode); ins2->sreg1 = ins->dreg; } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG)) { /* Continue */ } else if (((ins2->opcode == OP_ICONST) || (ins2->opcode == OP_I8CONST)) && (ins2->dreg == ins->dreg) && (ins2->inst_c0 == 0)) { NULLIFY_INS (ins2); /* Continue */ } else if (ins2->opcode == OP_IL_SEQ_POINT) { /* Continue */ } else { break; } } } break; case OP_COMPARE_IMM: case OP_LCOMPARE_IMM: /* OP_COMPARE_IMM (reg, 0) * --> * OP_AMD64_TEST_NULL (reg) */ if (!ins->inst_imm) ins->opcode = OP_AMD64_TEST_NULL; break; case OP_ICOMPARE_IMM: if (!ins->inst_imm) ins->opcode = OP_X86_TEST_NULL; break; case OP_AMD64_ICOMPARE_MEMBASE_IMM: /* * OP_STORE_MEMBASE_REG reg, offset(basereg) * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm * --> * OP_STORE_MEMBASE_REG reg, offset(basereg) * OP_COMPARE_IMM reg, imm * * Note: if imm = 0 then OP_COMPARE_IMM replaced with OP_X86_TEST_NULL */ if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { ins->opcode = OP_ICOMPARE_IMM; ins->sreg1 = last_ins->sreg1; /* check if we can remove cmp reg,0 with test null */ if (!ins->inst_imm) ins->opcode = OP_X86_TEST_NULL; } break; } mono_peephole_ins (bb, ins); } } void mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *n; MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) { switch (ins->opcode) { case OP_ICONST: case OP_I8CONST: { MonoInst *next = mono_inst_next (ins, FILTER_IL_SEQ_POINT); /* reg = 0 -> XOR (reg, reg) */ /* XOR sets cflags on x86, so we can't do it always */ if (ins->inst_c0 == 0 && (!next || (next && INST_IGNORES_CFLAGS (next->opcode)))) { ins->opcode = OP_LXOR; ins->sreg1 = ins->dreg; ins->sreg2 = ins->dreg; /* Fall through */ } else { break; } } case OP_LXOR: /* * Use IXOR to avoid a rex prefix if possible. The cpu will zero extend the * 0 result into 64 bits. */ if ((ins->sreg1 == ins->sreg2) && (ins->sreg1 == ins->dreg)) { ins->opcode = OP_IXOR; } /* Fall through */ case OP_IXOR: if ((ins->sreg1 == ins->sreg2) && (ins->sreg1 == ins->dreg)) { MonoInst *ins2; /* * Replace STORE_MEMBASE_IMM 0 with STORE_MEMBASE_REG since * the latter has length 2-3 instead of 6 (reverse constant * propagation). These instruction sequences are very common * in the initlocals bblock.
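 * (Same rewrite as in mono_arch_peephole_pass_1 () above; this pass 2 variant
 * additionally skips over OP_STOREI4_MEMBASE_REG stores and the liverange/GC
 * liveness marker opcodes while scanning forward.)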
*/ for (ins2 = ins->next; ins2; ins2 = ins2->next) { if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM)) && (ins2->inst_imm == 0)) { ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode); ins2->sreg1 = ins->dreg; } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_REG) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG) || (ins2->opcode == OP_LIVERANGE_START) || (ins2->opcode == OP_GC_LIVENESS_DEF) || (ins2->opcode == OP_GC_LIVENESS_USE)) { /* Continue */ } else if (((ins2->opcode == OP_ICONST) || (ins2->opcode == OP_I8CONST)) && (ins2->dreg == ins->dreg) && (ins2->inst_c0 == 0)) { NULLIFY_INS (ins2); /* Continue */ } else if (ins2->opcode == OP_IL_SEQ_POINT) { /* Continue */ } else { break; } } } break; case OP_IADD_IMM: if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1)) ins->opcode = OP_X86_INC_REG; break; case OP_ISUB_IMM: if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1)) ins->opcode = OP_X86_DEC_REG; break; } mono_peephole_ins (bb, ins); } } #define NEW_INS(cfg,ins,dest,op) do { \ MONO_INST_NEW ((cfg), (dest), (op)); \ (dest)->cil_code = (ins)->cil_code; \ mono_bblock_insert_before_ins (bb, ins, (dest)); \ } while (0) #define NEW_SIMD_INS(cfg,ins,dest,op,d,s1,s2) do { \ MONO_INST_NEW ((cfg), (dest), (op)); \ (dest)->cil_code = (ins)->cil_code; \ (dest)->dreg = d; \ (dest)->sreg1 = s1; \ (dest)->sreg2 = s2; \ (dest)->type = STACK_VTYPE; \ (dest)->klass = ins->klass; \ mono_bblock_insert_before_ins (bb, ins, (dest)); \ } while (0) static int simd_type_to_comp_op (int t) { switch (t) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_PCMPEQB; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_PCMPEQW; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_PCMPEQD; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_PCMPEQQ; // SSE 4.1 case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_PCMPEQQ; // SSE 4.1 #else return OP_PCMPEQD; #endif default: g_assert_not_reached (); return -1; } } static int simd_type_to_sub_op (int t) { switch (t) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_PSUBB; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_PSUBW; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_PSUBD; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_PSUBQ; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_PSUBQ; #else return OP_PSUBD; #endif default: g_assert_not_reached (); return -1; } } static int simd_type_to_shl_op (int t) { switch (t) { case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_PSHLW; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_PSHLD; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_PSHLQ; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_PSHLQ; #else return OP_PSHLD; #endif default: g_assert_not_reached (); return -1; } } static int simd_type_to_gt_op (int t) { switch (t) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_PCMPGTB; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_PCMPGTW; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_PCMPGTD; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_PCMPGTQ; // SSE 4.2 case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_PCMPGTQ; // SSE 4.2 #else return OP_PCMPGTD; #endif default: g_assert_not_reached (); return -1; } } static int simd_type_to_max_un_op (int t) { switch (t) { case MONO_TYPE_U1:
return OP_PMAXB_UN; case MONO_TYPE_U2: return OP_PMAXW_UN; // SSE 4.1 case MONO_TYPE_U4: return OP_PMAXD_UN; // SSE 4.1 //case MONO_TYPE_U8: // return OP_PMAXQ_UN; // AVX #if TARGET_SIZEOF_VOID_P == 8 //case MONO_TYPE_U: // return OP_PMAXQ_UN; // AVX #else case MONO_TYPE_U: return OP_PMAXD_UN; // SSE 4.1 #endif default: g_assert_not_reached (); return -1; } } static int simd_type_to_add_op (int t) { switch (t) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_PADDB; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_PADDW; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_PADDD; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_PADDQ; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_PADDQ; #else return OP_PADDD; #endif default: g_assert_not_reached (); return -1; } } static int simd_type_to_min_op (int t) { switch (t) { case MONO_TYPE_I1: return OP_PMINB; // SSE 4.1 case MONO_TYPE_U1: return OP_PMINB_UN; // SSE 4.1 case MONO_TYPE_I2: return OP_PMINW; case MONO_TYPE_U2: return OP_PMINW_UN; case MONO_TYPE_I4: return OP_PMIND; // SSE 4.1 case MONO_TYPE_U4: return OP_PMIND_UN; // SSE 4.1 // case MONO_TYPE_I8: // AVX // case MONO_TYPE_U8: #if TARGET_SIZEOF_VOID_P == 8 //case MONO_TYPE_I: // AVX //case MONO_TYPE_U: #else case MONO_TYPE_I: return OP_PMIND; // SSE 4.1 case MONO_TYPE_U: return OP_PMIND_UN; // SSE 4.1 #endif default: g_assert_not_reached (); return -1; } } static int simd_type_to_max_op (int t) { switch (t) { case MONO_TYPE_I1: return OP_PMAXB; // SSE 4.1 case MONO_TYPE_U1: return OP_PMAXB_UN; // SSE 4.1 case MONO_TYPE_I2: return OP_PMAXW; case MONO_TYPE_U2: return OP_PMAXW_UN; case MONO_TYPE_I4: return OP_PMAXD; // SSE 4.1 case MONO_TYPE_U4: return OP_PMAXD_UN; // SSE 4.1 // case MONO_TYPE_I8: // AVX // case MONO_TYPE_U8: #if TARGET_SIZEOF_VOID_P == 8 //case MONO_TYPE_I: // AVX //case MONO_TYPE_U: #else case MONO_TYPE_I: return OP_PMAXD; // SSE 4.1 case MONO_TYPE_U: return OP_PMAXD_UN; // SSE 4.1 #endif default: g_assert_not_reached (); return -1; } } static void emit_simd_comp_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2) { MonoInst *temp; gboolean is64BitNativeInt = FALSE; #if TARGET_SIZEOF_VOID_P == 8 is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U; #endif if (!mono_hwcap_x86_has_sse42 && (ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8 || is64BitNativeInt)) { int temp_reg1 = mono_alloc_ireg (cfg); int temp_reg2 = mono_alloc_ireg (cfg); NEW_SIMD_INS (cfg, ins, temp, OP_PCMPEQD, temp_reg1, sreg1, sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_reg2, temp_reg1, -1); temp->inst_c0 = 0xB1; NEW_SIMD_INS (cfg, ins, temp, OP_ANDPD, dreg, temp_reg1, temp_reg2); } else { NEW_SIMD_INS (cfg, ins, temp, simd_type_to_comp_op (type), dreg, sreg1, sreg2); } } static void emit_simd_gt_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2); static void emit_simd_gt_un_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2) { MonoInst *temp; switch (type) { case MONO_TYPE_U2: case MONO_TYPE_U4: if (mono_hwcap_x86_has_sse41) goto USE_MAX; goto USE_SIGNED_GT; case MONO_TYPE_U1: USE_MAX: { // dreg = max(sreg1, sreg2) != sreg2 int temp_reg1 = mono_alloc_ireg (cfg); int temp_reg2 = mono_alloc_ireg (cfg); int temp_reg3 = mono_alloc_ireg (cfg); NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_un_op (type), temp_reg1, sreg1, sreg2); emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg2, 
temp_reg1, ins->sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_XONES, temp_reg3, -1, -1); NEW_SIMD_INS (cfg, ins, temp, OP_XORPD, dreg, temp_reg2, temp_reg3); break; } case MONO_TYPE_U8: USE_SIGNED_GT: { // convert to signed integer by subtracting (1 << (size - 1)) from each operand // and then use signed comparison int temp_c0 = mono_alloc_ireg (cfg); int temp_c80 = mono_alloc_ireg (cfg); int temp_s1 = mono_alloc_ireg (cfg); int temp_s2 = mono_alloc_ireg (cfg); NEW_SIMD_INS (cfg, ins, temp, OP_XONES, temp_c0, -1, -1); NEW_SIMD_INS (cfg, ins, temp, simd_type_to_shl_op (type), temp_c80, temp_c0, -1); temp->inst_imm = type == MONO_TYPE_U2 ? 15 : (type == MONO_TYPE_U4 ? 31 : 63); NEW_SIMD_INS (cfg, ins, temp, simd_type_to_sub_op (type), temp_s1, sreg1, temp_c80); NEW_SIMD_INS (cfg, ins, temp, simd_type_to_sub_op (type), temp_s2, sreg2, temp_c80); emit_simd_gt_op (cfg, bb, ins, type, dreg, temp_s1, temp_s2); break; case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 goto USE_SIGNED_GT; #else if (mono_hwcap_x86_has_sse41) goto USE_MAX; goto USE_SIGNED_GT; #endif } } } static void emit_simd_gt_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2) { MonoInst *temp; gboolean is64BitNativeInt = FALSE; #if TARGET_SIZEOF_VOID_P == 8 is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U; #endif if (!mono_hwcap_x86_has_sse42 && (type == MONO_TYPE_I8 || type == MONO_TYPE_U8 || is64BitNativeInt)) { // Decompose 64-bit greater than to 32-bit // // t = (v1 > v2) // u = (v1 == v2) // v = (v1 > v2) unsigned // // z = shuffle(t, (3, 3, 1, 1)) // t1 = shuffle(v, (2, 2, 0, 0)) // u1 = shuffle(u, (3, 3, 1, 1)) // w = and(t1, u1) // result = bitwise_or(z, w) int temp_t = mono_alloc_ireg (cfg); int temp_u = mono_alloc_ireg (cfg); int temp_v = mono_alloc_ireg (cfg); int temp_z = temp_t; int temp_t1 = temp_v; int temp_u1 = temp_u; int temp_w = temp_t1; NEW_SIMD_INS (cfg, ins, temp, OP_PCMPGTD, temp_t, sreg1, sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_PCMPEQD, temp_u, sreg1, sreg2); emit_simd_gt_un_op (cfg, bb, ins, MONO_TYPE_U4, temp_v, sreg1, sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_z, temp_t, -1); temp->inst_c0 = 0xF5; NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_t1, temp_v, -1); temp->inst_c0 = 0xA0; NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_u1, temp_u, -1); temp->inst_c0 = 0xF5; NEW_SIMD_INS (cfg, ins, temp, OP_ANDPD, temp_w, temp_t1, temp_u1); NEW_SIMD_INS (cfg, ins, temp, OP_ORPD, dreg, temp_z, temp_w); } else { NEW_SIMD_INS (cfg, ins, temp, simd_type_to_gt_op (type), dreg, sreg1, sreg2); } } static void emit_simd_min_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2) { MonoInst *temp; gboolean is64BitNativeInt = FALSE; #if TARGET_SIZEOF_VOID_P == 8 is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U; #endif if (type == MONO_TYPE_I2 || type == MONO_TYPE_U2) { // SSE2, so always available NEW_SIMD_INS (cfg, ins, temp, simd_type_to_min_op (type), dreg, sreg1, sreg2); } else if (!mono_hwcap_x86_has_sse41 || type == MONO_TYPE_I8 || type == MONO_TYPE_U8 || is64BitNativeInt) { // Decompose to t = (s1 > s2), d = (s1 & !t) | (s2 & t) int temp_t = mono_alloc_ireg (cfg); int temp_d1 = mono_alloc_ireg (cfg); int temp_d2 = mono_alloc_ireg (cfg); if (type == MONO_TYPE_U8 || type == MONO_TYPE_U4 || type == MONO_TYPE_U1) emit_simd_gt_un_op (cfg, bb, ins, type, temp_t, sreg1, sreg2); else emit_simd_gt_op (cfg, bb, ins, type, temp_t, sreg1, sreg2); NEW_SIMD_INS (cfg, ins, 
temp, OP_PANDN, temp_d1, temp_t, sreg1); NEW_SIMD_INS (cfg, ins, temp, OP_PAND, temp_d2, temp_t, sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_POR, dreg, temp_d1, temp_d2); } else { // SSE 4.1 has byte- and dword- operations NEW_SIMD_INS (cfg, ins, temp, simd_type_to_min_op (type), dreg, sreg1, sreg2); } } static void emit_simd_max_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2) { MonoInst *temp; gboolean is64BitNativeInt = FALSE; #if TARGET_SIZEOF_VOID_P == 8 is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U; #endif if (type == MONO_TYPE_I2 || type == MONO_TYPE_U2) { // SSE2, so always available NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_op (type), dreg, sreg1, sreg2); } else if (!mono_hwcap_x86_has_sse41 || type == MONO_TYPE_I8 || type == MONO_TYPE_U8 || is64BitNativeInt) { // Decompose to t = (s1 > s2), d = (s1 & t) | (s2 & !t) int temp_t = mono_alloc_ireg (cfg); int temp_d1 = mono_alloc_ireg (cfg); int temp_d2 = mono_alloc_ireg (cfg); if (type == MONO_TYPE_U8 || type == MONO_TYPE_U4 || type == MONO_TYPE_U1) emit_simd_gt_un_op (cfg, bb, ins, type, temp_t, sreg1, sreg2); else emit_simd_gt_op (cfg, bb, ins, type, temp_t, sreg1, sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_PAND, temp_d1, temp_t, sreg1); NEW_SIMD_INS (cfg, ins, temp, OP_PANDN, temp_d2, temp_t, sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_POR, dreg, temp_d1, temp_d2); } else { // SSE 4.1 has byte- and dword- operations NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_op (type), dreg, sreg1, sreg2); } } /* * mono_arch_lowering_pass: * * Converts complex opcodes into simpler ones so that each IR instruction * corresponds to one machine instruction. */ void mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *n, *temp; /* * FIXME: Need to add more instructions, but the current machine * description can't model some parts of the composite instructions like * cdq. 
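 * For example, an OP_COMPARE_IMM whose immediate does not fit in 32 bits is
 * split below into an OP_I8CONST into a fresh vreg plus a reg-reg OP_COMPARE,
 * since the hardware cmp instruction only accepts an imm32.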
*/ MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) { switch (ins->opcode) { case OP_DIV_IMM: case OP_REM_IMM: case OP_IDIV_IMM: case OP_IDIV_UN_IMM: case OP_IREM_UN_IMM: case OP_LREM_IMM: case OP_IREM_IMM: mono_decompose_op_imm (cfg, bb, ins); break; case OP_COMPARE_IMM: case OP_LCOMPARE_IMM: if (!amd64_use_imm32 (ins->inst_imm)) { NEW_INS (cfg, ins, temp, OP_I8CONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->opcode = OP_COMPARE; ins->sreg2 = temp->dreg; } break; #ifndef MONO_ARCH_ILP32 case OP_LOAD_MEMBASE: #endif case OP_LOADI8_MEMBASE: /* Don't generate memindex opcodes (to simplify */ /* read sandboxing) */ if (!amd64_use_imm32 (ins->inst_offset)) { NEW_INS (cfg, ins, temp, OP_I8CONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ins->opcode = OP_AMD64_LOADI8_MEMINDEX; ins->inst_indexreg = temp->dreg; } break; #ifndef MONO_ARCH_ILP32 case OP_STORE_MEMBASE_IMM: #endif case OP_STOREI8_MEMBASE_IMM: if (!amd64_use_imm32 (ins->inst_imm)) { NEW_INS (cfg, ins, temp, OP_I8CONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->opcode = OP_STOREI8_MEMBASE_REG; ins->sreg1 = temp->dreg; } break; #ifdef MONO_ARCH_SIMD_INTRINSICS case OP_EXPAND_I1: { int temp_reg1 = mono_alloc_ireg (cfg); int temp_reg2 = mono_alloc_ireg (cfg); int original_reg = ins->sreg1; NEW_INS (cfg, ins, temp, OP_ICONV_TO_U1); temp->sreg1 = original_reg; temp->dreg = temp_reg1; NEW_INS (cfg, ins, temp, OP_SHL_IMM); temp->sreg1 = temp_reg1; temp->dreg = temp_reg2; temp->inst_imm = 8; NEW_INS (cfg, ins, temp, OP_LOR); temp->sreg1 = temp->dreg = temp_reg2; temp->sreg2 = temp_reg1; ins->opcode = OP_EXPAND_I2; ins->sreg1 = temp_reg2; break; } case OP_XEQUAL: { int temp_reg1 = mono_alloc_ireg (cfg); int temp_reg2 = mono_alloc_ireg (cfg); NEW_SIMD_INS (cfg, ins, temp, OP_PCMPEQD, temp_reg1, ins->sreg1, ins->sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_EXTRACT_MASK, temp_reg2, temp_reg1, -1); temp->type = STACK_I4; NEW_INS (cfg, ins, temp, OP_COMPARE_IMM); temp->sreg1 = temp_reg2; temp->inst_imm = 0xFFFF; temp->klass = ins->klass; ins->opcode = OP_CEQ; ins->sreg1 = -1; ins->sreg2 = -1; break; } case OP_XCOMPARE: { int temp_reg; gboolean is64BitNativeInt = FALSE; switch (ins->inst_c0) { case CMP_EQ: emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2); NULLIFY_INS (ins); break; case CMP_NE: { int temp_reg1 = mono_alloc_ireg (cfg); int temp_reg2 = mono_alloc_ireg (cfg); emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg1, ins->sreg1, ins->sreg2); NEW_SIMD_INS (cfg, ins, temp, OP_XONES, temp_reg2, -1, -1); ins->opcode = OP_XORPD; ins->sreg1 = temp_reg1; ins->sreg2 = temp_reg2; break; } case CMP_LT: temp_reg = ins->sreg1; ins->sreg1 = ins->sreg2; ins->sreg2 = temp_reg; case CMP_GT: emit_simd_gt_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2); NULLIFY_INS (ins); break; case CMP_LE: temp_reg = ins->sreg1; ins->sreg1 = ins->sreg2; ins->sreg2 = temp_reg; case CMP_GE: { int temp_reg1 = mono_alloc_ireg (cfg); int temp_reg2 = mono_alloc_ireg (cfg); emit_simd_gt_op (cfg, bb, ins, ins->inst_c1, temp_reg1, ins->sreg1, ins->sreg2); emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg2, ins->sreg1, ins->sreg2); ins->opcode = OP_POR; ins->sreg1 = temp_reg1; ins->sreg2 = temp_reg2; break; } case CMP_LE_UN: temp_reg = ins->sreg1; ins->sreg1 = ins->sreg2; ins->sreg2 = temp_reg; case CMP_GE_UN: #if TARGET_SIZEOF_VOID_P == 8 is64BitNativeInt = ins->inst_c1 == MONO_TYPE_U; #endif if (mono_hwcap_x86_has_sse41 && ins->inst_c1 !=
MONO_TYPE_U8 && !is64BitNativeInt) { int temp_reg1 = mono_alloc_ireg (cfg); NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_un_op (ins->inst_c1), temp_reg1, ins->sreg1, ins->sreg2); emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, ins->dreg, temp_reg1, ins->sreg1); NULLIFY_INS (ins); } else { int temp_reg1 = mono_alloc_ireg (cfg); int temp_reg2 = mono_alloc_ireg (cfg); emit_simd_gt_un_op (cfg, bb, ins, ins->inst_c1, temp_reg1, ins->sreg1, ins->sreg2); emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg2, ins->sreg1, ins->sreg2); ins->opcode = OP_POR; ins->sreg1 = temp_reg1; ins->sreg2 = temp_reg2; } break; case CMP_LT_UN: temp_reg = ins->sreg1; ins->sreg1 = ins->sreg2; ins->sreg2 = temp_reg; case CMP_GT_UN: { emit_simd_gt_un_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2); NULLIFY_INS (ins); break; } default: g_assert_not_reached(); break; } ins->type = STACK_VTYPE; ins->inst_c0 = 0; break; } case OP_XCOMPARE_FP: { ins->opcode = ins->inst_c1 == MONO_TYPE_R4 ? OP_COMPPS : OP_COMPPD; switch (ins->inst_c0) { case CMP_EQ: ins->inst_c0 = 0; break; case CMP_NE: ins->inst_c0 = 4; break; case CMP_LT: ins->inst_c0 = 1; break; case CMP_LE: ins->inst_c0 = 2; break; case CMP_GT: ins->inst_c0 = 6; break; case CMP_GE: ins->inst_c0 = 5; break; default: g_assert_not_reached(); break; } break; } case OP_XCAST: { ins->opcode = OP_XMOVE; break; } case OP_XBINOP: { switch (ins->inst_c0) { case OP_ISUB: ins->opcode = simd_type_to_sub_op (ins->inst_c1); break; case OP_IADD: ins->opcode = simd_type_to_add_op (ins->inst_c1); break; case OP_IAND: ins->opcode = OP_ANDPD; break; case OP_IXOR: ins->opcode = OP_XORPD; break; case OP_IOR: ins->opcode = OP_ORPD; break; case OP_IMIN: emit_simd_min_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2); NULLIFY_INS (ins); break; case OP_IMAX: emit_simd_max_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2); NULLIFY_INS (ins); break; case OP_FSUB: ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_SUBPD : OP_SUBPS; break; case OP_FADD: ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_ADDPD : OP_ADDPS; break; case OP_FDIV: ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_DIVPD : OP_DIVPS; break; case OP_FMUL: ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_MULPD : OP_MULPS; break; case OP_FMIN: ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_MINPD : OP_MINPS; break; case OP_FMAX: ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_MAXPD : OP_MAXPS; break; default: g_assert_not_reached(); break; } break; } case OP_XEXTRACT_R4: case OP_XEXTRACT_R8: case OP_XEXTRACT_I4: case OP_XEXTRACT_I8: { // TODO g_assert_not_reached(); break; } #endif default: break; } } bb->max_vreg = cfg->next_vreg; } static const int branch_cc_table [] = { X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT, X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT, X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC }; /* Maps CMP_... constants to X86_CC_... 
constants */ static const int cc_table [] = { X86_CC_EQ, X86_CC_NE, X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT, X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT }; static const int cc_signed_table [] = { TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE }; /*#include "cprop.c"*/ static unsigned char* emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed) { // Use 8 as register size to get NaN/Inf conversion to uint result truncated to 0 if (size == 8 || (!is_signed && size == 4)) amd64_sse_cvttsd2si_reg_reg (code, dreg, sreg); else amd64_sse_cvttsd2si_reg_reg_size (code, dreg, sreg, 4); if (size == 1) amd64_widen_reg (code, dreg, dreg, is_signed, FALSE); else if (size == 2) amd64_widen_reg (code, dreg, dreg, is_signed, TRUE); return code; } static unsigned char* mono_emit_stack_alloc (MonoCompile *cfg, guchar *code, MonoInst* tree) { int sreg = tree->sreg1; int need_touch = FALSE; #if defined(TARGET_WIN32) need_touch = TRUE; #elif defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK) if (!(tree->flags & MONO_INST_INIT)) need_touch = TRUE; #endif if (need_touch) { guint8* br[5]; /* * Under Windows: * If requested stack size is larger than one page, * perform stack-touch operation */ /* * Generate stack probe code. * Under Windows, it is necessary to allocate one page at a time, * "touching" stack after each successful sub-allocation. This is * because of the way stack growth is implemented - there is a * guard page before the lowest stack page that is currently committed. * Stack normally grows sequentially so OS traps access to the * guard page and commits more pages when needed. */ amd64_test_reg_imm (code, sreg, ~0xFFF); br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE); br[2] = code; /* loop */ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000); amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP); amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000); amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000); br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE); amd64_patch (br[3], br[2]); amd64_test_reg_reg (code, sreg, sreg); br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE); amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg); br[1] = code; x86_jump8 (code, 0); amd64_patch (br[0], code); amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg); amd64_patch (br[1], code); amd64_patch (br[4], code); } else amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1); if (tree->flags & MONO_INST_INIT) { int offset = 0; if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) { amd64_push_reg (code, AMD64_RAX); offset += 8; } if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) { amd64_push_reg (code, AMD64_RCX); offset += 8; } if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) { amd64_push_reg (code, AMD64_RDI); offset += 8; } amd64_shift_reg_imm (code, X86_SHR, sreg, 3); if (sreg != AMD64_RCX) amd64_mov_reg_reg (code, AMD64_RCX, sreg, 8); amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX); amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset); if (cfg->param_area) amd64_alu_reg_imm (code, X86_ADD, AMD64_RDI, cfg->param_area); amd64_cld (code); amd64_prefix (code, X86_REP_PREFIX); amd64_stosl (code); if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) amd64_pop_reg (code, AMD64_RDI); if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) amd64_pop_reg (code, AMD64_RCX); if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) amd64_pop_reg (code, AMD64_RAX); } return code; } static guint8* emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code) { CallInfo *cinfo;
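/*
 * Sketch of the conventions handled below: integer results arrive in RAX
 * (asserted for the OP_CALL family), floating point results in XMM0, and
 * small value types returned in registers are copied quad by quad according
 * to cinfo->ret.pair_storage.
 */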
guint32 quad; /* Move return value to the target register */ /* FIXME: do this in the local reg allocator */ switch (ins->opcode) { case OP_CALL: case OP_CALL_REG: case OP_CALL_MEMBASE: case OP_LCALL: case OP_LCALL_REG: case OP_LCALL_MEMBASE: g_assert (ins->dreg == AMD64_RAX); break; case OP_FCALL: case OP_FCALL_REG: case OP_FCALL_MEMBASE: { MonoType *rtype = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret); if (rtype->type == MONO_TYPE_R4) { amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, AMD64_XMM0); } else { if (ins->dreg != AMD64_XMM0) amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM0); } break; } case OP_RCALL: case OP_RCALL_REG: case OP_RCALL_MEMBASE: if (ins->dreg != AMD64_XMM0) amd64_sse_movss_reg_reg (code, ins->dreg, AMD64_XMM0); break; case OP_VCALL: case OP_VCALL_REG: case OP_VCALL_MEMBASE: case OP_VCALL2: case OP_VCALL2_REG: case OP_VCALL2_MEMBASE: cinfo = get_call_info (cfg->mempool, ((MonoCallInst*)ins)->signature); if (cinfo->ret.storage == ArgValuetypeInReg) { MonoInst *loc = cfg->arch.vret_addr_loc; /* Load the destination address */ g_assert (loc->opcode == OP_REGOFFSET); amd64_mov_reg_membase (code, AMD64_RCX, loc->inst_basereg, loc->inst_offset, sizeof(gpointer)); for (quad = 0; quad < 2; quad ++) { switch (cinfo->ret.pair_storage [quad]) { case ArgInIReg: amd64_mov_membase_reg (code, AMD64_RCX, (quad * sizeof (target_mgreg_t)), cinfo->ret.pair_regs [quad], sizeof (target_mgreg_t)); break; case ArgInFloatSSEReg: amd64_movss_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]); break; case ArgInDoubleSSEReg: amd64_movsd_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]); break; case ArgNone: break; default: NOT_IMPLEMENTED; } } } break; } return code; } #endif /* DISABLE_JIT */ #ifdef TARGET_MACH static int tls_gs_offset; #endif gboolean mono_arch_have_fast_tls (void) { #ifdef TARGET_MACH static gboolean have_fast_tls = FALSE; static gboolean inited = FALSE; guint8 *ins; if (mini_debug_options.use_fallback_tls) return FALSE; if (inited) return have_fast_tls; ins = (guint8*)pthread_getspecific; /* * We're looking for these two instructions: * * mov %gs:[offset](,%rdi,8),%rax * retq */ have_fast_tls = ins [0] == 0x65 && ins [1] == 0x48 && ins [2] == 0x8b && ins [3] == 0x04 && ins [4] == 0xfd && ins [6] == 0x00 && ins [7] == 0x00 && ins [8] == 0x00 && ins [9] == 0xc3; tls_gs_offset = ins[5]; /* * Apple now loads a different version of pthread_getspecific when launched from Xcode * For that version we're looking for these instructions: * * pushq %rbp * movq %rsp, %rbp * mov %gs:[offset](,%rdi,8),%rax * popq %rbp * retq */ if (!have_fast_tls) { have_fast_tls = ins [0] == 0x55 && ins [1] == 0x48 && ins [2] == 0x89 && ins [3] == 0xe5 && ins [4] == 0x65 && ins [5] == 0x48 && ins [6] == 0x8b && ins [7] == 0x04 && ins [8] == 0xfd && ins [10] == 0x00 && ins [11] == 0x00 && ins [12] == 0x00 && ins [13] == 0x5d && ins [14] == 0xc3; tls_gs_offset = ins[9]; } inited = TRUE; return have_fast_tls; #elif defined(TARGET_ANDROID) return FALSE; #else if (mini_debug_options.use_fallback_tls) return FALSE; return TRUE; #endif } int mono_amd64_get_tls_gs_offset (void) { #ifdef TARGET_OSX return tls_gs_offset; #else g_assert_not_reached (); return -1; #endif } /* * \param code buffer to store code to * \param dreg hard register where to place the result * \param tls_offset offset info * \return a pointer to the end of the stored code * * mono_amd64_emit_tls_get emits in \p code the native code that puts in * the dreg register the item in the 
thread local storage identified * by tls_offset. */ static guint8* mono_amd64_emit_tls_get (guint8* code, int dreg, int tls_offset) { #ifdef TARGET_WIN32 if (tls_offset < 64) { x86_prefix (code, X86_GS_PREFIX); amd64_mov_reg_mem (code, dreg, (tls_offset * 8) + 0x1480, 8); } else { guint8 *buf [16]; g_assert (tls_offset < 0x440); /* Load TEB->TlsExpansionSlots */ x86_prefix (code, X86_GS_PREFIX); amd64_mov_reg_mem (code, dreg, 0x1780, 8); amd64_test_reg_reg (code, dreg, dreg); buf [0] = code; amd64_branch (code, X86_CC_EQ, code, TRUE); amd64_mov_reg_membase (code, dreg, dreg, (tls_offset * 8) - 0x200, 8); amd64_patch (buf [0], code); } #elif defined(TARGET_MACH) x86_prefix (code, X86_GS_PREFIX); amd64_mov_reg_mem (code, dreg, tls_gs_offset + (tls_offset * 8), 8); #else if (optimize_for_xen) { x86_prefix (code, X86_FS_PREFIX); amd64_mov_reg_mem (code, dreg, 0, 8); amd64_mov_reg_membase (code, dreg, dreg, tls_offset, 8); } else { x86_prefix (code, X86_FS_PREFIX); amd64_mov_reg_mem (code, dreg, tls_offset, 8); } #endif return code; } static guint8* mono_amd64_emit_tls_set (guint8 *code, int sreg, int tls_offset) { #ifdef TARGET_WIN32 g_assert_not_reached (); #elif defined(TARGET_MACH) x86_prefix (code, X86_GS_PREFIX); amd64_mov_mem_reg (code, tls_gs_offset + (tls_offset * 8), sreg, 8); #else g_assert (!optimize_for_xen); x86_prefix (code, X86_FS_PREFIX); amd64_mov_mem_reg (code, tls_offset, sreg, 8); #endif return code; } /* * emit_setup_lmf: * * Emit code to initialize an LMF structure at LMF_OFFSET. */ static guint8* emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset) { /* * The ip field is not set, the exception handling code will obtain it from the stack location pointed to by the sp field. */ /* * sp is saved right before calls but we need to save it here too so * async stack walks would work. */ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8); /* Save rbp */ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), AMD64_RBP, 8); if (cfg->arch.omit_fp && cfa_offset != -1) mono_emit_unwind_op_offset (cfg, code, AMD64_RBP, - (cfa_offset - (lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp)))); /* These can't contain refs */ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF); mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), SLOT_NOREF); /* These are handled automatically by the stack marking code */ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), SLOT_NOREF); return code; } #ifdef TARGET_WIN32 #define TEB_LAST_ERROR_OFFSET 0x68 static guint8* emit_get_last_error (guint8* code, int dreg) { /* Threads last error value is located in TEB_LAST_ERROR_OFFSET. 
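 * On 64 bit Windows the TEB is reachable through the GS segment, so the
 * sequence emitted below is essentially 'mov eax, dword gs:[0x68]' (0x68
 * being the LastErrorValue slot in the 64 bit TEB), letting the runtime read
 * the equivalent of GetLastError () without a call.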
*/ x86_prefix (code, X86_GS_PREFIX); amd64_mov_reg_mem (code, dreg, TEB_LAST_ERROR_OFFSET, sizeof (guint32)); return code; } #else static guint8* emit_get_last_error (guint8* code, int dreg) { g_assert_not_reached (); } #endif /* benchmark and set based on cpu */ #define LOOP_ALIGNMENT 8 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting) #ifndef DISABLE_JIT static guint8* amd64_handle_varargs_nregs (guint8 *code, guint32 nregs) { #ifndef TARGET_WIN32 if (nregs) amd64_mov_reg_imm (code, AMD64_RAX, nregs); else amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX); #endif return code; } static guint8* amd64_handle_varargs_call (MonoCompile *cfg, guint8 *code, MonoCallInst *call, gboolean free_rax) { #ifdef TARGET_WIN32 return code; #else /* * The AMD64 ABI forces callers to know about varargs. */ guint32 nregs = 0; if (call->signature->call_convention == MONO_CALL_VARARG && call->signature->pinvoke) { // deliberatly nothing -- but nreg = 0 and do not return } else if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE && m_class_get_image (cfg->method->klass) != mono_defaults.corlib) { /* * Since the unmanaged calling convention doesn't contain a * 'vararg' entry, we have to treat every pinvoke call as a * potential vararg call. */ for (guint32 i = 0; i < AMD64_XMM_NREG; ++i) nregs += (call->used_fregs & (1 << i)) != 0; } else { return code; } MonoInst *ins = (MonoInst*)call; if (free_rax && ins->sreg1 == AMD64_RAX) { amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8); ins->sreg1 = AMD64_R11; } return amd64_handle_varargs_nregs (code, nregs); #endif } void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins; MonoCallInst *call; guint8 *code = cfg->native_code + cfg->code_len; /* Fix max_offset estimate for each successor bb */ gboolean optimize_branch_pred = (cfg->opt & MONO_OPT_BRANCH) && (cfg->max_block_num < MAX_BBLOCKS_FOR_BRANCH_OPTS); if (optimize_branch_pred) { int current_offset = cfg->code_len; MonoBasicBlock *current_bb; for (current_bb = bb; current_bb != NULL; current_bb = current_bb->next_bb) { current_bb->max_offset = current_offset; current_offset += current_bb->max_length; } } if (cfg->opt & MONO_OPT_LOOP) { int pad, align = LOOP_ALIGNMENT; /* set alignment depending on cpu */ if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) { pad = align - pad; /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/ amd64_padding (code, pad); cfg->code_len += pad; bb->native_offset = cfg->code_len; } } if (cfg->verbose_level > 2) g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset); set_code_cursor (cfg, code); mono_debug_open_block (cfg, bb, code - cfg->native_code); if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) x86_breakpoint (code); MONO_BB_FOR_EACH_INS (bb, ins) { const guint offset = code - cfg->native_code; set_code_cursor (cfg, code); int max_len = ins_get_size (ins->opcode); code = realloc_code (cfg, max_len); if (cfg->debug_info) mono_debug_record_line_number (cfg, ins, offset); switch (ins->opcode) { case OP_BIGMUL: amd64_mul_reg (code, ins->sreg2, TRUE); break; case OP_BIGMUL_UN: amd64_mul_reg (code, ins->sreg2, FALSE); break; case OP_X86_SETEQ_MEMBASE: amd64_set_membase (code, X86_CC_EQ, ins->inst_basereg, ins->inst_offset, TRUE); break; case OP_STOREI1_MEMBASE_IMM: amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, 
ins->inst_imm, 1); break; case OP_STOREI2_MEMBASE_IMM: amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2); break; case OP_STOREI4_MEMBASE_IMM: amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_STOREI1_MEMBASE_REG: amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1); break; case OP_STOREI2_MEMBASE_REG: amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2); break; /* In AMD64 NaCl, pointers are 4 bytes, */ /* so STORE_* != STOREI8_*. Likewise below. */ case OP_STORE_MEMBASE_REG: amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, sizeof(gpointer)); break; case OP_STOREI8_MEMBASE_REG: amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 8); break; case OP_STOREI4_MEMBASE_REG: amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4); break; case OP_STORE_MEMBASE_IMM: /* In NaCl, this could be a PCONST type, which could */ /* mean a pointer type was copied directly into the */ /* lower 32-bits of inst_imm, so for InvalidPtr==-1 */ /* the value would be 0x00000000FFFFFFFF which is */ /* not proper for an imm32 unless you cast it. */ g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, (gint32)ins->inst_imm, sizeof(gpointer)); break; case OP_STOREI8_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8); break; case OP_LOAD_MEM: #ifdef MONO_ARCH_ILP32 /* In ILP32, pointers are 4 bytes, so separate these */ /* cases, use literal 8 below where we really want 8 */ amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm); amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, sizeof(gpointer)); break; #endif case OP_LOADI8_MEM: // FIXME: Decompose this earlier if (amd64_use_imm32 (ins->inst_imm)) amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 8); else { amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof(gpointer)); amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 8); } break; case OP_LOADI4_MEM: amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm); amd64_movsxd_reg_membase (code, ins->dreg, ins->dreg, 0); break; case OP_LOADU4_MEM: // FIXME: Decompose this earlier if (amd64_use_imm32 (ins->inst_imm)) amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4); else { amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof(gpointer)); amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4); } break; case OP_LOADU1_MEM: amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm); amd64_widen_membase (code, ins->dreg, ins->dreg, 0, FALSE, FALSE); break; case OP_LOADU2_MEM: /* For NaCl, pointers are 4 bytes, so separate these */ /* cases, use literal 8 below where we really want 8 */ amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm); amd64_widen_membase (code, ins->dreg, ins->dreg, 0, FALSE, TRUE); break; case OP_LOAD_MEMBASE: g_assert (amd64_is_imm32 (ins->inst_offset)); amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof(gpointer)); break; case OP_LOADI8_MEMBASE: /* Use literal 8 instead of sizeof pointer or */ /* register, we really want 8 for this opcode */ g_assert (amd64_is_imm32 (ins->inst_offset)); amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 8); break; case OP_LOADI4_MEMBASE: amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, 
ins->inst_offset); break; case OP_LOADU4_MEMBASE: amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4); break; case OP_LOADU1_MEMBASE: /* The cpu zero extends the result into 64 bits */ amd64_widen_membase_size (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE, 4); break; case OP_LOADI1_MEMBASE: amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE); break; case OP_LOADU2_MEMBASE: /* The cpu zero extends the result into 64 bits */ amd64_widen_membase_size (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE, 4); break; case OP_LOADI2_MEMBASE: amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE); break; case OP_AMD64_LOADI8_MEMINDEX: amd64_mov_reg_memindex_size (code, ins->dreg, ins->inst_basereg, 0, ins->inst_indexreg, 0, 8); break; case OP_LCONV_TO_I1: case OP_ICONV_TO_I1: case OP_SEXT_I1: amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE); break; case OP_LCONV_TO_I2: case OP_ICONV_TO_I2: case OP_SEXT_I2: amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE); break; case OP_LCONV_TO_U1: case OP_ICONV_TO_U1: amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE); break; case OP_LCONV_TO_U2: case OP_ICONV_TO_U2: amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE); break; case OP_ZEXT_I4: /* Clean out the upper word */ amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 4); break; case OP_SEXT_I4: amd64_movsxd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_COMPARE: case OP_LCOMPARE: amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); break; case OP_COMPARE_IMM: #if defined(MONO_ARCH_ILP32) /* Comparison of pointer immediates should be 4 bytes to avoid sign-extend problems */ g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4); break; #endif case OP_LCOMPARE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm); break; case OP_X86_COMPARE_REG_MEMBASE: amd64_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_X86_TEST_NULL: amd64_test_reg_reg_size (code, ins->sreg1, ins->sreg1, 4); break; case OP_AMD64_TEST_NULL: amd64_test_reg_reg (code, ins->sreg1, ins->sreg1); break; case OP_X86_ADD_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset, 4); break; case OP_X86_SUB_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset, 4); break; case OP_X86_AND_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset, 4); break; case OP_X86_OR_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset, 4); break; case OP_X86_XOR_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset, 4); break; case OP_X86_ADD_MEMBASE_IMM: /* FIXME: Make a 64 version too */ amd64_alu_membase_imm_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_X86_SUB_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_X86_AND_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_X86_OR_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); 
amd64_alu_membase_imm_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_X86_XOR_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_X86_ADD_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4); break; case OP_X86_SUB_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4); break; case OP_X86_AND_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4); break; case OP_X86_OR_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4); break; case OP_X86_XOR_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4); break; case OP_X86_INC_MEMBASE: amd64_inc_membase_size (code, ins->inst_basereg, ins->inst_offset, 4); break; case OP_X86_INC_REG: amd64_inc_reg_size (code, ins->dreg, 4); break; case OP_X86_DEC_MEMBASE: amd64_dec_membase_size (code, ins->inst_basereg, ins->inst_offset, 4); break; case OP_X86_DEC_REG: amd64_dec_reg_size (code, ins->dreg, 4); break; case OP_X86_MUL_REG_MEMBASE: case OP_X86_MUL_MEMBASE_REG: amd64_imul_reg_membase_size (code, ins->sreg1, ins->sreg2, ins->inst_offset, 4); break; case OP_AMD64_ICOMPARE_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4); break; case OP_AMD64_ICOMPARE_MEMBASE_IMM: amd64_alu_membase_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_AMD64_COMPARE_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8); break; case OP_AMD64_COMPARE_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8); break; case OP_X86_COMPARE_MEMBASE8_IMM: amd64_alu_membase8_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_AMD64_ICOMPARE_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset, 4); break; case OP_AMD64_COMPARE_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset, 8); break; case OP_AMD64_ADD_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset, 8); break; case OP_AMD64_SUB_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset, 8); break; case OP_AMD64_AND_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset, 8); break; case OP_AMD64_OR_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset, 8); break; case OP_AMD64_XOR_REG_MEMBASE: amd64_alu_reg_membase_size (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset, 8); break; case OP_AMD64_ADD_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8); break; case OP_AMD64_SUB_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8); break; case OP_AMD64_AND_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8); break; case OP_AMD64_OR_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_OR, 
ins->inst_basereg, ins->inst_offset, ins->sreg2, 8); break; case OP_AMD64_XOR_MEMBASE_REG: amd64_alu_membase_reg_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8); break; case OP_AMD64_ADD_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8); break; case OP_AMD64_SUB_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8); break; case OP_AMD64_AND_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8); break; case OP_AMD64_OR_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8); break; case OP_AMD64_XOR_MEMBASE_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_membase_imm_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8); break; case OP_BREAK: amd64_breakpoint (code); break; case OP_RELAXED_NOP: x86_prefix (code, X86_REP_PREFIX); x86_nop (code); break; case OP_HARD_NOP: x86_nop (code); break; case OP_NOP: case OP_DUMMY_USE: case OP_DUMMY_ICONST: case OP_DUMMY_I8CONST: case OP_DUMMY_R8CONST: case OP_DUMMY_R4CONST: case OP_NOT_REACHED: case OP_NOT_NULL: break; case OP_IL_SEQ_POINT: mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); break; case OP_SEQ_POINT: { if (ins->flags & MONO_INST_SINGLE_STEP_LOC) { MonoInst *var = cfg->arch.ss_tramp_var; guint8 *label; /* Load ss_tramp_var */ /* This is equal to &ss_trampoline */ amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8); /* Load the trampoline address */ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8); /* Call it if it is non-null */ amd64_test_reg_reg (code, AMD64_R11, AMD64_R11); label = code; amd64_branch8 (code, X86_CC_Z, 0, FALSE); amd64_call_reg (code, AMD64_R11); amd64_patch (label, code); } /* * This is the address which is saved in seq points, */ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); if (cfg->compile_aot) { const guint32 offset = code - cfg->native_code; guint32 val; MonoInst *info_var = cfg->arch.seq_point_info_var; guint8 *label; /* Load info var */ amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8); val = ((offset) * sizeof (target_mgreg_t)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs); /* Load the info->bp_addrs [offset], which is either NULL or the address of the breakpoint trampoline */ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, val, 8); amd64_test_reg_reg (code, AMD64_R11, AMD64_R11); label = code; amd64_branch8 (code, X86_CC_Z, 0, FALSE); /* Call the trampoline */ amd64_call_reg (code, AMD64_R11); amd64_patch (label, code); } else { MonoInst *var = cfg->arch.bp_tramp_var; guint8 *label; /* * Emit a test+branch against a constant, the constant will be overwritten * by mono_arch_set_breakpoint () to cause the test to fail. 
*/ amd64_mov_reg_imm (code, AMD64_R11, 0); amd64_test_reg_reg (code, AMD64_R11, AMD64_R11); label = code; amd64_branch8 (code, X86_CC_Z, 0, FALSE); g_assert (var); g_assert (var->opcode == OP_REGOFFSET); /* Load bp_tramp_var */ /* This is equal to &bp_trampoline */ amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8); /* Call the trampoline */ amd64_call_membase (code, AMD64_R11, 0); amd64_patch (label, code); } /* * Add an additional nop so skipping the bp doesn't cause the ip to point * to another IL offset. */ x86_nop (code); break; } case OP_ADDCC: case OP_LADDCC: case OP_LADD: amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2); break; case OP_ADC: amd64_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2); break; case OP_ADD_IMM: case OP_LADD_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm); break; case OP_ADC_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm); break; case OP_SUBCC: case OP_LSUBCC: case OP_LSUB: amd64_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2); break; case OP_SBB: amd64_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2); break; case OP_SUB_IMM: case OP_LSUB_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm); break; case OP_SBB_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm); break; case OP_LAND: amd64_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2); break; case OP_AND_IMM: case OP_LAND_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm); break; case OP_LMUL: amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MUL_IMM: case OP_LMUL_IMM: case OP_IMUL_IMM: { guint32 size = (ins->opcode == OP_IMUL_IMM) ? 
4 : 8; switch (ins->inst_imm) { case 2: /* MOV r1, r2 */ /* ADD r1, r1 */ if (ins->dreg != ins->sreg1) amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, size); amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg); break; case 3: /* LEA r1, [r2 + r2*2] */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1); break; case 5: /* LEA r1, [r2 + r2*4] */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); break; case 6: /* LEA r1, [r2 + r2*2] */ /* ADD r1, r1 */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1); amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg); break; case 9: /* LEA r1, [r2 + r2*8] */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 3); break; case 10: /* LEA r1, [r2 + r2*4] */ /* ADD r1, r1 */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg); break; case 12: /* LEA r1, [r2 + r2*2] */ /* SHL r1, 2 */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1); amd64_shift_reg_imm (code, X86_SHL, ins->dreg, 2); break; case 25: /* LEA r1, [r2 + r2*4] */ /* LEA r1, [r1 + r1*4] */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); amd64_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2); break; case 100: /* LEA r1, [r2 + r2*4] */ /* SHL r1, 2 */ /* LEA r1, [r1 + r1*4] */ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); amd64_shift_reg_imm (code, X86_SHL, ins->dreg, 2); amd64_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2); break; default: amd64_imul_reg_reg_imm_size (code, ins->dreg, ins->sreg1, ins->inst_imm, size); break; } break; } case OP_LDIV: case OP_LREM: /* Regalloc magic makes the div/rem cases the same */ if (ins->sreg2 == AMD64_RDX) { amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8); amd64_cdq (code); amd64_div_membase (code, AMD64_RSP, -8, TRUE); } else { amd64_cdq (code); amd64_div_reg (code, ins->sreg2, TRUE); } break; case OP_LDIV_UN: case OP_LREM_UN: if (ins->sreg2 == AMD64_RDX) { amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8); amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX); amd64_div_membase (code, AMD64_RSP, -8, FALSE); } else { amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX); amd64_div_reg (code, ins->sreg2, FALSE); } break; case OP_IDIV: case OP_IREM: if (ins->sreg2 == AMD64_RDX) { amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8); amd64_cdq_size (code, 4); amd64_div_membase_size (code, AMD64_RSP, -8, TRUE, 4); } else { amd64_cdq_size (code, 4); amd64_div_reg_size (code, ins->sreg2, TRUE, 4); } break; case OP_IDIV_UN: case OP_IREM_UN: if (ins->sreg2 == AMD64_RDX) { amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8); amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX); amd64_div_membase_size (code, AMD64_RSP, -8, FALSE, 4); } else { amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX); amd64_div_reg_size (code, ins->sreg2, FALSE, 4); } break; case OP_LMUL_OVF: amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException"); break; case OP_LOR: amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2); break; case OP_OR_IMM: case OP_LOR_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm); break; case OP_LXOR: amd64_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2); break; case OP_XOR_IMM: case OP_LXOR_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_alu_reg_imm (code, X86_XOR, ins->sreg1, 
ins->inst_imm); break; case OP_LSHL: g_assert (ins->sreg2 == AMD64_RCX); amd64_shift_reg (code, X86_SHL, ins->dreg); break; case OP_LSHR: g_assert (ins->sreg2 == AMD64_RCX); amd64_shift_reg (code, X86_SAR, ins->dreg); break; case OP_SHR_IMM: case OP_LSHR_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm); break; case OP_SHR_UN_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4); break; case OP_LSHR_UN_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm); break; case OP_LSHR_UN: g_assert (ins->sreg2 == AMD64_RCX); amd64_shift_reg (code, X86_SHR, ins->dreg); break; case OP_SHL_IMM: case OP_LSHL_IMM: g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm); break; case OP_IADDCC: case OP_IADD: amd64_alu_reg_reg_size (code, X86_ADD, ins->sreg1, ins->sreg2, 4); break; case OP_IADC: amd64_alu_reg_reg_size (code, X86_ADC, ins->sreg1, ins->sreg2, 4); break; case OP_IADD_IMM: amd64_alu_reg_imm_size (code, X86_ADD, ins->dreg, ins->inst_imm, 4); break; case OP_IADC_IMM: amd64_alu_reg_imm_size (code, X86_ADC, ins->dreg, ins->inst_imm, 4); break; case OP_ISUBCC: case OP_ISUB: amd64_alu_reg_reg_size (code, X86_SUB, ins->sreg1, ins->sreg2, 4); break; case OP_ISBB: amd64_alu_reg_reg_size (code, X86_SBB, ins->sreg1, ins->sreg2, 4); break; case OP_ISUB_IMM: amd64_alu_reg_imm_size (code, X86_SUB, ins->dreg, ins->inst_imm, 4); break; case OP_ISBB_IMM: amd64_alu_reg_imm_size (code, X86_SBB, ins->dreg, ins->inst_imm, 4); break; case OP_IAND: amd64_alu_reg_reg_size (code, X86_AND, ins->sreg1, ins->sreg2, 4); break; case OP_IAND_IMM: amd64_alu_reg_imm_size (code, X86_AND, ins->sreg1, ins->inst_imm, 4); break; case OP_IOR: amd64_alu_reg_reg_size (code, X86_OR, ins->sreg1, ins->sreg2, 4); break; case OP_IOR_IMM: amd64_alu_reg_imm_size (code, X86_OR, ins->sreg1, ins->inst_imm, 4); break; case OP_IXOR: amd64_alu_reg_reg_size (code, X86_XOR, ins->sreg1, ins->sreg2, 4); break; case OP_IXOR_IMM: amd64_alu_reg_imm_size (code, X86_XOR, ins->sreg1, ins->inst_imm, 4); break; case OP_INEG: amd64_neg_reg_size (code, ins->sreg1, 4); break; case OP_INOT: amd64_not_reg_size (code, ins->sreg1, 4); break; case OP_ISHL: g_assert (ins->sreg2 == AMD64_RCX); amd64_shift_reg_size (code, X86_SHL, ins->dreg, 4); break; case OP_ISHR: g_assert (ins->sreg2 == AMD64_RCX); amd64_shift_reg_size (code, X86_SAR, ins->dreg, 4); break; case OP_ISHR_IMM: amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4); break; case OP_ISHR_UN_IMM: amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4); break; case OP_ISHR_UN: g_assert (ins->sreg2 == AMD64_RCX); amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4); break; case OP_ISHL_IMM: amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4); break; case OP_IMUL: amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4); break; case OP_IMUL_OVF: amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException"); break; case OP_IMUL_OVF_UN: case OP_LMUL_OVF_UN: { /* the mul operation and the exception check should most likely be split */ int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE; int size = (ins->opcode == OP_IMUL_OVF_UN) ? 
4 : 8; /*g_assert (ins->sreg2 == X86_EAX); g_assert (ins->dreg == X86_EAX);*/ if (ins->sreg2 == X86_EAX) { non_eax_reg = ins->sreg1; } else if (ins->sreg1 == X86_EAX) { non_eax_reg = ins->sreg2; } else { /* no need to save since we're going to store to it anyway */ if (ins->dreg != X86_EAX) { saved_eax = TRUE; amd64_push_reg (code, X86_EAX); } amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, size); non_eax_reg = ins->sreg2; } if (ins->dreg == X86_EDX) { if (!saved_eax) { saved_eax = TRUE; amd64_push_reg (code, X86_EAX); } } else { saved_edx = TRUE; amd64_push_reg (code, X86_EDX); } amd64_mul_reg_size (code, non_eax_reg, FALSE, size); /* save before the check since pop and mov don't change the flags */ if (ins->dreg != X86_EAX) amd64_mov_reg_reg (code, ins->dreg, X86_EAX, size); if (saved_edx) amd64_pop_reg (code, X86_EDX); if (saved_eax) amd64_pop_reg (code, X86_EAX); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException"); break; } case OP_ICOMPARE: amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4); break; case OP_ICOMPARE_IMM: amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4); break; case OP_IBEQ: case OP_IBLT: case OP_IBGT: case OP_IBGE: case OP_IBLE: case OP_LBEQ: case OP_LBLT: case OP_LBGT: case OP_LBGE: case OP_LBLE: case OP_IBNE_UN: case OP_IBLT_UN: case OP_IBGT_UN: case OP_IBGE_UN: case OP_IBLE_UN: case OP_LBNE_UN: case OP_LBLT_UN: case OP_LBGT_UN: case OP_LBGE_UN: case OP_LBLE_UN: EMIT_COND_BRANCH (ins, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)]); break; case OP_CMOV_IEQ: case OP_CMOV_IGE: case OP_CMOV_IGT: case OP_CMOV_ILE: case OP_CMOV_ILT: case OP_CMOV_INE_UN: case OP_CMOV_IGE_UN: case OP_CMOV_IGT_UN: case OP_CMOV_ILE_UN: case OP_CMOV_ILT_UN: case OP_CMOV_LEQ: case OP_CMOV_LGE: case OP_CMOV_LGT: case OP_CMOV_LLE: case OP_CMOV_LLT: case OP_CMOV_LNE_UN: case OP_CMOV_LGE_UN: case OP_CMOV_LGT_UN: case OP_CMOV_LLE_UN: case OP_CMOV_LLT_UN: g_assert (ins->dreg == ins->sreg1); /* This needs to operate on 64 bit values */ amd64_cmov_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, ins->sreg2); break; case OP_LNOT: amd64_not_reg (code, ins->sreg1); break; case OP_LNEG: amd64_neg_reg (code, ins->sreg1); break; case OP_ICONST: case OP_I8CONST: if ((((guint64)ins->inst_c0) >> 32) == 0 && !mini_debug_options.single_imm_size) amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 4); else amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8); break; case OP_AOTCONST: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0); amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, sizeof(gpointer)); break; case OP_JUMP_TABLE: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0); amd64_mov_reg_imm_size (code, ins->dreg, 0, 8); break; case OP_MOVE: if (ins->dreg != ins->sreg1) amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (target_mgreg_t)); break; case OP_AMD64_SET_XMMREG_R4: { if (cfg->r4fp) { if (ins->dreg != ins->sreg1) amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg1); } else { amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1); } break; } case OP_AMD64_SET_XMMREG_R8: { if (ins->dreg != ins->sreg1) amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1); break; } case OP_TAILCALL_PARAMETER: // This opcode helps compute sizes, i.e. // of the subsequent OP_TAILCALL, but contributes no code. 
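			// Note: each OP_TAILCALL_PARAMETER is still charged to max_len in the
			// OP_TAILCALL* case below, so the argument-copy loop emitted there
			// stays within the buffer obtained from realloc_code ().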
			g_assert (ins->next);
			break;
		case OP_TAILCALL:
		case OP_TAILCALL_REG:
		case OP_TAILCALL_MEMBASE: {
			call = (MonoCallInst*)ins;
			int i, save_area_offset;
			gboolean tailcall_membase = (ins->opcode == OP_TAILCALL_MEMBASE);
			gboolean tailcall_reg = (ins->opcode == OP_TAILCALL_REG);

			g_assert (!cfg->method->save_lmf);

			max_len += AMD64_NREG * 4;
			max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER);
			code = realloc_code (cfg, max_len);

			// FIXME hardcoding RAX here is not ideal.

			if (tailcall_reg) {
				int const reg = ins->sreg1;
				g_assert (reg > -1);
				if (reg != AMD64_RAX)
					amd64_mov_reg_reg (code, AMD64_RAX, reg, 8);
			} else if (tailcall_membase) {
				int const reg = ins->sreg1;
				g_assert (reg > -1);
				amd64_mov_reg_membase (code, AMD64_RAX, reg, ins->inst_offset, 8);
			} else {
				if (cfg->compile_aot) {
					mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
					amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RIP, 0, 8);
				} else {
					// FIXME Patch data instead of code.
					guint32 pad_size = (guint32)((code + 2 - cfg->native_code) % 8);
					if (pad_size)
						amd64_padding (code, 8 - pad_size);
					mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
					amd64_set_reg_template (code, AMD64_RAX);
				}
			}

			/* Restore callee saved registers */
			save_area_offset = cfg->arch.reg_save_area_offset;
			for (i = 0; i < AMD64_NREG; ++i)
				if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & ((regmask_t)1 << i))) {
					amd64_mov_reg_membase (code, i, cfg->frame_reg, save_area_offset, 8);
					save_area_offset += 8;
				}

			if (cfg->arch.omit_fp) {
				if (cfg->arch.stack_alloc_size)
					amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size);
				// FIXME:
				if (call->stack_usage)
					NOT_IMPLEMENTED;
			} else {
				amd64_push_reg (code, AMD64_RAX);
				/* Copy arguments on the stack to our argument area */
				// FIXME use rep mov for constant code size, before nonvolatiles
				// restored, first saving rsi, rdi into volatiles
				for (i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) {
					amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, i + 8, sizeof (target_mgreg_t));
					amd64_mov_membase_reg (code, AMD64_RBP, ARGS_OFFSET + i, AMD64_RAX, sizeof (target_mgreg_t));
				}
				amd64_pop_reg (code, AMD64_RAX);
#ifdef TARGET_WIN32
				amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
				amd64_pop_reg (code, AMD64_RBP);
				mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP);
#else
				amd64_leave (code);
#endif
			}

#ifdef TARGET_WIN32
			// Redundant REX byte indicates a tailcall to the native unwinder. It means nothing to the processor.
			// https://github.com/dotnet/coreclr/blob/966dabb5bb3c4bf1ea885e1e8dc6528e8c64dc4f/src/unwinder/amd64/unwinder_amd64.cpp#L1394
			// FIXME This should be jmp rip+32 for AOT direct to same assembly.
			// FIXME This should be jmp [rip+32] for AOT direct to not-same assembly (through data).
			// FIXME This should be jmp [rip+32] for JIT direct -- patch data instead of code.
			// This is only close to ideal for tailcall_membase, and even then it should
			// have a more dynamic register allocation.
			x86_imm_emit8 (code, 0x48);
			amd64_jump_reg (code, AMD64_RAX);
#else
			// NT does not have varargs rax use, and NT ABI does not have red zone.
			// Use red-zone mov/jmp instead of push/ret to preserve call/ret speculation stack.
			// FIXME Just like NT the direct cases are not ideal.
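			// The sequence emitted below is:
			//   mov [rsp-8], rax    ; stash the call target in the red zone
			//   (set rax to the SSE register count when the target is varargs)
			//   jmp [rsp-8]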
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RAX, 8); code = amd64_handle_varargs_call (cfg, code, call, FALSE); amd64_jump_membase (code, AMD64_RSP, -8); #endif ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; break; } case OP_CHECK_THIS: /* ensure ins->sreg1 is not NULL */ amd64_alu_membase_imm_size (code, X86_CMP, ins->sreg1, 0, 0, 4); break; case OP_ARGLIST: { amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, cfg->sig_cookie); amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, sizeof(gpointer)); break; } case OP_CALL: case OP_FCALL: case OP_RCALL: case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: call = (MonoCallInst*)ins; code = amd64_handle_varargs_call (cfg, code, call, FALSE); code = emit_call (cfg, call, code, MONO_JIT_ICALL_ZeroIsReserved); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; code = emit_move_return_value (cfg, ins, code); break; case OP_FCALL_REG: case OP_RCALL_REG: case OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: call = (MonoCallInst*)ins; if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) { amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8); ins->sreg1 = AMD64_R11; } code = amd64_handle_varargs_call (cfg, code, call, TRUE); amd64_call_reg (code, ins->sreg1); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; code = emit_move_return_value (cfg, ins, code); break; case OP_FCALL_MEMBASE: case OP_RCALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: call = (MonoCallInst*)ins; amd64_call_membase (code, ins->sreg1, ins->inst_offset); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; code = emit_move_return_value (cfg, ins, code); break; case OP_DYN_CALL: { int i, limit_reg, index_reg, src_reg, dst_reg; MonoInst *var = cfg->dyn_call_var; guint8 *label; guint8 *buf [16]; g_assert (var->opcode == OP_REGOFFSET); /* r11 = args buffer filled by mono_arch_get_dyn_call_args () */ amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8); /* r10 = ftn */ amd64_mov_reg_reg (code, AMD64_R10, ins->sreg2, 8); /* Save args buffer */ amd64_mov_membase_reg (code, var->inst_basereg, var->inst_offset, AMD64_R11, 8); /* Set fp arg regs */ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, has_fp), sizeof (target_mgreg_t)); amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX); label = code; amd64_branch8 (code, X86_CC_Z, -1, 1); for (i = 0; i < FLOAT_PARAM_REGS; ++i) amd64_sse_movsd_reg_membase (code, i, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, fregs) + (i * sizeof (double))); amd64_patch (label, code); /* Allocate param area */ /* This doesn't need to be freed since OP_DYN_CALL is never called in a loop */ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, nstack_args), 8); amd64_shift_reg_imm (code, X86_SHL, AMD64_RAX, 3); amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, AMD64_RAX); /* Set stack args */ /* rax/rcx/rdx/r8/r9 is scratch */ limit_reg = AMD64_RAX; index_reg = AMD64_RCX; src_reg = AMD64_R8; dst_reg = AMD64_R9; amd64_mov_reg_membase (code, limit_reg, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, nstack_args), 8); amd64_mov_reg_imm (code, index_reg, 0); amd64_lea_membase (code, src_reg, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, regs) + ((PARAM_REGS) * sizeof (target_mgreg_t))); amd64_mov_reg_reg (code, dst_reg, AMD64_RSP, 8); buf [0] = 
code; x86_jump8 (code, 0); buf [1] = code; amd64_mov_reg_membase (code, AMD64_RDX, src_reg, 0, 8); amd64_mov_membase_reg (code, dst_reg, 0, AMD64_RDX, 8); amd64_alu_reg_imm (code, X86_ADD, index_reg, 1); amd64_alu_reg_imm (code, X86_ADD, src_reg, 8); amd64_alu_reg_imm (code, X86_ADD, dst_reg, 8); amd64_patch (buf [0], code); amd64_alu_reg_reg (code, X86_CMP, index_reg, limit_reg); buf [2] = code; x86_branch8 (code, X86_CC_LT, 0, FALSE); amd64_patch (buf [2], buf [1]); /* Set argument registers */ for (i = 0; i < PARAM_REGS; ++i) amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (i * sizeof (target_mgreg_t)), sizeof (target_mgreg_t)); /* Make the call */ amd64_call_reg (code, AMD64_R10); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; /* Save result */ amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8); amd64_mov_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, res), AMD64_RAX, 8); amd64_sse_movsd_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, fregs), AMD64_XMM0); amd64_sse_movsd_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, fregs) + sizeof (double), AMD64_XMM1); break; } case OP_AMD64_SAVE_SP_TO_LMF: { MonoInst *lmf_var = cfg->lmf_var; amd64_mov_membase_reg (code, lmf_var->inst_basereg, lmf_var->inst_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8); break; } case OP_X86_PUSH: g_assert_not_reached (); amd64_push_reg (code, ins->sreg1); break; case OP_X86_PUSH_IMM: g_assert_not_reached (); g_assert (amd64_is_imm32 (ins->inst_imm)); amd64_push_imm (code, ins->inst_imm); break; case OP_X86_PUSH_MEMBASE: g_assert_not_reached (); amd64_push_membase (code, ins->inst_basereg, ins->inst_offset); break; case OP_X86_PUSH_OBJ: { int size = ALIGN_TO (ins->inst_imm, 8); g_assert_not_reached (); amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size); amd64_push_reg (code, AMD64_RDI); amd64_push_reg (code, AMD64_RSI); amd64_push_reg (code, AMD64_RCX); if (ins->inst_offset) amd64_lea_membase (code, AMD64_RSI, ins->inst_basereg, ins->inst_offset); else amd64_mov_reg_reg (code, AMD64_RSI, ins->inst_basereg, 8); amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, (3 * 8)); amd64_mov_reg_imm (code, AMD64_RCX, (size >> 3)); amd64_cld (code); amd64_prefix (code, X86_REP_PREFIX); amd64_movsd (code); amd64_pop_reg (code, AMD64_RCX); amd64_pop_reg (code, AMD64_RSI); amd64_pop_reg (code, AMD64_RDI); break; } case OP_GENERIC_CLASS_INIT: { guint8 *jump; g_assert (ins->sreg1 == MONO_AMD64_ARG_REG1); amd64_test_membase_imm_size (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoVTable, initialized), 1, 1); jump = code; amd64_branch8 (code, X86_CC_NZ, -1, 1); code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_generic_class_init); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; x86_patch (jump, code); break; } case OP_X86_LEA: amd64_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount); break; case OP_X86_LEA_MEMBASE: amd64_lea4_membase (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_AMD64_LEA_MEMBASE: amd64_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_X86_XCHG: amd64_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4); break; case OP_LOCALLOC: /* keep alignment */ amd64_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1); amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1)); code = mono_emit_stack_alloc 
(cfg, code, ins); amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8); if (cfg->param_area) amd64_alu_reg_imm (code, X86_ADD, ins->dreg, cfg->param_area); break; case OP_LOCALLOC_IMM: { guint32 size = ins->inst_imm; size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1); if (ins->flags & MONO_INST_INIT) { if (size < 64) { int i; amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size); amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); for (i = 0; i < size; i += 8) amd64_mov_membase_reg (code, AMD64_RSP, i, ins->dreg, 8); amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8); } else { amd64_mov_reg_imm (code, ins->dreg, size); ins->sreg1 = ins->dreg; code = mono_emit_stack_alloc (cfg, code, ins); amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8); } } else { amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size); amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8); } if (cfg->param_area) amd64_alu_reg_imm (code, X86_ADD, ins->dreg, cfg->param_area); break; } case OP_THROW: { amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8); code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_arch_throw_exception); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; break; } case OP_RETHROW: { amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8); code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_arch_rethrow_exception); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; break; } case OP_CALL_HANDLER: /* Align stack */ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb); amd64_call_imm (code, 0); /* * ins->inst_eh_blocks and bb->clause_holes are part of same GList. * Holes from bb->clause_holes will be added separately for the entire * basic block. Add only the rest of them. 
*/ for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev) mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb); /* Restore stack alignment */ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8); break; case OP_START_HANDLER: { /* Even though we're saving RSP, use sizeof */ /* gpointer because spvar is of type IntPtr */ /* see: mono_create_spvar_for_region */ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); amd64_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, AMD64_RSP, sizeof(gpointer)); if ((MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FINALLY) || MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FILTER) || MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FAULT)) && cfg->param_area) { amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT)); } break; } case OP_ENDFINALLY: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer)); amd64_ret (code); break; } case OP_ENDFILTER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer)); /* The local allocator will put the result into RAX */ amd64_ret (code); break; } case OP_GET_EX_OBJ: if (ins->dreg != AMD64_RAX) amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, sizeof (target_mgreg_t)); break; case OP_LABEL: ins->inst_c0 = code - cfg->native_code; break; case OP_BR: //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins); //if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins) //break; if (ins->inst_target_bb->native_offset) { amd64_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb); if (optimize_branch_pred && x86_is_imm8 (ins->inst_target_bb->max_offset - offset)) x86_jump8 (code, 0); else x86_jump32 (code, 0); } break; case OP_BR_REG: amd64_jump_reg (code, ins->sreg1); break; case OP_ICNEQ: case OP_ICGE: case OP_ICLE: case OP_ICGE_UN: case OP_ICLE_UN: case OP_CEQ: case OP_LCEQ: case OP_ICEQ: case OP_CLT: case OP_LCLT: case OP_ICLT: case OP_CGT: case OP_ICGT: case OP_LCGT: case OP_CLT_UN: case OP_LCLT_UN: case OP_ICLT_UN: case OP_CGT_UN: case OP_LCGT_UN: case OP_ICGT_UN: amd64_set_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, cc_signed_table [mono_opcode_to_cond (ins->opcode)]); amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); break; case OP_COND_EXC_EQ: case OP_COND_EXC_NE_UN: case OP_COND_EXC_LT: case OP_COND_EXC_LT_UN: case OP_COND_EXC_GT: case OP_COND_EXC_GT_UN: case OP_COND_EXC_GE: case OP_COND_EXC_GE_UN: case OP_COND_EXC_LE: case OP_COND_EXC_LE_UN: case OP_COND_EXC_IEQ: case OP_COND_EXC_INE_UN: case OP_COND_EXC_ILT: case OP_COND_EXC_ILT_UN: case OP_COND_EXC_IGT: case OP_COND_EXC_IGT_UN: case OP_COND_EXC_IGE: case OP_COND_EXC_IGE_UN: case OP_COND_EXC_ILE: case OP_COND_EXC_ILE_UN: EMIT_COND_SYSTEM_EXCEPTION (cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], (const char *)ins->inst_p1); break; case OP_COND_EXC_OV: case OP_COND_EXC_NO: case OP_COND_EXC_C: case OP_COND_EXC_NC: EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], (ins->opcode < OP_COND_EXC_NE_UN), (const char *)ins->inst_p1); break; case OP_COND_EXC_IOV: case OP_COND_EXC_INO: case 
OP_COND_EXC_IC: case OP_COND_EXC_INC: EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_IEQ], (ins->opcode < OP_COND_EXC_INE_UN), (const char *)ins->inst_p1); break; /* floating point opcodes */ case OP_R8CONST: { double d = *(double *)ins->inst_p0; if ((d == 0.0) && (mono_signbit (d) == 0)) { amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg); } else if (cfg->compile_aot && cfg->code_exec_only) { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8_GOT, ins->inst_p0); amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof(gpointer)); amd64_sse_movsd_reg_membase (code, ins->dreg, AMD64_R11, 0); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0); amd64_sse_movsd_reg_membase (code, ins->dreg, AMD64_RIP, 0); } break; } case OP_R4CONST: { float f = *(float *)ins->inst_p0; if ((f == 0.0) && (mono_signbit (f) == 0)) { if (cfg->r4fp) amd64_sse_xorps_reg_reg (code, ins->dreg, ins->dreg); else amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg); } else { if (cfg->compile_aot && cfg->code_exec_only) { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4_GOT, ins->inst_p0); amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof(gpointer)); amd64_sse_movss_reg_membase (code, ins->dreg, AMD64_R11, 0); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0); amd64_sse_movss_reg_membase (code, ins->dreg, AMD64_RIP, 0); } if (!cfg->r4fp) amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); } break; } case OP_STORER8_MEMBASE_REG: amd64_sse_movsd_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1); break; case OP_LOADR8_MEMBASE: amd64_sse_movsd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset); break; case OP_STORER4_MEMBASE_REG: if (cfg->r4fp) { amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1); } else { /* This requires a double->single conversion */ amd64_sse_cvtsd2ss_reg_reg (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1); amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, MONO_ARCH_FP_SCRATCH_REG); } break; case OP_LOADR4_MEMBASE: if (cfg->r4fp) { amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset); amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); } break; case OP_ICONV_TO_R4: if (cfg->r4fp) { amd64_sse_cvtsi2ss_reg_reg_size (code, ins->dreg, ins->sreg1, 4); } else { amd64_sse_cvtsi2ss_reg_reg_size (code, ins->dreg, ins->sreg1, 4); amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); } break; case OP_ICONV_TO_R8: amd64_sse_cvtsi2sd_reg_reg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_LCONV_TO_R4: if (cfg->r4fp) { amd64_sse_cvtsi2ss_reg_reg (code, ins->dreg, ins->sreg1); } else { amd64_sse_cvtsi2ss_reg_reg (code, ins->dreg, ins->sreg1); amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); } break; case OP_LCONV_TO_R8: amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_FCONV_TO_R4: if (cfg->r4fp) { amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1); } else { amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1); amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); } break; case OP_FCONV_TO_I1: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE); break; case OP_FCONV_TO_U1: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE); break; case OP_FCONV_TO_I2: code = emit_float_to_int (cfg, code, ins->dreg, 
ins->sreg1, 2, TRUE); break; case OP_FCONV_TO_U2: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE); break; case OP_FCONV_TO_U4: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE); break; case OP_FCONV_TO_I4: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE); break; case OP_FCONV_TO_I8: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE); break; case OP_RCONV_TO_I1: amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4); amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE); break; case OP_RCONV_TO_U1: amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4); amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); break; case OP_RCONV_TO_I2: amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4); amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE); break; case OP_RCONV_TO_U2: amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4); amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE); break; case OP_RCONV_TO_I4: amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_RCONV_TO_U4: // Use 8 as register size to get Nan/Inf conversion result truncated to 0 amd64_sse_cvtss2si_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_RCONV_TO_I8: amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 8); break; case OP_RCONV_TO_R8: amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_RCONV_TO_R4: if (ins->dreg != ins->sreg1) amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_LCONV_TO_R_UN: { guint8 *br [2]; /* Based on gcc code */ amd64_test_reg_reg (code, ins->sreg1, ins->sreg1); br [0] = code; x86_branch8 (code, X86_CC_S, 0, TRUE); /* Positive case */ amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1); br [1] = code; x86_jump8 (code, 0); amd64_patch (br [0], code); /* Negative case */ /* Save to the red zone */ amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RAX, 8); amd64_mov_membase_reg (code, AMD64_RSP, -16, AMD64_RCX, 8); amd64_mov_reg_reg (code, AMD64_RCX, ins->sreg1, 8); amd64_mov_reg_reg (code, AMD64_RAX, ins->sreg1, 8); amd64_alu_reg_imm (code, X86_AND, AMD64_RCX, 1); amd64_shift_reg_imm (code, X86_SHR, AMD64_RAX, 1); amd64_alu_reg_imm (code, X86_OR, AMD64_RAX, AMD64_RCX); amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, AMD64_RAX); amd64_sse_addsd_reg_reg (code, ins->dreg, ins->dreg); /* Restore */ amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, -16, 8); amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, -8, 8); amd64_patch (br [1], code); break; } case OP_LCONV_TO_OVF_U4: amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, TRUE, "OverflowException"); amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8); break; case OP_LCONV_TO_OVF_I4_UN: amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0x7fffffff); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, FALSE, "OverflowException"); amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8); break; case OP_FMOVE: if (ins->dreg != ins->sreg1) amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_RMOVE: if (ins->dreg != ins->sreg1) amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_MOVE_F_TO_I4: if (cfg->r4fp) { amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8); } else { amd64_sse_cvtsd2ss_reg_reg (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1); amd64_movd_reg_xreg_size (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG, 8); } break; case OP_MOVE_I4_TO_F: amd64_movd_xreg_reg_size (code, ins->dreg, 
ins->sreg1, 8); if (!cfg->r4fp) amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); break; case OP_MOVE_F_TO_I8: amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8); break; case OP_MOVE_I8_TO_F: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8); break; case OP_FADD: amd64_sse_addsd_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_FSUB: amd64_sse_subsd_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_FMUL: amd64_sse_mulsd_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_FDIV: amd64_sse_divsd_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_FNEG: { static double r8_0 = -0.0; g_assert (ins->sreg1 == ins->dreg); if (cfg->compile_aot && cfg->code_exec_only) { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8_GOT, &r8_0); amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof (target_mgreg_t)); amd64_sse_movsd_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_R11, 0); amd64_sse_xorpd_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, &r8_0); amd64_sse_xorpd_reg_membase (code, ins->dreg, AMD64_RIP, 0); } break; } case OP_ABS: { static guint64 d = 0x7fffffffffffffffUL; g_assert (ins->sreg1 == ins->dreg); if (cfg->compile_aot && cfg->code_exec_only) { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8_GOT, &d); amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof (target_mgreg_t)); amd64_sse_movsd_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_R11, 0); amd64_sse_andpd_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, &d); amd64_sse_andpd_reg_membase (code, ins->dreg, AMD64_RIP, 0); } break; } case OP_SQRT: EMIT_SSE2_FPFUNC (code, fsqrt, ins->dreg, ins->sreg1); break; case OP_RADD: amd64_sse_addss_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_RSUB: amd64_sse_subss_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_RMUL: amd64_sse_mulss_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_RDIV: amd64_sse_divss_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_RNEG: { static float r4_0 = -0.0; g_assert (ins->sreg1 == ins->dreg); if (cfg->compile_aot && cfg->code_exec_only) { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4_GOT, &r4_0); amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof (target_mgreg_t)); amd64_sse_movss_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_R11, 0); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, &r4_0); amd64_sse_movss_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_RIP, 0); } amd64_sse_xorps_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG); break; } case OP_IMIN: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4); amd64_cmov_reg_size (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2, 4); break; case OP_IMIN_UN: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4); amd64_cmov_reg_size (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2, 4); break; case OP_IMAX: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4); amd64_cmov_reg_size (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2, 4); break; case OP_IMAX_UN: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4); amd64_cmov_reg_size (code, X86_CC_LT, 
FALSE, ins->dreg, ins->sreg2, 4); break; case OP_LMIN: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); amd64_cmov_reg (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2); break; case OP_LMIN_UN: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); amd64_cmov_reg (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2); break; case OP_LMAX: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); amd64_cmov_reg (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2); break; case OP_LMAX_UN: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); amd64_cmov_reg (code, X86_CC_LT, FALSE, ins->dreg, ins->sreg2); break; case OP_X86_FPOP: break; case OP_FCOMPARE: /* * The two arguments are swapped because the fbranch instructions * depend on this for the non-sse case to work. */ amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1); break; case OP_RCOMPARE: /* * FIXME: Get rid of this. * The two arguments are swapped because the fbranch instructions * depend on this for the non-sse case to work. */ amd64_sse_comiss_reg_reg (code, ins->sreg2, ins->sreg1); break; case OP_FCNEQ: case OP_FCEQ: { /* zeroing the register at the start results in * shorter and faster code (we can also remove the widening op) */ guchar *unordered_check; amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2); unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); if (ins->opcode == OP_FCEQ) { amd64_set_reg (code, X86_CC_EQ, ins->dreg, FALSE); amd64_patch (unordered_check, code); } else { guchar *jump_to_end; amd64_set_reg (code, X86_CC_NE, ins->dreg, FALSE); jump_to_end = code; x86_jump8 (code, 0); amd64_patch (unordered_check, code); amd64_inc_reg (code, ins->dreg); amd64_patch (jump_to_end, code); } break; } case OP_FCLT: case OP_FCLT_UN: { /* zeroing the register at the start results in * shorter and faster code (we can also remove the widening op) */ amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1); if (ins->opcode == OP_FCLT_UN) { guchar *unordered_check = code; guchar *jump_to_end; x86_branch8 (code, X86_CC_P, 0, FALSE); amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE); jump_to_end = code; x86_jump8 (code, 0); amd64_patch (unordered_check, code); amd64_inc_reg (code, ins->dreg); amd64_patch (jump_to_end, code); } else { amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE); } break; } case OP_FCLE: { guchar *unordered_check; amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1); unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); amd64_set_reg (code, X86_CC_NB, ins->dreg, FALSE); amd64_patch (unordered_check, code); break; } case OP_FCGT: case OP_FCGT_UN: { /* zeroing the register at the start results in * shorter and faster code (we can also remove the widening op) */ guchar *unordered_check; amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1); if (ins->opcode == OP_FCGT) { unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE); amd64_patch (unordered_check, code); } else { amd64_set_reg (code, X86_CC_LT, ins->dreg, 
FALSE); } break; } case OP_FCGE: { guchar *unordered_check; amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1); unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); amd64_set_reg (code, X86_CC_NA, ins->dreg, FALSE); amd64_patch (unordered_check, code); break; } case OP_RCEQ: case OP_RCGT: case OP_RCLT: case OP_RCLT_UN: case OP_RCGT_UN: { int x86_cond; amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); amd64_sse_comiss_reg_reg (code, ins->sreg2, ins->sreg1); switch (ins->opcode) { case OP_RCEQ: x86_cond = X86_CC_EQ; break; case OP_RCGT: x86_cond = X86_CC_LT; break; case OP_RCLT: x86_cond = X86_CC_GT; break; case OP_RCLT_UN: x86_cond = X86_CC_GT; break; case OP_RCGT_UN: x86_cond = X86_CC_LT; break; default: g_assert_not_reached (); break; } guchar *unordered_check; switch (ins->opcode) { case OP_RCEQ: case OP_RCGT: unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); amd64_set_reg (code, x86_cond, ins->dreg, FALSE); amd64_patch (unordered_check, code); break; case OP_RCLT_UN: case OP_RCGT_UN: { guchar *jump_to_end; unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); amd64_set_reg (code, x86_cond, ins->dreg, FALSE); jump_to_end = code; x86_jump8 (code, 0); amd64_patch (unordered_check, code); amd64_inc_reg (code, ins->dreg); amd64_patch (jump_to_end, code); break; } case OP_RCLT: amd64_set_reg (code, x86_cond, ins->dreg, FALSE); break; default: g_assert_not_reached (); break; } break; } case OP_FCLT_MEMBASE: case OP_FCGT_MEMBASE: case OP_FCLT_UN_MEMBASE: case OP_FCGT_UN_MEMBASE: case OP_FCEQ_MEMBASE: { guchar *unordered_check, *jump_to_end; int x86_cond; amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); amd64_sse_comisd_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset); switch (ins->opcode) { case OP_FCEQ_MEMBASE: x86_cond = X86_CC_EQ; break; case OP_FCLT_MEMBASE: case OP_FCLT_UN_MEMBASE: x86_cond = X86_CC_LT; break; case OP_FCGT_MEMBASE: case OP_FCGT_UN_MEMBASE: x86_cond = X86_CC_GT; break; default: g_assert_not_reached (); } unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); amd64_set_reg (code, x86_cond, ins->dreg, FALSE); switch (ins->opcode) { case OP_FCEQ_MEMBASE: case OP_FCLT_MEMBASE: case OP_FCGT_MEMBASE: amd64_patch (unordered_check, code); break; case OP_FCLT_UN_MEMBASE: case OP_FCGT_UN_MEMBASE: jump_to_end = code; x86_jump8 (code, 0); amd64_patch (unordered_check, code); amd64_inc_reg (code, ins->dreg); amd64_patch (jump_to_end, code); break; default: break; } break; } case OP_FBEQ: { guchar *jump = code; x86_branch8 (code, X86_CC_P, 0, TRUE); EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE); amd64_patch (jump, code); break; } case OP_FBNE_UN: /* Branch if C013 != 100 */ /* branch if !ZF or (PF|CF) */ EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE); EMIT_COND_BRANCH (ins, X86_CC_P, FALSE); EMIT_COND_BRANCH (ins, X86_CC_B, FALSE); break; case OP_FBLT: EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE); break; case OP_FBLT_UN: EMIT_COND_BRANCH (ins, X86_CC_P, FALSE); EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE); break; case OP_FBGT: case OP_FBGT_UN: if (ins->opcode == OP_FBGT) { guchar *br1; /* skip branch if C1=1 */ br1 = code; x86_branch8 (code, X86_CC_P, 0, FALSE); /* branch if (C0 | C3) = 1 */ EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE); amd64_patch (br1, code); break; } else { EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE); } break; case OP_FBGE: { /* Branch if C013 == 100 or 001 */ guchar *br1; /* skip branch if C1=1 */ br1 = code; x86_branch8 (code, X86_CC_P, 0, FALSE); /* branch if 
(C0 | C3) = 1 */ EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE); amd64_patch (br1, code); break; } case OP_FBGE_UN: /* Branch if C013 == 000 */ EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE); break; case OP_FBLE: { /* Branch if C013=000 or 100 */ guchar *br1; /* skip branch if C1=1 */ br1 = code; x86_branch8 (code, X86_CC_P, 0, FALSE); /* branch if C0=0 */ EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE); amd64_patch (br1, code); break; } case OP_FBLE_UN: /* Branch if C013 != 001 */ EMIT_COND_BRANCH (ins, X86_CC_P, FALSE); EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE); break; case OP_CKFINITE: /* Transfer value to the fp stack */ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 16); amd64_movsd_membase_reg (code, AMD64_RSP, 0, ins->sreg1); amd64_fld_membase (code, AMD64_RSP, 0, TRUE); amd64_push_reg (code, AMD64_RAX); amd64_fxam (code); amd64_fnstsw (code); amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0x4100); amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0); amd64_pop_reg (code, AMD64_RAX); amd64_fstp (code, 0); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException"); amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16); break; case OP_TLS_GET: { code = mono_amd64_emit_tls_get (code, ins->dreg, ins->inst_offset); break; } case OP_TLS_SET: { code = mono_amd64_emit_tls_set (code, ins->sreg1, ins->inst_offset); break; } case OP_MEMORY_BARRIER: { if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) x86_mfence (code); break; } case OP_ATOMIC_ADD_I4: case OP_ATOMIC_ADD_I8: { int dreg = ins->dreg; guint32 size = (ins->opcode == OP_ATOMIC_ADD_I4) ? 4 : 8; if ((dreg == ins->sreg2) || (dreg == ins->inst_basereg)) dreg = AMD64_R11; amd64_mov_reg_reg (code, dreg, ins->sreg2, size); amd64_prefix (code, X86_LOCK_PREFIX); amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size); /* dreg contains the old value, add with sreg2 value */ amd64_alu_reg_reg_size (code, X86_ADD, dreg, ins->sreg2, size); if (ins->dreg != dreg) amd64_mov_reg_reg (code, ins->dreg, dreg, size); break; } case OP_ATOMIC_EXCHANGE_I4: case OP_ATOMIC_EXCHANGE_I8: { guint32 size = ins->opcode == OP_ATOMIC_EXCHANGE_I4 ? 4 : 8; /* LOCK prefix is implied. */ amd64_mov_reg_reg (code, GP_SCRATCH_REG, ins->sreg2, size); amd64_xchg_membase_reg_size (code, ins->sreg1, ins->inst_offset, GP_SCRATCH_REG, size); amd64_mov_reg_reg (code, ins->dreg, GP_SCRATCH_REG, size); break; } case OP_ATOMIC_CAS_I4: case OP_ATOMIC_CAS_I8: { guint32 size; if (ins->opcode == OP_ATOMIC_CAS_I8) size = 8; else size = 4; /* * See http://msdn.microsoft.com/en-us/magazine/cc302329.aspx for * an explanation of how this works. 
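* In short: RAX (sreg3) holds the comparand; LOCK CMPXCHG stores sreg2 into [sreg1 + inst_offset] only
* if that memory word still equals RAX, and in either case the old memory value is left in RAX.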
*/ g_assert (ins->sreg3 == AMD64_RAX); g_assert (ins->sreg1 != AMD64_RAX); g_assert (ins->sreg1 != ins->sreg2); amd64_prefix (code, X86_LOCK_PREFIX); amd64_cmpxchg_membase_reg_size (code, ins->sreg1, ins->inst_offset, ins->sreg2, size); if (ins->dreg != AMD64_RAX) amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, size); break; } case OP_ATOMIC_LOAD_I1: { amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE); break; } case OP_ATOMIC_LOAD_U1: { amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE); break; } case OP_ATOMIC_LOAD_I2: { amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE); break; } case OP_ATOMIC_LOAD_U2: { amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE); break; } case OP_ATOMIC_LOAD_I4: { amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset); break; } case OP_ATOMIC_LOAD_U4: case OP_ATOMIC_LOAD_I8: case OP_ATOMIC_LOAD_U8: { amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, ins->opcode == OP_ATOMIC_LOAD_U4 ? 4 : 8); break; } case OP_ATOMIC_LOAD_R4: { if (cfg->r4fp) { amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset); amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); } break; } case OP_ATOMIC_LOAD_R8: { amd64_sse_movsd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset); break; } case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_U1: case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_U2: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U4: case OP_ATOMIC_STORE_I8: case OP_ATOMIC_STORE_U8: { int size; switch (ins->opcode) { case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_U1: size = 1; break; case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_U2: size = 2; break; case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U4: size = 4; break; case OP_ATOMIC_STORE_I8: case OP_ATOMIC_STORE_U8: size = 8; break; } amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, size); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) x86_mfence (code); break; } case OP_ATOMIC_STORE_R4: { if (cfg->r4fp) { amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1); } else { amd64_sse_cvtsd2ss_reg_reg (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1); amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, MONO_ARCH_FP_SCRATCH_REG); } if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) x86_mfence (code); break; } case OP_ATOMIC_STORE_R8: { x86_nop (code); x86_nop (code); amd64_sse_movsd_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1); x86_nop (code); x86_nop (code); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) x86_mfence (code); break; } case OP_CARD_TABLE_WBARRIER: { int ptr = ins->sreg1; int value = ins->sreg2; guchar *br = 0; int nursery_shift, card_table_shift; gpointer card_table_mask; size_t nursery_size; gpointer card_table = mono_gc_get_card_table (&card_table_shift, &card_table_mask); guint64 nursery_start = (guint64)mono_gc_get_nursery (&nursery_shift, &nursery_size); guint64 shifted_nursery_start = nursery_start >> nursery_shift; /* If either points to the stack we can simply avoid the WB. This happens due to * optimizations revealing a stack store that was not visible when op_cardtable was emitted.
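* (A stack slot is never part of the GC heap, so no card needs to be dirtied for it.)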
*/ if (ins->sreg1 == AMD64_RSP || ins->sreg2 == AMD64_RSP) continue; /* * We need one register we can clobber; we choose EDX and make sreg1 * fixed EAX to work around limitations in the local register allocator. * sreg2 might get allocated to EDX, but that is not a problem since * we use it before clobbering EDX. */ g_assert (ins->sreg1 == AMD64_RAX); /* * This is the code we produce: * * edx = value * edx >>= nursery_shift * cmp edx, (nursery_start >> nursery_shift) * jne done * edx = ptr * edx >>= card_table_shift * edx += cardtable * [edx] = 1 * done: */ if (mono_gc_card_table_nursery_check ()) { if (value != AMD64_RDX) amd64_mov_reg_reg (code, AMD64_RDX, value, 8); amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, nursery_shift); if (shifted_nursery_start >> 31) { /* * The value we need to compare against is 64 bits, so we need * another spare register. We use RBX, which we save and * restore. */ amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RBX, 8); amd64_mov_reg_imm (code, AMD64_RBX, shifted_nursery_start); amd64_alu_reg_reg (code, X86_CMP, AMD64_RDX, AMD64_RBX); amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RSP, -8, 8); } else { amd64_alu_reg_imm (code, X86_CMP, AMD64_RDX, shifted_nursery_start); } br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE); } amd64_mov_reg_reg (code, AMD64_RDX, ptr, 8); amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, card_table_shift); if (card_table_mask) amd64_alu_reg_imm (code, X86_AND, AMD64_RDX, (guint32)(guint64)card_table_mask); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, card_table); amd64_alu_reg_membase (code, X86_ADD, AMD64_RDX, AMD64_RIP, 0); amd64_mov_membase_imm (code, AMD64_RDX, 0, 1, 1); if (mono_gc_card_table_nursery_check ()) x86_patch (br, code); break; } #ifdef MONO_ARCH_SIMD_INTRINSICS /* TODO: Some of these IR opcodes are marked as no clobber when they indeed do clobber.
*/ case OP_ADDPS: amd64_sse_addps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_DIVPS: amd64_sse_divps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MULPS: amd64_sse_mulps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_SUBPS: amd64_sse_subps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MAXPS: amd64_sse_maxps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MINPS: amd64_sse_minps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_COMPPS: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7); amd64_sse_cmpps_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ANDPS: amd64_sse_andps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_ANDNPS: amd64_sse_andnps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_ORPS: amd64_sse_orps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_XORPS: amd64_sse_xorps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_SQRTPS: amd64_sse_sqrtps_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_RSQRTPS: amd64_sse_rsqrtps_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_RCPPS: amd64_sse_rcpps_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_ADDSUBPS: amd64_sse_addsubps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_HADDPS: amd64_sse_haddps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_HSUBPS: amd64_sse_hsubps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_DUPPS_HIGH: amd64_sse_movshdup_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_DUPPS_LOW: amd64_sse_movsldup_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_PSHUFLEW_HIGH: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); amd64_sse_pshufhw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_PSHUFLEW_LOW: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); amd64_sse_pshuflw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_PSHUFLED: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_SHUFPS: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); amd64_sse_shufps_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_SHUFPD: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0x3); amd64_sse_shufpd_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ADDPD: amd64_sse_addpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_DIVPD: amd64_sse_divpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MULPD: amd64_sse_mulpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_SUBPD: amd64_sse_subpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MAXPD: amd64_sse_maxpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MINPD: amd64_sse_minpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_COMPPD: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7); amd64_sse_cmppd_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ANDPD: amd64_sse_andpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_ANDNPD: amd64_sse_andnpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_ORPD: amd64_sse_orpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_XORPD: amd64_sse_xorpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_SQRTPD: amd64_sse_sqrtpd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_ADDSUBPD: amd64_sse_addsubpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_HADDPD: amd64_sse_haddpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_HSUBPD: 
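/* SSE3 horizontal subtract: result low lane = sreg1[0] - sreg1[1], high lane = sreg2[0] - sreg2[1]. */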
amd64_sse_hsubpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_DUPPD: amd64_sse_movddup_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_EXTRACT_MASK: amd64_sse_pmovmskb_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_PAND: amd64_sse_pand_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PANDN: amd64_sse_pandn_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_POR: amd64_sse_por_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PXOR: amd64_sse_pxor_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDB: amd64_sse_paddb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDW: amd64_sse_paddw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDD: amd64_sse_paddd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDQ: amd64_sse_paddq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBB: amd64_sse_psubb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBW: amd64_sse_psubw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBD: amd64_sse_psubd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBQ: amd64_sse_psubq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMAXB_UN: amd64_sse_pmaxub_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMAXW_UN: amd64_sse_pmaxuw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMAXD_UN: amd64_sse_pmaxud_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMAXB: amd64_sse_pmaxsb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMAXW: amd64_sse_pmaxsw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMAXD: amd64_sse_pmaxsd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PAVGB_UN: amd64_sse_pavgb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PAVGW_UN: amd64_sse_pavgw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMINB_UN: amd64_sse_pminub_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMINW_UN: amd64_sse_pminuw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMIND_UN: amd64_sse_pminud_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMINB: amd64_sse_pminsb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMINW: amd64_sse_pminsw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMIND: amd64_sse_pminsd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQB: amd64_sse_pcmpeqb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQW: amd64_sse_pcmpeqw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQD: amd64_sse_pcmpeqd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQQ: amd64_sse_pcmpeqq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTB: amd64_sse_pcmpgtb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTW: amd64_sse_pcmpgtw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTD: amd64_sse_pcmpgtd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTQ: amd64_sse_pcmpgtq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUM_ABS_DIFF: amd64_sse_psadbw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWB: amd64_sse_punpcklbw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWW: amd64_sse_punpcklwd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWD: amd64_sse_punpckldq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWQ: amd64_sse_punpcklqdq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWPS: amd64_sse_unpcklps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWPD: amd64_sse_unpcklpd_reg_reg (code, ins->sreg1, ins->sreg2); 
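/* The UNPACK_HIGH variants below are the punpckh/unpckhp forms, interleaving the high halves of sreg1 and sreg2 just as the punpckl/unpcklp forms above interleave the low halves. */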
break; case OP_UNPACK_HIGHB: amd64_sse_punpckhbw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHW: amd64_sse_punpckhwd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHD: amd64_sse_punpckhdq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHQ: amd64_sse_punpckhqdq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHPS: amd64_sse_unpckhps_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHPD: amd64_sse_unpckhpd_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PACKW: amd64_sse_packsswb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PACKD: amd64_sse_packssdw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PACKW_UN: amd64_sse_packuswb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PACKD_UN: amd64_sse_packusdw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDB_SAT_UN: amd64_sse_paddusb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBB_SAT_UN: amd64_sse_psubusb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDW_SAT_UN: amd64_sse_paddusw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBW_SAT_UN: amd64_sse_psubusw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDB_SAT: amd64_sse_paddsb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBB_SAT: amd64_sse_psubsb_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PADDW_SAT: amd64_sse_paddsw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSUBW_SAT: amd64_sse_psubsw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMULW: amd64_sse_pmullw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMULD: amd64_sse_pmulld_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMULQ: amd64_sse_pmuludq_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMULW_HIGH_UN: amd64_sse_pmulhuw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PMULW_HIGH: amd64_sse_pmulhw_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_PSHRW: amd64_sse_psrlw_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHRW_REG: amd64_sse_psrlw_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_PSARW: amd64_sse_psraw_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSARW_REG: amd64_sse_psraw_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_PSHLW: amd64_sse_psllw_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHLW_REG: amd64_sse_psllw_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_PSHRD: amd64_sse_psrld_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHRD_REG: amd64_sse_psrld_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_PSARD: amd64_sse_psrad_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSARD_REG: amd64_sse_psrad_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_PSHLD: amd64_sse_pslld_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHLD_REG: amd64_sse_pslld_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_PSHRQ: amd64_sse_psrlq_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHRQ_REG: amd64_sse_psrlq_reg_reg (code, ins->dreg, ins->sreg2); break; /* TODO: This is part of the SSE spec but not added case OP_PSARQ: amd64_sse_psraq_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSARQ_REG: amd64_sse_psraq_reg_reg (code, ins->dreg, ins->sreg2); break; */ case OP_PSHLQ: amd64_sse_psllq_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHLQ_REG: amd64_sse_psllq_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_CVTDQ2PD: amd64_sse_cvtdq2pd_reg_reg (code, ins->dreg, ins->sreg1); break; case
OP_CVTDQ2PS: amd64_sse_cvtdq2ps_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_CVTPD2DQ: amd64_sse_cvtpd2dq_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_CVTPD2PS: amd64_sse_cvtpd2ps_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_CVTPS2DQ: amd64_sse_cvtps2dq_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_CVTPS2PD: amd64_sse_cvtps2pd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_CVTTPD2DQ: amd64_sse_cvttpd2dq_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_CVTTPS2DQ: amd64_sse_cvttps2dq_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_ICONV_TO_X: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_EXTRACT_I4: amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_EXTRACT_I8: if (ins->inst_c0) { amd64_movhlps_reg_reg (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1); amd64_movd_reg_xreg_size (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG, 8); } else { amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8); } break; case OP_EXTRACT_I1: amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4); if (ins->inst_c0) amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_c0 * 8); amd64_widen_reg (code, ins->dreg, ins->dreg, ins->inst_c1 == MONO_TYPE_I1, FALSE); break; case OP_EXTRACT_I2: /*amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4); if (ins->inst_c0) amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, 16, 4);*/ amd64_sse_pextrw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); amd64_widen_reg_size (code, ins->dreg, ins->dreg, ins->inst_c1 == MONO_TYPE_I2, TRUE, 4); break; case OP_EXTRACT_R8: if (ins->inst_c0) amd64_movhlps_reg_reg (code, ins->dreg, ins->sreg1); else amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_INSERT_I2: amd64_sse_pinsrw_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_EXTRACTX_U2: amd64_sse_pextrw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_INSERTX_U1_SLOW: /*sreg1 is the extracted ireg (scratch) /sreg2 is the to be inserted ireg (scratch) /dreg is the xreg to receive the value*/ /*clear the bits from the extracted word*/ amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_c0 & 1 ? 
0x00FF : 0xFF00); /*shift the value to insert if needed*/ if (ins->inst_c0 & 1) amd64_shift_reg_imm_size (code, X86_SHL, ins->sreg2, 8, 4); /*join them together*/ amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2); amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0 / 2); break; case OP_INSERTX_I4_SLOW: amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2); amd64_shift_reg_imm (code, X86_SHR, ins->sreg2, 16); amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2 + 1); break; case OP_INSERTX_I8_SLOW: amd64_movd_xreg_reg_size(code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg2, 8); if (ins->inst_c0) amd64_movlhps_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG); else amd64_sse_movsd_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG); break; case OP_INSERTX_R4_SLOW: switch (ins->inst_c0) { case 0: if (cfg->r4fp) amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2); else amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2); break; case 1: amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3)); if (cfg->r4fp) amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2); else amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3)); break; case 2: amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3)); if (cfg->r4fp) amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2); else amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3)); break; case 3: amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0)); if (cfg->r4fp) amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2); else amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0)); break; } break; case OP_INSERTX_R8_SLOW: if (ins->inst_c0) amd64_movlhps_reg_reg (code, ins->dreg, ins->sreg2); else amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg2); break; case OP_STOREX_MEMBASE_REG: case OP_STOREX_MEMBASE: amd64_sse_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1); break; case OP_LOADX_MEMBASE: amd64_sse_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_LOADX_ALIGNED_MEMBASE: amd64_sse_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_STOREX_ALIGNED_MEMBASE_REG: amd64_sse_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1); break; case OP_STOREX_NTA_MEMBASE_REG: amd64_sse_movntps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_PREFETCH_MEMBASE: amd64_sse_prefetch_reg_membase (code, ins->backend.arg_info, ins->sreg1, ins->inst_offset); break; case OP_XMOVE: /*FIXME the peephole pass should have killed this*/ if (ins->dreg != ins->sreg1) amd64_sse_movaps_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_XZERO: amd64_sse_pxor_reg_reg (code, ins->dreg, ins->dreg); break; case OP_XONES: amd64_sse_pcmpeqb_reg_reg (code, ins->dreg, ins->dreg); break; case OP_ICONV_TO_R4_RAW: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4); if (!cfg->r4fp) amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg); break; case OP_FCONV_TO_R8_X: amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_XCONV_R8_TO_I4: amd64_sse_cvttsd2si_reg_xreg_size (code, 
ins->dreg, ins->sreg1, 4); switch (ins->backend.source_opcode) { case OP_FCONV_TO_I1: amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE); break; case OP_FCONV_TO_U1: amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); break; case OP_FCONV_TO_I2: amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE); break; case OP_FCONV_TO_U2: amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE); break; } break; case OP_EXPAND_I2: amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg1, 0); amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg1, 1); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0); break; case OP_EXPAND_I4: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0); break; case OP_EXPAND_I8: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0x44); break; case OP_EXPAND_R4: if (cfg->r4fp) { amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1); } else { amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1); amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->dreg); } amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0); break; case OP_EXPAND_R8: amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1); amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0x44); break; case OP_SSE41_ROUNDP: { if (ins->inst_c1 == MONO_TYPE_R8) amd64_sse_roundpd_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); else g_assert_not_reached (); // roundps, but it's not used anywhere for non-llvm back-end yet. break; } #endif case OP_LZCNT32: amd64_sse_lzcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_LZCNT64: amd64_sse_lzcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 8); break; case OP_POPCNT32: amd64_sse_popcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_POPCNT64: amd64_sse_popcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 8); break; case OP_LIVERANGE_START: { if (cfg->verbose_level > 1) printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code; break; } case OP_LIVERANGE_END: { if (cfg->verbose_level > 1) printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code; break; } case OP_GC_SAFE_POINT: { guint8 *br [1]; amd64_test_membase_imm_size (code, ins->sreg1, 0, 1, 4); br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE); code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_threads_state_poll); amd64_patch (br[0], code); break; } case OP_GC_LIVENESS_DEF: case OP_GC_LIVENESS_USE: case OP_GC_PARAM_SLOT_LIVENESS_DEF: ins->backend.pc_offset = code - cfg->native_code; break; case OP_GC_SPILL_SLOT_LIVENESS_DEF: ins->backend.pc_offset = code - cfg->native_code; bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins); break; case OP_GET_LAST_ERROR: code = emit_get_last_error(code, ins->dreg); break; case OP_FILL_PROF_CALL_CTX: for (int i = 0; i < AMD64_NREG; i++) if (AMD64_IS_CALLEE_SAVED_REG (i) || i == AMD64_RSP) amd64_mov_membase_reg (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, gregs) + i * sizeof (target_mgreg_t), i, sizeof (target_mgreg_t)); break; default: g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__); g_assert_not_reached (); } g_assertf ((code - cfg->native_code - offset) <= max_len, 
"wrong maximal instruction length of instruction %s (expected %d, got %d)", mono_inst_name (ins->opcode), max_len, (int)(code - cfg->native_code - offset)); } set_code_cursor (cfg, code); } #endif /* DISABLE_JIT */ G_BEGIN_DECLS void __chkstk (void); void ___chkstk_ms (void); G_END_DECLS void mono_arch_register_lowlevel_calls (void) { /* The signature doesn't matter */ mono_register_jit_icall (mono_amd64_throw_exception, mono_icall_sig_void, TRUE); #if defined(TARGET_WIN32) || defined(HOST_WIN32) #if _MSC_VER mono_register_jit_icall_info (&mono_get_jit_icall_info ()->mono_chkstk_win64, __chkstk, "mono_chkstk_win64", NULL, TRUE, "__chkstk"); #else mono_register_jit_icall_info (&mono_get_jit_icall_info ()->mono_chkstk_win64, ___chkstk_ms, "mono_chkstk_win64", NULL, TRUE, "___chkstk_ms"); #endif #endif } void mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target) { unsigned char *ip = ji->ip.i + code; /* * Debug code to help track down problems where the target of a near call is * is not valid. */ if (amd64_is_near_call (ip)) { gint64 disp = (guint8*)target - (guint8*)ip; if (!amd64_is_imm32 (disp)) { printf ("TYPE: %d\n", ji->type); switch (ji->type) { case MONO_PATCH_INFO_JIT_ICALL_ID: printf ("V: %s\n", mono_find_jit_icall_info (ji->data.jit_icall_id)->name); break; case MONO_PATCH_INFO_METHOD_JUMP: case MONO_PATCH_INFO_METHOD: printf ("V: %s\n", ji->data.method->name); break; default: break; } } } amd64_patch (ip, (gpointer)target); } #ifndef DISABLE_JIT static int get_max_epilog_size (MonoCompile *cfg) { int max_epilog_size = 16; if (cfg->method->save_lmf) max_epilog_size += 256; max_epilog_size += (AMD64_NREG * 2); return max_epilog_size; } /* * This macro is used for testing whenever the unwinder works correctly at every point * where an async exception can happen. */ /* This will generate a SIGSEGV at the given point in the code */ #define async_exc_point(code) do { \ if (mono_inject_async_exc_method && mono_method_desc_full_match (mono_inject_async_exc_method, cfg->method)) { \ if (cfg->arch.async_point_count == mono_inject_async_exc_pos) \ amd64_mov_reg_mem (code, AMD64_RAX, 0, 4); \ cfg->arch.async_point_count ++; \ } \ } while (0) #ifdef TARGET_WIN32 static guint8 * emit_prolog_setup_sp_win64 (MonoCompile *cfg, guint8 *code, int alloc_size, int *cfa_offset_input) { int cfa_offset = *cfa_offset_input; /* Allocate windows stack frame using stack probing method */ if (alloc_size) { if (alloc_size >= 0x1000) { amd64_mov_reg_imm (code, AMD64_RAX, alloc_size); code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_chkstk_win64); } amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size); if (cfg->arch.omit_fp) { cfa_offset += alloc_size; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); async_exc_point (code); } // NOTE, in a standard win64 prolog the alloc unwind info is always emitted, but since mono // uses a frame pointer with negative offsets and a standard win64 prolog assumes positive offsets, we can't // emit sp alloc unwind metadata since the native OS unwinder will incorrectly restore sp. Excluding the alloc // metadata on the other hand won't give the OS the information so it can just restore the frame pointer to sp and // that will retrieve the expected results. 
if (cfg->arch.omit_fp) mono_emit_unwind_op_sp_alloc (cfg, code, alloc_size); } *cfa_offset_input = cfa_offset; set_code_cursor (cfg, code); return code; } #endif /* TARGET_WIN32 */ guint8 * mono_arch_emit_prolog (MonoCompile *cfg) { MonoMethod *method = cfg->method; MonoBasicBlock *bb; MonoMethodSignature *sig; MonoInst *ins; int alloc_size, pos, i, cfa_offset, quad, max_epilog_size, save_area_offset; guint8 *code; CallInfo *cinfo; MonoInst *lmf_var = cfg->lmf_var; gboolean args_clobbered = FALSE; cfg->code_size = MAX (cfg->header->code_size * 4, 1024); code = cfg->native_code = (unsigned char *)g_malloc (cfg->code_size); /* Amount of stack space allocated by register saving code */ pos = 0; /* Offset between RSP and the CFA */ cfa_offset = 0; /* * The prolog consists of the following parts: * FP present: * - push rbp * - mov rbp, rsp * - save callee saved regs using moves * - allocate frame * - save rgctx if needed * - save lmf if needed * FP not present: * - allocate frame * - save rgctx if needed * - save lmf if needed * - save callee saved regs using moves */ // CFA = sp + 8 cfa_offset = 8; mono_emit_unwind_op_def_cfa (cfg, code, AMD64_RSP, 8); // IP saved at CFA - 8 mono_emit_unwind_op_offset (cfg, code, AMD64_RIP, -cfa_offset); async_exc_point (code); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF); if (!cfg->arch.omit_fp) { amd64_push_reg (code, AMD64_RBP); cfa_offset += 8; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); mono_emit_unwind_op_offset (cfg, code, AMD64_RBP, - cfa_offset); async_exc_point (code); /* These are handled automatically by the stack marking code */ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF); amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t)); mono_emit_unwind_op_def_cfa_reg (cfg, code, AMD64_RBP); mono_emit_unwind_op_fp_alloc (cfg, code, AMD64_RBP, 0); async_exc_point (code); } /* The param area is always at offset 0 from sp */ /* This needs to be allocated here, since it has to come after the spill area */ if (cfg->param_area) { if (cfg->arch.omit_fp) // FIXME: g_assert_not_reached (); cfg->stack_offset += ALIGN_TO (cfg->param_area, sizeof (target_mgreg_t)); } if (cfg->arch.omit_fp) { /* * On entry, the stack is misaligned by the pushing of the return * address. It is either made aligned by the pushing of %rbp, or by * this.
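* (The call pushed an 8-byte return address, so when the 8-byte-aligned frame size is already a
* multiple of 16, an extra 8-byte pad is added below to bring %rsp back to 16-byte alignment.)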
*/ alloc_size = ALIGN_TO (cfg->stack_offset, 8); if ((alloc_size % 16) == 0) { alloc_size += 8; /* Mark the padding slot as NOREF */ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset - sizeof (target_mgreg_t), SLOT_NOREF); } } else { alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT); if (cfg->stack_offset != alloc_size) { /* Mark the padding slot as NOREF */ mini_gc_set_slot_type_from_fp (cfg, -alloc_size + cfg->param_area, SLOT_NOREF); } cfg->arch.sp_fp_offset = alloc_size; alloc_size -= pos; } cfg->arch.stack_alloc_size = alloc_size; set_code_cursor (cfg, code); /* Allocate stack frame */ #ifdef TARGET_WIN32 code = emit_prolog_setup_sp_win64 (cfg, code, alloc_size, &cfa_offset); #else if (alloc_size) { /* See mono_emit_stack_alloc */ #if defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK) guint32 remaining_size = alloc_size; /* Use a loop for large sizes */ if (remaining_size > 10 * 0x1000) { amd64_mov_reg_imm (code, X86_EAX, remaining_size / 0x1000); guint8 *label = code; amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000); amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP); amd64_alu_reg_imm (code, X86_SUB, AMD64_RAX, 1); amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0); guint8 *label2 = code; x86_branch8 (code, X86_CC_NE, 0, FALSE); amd64_patch (label2, label); if (cfg->arch.omit_fp) { cfa_offset += (remaining_size / 0x1000) * 0x1000; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); } remaining_size = remaining_size % 0x1000; set_code_cursor (cfg, code); } guint32 required_code_size = ((remaining_size / 0x1000) + 1) * 11; /*11 is the max size of amd64_alu_reg_imm + amd64_test_membase_reg*/ code = realloc_code (cfg, required_code_size); while (remaining_size >= 0x1000) { amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000); if (cfg->arch.omit_fp) { cfa_offset += 0x1000; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); } async_exc_point (code); amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP); remaining_size -= 0x1000; } if (remaining_size) { amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, remaining_size); if (cfg->arch.omit_fp) { cfa_offset += remaining_size; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); async_exc_point (code); } } #else amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size); if (cfg->arch.omit_fp) { cfa_offset += alloc_size; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); async_exc_point (code); } #endif } #endif /* Stack alignment check */ #if 0 { guint8 *buf; amd64_mov_reg_reg (code, AMD64_RAX, AMD64_RSP, 8); amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0xf); amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0); buf = code; x86_branch8 (code, X86_CC_EQ, 1, FALSE); amd64_breakpoint (code); amd64_patch (buf, code); } #endif if (mini_debug_options.init_stacks) { /* Fill the stack frame with a dummy value to force deterministic behavior */ /* Save registers to the red zone */ amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDI, 8); amd64_mov_membase_reg (code, AMD64_RSP, -16, AMD64_RCX, 8); MONO_DISABLE_WARNING (4310) // cast truncates constant value amd64_mov_reg_imm (code, AMD64_RAX, 0x2a2a2a2a2a2a2a2a); MONO_RESTORE_WARNING amd64_mov_reg_imm (code, AMD64_RCX, alloc_size / 8); amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RSP, 8); amd64_cld (code); amd64_prefix (code, X86_REP_PREFIX); amd64_stosl (code); amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RSP, -8, 8); amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, -16, 8); } /* Save LMF */ if (method->save_lmf) code = emit_setup_lmf (cfg, code, lmf_var->inst_offset, 
cfa_offset); /* Save callee saved registers */ if (cfg->arch.omit_fp) { save_area_offset = cfg->arch.reg_save_area_offset; /* Save caller saved registers after sp is adjusted */ /* The registers are saved at the bottom of the frame */ /* FIXME: Optimize this so the regs are saved at the end of the frame in increasing order */ } else { /* The registers are saved just below the saved rbp */ save_area_offset = cfg->arch.reg_save_area_offset; } for (i = 0; i < AMD64_NREG; ++i) { if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) { amd64_mov_membase_reg (code, cfg->frame_reg, save_area_offset, i, 8); if (cfg->arch.omit_fp) { mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - save_area_offset)); /* These are handled automatically by the stack marking code */ mini_gc_set_slot_type_from_cfa (cfg, - (cfa_offset - save_area_offset), SLOT_NOREF); } else { mono_emit_unwind_op_offset (cfg, code, i, - (-save_area_offset + (2 * 8))); // FIXME: GC } save_area_offset += 8; async_exc_point (code); } } /* store runtime generic context */ if (cfg->rgctx_var) { g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET && (cfg->rgctx_var->inst_basereg == AMD64_RBP || cfg->rgctx_var->inst_basereg == AMD64_RSP)); amd64_mov_membase_reg (code, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, sizeof(gpointer)); mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code); mono_add_var_location (cfg, cfg->rgctx_var, FALSE, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, code - cfg->native_code, 0); } /* compute max_length in order to use short forward jumps */ max_epilog_size = get_max_epilog_size (cfg); if (cfg->opt & MONO_OPT_BRANCH && cfg->max_block_num < MAX_BBLOCKS_FOR_BRANCH_OPTS) { for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins; int max_length = 0; /* max alignment for loops */ if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb)) max_length += LOOP_ALIGNMENT; MONO_BB_FOR_EACH_INS (bb, ins) { max_length += ins_get_size (ins->opcode); } /* Take prolog and epilog instrumentation into account */ if (bb == cfg->bb_entry || bb == cfg->bb_exit) max_length += max_epilog_size; bb->max_length = max_length; } } sig = mono_method_signature_internal (method); pos = 0; cinfo = cfg->arch.cinfo; if (sig->ret->type != MONO_TYPE_VOID) { /* Save volatile arguments to the stack */ if (cfg->vret_addr && (cfg->vret_addr->opcode != OP_REGVAR)) amd64_mov_membase_reg (code, cfg->vret_addr->inst_basereg, cfg->vret_addr->inst_offset, cinfo->ret.reg, 8); } /* Keep this in sync with emit_load_volatile_arguments */ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; ins = cfg->args [i]; if (ins->flags & MONO_INST_IS_DEAD && !MONO_CFG_PROFILE (cfg, ENTER_CONTEXT)) /* Unused arguments */ continue; /* Save volatile arguments to the stack */ if (ins->opcode != OP_REGVAR) { switch (ainfo->storage) { case ArgInIReg: { guint32 size = 8; /* FIXME: I1 etc */ /* if (stack_offset & 0x1) size = 1; else if (stack_offset & 0x2) size = 2; else if (stack_offset & 0x4) size = 4; else size = 8; */ amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg, size); /* * Save the original location of 'this', * mono_get_generic_info_from_stack_frame () needs this to properly look up * the argument value during the handling of async exceptions. 
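* Both the incoming register location and the stack slot it is spilled to are recorded,
* each valid over the corresponding range of native code.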
*/ if (i == 0 && sig->hasthis) { mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code); mono_add_var_location (cfg, ins, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0); } break; } case ArgInFloatSSEReg: amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg); break; case ArgInDoubleSSEReg: amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg); break; case ArgValuetypeInReg: for (quad = 0; quad < 2; quad ++) { switch (ainfo->pair_storage [quad]) { case ArgInIReg: amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_regs [quad], sizeof (target_mgreg_t)); break; case ArgInFloatSSEReg: amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_regs [quad]); break; case ArgInDoubleSSEReg: amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_regs [quad]); break; case ArgNone: break; default: g_assert_not_reached (); } } break; case ArgValuetypeAddrInIReg: if (ainfo->pair_storage [0] == ArgInIReg) amd64_mov_membase_reg (code, ins->inst_left->inst_basereg, ins->inst_left->inst_offset, ainfo->pair_regs [0], sizeof (target_mgreg_t)); break; case ArgValuetypeAddrOnStack: break; case ArgGSharedVtInReg: amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg, 8); break; default: break; } } else { /* Argument allocated to (non-volatile) register */ switch (ainfo->storage) { case ArgInIReg: amd64_mov_reg_reg (code, ins->dreg, ainfo->reg, 8); break; case ArgOnStack: amd64_mov_reg_membase (code, ins->dreg, AMD64_RBP, ARGS_OFFSET + ainfo->offset, 8); break; default: g_assert_not_reached (); } if (i == 0 && sig->hasthis) { g_assert (ainfo->storage == ArgInIReg); mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code); mono_add_var_location (cfg, ins, TRUE, ins->dreg, 0, code - cfg->native_code, 0); } } } if (cfg->method->save_lmf) args_clobbered = TRUE; /* * Optimize the common case of the first bblock making a call with the same * arguments as the method. This works because the arguments are still in their * original argument registers. 
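* A load of an argument from its home slot can then be nullified outright, or rewritten into
* a move from the incoming argument register, as long as the argument registers are still unchanged.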
* FIXME: Generalize this */ if (!args_clobbered) { MonoBasicBlock *first_bb = cfg->bb_entry; MonoInst *next; int filter = FILTER_IL_SEQ_POINT; next = mono_bb_first_inst (first_bb, filter); if (!next && first_bb->next_bb) { first_bb = first_bb->next_bb; next = mono_bb_first_inst (first_bb, filter); } if (first_bb->in_count > 1) next = NULL; for (i = 0; next && i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; gboolean match = FALSE; ins = cfg->args [i]; if (ins->opcode != OP_REGVAR) { switch (ainfo->storage) { case ArgInIReg: { if (((next->opcode == OP_LOAD_MEMBASE) || (next->opcode == OP_LOADI4_MEMBASE)) && next->inst_basereg == ins->inst_basereg && next->inst_offset == ins->inst_offset) { if (next->dreg == ainfo->reg) { NULLIFY_INS (next); match = TRUE; } else { next->opcode = OP_MOVE; next->sreg1 = ainfo->reg; /* Only continue if the instruction doesn't change argument regs */ if (next->dreg == ainfo->reg || next->dreg == AMD64_RAX) match = TRUE; } } break; } default: break; } } else { /* Argument allocated to (non-volatile) register */ switch (ainfo->storage) { case ArgInIReg: if (next->opcode == OP_MOVE && next->sreg1 == ins->dreg && next->dreg == ainfo->reg) { NULLIFY_INS (next); match = TRUE; } break; default: break; } } if (match) { next = mono_inst_next (next, filter); //next = mono_inst_list_next (&next->node, &first_bb->ins_list); if (!next) break; } } } if (cfg->gen_sdb_seq_points) { MonoInst *info_var = cfg->arch.seq_point_info_var; /* Initialize seq_point_info_var */ if (cfg->compile_aot) { /* Initialize the variable from a GOT slot */ /* Same as OP_AOTCONST */ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method); amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof(gpointer)); g_assert (info_var->opcode == OP_REGOFFSET); amd64_mov_membase_reg (code, info_var->inst_basereg, info_var->inst_offset, AMD64_R11, 8); } if (cfg->compile_aot) { /* Initialize ss_tramp_var */ ins = cfg->arch.ss_tramp_var; g_assert (ins->opcode == OP_REGOFFSET); amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8); amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr), 8); amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8); } else { /* Initialize ss_tramp_var */ ins = cfg->arch.ss_tramp_var; g_assert (ins->opcode == OP_REGOFFSET); amd64_mov_reg_imm (code, AMD64_R11, (guint64)&ss_trampoline); amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8); /* Initialize bp_tramp_var */ ins = cfg->arch.bp_tramp_var; g_assert (ins->opcode == OP_REGOFFSET); amd64_mov_reg_imm (code, AMD64_R11, (guint64)&bp_trampoline); amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8); } } set_code_cursor (cfg, code); return code; } void mono_arch_emit_epilog (MonoCompile *cfg) { MonoMethod *method = cfg->method; int quad, i; guint8 *code; int max_epilog_size; CallInfo *cinfo; gint32 lmf_offset = cfg->lmf_var ? 
cfg->lmf_var->inst_offset : -1; gint32 save_area_offset = cfg->arch.reg_save_area_offset; max_epilog_size = get_max_epilog_size (cfg); code = realloc_code (cfg, max_epilog_size); cfg->has_unwind_info_for_epilog = TRUE; /* Mark the start of the epilog */ mono_emit_unwind_op_mark_loc (cfg, code, 0); /* Save the unwind state which is needed by the out-of-line code */ mono_emit_unwind_op_remember_state (cfg, code); /* the code restoring the registers must be kept in sync with OP_TAILCALL */ if (method->save_lmf) { if (cfg->used_int_regs & (1 << AMD64_RBP)) amd64_mov_reg_membase (code, AMD64_RBP, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), 8); if (cfg->arch.omit_fp) /* * emit_setup_lmf () marks RBP as saved; we have to mark it as same value here before clearing up the stack * since its stack slot will become invalid. */ mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP); } /* Restore callee saved regs */ for (i = 0; i < AMD64_NREG; ++i) { if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) { /* Restore only used_int_regs, not arch.saved_iregs */ #if defined(MONO_SUPPORT_TASKLETS) int restore_reg = 1; #else int restore_reg = (cfg->used_int_regs & (1 << i)); #endif if (restore_reg) { amd64_mov_reg_membase (code, i, cfg->frame_reg, save_area_offset, 8); mono_emit_unwind_op_same_value (cfg, code, i); async_exc_point (code); } save_area_offset += 8; } } /* Load returned vtypes into registers if needed */ cinfo = cfg->arch.cinfo; if (cinfo->ret.storage == ArgValuetypeInReg) { ArgInfo *ainfo = &cinfo->ret; MonoInst *inst = cfg->ret; for (quad = 0; quad < 2; quad ++) { switch (ainfo->pair_storage [quad]) { case ArgInIReg: amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_size [quad]); break; case ArgInFloatSSEReg: amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (target_mgreg_t))); break; case ArgInDoubleSSEReg: amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (target_mgreg_t))); break; case ArgNone: break; default: g_assert_not_reached (); } } } if (cfg->arch.omit_fp) { if (cfg->arch.stack_alloc_size) { amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size); } } else { #ifdef TARGET_WIN32 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0); amd64_pop_reg (code, AMD64_RBP); mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP); #else amd64_leave (code); mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP); #endif } mono_emit_unwind_op_def_cfa (cfg, code, AMD64_RSP, 8); async_exc_point (code); amd64_ret (code); /* Restore the unwind state to be the same as before the epilog */ mono_emit_unwind_op_restore_state (cfg, code); set_code_cursor (cfg, code); } void mono_arch_emit_exceptions (MonoCompile *cfg) { MonoJumpInfo *patch_info; int nthrows, i; guint8 *code; MonoClass *exc_classes [16]; guint8 *exc_throw_start [16], *exc_throw_end [16]; guint32 code_size = 0; /* Compute needed space */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { if (patch_info->type == MONO_PATCH_INFO_EXC) code_size += 40; if (patch_info->type == MONO_PATCH_INFO_R8) code_size += 8 + 15; /* sizeof (double) + alignment */ if (patch_info->type == MONO_PATCH_INFO_R4) code_size += 4 + 15; /* sizeof (float) + alignment */ if (patch_info->type == MONO_PATCH_INFO_GC_CARD_TABLE_ADDR) code_size += 8 + 7; /* sizeof (void*) + alignment */ }
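/* The sizing loop above computed a worst case for this out-of-line code: ~40 bytes per exception throw sequence, plus room (including alignment padding) for every RIP-relative R4/R8 constant and card table address patched below. */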
code = realloc_code (cfg, code_size); /* add code to raise exceptions */ nthrows = 0; for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { switch (patch_info->type) { case MONO_PATCH_INFO_EXC: { MonoClass *exc_class; guint8 *buf, *buf2; guint32 throw_ip; amd64_patch (patch_info->ip.i + cfg->native_code, code); exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name); throw_ip = patch_info->ip.i; //x86_breakpoint (code); /* Find a throw sequence for the same exception class */ for (i = 0; i < nthrows; ++i) if (exc_classes [i] == exc_class) break; if (i < nthrows) { amd64_mov_reg_imm (code, AMD64_ARG_REG2, (exc_throw_end [i] - cfg->native_code) - throw_ip); x86_jump_code (code, exc_throw_start [i]); patch_info->type = MONO_PATCH_INFO_NONE; } else { buf = code; amd64_mov_reg_imm_size (code, AMD64_ARG_REG2, 0xf0f0f0f0, 4); buf2 = code; if (nthrows < 16) { exc_classes [nthrows] = exc_class; exc_throw_start [nthrows] = code; } amd64_mov_reg_imm (code, AMD64_ARG_REG1, m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF); patch_info->type = MONO_PATCH_INFO_NONE; code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_arch_throw_corlib_exception); amd64_mov_reg_imm (buf, AMD64_ARG_REG2, (code - cfg->native_code) - throw_ip); while (buf < buf2) x86_nop (buf); if (nthrows < 16) { exc_throw_end [nthrows] = code; nthrows ++; } } break; } default: /* do nothing */ break; } set_code_cursor (cfg, code); } /* Handle relocations with RIP relative addressing */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { gboolean remove = FALSE; guint8 *orig_code = code; switch (patch_info->type) { case MONO_PATCH_INFO_R8: case MONO_PATCH_INFO_R4: { guint8 *pos, *patch_pos; guint32 target_pos; /* The SSE opcodes require a 16 byte alignment */ code = (guint8*)ALIGN_TO (code, 16); pos = cfg->native_code + patch_info->ip.i; if (IS_REX (pos [1])) { patch_pos = pos + 5; target_pos = code - pos - 9; } else { patch_pos = pos + 4; target_pos = code - pos - 8; } if (patch_info->type == MONO_PATCH_INFO_R8) { *(double*)code = *(double*)patch_info->data.target; code += sizeof (double); } else { *(float*)code = *(float*)patch_info->data.target; code += sizeof (float); } *(guint32*)(patch_pos) = target_pos; remove = TRUE; break; } case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR: { guint8 *pos; if (cfg->compile_aot) continue; /* loading is faster against aligned addresses. */ code = (guint8*)ALIGN_TO (code, 8); memset (orig_code, 0, code - orig_code); pos = cfg->native_code + patch_info->ip.i; /*alu_op [rex] modr/m imm32 - 7 or 8 bytes */ if (IS_REX (pos [1])) *(guint32*)(pos + 4) = (guint8*)code - pos - 8; else *(guint32*)(pos + 3) = (guint8*)code - pos - 7; *(gpointer*)code = (gpointer)patch_info->data.target; code += sizeof (gpointer); remove = TRUE; break; } default: break; } if (remove) { if (patch_info == cfg->patch_info) cfg->patch_info = patch_info->next; else { MonoJumpInfo *tmp; for (tmp = cfg->patch_info; tmp->next != patch_info; tmp = tmp->next) ; tmp->next = patch_info->next; } } set_code_cursor (cfg, code); } set_code_cursor (cfg, code); } #endif /* DISABLE_JIT */ MONO_NEVER_INLINE void mono_arch_flush_icache (guint8 *code, gint size) { /* call/ret required (or likely other control transfer) */ } void mono_arch_flush_register_windows (void) { } gboolean mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm) { return amd64_use_imm32 (imm); } /* * Determine whether the trap whose info is in SIGINFO is caused by * integer
overflow. */ gboolean mono_arch_is_int_overflow (void *sigctx, void *info) { MonoContext ctx; guint8* rip; int reg; gint64 value; mono_sigctx_to_monoctx (sigctx, &ctx); rip = (guint8*)ctx.gregs [AMD64_RIP]; if (IS_REX (rip [0])) { reg = amd64_rex_b (rip [0]); rip ++; } else reg = 0; if ((rip [0] == 0xf7) && (x86_modrm_mod (rip [1]) == 0x3) && (x86_modrm_reg (rip [1]) == 0x7)) { /* idiv REG */ reg += x86_modrm_rm (rip [1]); value = ctx.gregs [reg]; if (value == -1) return TRUE; } return FALSE; } guint32 mono_arch_get_patch_offset (guint8 *code) { return 3; } /** * \return TRUE if no sw breakpoint was present (always). * * Copy \p size bytes from \p code - \p offset to the buffer \p buf. If the debugger inserted software * breakpoints in the original code, they are removed in the copy. */ gboolean mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size) { /* * If method_start is non-NULL we need to perform bound checks, since we access memory * at code - offset we could go before the start of the method and end up in a different * page of memory that is not mapped or read incorrect data anyway. We zero-fill the bytes * instead. */ if (!method_start || code - offset >= method_start) { memcpy (buf, code - offset, size); } else { int diff = code - method_start; memset (buf, 0, size); memcpy (buf + offset - diff, method_start, diff + size - offset); } return TRUE; } int mono_arch_get_this_arg_reg (guint8 *code) { return AMD64_ARG_REG1; } gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code) { return (gpointer)regs [mono_arch_get_this_arg_reg (code)]; } #define MAX_ARCH_DELEGATE_PARAMS 10 static gpointer get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, guint32 param_count) { guint8 *code, *start; GSList *unwind_ops = NULL; int i; unwind_ops = mono_arch_get_cie_program (); const int size = 64; start = code = (guint8 *)mono_global_codeman_reserve (size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)); if (has_target) { /* Replace the this argument with the target */ amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8); amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8); amd64_jump_membase (code, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); } else { if (param_count == 0) { amd64_jump_membase (code, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); } else { /* We have to shift the arguments left */ amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8); for (i = 0; i < param_count; ++i) { #ifdef TARGET_WIN32 if (i < 3) amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8); else amd64_mov_reg_membase (code, param_regs [i], AMD64_RSP, 0x28, 8); #else amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8); #endif } amd64_jump_membase (code, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); } } g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size); g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0))); mono_arch_flush_icache (start, code - start); if (has_target) { *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops); } else { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count); *info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops); g_free (name); } if (mono_jit_map_is_enabled ()) { char *buff; if (has_target) buff = (char*)"delegate_invoke_has_target"; else 
buff = g_strdup_printf ("delegate_invoke_no_target_%d", param_count); mono_emit_jit_tramp (start, code - start, buff); if (!has_target) g_free (buff); } MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); return start; } #define MAX_VIRTUAL_DELEGATE_OFFSET 32 static gpointer get_delegate_virtual_invoke_impl (MonoTrampInfo **info, gboolean load_imt_reg, int offset) { guint8 *code, *start; const int size = 20; char *tramp_name; GSList *unwind_ops; if (offset / (int)sizeof (target_mgreg_t) > MAX_VIRTUAL_DELEGATE_OFFSET) return NULL; start = code = (guint8 *)mono_global_codeman_reserve (size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)); unwind_ops = mono_arch_get_cie_program (); /* Replace the this argument with the target */ amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8); amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8); if (load_imt_reg) { /* Load the IMT reg */ amd64_mov_reg_membase (code, MONO_ARCH_IMT_REG, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method), 8); } /* Load the vtable */ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoObject, vtable), 8); amd64_jump_membase (code, AMD64_RAX, offset); g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); tramp_name = mono_get_delegate_virtual_invoke_impl_name (load_imt_reg, offset); *info = mono_tramp_info_create (tramp_name, start, code - start, NULL, unwind_ops); g_free (tramp_name); return start; } /* * mono_arch_get_delegate_invoke_impls: * * Return a list of MonoTrampInfo structures for the delegate invoke impl * trampolines. 
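/* Editorial sketch, not Mono source: get_delegate_virtual_invoke_impl
 * above loads the object's vtable pointer and jumps through a fixed
 * slot offset. The same dispatch written in plain C, with hypothetical
 * layouts standing in for MonoObject/MonoVTable: */
#include <stdio.h>

typedef struct { void (*slots [4]) (void *self); } VTable;
typedef struct { VTable *vtable; } Object;

static void hello (void *self)
{
        printf ("slot called on %p\n", self);
}

static void virtual_invoke (Object *obj, int slot)
{
        obj->vtable->slots [slot] (obj);  /* load vtable, jump at offset */
}

int main (void)
{
        VTable vt = { { hello, hello, hello, hello } };
        Object o = { &vt };
        virtual_invoke (&o, 2);
        return 0;
}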
*/ GSList* mono_arch_get_delegate_invoke_impls (void) { GSList *res = NULL; MonoTrampInfo *info; int i; get_delegate_invoke_impl (&info, TRUE, 0); res = g_slist_prepend (res, info); for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) { get_delegate_invoke_impl (&info, FALSE, i); res = g_slist_prepend (res, info); } for (i = 1; i <= MONO_IMT_SIZE; ++i) { get_delegate_virtual_invoke_impl (&info, TRUE, - i * TARGET_SIZEOF_VOID_P); res = g_slist_prepend (res, info); } for (i = 0; i <= MAX_VIRTUAL_DELEGATE_OFFSET; ++i) { get_delegate_virtual_invoke_impl (&info, FALSE, i * TARGET_SIZEOF_VOID_P); res = g_slist_prepend (res, info); get_delegate_virtual_invoke_impl (&info, TRUE, i * TARGET_SIZEOF_VOID_P); res = g_slist_prepend (res, info); } return res; } gpointer mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target) { guint8 *code, *start; int i; if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS) return NULL; /* FIXME: Support more cases */ if (MONO_TYPE_ISSTRUCT (mini_get_underlying_type (sig->ret))) return NULL; if (has_target) { static guint8* cached = NULL; if (cached) return cached; if (mono_ee_features.use_aot_trampolines) { start = (guint8 *)mono_aot_get_trampoline ("delegate_invoke_impl_has_target"); } else { MonoTrampInfo *info; start = (guint8 *)get_delegate_invoke_impl (&info, TRUE, 0); mono_tramp_info_register (info, NULL); } mono_memory_barrier (); cached = start; } else { static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL}; for (i = 0; i < sig->param_count; ++i) if (!mono_is_regsize_var (sig->params [i])) return NULL; if (sig->param_count > 4) return NULL; code = cache [sig->param_count]; if (code) return code; if (mono_ee_features.use_aot_trampolines) { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count); start = (guint8 *)mono_aot_get_trampoline (name); g_free (name); } else { MonoTrampInfo *info; start = (guint8 *)get_delegate_invoke_impl (&info, FALSE, sig->param_count); mono_tramp_info_register (info, NULL); } mono_memory_barrier (); cache [sig->param_count] = start; } return start; } gpointer mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg) { MonoTrampInfo *info; gpointer code; code = get_delegate_virtual_invoke_impl (&info, load_imt_reg, offset); if (code) mono_tramp_info_register (info, NULL); return code; } void mono_arch_finish_init (void) { #if !defined(HOST_WIN32) && defined(MONO_XEN_OPT) optimize_for_xen = access ("/proc/xen", F_OK) == 0; #endif } #define CMP_SIZE (6 + 1) #define CMP_REG_REG_SIZE (4 + 1) #define BR_SMALL_SIZE 2 #define BR_LARGE_SIZE 6 #define MOV_REG_IMM_SIZE 10 #define MOV_REG_IMM_32BIT_SIZE 6 #define JUMP_REG_SIZE (2 + 1) static int imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target) { int i, distance = 0; for (i = start; i < target; ++i) distance += imt_entries [i]->chunk_size; return distance; } /* * LOCKING: called with the domain lock held */ gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp) { int i; int size = 0; guint8 *code, *start; gboolean vtable_is_32bit = ((gsize)(vtable) == (gsize)(int)(gsize)(vtable)); GSList *unwind_ops; MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->is_equals) { if (item->check_target_idx) { if (!item->compare_done) { if (amd64_use_imm32 ((gint64)item->key)) item->chunk_size += 
CMP_SIZE; else item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE; } if (item->has_target_code) { item->chunk_size += MOV_REG_IMM_SIZE; } else { if (vtable_is_32bit) item->chunk_size += MOV_REG_IMM_32BIT_SIZE; else item->chunk_size += MOV_REG_IMM_SIZE; } item->chunk_size += BR_SMALL_SIZE + JUMP_REG_SIZE; } else { if (fail_tramp) { item->chunk_size += MOV_REG_IMM_SIZE * 3 + CMP_REG_REG_SIZE + BR_SMALL_SIZE + JUMP_REG_SIZE * 2; } else { if (vtable_is_32bit) item->chunk_size += MOV_REG_IMM_32BIT_SIZE; else item->chunk_size += MOV_REG_IMM_SIZE; item->chunk_size += JUMP_REG_SIZE; /* with assert below: * item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1; */ } } } else { if (amd64_use_imm32 ((gint64)item->key)) item->chunk_size += CMP_SIZE; else item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE; item->chunk_size += BR_LARGE_SIZE; imt_entries [item->check_target_idx]->compare_done = TRUE; } size += item->chunk_size; } if (fail_tramp) { code = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)); } else { code = (guint8 *)mono_mem_manager_code_reserve (mem_manager, size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)); } start = code; unwind_ops = mono_arch_get_cie_program (); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; item->code_target = code; if (item->is_equals) { gboolean fail_case = !item->check_target_idx && fail_tramp; if (item->check_target_idx || fail_case) { if (!item->compare_done || fail_case) { if (amd64_use_imm32 ((gint64)item->key)) amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer)); else { amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof(gpointer)); amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG); } } item->jmp_code = code; amd64_branch8 (code, X86_CC_NE, 0, FALSE); if (item->has_target_code) { amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->value.target_code); amd64_jump_reg (code, MONO_ARCH_IMT_SCRATCH_REG); } else { amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot])); amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0); } if (fail_case) { amd64_patch (item->jmp_code, code); amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, fail_tramp); amd64_jump_reg (code, MONO_ARCH_IMT_SCRATCH_REG); item->jmp_code = NULL; } } else { /* enable the commented code to assert on wrong method */ #if 0 if (amd64_is_imm32 (item->key)) amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer)); else { amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key); amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG); } item->jmp_code = code; amd64_branch8 (code, X86_CC_NE, 0, FALSE); /* See the comment below about R10 */ amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot])); amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0); amd64_patch (item->jmp_code, code); amd64_breakpoint (code); item->jmp_code = NULL; #else /* We're using R10 (MONO_ARCH_IMT_SCRATCH_REG) here because R11 (MONO_ARCH_IMT_REG) needs to be preserved. R10 needs to be preserved for calls which require a runtime generic context, but interface calls don't. 
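/* Editorial sketch, not Mono source: the IMT builder above is a
 * two-pass emitter. Pass one estimates a chunk_size per item and sums
 * them to reserve memory; pass two emits and asserts each item stayed
 * within its estimate. A toy emitter showing the same discipline: */
#include <assert.h>
#include <stdio.h>
#include <string.h>

typedef struct { int chunk_size; const char *text; } Item;

static int emit_item (char *code, const Item *it)
{
        strcpy (code, it->text);               /* "emit" the chunk */
        return (int)strlen (it->text);
}

int main (void)
{
        Item items [2] = { { 0, "cmp;jne;" }, { 0, "jmp;" } };
        int size = 0;
        for (int i = 0; i < 2; ++i) {          /* pass 1: size estimate */
                items [i].chunk_size = (int)strlen (items [i].text);
                size += items [i].chunk_size;
        }
        char buf [64];
        char *code = buf;                      /* pass 2: emit + check */
        for (int i = 0; i < 2; ++i) {
                int n = emit_item (code, items [i]);
                assert (n <= items [i].chunk_size);
                code += n;
        }
        assert (code - buf <= size);
        printf ("emitted %d bytes\n", (int)(code - buf));
        return 0;
}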
*/ amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot])); amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0); #endif } } else { if (amd64_use_imm32 ((gint64)item->key)) amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof (target_mgreg_t)); else { amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof (target_mgreg_t)); amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG); } item->jmp_code = code; if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx))) x86_branch8 (code, X86_CC_GE, 0, FALSE); else x86_branch32 (code, X86_CC_GE, 0, FALSE); } g_assertf (code - item->code_target <= item->chunk_size, "%X %X", (guint)(code - item->code_target), (guint)item->chunk_size); } /* patch the branches to get to the target items */ for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->jmp_code) { if (item->check_target_idx) { amd64_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target); } } } if (!fail_tramp) UnlockedAdd (&mono_stats.imt_trampolines_size, code - start); g_assert (code - start <= size); g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0))); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL)); mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), mem_manager); return start; } MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code) { return (MonoMethod*)regs [MONO_ARCH_IMT_REG]; } MonoVTable* mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code) { return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG]; } GSList* mono_arch_get_cie_program (void) { GSList *l = NULL; mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, AMD64_RSP, 8); mono_add_unwind_op_offset (l, (guint8*)NULL, (guint8*)NULL, AMD64_RIP, -8); return l; } #ifndef DISABLE_JIT MonoInst* mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins = NULL; int opcode = 0; if (cmethod->klass == mono_class_try_get_math_class ()) { if (strcmp (cmethod->name, "Sqrt") == 0) { opcode = OP_SQRT; } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) { opcode = OP_ABS; } if (opcode && fsig->param_count == 1) { MONO_INST_NEW (cfg, ins, opcode); ins->type = STACK_R8; ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = args [0]->dreg; MONO_ADD_INS (cfg->cbb, ins); } opcode = 0; if (cfg->opt & MONO_OPT_CMOV) { if (strcmp (cmethod->name, "Min") == 0) { if (fsig->params [0]->type == MONO_TYPE_I4) opcode = OP_IMIN; if (fsig->params [0]->type == MONO_TYPE_U4) opcode = OP_IMIN_UN; else if (fsig->params [0]->type == MONO_TYPE_I8) opcode = OP_LMIN; else if (fsig->params [0]->type == MONO_TYPE_U8) opcode = OP_LMIN_UN; } else if (strcmp (cmethod->name, "Max") == 0) { if (fsig->params [0]->type == MONO_TYPE_I4) opcode = OP_IMAX; if (fsig->params [0]->type == MONO_TYPE_U4) opcode = OP_IMAX_UN; else if (fsig->params [0]->type == MONO_TYPE_I8) opcode = OP_LMAX; else if (fsig->params [0]->type == MONO_TYPE_U8) opcode = OP_LMAX_UN; } } if (opcode && fsig->param_count == 2) { MONO_INST_NEW (cfg, ins, opcode); ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? 
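/* Editorial sketch, not Mono source: the non-equals items above emit a
 * compare against MONO_ARCH_IMT_REG and a GE branch over sorted keys,
 * so the generated trampoline behaves like a binary search. An
 * equivalent search in C, with illustrative names and data: */
#include <stddef.h>
#include <stdio.h>

typedef struct { size_t key; const char *target; } ImtEntry;

static const char *imt_lookup (const ImtEntry *entries, int count, size_t key)
{
        int lo = 0, hi = count - 1;
        while (lo < hi) {
                int mid = (lo + hi) / 2;
                if (key >= entries [mid + 1].key)  /* the X86_CC_GE branch */
                        lo = mid + 1;
                else
                        hi = mid;
        }
        return entries [lo].key == key ? entries [lo].target : NULL;
}

int main (void)
{
        ImtEntry entries [] = { { 16, "A" }, { 48, "B" }, { 96, "C" } };
        printf ("%s\n", imt_lookup (entries, 3, 48)); /* prints B */
        return 0;
}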
STACK_I4 : STACK_I8; ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->sreg2 = args [1]->dreg; MONO_ADD_INS (cfg->cbb, ins); } #if 0 /* OP_FREM is not IEEE compatible */ else if (strcmp (cmethod->name, "IEEERemainder") == 0 && fsig->param_count == 2) { MONO_INST_NEW (cfg, ins, OP_FREM); ins->inst_i0 = args [0]; ins->inst_i1 = args [1]; } #endif if ((mini_get_cpu_features (cfg) & MONO_CPU_X86_SSE41) != 0 && fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) { int mode = -1; if (!strcmp (cmethod->name, "Round")) mode = 0; else if (!strcmp (cmethod->name, "Floor")) mode = 1; else if (!strcmp (cmethod->name, "Ceiling")) mode = 2; if (mode != -1) { int xreg = alloc_xreg (cfg); EMIT_NEW_UNALU (cfg, ins, OP_FCONV_TO_R8_X, xreg, args [0]->dreg); EMIT_NEW_UNALU (cfg, ins, OP_SSE41_ROUNDP, xreg, xreg); ins->inst_c0 = mode; ins->inst_c1 = MONO_TYPE_R8; int dreg = alloc_freg (cfg); EMIT_NEW_UNALU (cfg, ins, OP_EXTRACT_R8, dreg, xreg); ins->inst_c0 = 0; ins->inst_c1 = MONO_TYPE_R8; return ins; } } } return ins; } #endif host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg) { return ctx->gregs [reg]; } host_mgreg_t * mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg) { return &ctx->gregs [reg]; } void mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val) { ctx->gregs [reg] = val; } /* * mono_arch_emit_load_aotconst: * * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and * TARGET from the mscorlib GOT in full-aot code. * On AMD64, the result is placed into R11. */ guint8* mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, MonoJumpInfoType tramp_type, gconstpointer target) { *ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target); amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); return code; } /* * mono_arch_get_trampolines: * * Return a list of MonoTrampInfo structures describing arch specific trampolines * for AOT. */ GSList * mono_arch_get_trampolines (gboolean aot) { return mono_amd64_get_exception_trampolines (aot); } /* Soft Debug support */ #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED /* * mono_arch_set_breakpoint: * * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET. * The location should contain code emitted by OP_SEQ_POINT. */ void mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip; if (ji->from_aot) { guint32 native_offset = ip - (guint8*)ji->code_start; SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8 *)ji->code_start); g_assert (info->bp_addrs [native_offset] == 0); info->bp_addrs [native_offset] = mini_get_breakpoint_trampoline (); } else { /* ip points to a mov r11, 0 */ g_assert (code [0] == 0x41); g_assert (code [1] == 0xbb); amd64_mov_reg_imm (code, AMD64_R11, 1); } } /* * mono_arch_clear_breakpoint: * * Clear the breakpoint at IP. */ void mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip; if (ji->from_aot) { guint32 native_offset = ip - (guint8*)ji->code_start; SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8 *)ji->code_start); info->bp_addrs [native_offset] = NULL; } else { amd64_mov_reg_imm (code, AMD64_R11, 0); } } gboolean mono_arch_is_breakpoint_event (void *info, void *sigctx) { /* We use soft breakpoints on amd64 */ return FALSE; } /* * mono_arch_skip_breakpoint: * * Modify CTX so the ip is placed after the breakpoint instruction, so when * we resume, the instruction is not executed again. 
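/* Editorial sketch, not Mono source: the mode constant stored in
 * inst_c0 above selects the SSE4.1 rounding behavior (assumed mapping:
 * 0 = round to nearest even, 1 = round down, 2 = round up). The C99
 * scalar equivalents, assuming the default round-to-nearest FP mode: */
#include <math.h>
#include <stdio.h>

static double round_with_mode (double x, int mode)
{
        switch (mode) {
        case 0: return nearbyint (x);  /* Math.Round, nearest-even */
        case 1: return floor (x);      /* Math.Floor */
        case 2: return ceil (x);       /* Math.Ceiling */
        default: return x;
        }
}

int main (void)
{
        printf ("%g %g %g\n",
                round_with_mode (2.5, 0),   /* 2, banker's rounding */
                round_with_mode (2.5, 1),   /* 2 */
                round_with_mode (2.5, 2));  /* 3 */
        return 0;
}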
*/ void mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji) { g_assert_not_reached (); } /* * mono_arch_start_single_stepping: * * Start single stepping. */ void mono_arch_start_single_stepping (void) { ss_trampoline = mini_get_single_step_trampoline (); } /* * mono_arch_stop_single_stepping: * * Stop single stepping. */ void mono_arch_stop_single_stepping (void) { ss_trampoline = NULL; } /* * mono_arch_is_single_step_event: * * Return whenever the machine state in SIGCTX corresponds to a single * step event. */ gboolean mono_arch_is_single_step_event (void *info, void *sigctx) { /* We use soft breakpoints on amd64 */ return FALSE; } /* * mono_arch_skip_single_step: * * Modify CTX so the ip is placed after the single step trigger instruction, * we resume, the instruction is not executed again. */ void mono_arch_skip_single_step (MonoContext *ctx) { g_assert_not_reached (); } /* * mono_arch_create_seq_point_info: * * Return a pointer to a data structure which is used by the sequence * point implementation in AOTed code. */ SeqPointInfo* mono_arch_get_seq_point_info (guint8 *code) { SeqPointInfo *info; MonoJitInfo *ji; MonoJitMemoryManager *jit_mm; /* * We don't have access to the method etc. so use the global * memory manager for now. */ jit_mm = get_default_jit_mm (); // FIXME: Add a free function jit_mm_lock (jit_mm); info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code); jit_mm_unlock (jit_mm); if (!info) { ji = mini_jit_info_table_find (code); g_assert (ji); // FIXME: Optimize the size info = (SeqPointInfo *)g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size * sizeof (gpointer))); info->ss_tramp_addr = &ss_trampoline; jit_mm_lock (jit_mm); g_hash_table_insert (jit_mm->arch_seq_points, code, info); jit_mm_unlock (jit_mm); } return info; } #endif gboolean mono_arch_opcode_supported (int opcode) { switch (opcode) { case OP_ATOMIC_ADD_I4: case OP_ATOMIC_ADD_I8: case OP_ATOMIC_EXCHANGE_I4: case OP_ATOMIC_EXCHANGE_I8: case OP_ATOMIC_CAS_I4: case OP_ATOMIC_CAS_I8: case OP_ATOMIC_LOAD_I1: case OP_ATOMIC_LOAD_I2: case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_LOAD_I8: case OP_ATOMIC_LOAD_U1: case OP_ATOMIC_LOAD_U2: case OP_ATOMIC_LOAD_U4: case OP_ATOMIC_LOAD_U8: case OP_ATOMIC_LOAD_R4: case OP_ATOMIC_LOAD_R8: case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_I8: case OP_ATOMIC_STORE_U1: case OP_ATOMIC_STORE_U2: case OP_ATOMIC_STORE_U4: case OP_ATOMIC_STORE_U8: case OP_ATOMIC_STORE_R4: case OP_ATOMIC_STORE_R8: return TRUE; default: return FALSE; } } CallInfo* mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { return get_call_info (mp, sig); } gpointer mono_arch_load_function (MonoJitICallId jit_icall_id) { gpointer target = NULL; switch (jit_icall_id) { #undef MONO_AOT_ICALL #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break; MONO_AOT_ICALL (mono_amd64_resume_unwind) MONO_AOT_ICALL (mono_amd64_start_gsharedvt_call) MONO_AOT_ICALL (mono_amd64_throw_corlib_exception) MONO_AOT_ICALL (mono_amd64_throw_exception) default: break; } return target; }
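/* Editorial sketch, not Mono source: mono_arch_get_seq_point_info above
 * follows a lazy lookup pattern: consult a cache keyed by code address
 * under a lock, build the entry on a miss, insert it, and return it.
 * The tiny linear cache below is illustrative only (no locking): */
#include <stdio.h>
#include <stdlib.h>

typedef struct Entry { void *code; void *info; struct Entry *next; } Entry;
static Entry *cache;

static void *get_info (void *code, size_t info_size)
{
        for (Entry *e = cache; e; e = e->next)
                if (e->code == code)
                        return e->info;          /* hit */
        Entry *e = malloc (sizeof *e);           /* miss: build + insert */
        e->code = code;
        e->info = calloc (1, info_size);
        e->next = cache;
        cache = e;
        return e->info;
}

int main (void)
{
        char code [1];
        void *a = get_info (code, 64);
        void *b = get_info (code, 64);
        printf ("%s\n", a == b ? "cached" : "bug");
        return 0;
}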
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
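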
./src/mono/mono/mini/mini-arm.c
/** * \file * ARM backend for the Mono code generator * * Authors: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * * (C) 2003 Ximian, Inc. * Copyright 2003-2011 Novell, Inc (http://www.novell.com) * Copyright 2011 Xamarin, Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include "mini.h" #include <string.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/tokentype.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-hwcap.h> #include <mono/utils/mono-memory-model.h> #include <mono/utils/mono-threads-coop.h> #include <mono/utils/unlocked.h> #include "interp/interp.h" #include "mini-arm.h" #include "cpu-arm.h" #include "ir-emit.h" #include "mini-gc.h" #include "mini-runtime.h" #include "aot-runtime.h" #include "mono/arch/arm/arm-vfp-codegen.h" #include "mono/utils/mono-tls-inline.h" /* Sanity check: This makes no sense */ #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD)) #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined" #endif /* * IS_SOFT_FLOAT: Is full software floating point used? * IS_HARD_FLOAT: Is full hardware floating point used? * IS_VFP: Is hardware floating point with software ABI used? * * These are not necessarily constants, e.g. IS_SOFT_FLOAT and * IS_VFP may delegate to mono_arch_is_soft_float (). */ #if defined(ARM_FPU_VFP_HARD) #define IS_SOFT_FLOAT (FALSE) #define IS_HARD_FLOAT (TRUE) #define IS_VFP (TRUE) #elif defined(ARM_FPU_NONE) #define IS_SOFT_FLOAT (mono_arch_is_soft_float ()) #define IS_HARD_FLOAT (FALSE) #define IS_VFP (!mono_arch_is_soft_float ()) #else #define IS_SOFT_FLOAT (FALSE) #define IS_HARD_FLOAT (FALSE) #define IS_VFP (TRUE) #endif #define THUNK_SIZE (3 * 4) #if __APPLE__ G_BEGIN_DECLS void sys_icache_invalidate (void *start, size_t len); G_END_DECLS #endif /* This mutex protects architecture specific caches */ #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex) #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex) static mono_mutex_t mini_arch_mutex; static gboolean v5_supported = FALSE; static gboolean v6_supported = FALSE; static gboolean v7_supported = FALSE; static gboolean v7s_supported = FALSE; static gboolean v7k_supported = FALSE; static gboolean thumb_supported = FALSE; static gboolean thumb2_supported = FALSE; /* * Whenever to use the ARM EABI */ static gboolean eabi_supported = FALSE; /* * Whenever to use the iphone ABI extensions: * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr. * This is required for debugging/profiling tools to work, but it has some overhead so it should * only be turned on in debug builds. */ static gboolean iphone_abi = FALSE; /* * The FPU we are generating code for. This is NOT runtime configurable right now, * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines. */ static MonoArmFPU arm_fpu; #if defined(ARM_FPU_VFP_HARD) /* * On armhf, d0-d7 are used for argument passing and d8-d15 * must be preserved across calls, which leaves us no room * for scratch registers. So we use d14-d15 but back up their * previous contents to a stack slot before using them - see * mono_arm_emit_vfp_scratch_save/_restore (). 
*/ static int vfp_scratch1 = ARM_VFP_D14; static int vfp_scratch2 = ARM_VFP_D15; #else /* * On armel, d0-d7 do not need to be preserved, so we can * freely make use of them as scratch registers. */ static int vfp_scratch1 = ARM_VFP_D0; static int vfp_scratch2 = ARM_VFP_D1; #endif static int i8_align; static gpointer single_step_tramp, breakpoint_tramp; /* * The code generated for sequence points reads from this location, which is * made read-only when single stepping is enabled. */ static gpointer ss_trigger_page; /* Enabled breakpoints read from this trigger page */ static gpointer bp_trigger_page; /* * TODO: * floating point support: on ARM it is a mess, there are at least 3 * different setups, each of which binary incompat with the other. * 1) FPA: old and ugly, but unfortunately what current distros use * the double binary format has the two words swapped. 8 double registers. * Implemented usually by kernel emulation. * 2) softfloat: the compiler emulates all the fp ops. Usually uses the * ugly swapped double format (I guess a softfloat-vfp exists, too, though). * 3) VFP: the new and actually sensible and useful FP support. Implemented * in HW or kernel-emulated, requires new tools. I think this is what symbian uses. * * We do not care about FPA. We will support soft float and VFP. */ #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096) #define arm_is_imm8(v) ((v) > -256 && (v) < 256) #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020) #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12)) #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12)) #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL) //#define DEBUG_IMT 0 #ifndef DISABLE_JIT static void mono_arch_compute_omit_fp (MonoCompile *cfg); #endif static guint8* emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data); const char* mono_arch_regname (int reg) { static const char * rnames[] = { "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1", "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6", "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr", "arm_pc" }; if (reg >= 0 && reg < 16) return rnames [reg]; return "unknown"; } const char* mono_arch_fregname (int reg) { static const char * rnames[] = { "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4", "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9", "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14", "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19", "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24", "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29", "arm_f30", "arm_f31" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } #ifndef DISABLE_JIT static guint8* emit_big_add_temp (guint8 *code, int dreg, int sreg, int imm, int temp) { int imm8, rot_amount; g_assert (temp == ARMREG_IP || temp == ARMREG_LR); if (imm == 0) { if (sreg != dreg) ARM_MOV_REG_REG (code, dreg, sreg); } else if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) { ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount); return code; } if (dreg == sreg) { code = mono_arm_emit_load_imm (code, temp, imm); ARM_ADD_REG_REG (code, dreg, sreg, temp); } else { code = mono_arm_emit_load_imm (code, dreg, imm); ARM_ADD_REG_REG (code, dreg, dreg, sreg); } return code; } static guint8* emit_big_add (guint8 *code, int dreg, int sreg, int imm) { return emit_big_add_temp (code, dreg, sreg, imm, ARMREG_IP); } static guint8* emit_ldr_imm (guint8 *code, int dreg, int sreg, int imm) { if 
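/* Editorial sketch, not Mono's routine: emit_big_add_temp above relies
 * on mono_arm_is_rotated_imm8, which tests whether a constant fits the
 * ARM operand2 immediate format, an 8-bit payload rotated right by an
 * even amount. A standalone re-derivation of that check; the return
 * convention (payload or -1) is illustrative: */
#include <stdint.h>
#include <stdio.h>

static int is_rotated_imm8 (uint32_t val, int *rot_amount)
{
        for (int rot = 0; rot < 32; rot += 2) {
                /* rotating left by rot undoes a right-rotation by rot */
                uint32_t v = rot ? ((val << rot) | (val >> (32 - rot))) : val;
                if (v <= 0xff) {
                        *rot_amount = rot;
                        return (int)v;
                }
        }
        return -1;  /* needs a load via mono_arm_emit_load_imm instead */
}

int main (void)
{
        int rot;
        printf ("%d\n", is_rotated_imm8 (0x3f0, &rot)); /* 252: 0xfc ror 30 */
        printf ("rot=%d\n", rot);                       /* 30 */
        printf ("%d\n", is_rotated_imm8 (0x101, &rot)); /* -1: not encodable */
        return 0;
}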
(!arm_is_imm12 (imm)) { g_assert (dreg != sreg); code = emit_big_add (code, dreg, sreg, imm); ARM_LDR_IMM (code, dreg, dreg, 0); } else { ARM_LDR_IMM (code, dreg, sreg, imm); } return code; } /* If dreg == sreg, this clobbers IP */ static guint8* emit_sub_imm (guint8 *code, int dreg, int sreg, int imm) { int imm8, rot_amount; if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) { ARM_SUB_REG_IMM (code, dreg, sreg, imm8, rot_amount); return code; } if (dreg == sreg) { code = mono_arm_emit_load_imm (code, ARMREG_IP, imm); ARM_SUB_REG_REG (code, dreg, sreg, ARMREG_IP); } else { code = mono_arm_emit_load_imm (code, dreg, imm); ARM_SUB_REG_REG (code, dreg, dreg, sreg); } return code; } static guint8* emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset) { /* we can use r0-r3, since this is called only for incoming args on the stack */ if (size > sizeof (target_mgreg_t) * 4) { guint8 *start_loop; code = emit_big_add (code, ARMREG_R0, sreg, soffset); code = emit_big_add (code, ARMREG_R1, dreg, doffset); start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size); ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0); ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0); ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4); ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4); ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4); ARM_B_COND (code, ARMCOND_NE, 0); arm_patch (code - 4, start_loop); return code; } if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) && arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) { while (size >= 4) { ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset); ARM_STR_IMM (code, ARMREG_LR, dreg, doffset); doffset += 4; soffset += 4; size -= 4; } } else if (size) { code = emit_big_add (code, ARMREG_R0, sreg, soffset); code = emit_big_add (code, ARMREG_R1, dreg, doffset); doffset = soffset = 0; while (size >= 4) { ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset); ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset); doffset += 4; soffset += 4; size -= 4; } } g_assert (size == 0); return code; } static guint8* emit_jmp_reg (guint8 *code, int reg) { if (thumb_supported) ARM_BX (code, reg); else ARM_MOV_REG_REG (code, ARMREG_PC, reg); return code; } static guint8* emit_call_reg (guint8 *code, int reg) { if (v5_supported) { ARM_BLX_REG (code, reg); } else { ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC); return emit_jmp_reg (code, reg); } return code; } static guint8* emit_call_seq (MonoCompile *cfg, guint8 *code) { if (cfg->method->dynamic) { ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0); ARM_B (code, 0); *(gpointer*)code = NULL; code += 4; code = emit_call_reg (code, ARMREG_IP); } else { ARM_BL (code, 0); } cfg->thunk_area += THUNK_SIZE; return code; } guint8* mono_arm_patchable_b (guint8 *code, int cond) { ARM_B_COND (code, cond, 0); return code; } guint8* mono_arm_patchable_bl (guint8 *code, int cond) { ARM_BL_COND (code, cond, 0); return code; } #if defined(__ARM_EABI__) && defined(__linux__) && !defined(HOST_ANDROID) && !defined(MONO_CROSS_COMPILE) #define HAVE_AEABI_READ_TP 1 #endif #ifdef HAVE_AEABI_READ_TP G_BEGIN_DECLS gpointer __aeabi_read_tp (void); G_END_DECLS #endif gboolean mono_arch_have_fast_tls (void) { #ifdef HAVE_AEABI_READ_TP static gboolean have_fast_tls = FALSE; static gboolean inited = FALSE; if (mini_debug_options.use_fallback_tls) return FALSE; if (inited) return have_fast_tls; if (v7_supported) { gpointer tp1, tp2; tp1 = __aeabi_read_tp (); asm volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp2)); have_fast_tls = tp1 && tp1 == tp2; 
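/* Editorial sketch, not Mono source: the fast path of emit_memcpy above
 * copies one aligned word per LDR/STR pair. The same loop in C,
 * assuming size is a multiple of four as the emitted word loop does: */
#include <stdio.h>
#include <string.h>

static void copy_words (char *dst, const char *src, int size)
{
        while (size >= 4) {            /* one LDR/STR pair per word */
                memcpy (dst, src, 4);
                dst += 4;
                src += 4;
                size -= 4;
        }
}

int main (void)
{
        char a [8] = "abcdefg", b [8] = { 0 };
        copy_words (b, a, 8);
        printf ("%s\n", b);            /* abcdefg */
        return 0;
}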
} inited = TRUE; return have_fast_tls; #else return FALSE; #endif } static guint8* emit_tls_get (guint8 *code, int dreg, int tls_offset) { g_assert (v7_supported); ARM_MRC (code, 15, 0, dreg, 13, 0, 3); ARM_LDR_IMM (code, dreg, dreg, tls_offset); return code; } static guint8* emit_tls_set (guint8 *code, int sreg, int tls_offset) { int tp_reg = (sreg != ARMREG_R0) ? ARMREG_R0 : ARMREG_R1; g_assert (v7_supported); ARM_MRC (code, 15, 0, tp_reg, 13, 0, 3); ARM_STR_IMM (code, sreg, tp_reg, tls_offset); return code; } /* * emit_save_lmf: * * Emit code to push an LMF structure on the LMF stack. * On arm, this is intermixed with the initialization of other fields of the structure. */ static guint8* emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset) { int i; if (mono_arch_have_fast_tls () && mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR) != -1) { code = emit_tls_get (code, ARMREG_R0, mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR)); } else { mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern)); code = emit_call_seq (cfg, code); } /* we build the MonoLMF structure on the stack - see mini-arm.h */ /* lmf_offset is the offset from the previous stack pointer, * alloc_size is the total stack space allocated, so the offset * of MonoLMF from the current stack ptr is alloc_size - lmf_offset. * The pointer to the struct is put in r1 (new_lmf). * ip is used as scratch * The callee-saved registers are already in the MonoLMF structure */ code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset); /* r0 is the result from mono_get_lmf_addr () */ ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr)); /* new_lmf->previous_lmf = *lmf_addr */ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf)); ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf)); /* *(lmf_addr) = r1 */ ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf)); /* Skip method (only needed for trampoline LMF frames) */ ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp)); ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp)); /* save the current IP */ ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC); ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip)); for (i = 0; i < MONO_ABI_SIZEOF (MonoLMF); i += sizeof (target_mgreg_t)) mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF); return code; } typedef struct { gint32 vreg; gint32 hreg; } FloatArgData; static guint8 * emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset) { GSList *list; set_code_cursor (cfg, code); for (list = inst->float_args; list; list = list->next) { FloatArgData *fad = (FloatArgData*)list->data; MonoInst *var = get_vreg_to_inst (cfg, fad->vreg); gboolean imm = arm_is_fpimm8 (var->inst_offset); /* 4+1 insns for emit_big_add () and 1 for FLDS. 
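/* Editorial sketch, not Mono source: emit_save_lmf above links a new
 * LMF in front of the per-thread *lmf_addr slot, and emit_restore_lmf
 * unlinks it, an intrusive singly linked stack. The same discipline in
 * C with a hypothetical miniature of the MonoLMF layout: */
#include <stdio.h>

typedef struct LMF {
        struct LMF *previous_lmf;
        void *sp, *fp, *ip;
} LMF;

static LMF *lmf_head;  /* stands in for the per-thread lmf_addr slot */

static void lmf_push (LMF *lmf)
{
        lmf->previous_lmf = lmf_head;  /* new_lmf->previous_lmf = *lmf_addr */
        lmf_head = lmf;                /* *lmf_addr = new_lmf */
}

static void lmf_pop (LMF *lmf)
{
        lmf_head = lmf->previous_lmf;  /* *lmf_addr = previous_lmf */
}

int main (void)
{
        LMF a = { 0 }, b = { 0 };
        lmf_push (&a);
        lmf_push (&b);
        lmf_pop (&b);
        printf ("%s\n", lmf_head == &a ? "ok" : "bad");
        return 0;
}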
*/ if (!imm) *max_len += 20 + 4; *max_len += 4; code = realloc_code (cfg, *max_len); if (!imm) { code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset); ARM_FLDS (code, fad->hreg, ARMREG_LR, 0); } else ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset); set_code_cursor (cfg, code); *offset = code - cfg->native_code; } return code; } static guint8 * mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg) { MonoInst *inst; g_assert (reg == vfp_scratch1 || reg == vfp_scratch2); inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1]; if (IS_HARD_FLOAT) { if (!arm_is_fpimm8 (inst->inst_offset)) { code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset); ARM_FSTD (code, reg, ARMREG_LR, 0); } else ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset); } return code; } static guint8 * mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg) { MonoInst *inst; g_assert (reg == vfp_scratch1 || reg == vfp_scratch2); inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1]; if (IS_HARD_FLOAT) { if (!arm_is_fpimm8 (inst->inst_offset)) { code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset); ARM_FLDD (code, reg, ARMREG_LR, 0); } else ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset); } return code; } /* * emit_restore_lmf: * * Emit code to pop an LMF structure from the LMF stack. */ static guint8* emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset) { int basereg, offset; if (lmf_offset < 32) { basereg = cfg->frame_reg; offset = lmf_offset; } else { basereg = ARMREG_R2; offset = 0; code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset); } /* ip = previous_lmf */ ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf)); /* lr = lmf_addr */ ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr)); /* *(lmf_addr) = previous_lmf */ ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf)); return code; } #endif /* #ifndef DISABLE_JIT */ /* * mono_arch_get_argument_info: * @csig: a method signature * @param_count: the number of parameters to consider * @arg_info: an array to store the result infos * * Gathers information on parameters such as size, alignment and * padding. arg_info should be large enought to hold param_count + 1 entries. * * Returns the size of the activation frame. 
*/ int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) { int k, frame_size = 0; guint32 size, align, pad; int offset = 8; MonoType *t; t = mini_get_underlying_type (csig->ret); if (MONO_TYPE_ISSTRUCT (t)) { frame_size += sizeof (target_mgreg_t); offset += 4; } arg_info [0].offset = offset; if (csig->hasthis) { frame_size += sizeof (target_mgreg_t); offset += 4; } arg_info [0].size = frame_size; for (k = 0; k < param_count; k++) { size = mini_type_stack_size_full (csig->params [k], &align, csig->pinvoke && !csig->marshalling_disabled); /* ignore alignment for now */ align = 1; frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; frame_size += size; arg_info [k + 1].pad = 0; arg_info [k + 1].size = size; offset += pad; arg_info [k + 1].offset = offset; offset += size; } align = MONO_ARCH_FRAME_ALIGNMENT; frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; return frame_size; } #define MAX_ARCH_DELEGATE_PARAMS 3 static guint8* get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, gboolean param_count) { guint8 *code, *start; GSList *unwind_ops = mono_arch_get_cie_program (); if (has_target) { start = code = mono_global_codeman_reserve (12); /* Replace the this argument with the target */ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target)); ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP); g_assert ((code - start) <= 12); mono_arch_flush_icache (start, 12); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); } else { int size, i; size = 8 + param_count * 4; start = code = mono_global_codeman_reserve (size); ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); /* slide down the arguments */ for (i = 0; i < param_count; ++i) { ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1)); } ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP); g_assert ((code - start) <= size); mono_arch_flush_icache (start, size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); } if (has_target) { *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops); } else { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count); *info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops); g_free (name); } MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); return start; } /* * mono_arch_get_delegate_invoke_impls: * * Return a list of MonoAotTrampInfo structures for the delegate invoke impl * trampolines. 
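/* Editorial sketch, not Mono source: mono_arch_get_argument_info above
 * computes per-argument padding with
 *     pad = (align - (frame_size & (align - 1))) & (align - 1);
 * i.e. the bytes needed to reach the next multiple of a power-of-two
 * alignment, and zero when already aligned. Verified in isolation: */
#include <stdio.h>

static unsigned pad_to (unsigned size, unsigned align)
{
        /* align must be a power of two, as in the frame layout code */
        return (align - (size & (align - 1))) & (align - 1);
}

int main (void)
{
        printf ("%u %u %u\n", pad_to (13, 8), pad_to (16, 8), pad_to (17, 4));
        /* prints 3 0 3 */
        return 0;
}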
*/ GSList* mono_arch_get_delegate_invoke_impls (void) { GSList *res = NULL; MonoTrampInfo *info; int i; get_delegate_invoke_impl (&info, TRUE, 0); res = g_slist_prepend (res, info); for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) { get_delegate_invoke_impl (&info, FALSE, i); res = g_slist_prepend (res, info); } return res; } gpointer mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target) { guint8 *code, *start; MonoType *sig_ret; /* FIXME: Support more cases */ sig_ret = mini_get_underlying_type (sig->ret); if (MONO_TYPE_ISSTRUCT (sig_ret)) return NULL; if (has_target) { static guint8* cached = NULL; mono_mini_arch_lock (); if (cached) { mono_mini_arch_unlock (); return cached; } if (mono_ee_features.use_aot_trampolines) { start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target"); } else { MonoTrampInfo *info; start = get_delegate_invoke_impl (&info, TRUE, 0); mono_tramp_info_register (info, NULL); } cached = start; mono_mini_arch_unlock (); return cached; } else { static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL}; int i; if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS) return NULL; for (i = 0; i < sig->param_count; ++i) if (!mono_is_regsize_var (sig->params [i])) return NULL; mono_mini_arch_lock (); code = cache [sig->param_count]; if (code) { mono_mini_arch_unlock (); return code; } if (mono_ee_features.use_aot_trampolines) { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count); start = (guint8*)mono_aot_get_trampoline (name); g_free (name); } else { MonoTrampInfo *info; start = get_delegate_invoke_impl (&info, FALSE, sig->param_count); mono_tramp_info_register (info, NULL); } cache [sig->param_count] = start; mono_mini_arch_unlock (); return start; } return NULL; } gpointer mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg) { return NULL; } gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code) { return (gpointer)regs [ARMREG_R0]; } /* * Initialize the cpu to execute managed code. */ void mono_arch_cpu_init (void) { i8_align = MONO_ABI_ALIGNOF (gint64); #ifdef MONO_CROSS_COMPILE /* Need to set the alignment of i8 since it can different on the target */ #ifdef TARGET_ANDROID /* linux gnueabi */ mono_type_set_alignment (MONO_TYPE_I8, i8_align); #endif #endif } /* * Initialize architecture specific code. */ void mono_arch_init (void) { char *cpu_arch; #ifdef TARGET_WATCHOS mini_debug_options.soft_breakpoints = TRUE; #endif mono_os_mutex_init_recursive (&mini_arch_mutex); if (mini_debug_options.soft_breakpoints) { if (!mono_aot_only) breakpoint_tramp = mini_get_breakpoint_trampoline (); } else { ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER); bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER); mono_mprotect (bp_trigger_page, mono_pagesize (), 0); } #if defined(__ARM_EABI__) eabi_supported = TRUE; #endif #if defined(ARM_FPU_VFP_HARD) arm_fpu = MONO_ARM_FPU_VFP_HARD; #else arm_fpu = MONO_ARM_FPU_VFP; #if defined(ARM_FPU_NONE) && !defined(TARGET_IOS) /* * If we're compiling with a soft float fallback and it * turns out that no VFP unit is available, we need to * switch to soft float. We don't do this for iOS, since * iOS devices always have a VFP unit. 
*/ if (!mono_hwcap_arm_has_vfp) arm_fpu = MONO_ARM_FPU_NONE; /* * This environment variable can be useful in testing * environments to make sure the soft float fallback * works. Most ARM devices have VFP units these days, so * normally soft float code would not be exercised much. */ char *soft = g_getenv ("MONO_ARM_FORCE_SOFT_FLOAT"); if (soft && !strncmp (soft, "1", 1)) arm_fpu = MONO_ARM_FPU_NONE; g_free (soft); #endif #endif v5_supported = mono_hwcap_arm_is_v5; v6_supported = mono_hwcap_arm_is_v6; v7_supported = mono_hwcap_arm_is_v7; /* * On weird devices, the hwcap code may fail to detect * the ARM version. In that case, we can at least safely * assume the version the runtime was compiled for. */ #ifdef HAVE_ARMV5 v5_supported = TRUE; #endif #ifdef HAVE_ARMV6 v6_supported = TRUE; #endif #ifdef HAVE_ARMV7 v7_supported = TRUE; #endif #if defined(TARGET_IOS) /* iOS is special-cased here because we don't yet have a way to properly detect CPU features on it. */ thumb_supported = TRUE; iphone_abi = TRUE; #elif defined(TARGET_ANDROID) thumb_supported = TRUE; #else thumb_supported = mono_hwcap_arm_has_thumb; thumb2_supported = mono_hwcap_arm_has_thumb2; #endif /* Format: armv(5|6|7[s])[-thumb[2]] */ cpu_arch = g_getenv ("MONO_CPU_ARCH"); /* Do this here so it overrides any detection. */ if (cpu_arch) { if (strncmp (cpu_arch, "armv", 4) == 0) { v5_supported = cpu_arch [4] >= '5'; v6_supported = cpu_arch [4] >= '6'; v7_supported = cpu_arch [4] >= '7'; v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0; v7k_supported = strncmp (cpu_arch, "armv7k", 6) == 0; } thumb_supported = strstr (cpu_arch, "thumb") != NULL; thumb2_supported = strstr (cpu_arch, "thumb2") != NULL; g_free (cpu_arch); } } /* * Cleanup architecture specific code. */ void mono_arch_cleanup (void) { } /* * This function returns the optimizations supported on this cpu. 
*/ guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask) { /* no arm-specific optimizations yet */ *exclude_mask = 0; return 0; } gboolean mono_arm_is_hard_float (void) { return arm_fpu == MONO_ARM_FPU_VFP_HARD; } #ifndef DISABLE_JIT gboolean mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode) { if (v7s_supported || v7k_supported) { switch (opcode) { case OP_IDIV: case OP_IREM: case OP_IDIV_UN: case OP_IREM_UN: return FALSE; default: break; } } return TRUE; } #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK gboolean mono_arch_is_soft_float (void) { return arm_fpu == MONO_ARM_FPU_NONE; } #endif static gboolean is_regsize_var (MonoType *t) { if (m_type_is_byref (t)) return TRUE; t = mini_get_underlying_type (t); switch (t->type) { case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: return TRUE; case MONO_TYPE_OBJECT: return TRUE; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (t)) return TRUE; return FALSE; case MONO_TYPE_VALUETYPE: return FALSE; } return FALSE; } GList * mono_arch_get_allocatable_int_vars (MonoCompile *cfg) { GList *vars = NULL; int i; for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); /* unused vars */ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos) continue; if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG)) continue; /* we can only allocate 32 bit values */ if (is_regsize_var (ins->inst_vtype)) { g_assert (MONO_VARINFO (cfg, i)->reg == -1); g_assert (i == vmv->idx); vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE); } } return vars; } GList * mono_arch_get_global_int_regs (MonoCompile *cfg) { GList *regs = NULL; mono_arch_compute_omit_fp (cfg); /* * FIXME: Interface calls might go through a static rgctx trampoline which * sets V5, but it doesn't save it, so we need to save it ourselves, and * avoid using it. */ if (cfg->flags & MONO_CFG_HAS_CALLS) cfg->uses_rgctx_reg = TRUE; if (cfg->arch.omit_fp) regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP)); regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1)); regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2)); regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3)); if (iphone_abi) /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */ regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7)); else regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4)); if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))) /* V5 is reserved for passing the vtable/rgctx/IMT method */ regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5)); /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/ /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/ return regs; } /* * mono_arch_regalloc_cost: * * Return the cost, in number of memory references, of the action of * allocating the variable VMV into a register during global register * allocation. 
*/ guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv) { /* FIXME: */ return 2; } #endif /* #ifndef DISABLE_JIT */ void mono_arch_flush_icache (guint8 *code, gint size) { #if defined(MONO_CROSS_COMPILE) #elif __APPLE__ sys_icache_invalidate (code, size); #else __builtin___clear_cache ((char*)code, (char*)code + size); #endif } #define DEBUG(a) static void inline add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple) { if (simple) { if (*gr > ARMREG_R3) { ainfo->size = 4; ainfo->offset = *stack_size; ainfo->reg = ARMREG_SP; /* in the caller */ ainfo->storage = RegTypeBase; *stack_size += 4; } else { ainfo->storage = RegTypeGeneral; ainfo->reg = *gr; } } else { gboolean split; if (eabi_supported) split = i8_align == 4; else split = TRUE; ainfo->size = 8; if (*gr == ARMREG_R3 && split) { /* first word in r3 and the second on the stack */ ainfo->offset = *stack_size; ainfo->reg = ARMREG_SP; /* in the caller */ ainfo->storage = RegTypeBaseGen; *stack_size += 4; } else if (*gr >= ARMREG_R3) { if (eabi_supported) { /* darwin aligns longs to 4 byte only */ if (i8_align == 8) { *stack_size += 7; *stack_size &= ~7; } } ainfo->offset = *stack_size; ainfo->reg = ARMREG_SP; /* in the caller */ ainfo->storage = RegTypeBase; *stack_size += 8; } else { if (eabi_supported) { if (i8_align == 8 && ((*gr) & 1)) (*gr) ++; } ainfo->storage = RegTypeIRegPair; ainfo->reg = *gr; } (*gr) ++; } (*gr) ++; } static void inline add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare) { /* * If we're calling a function like this: * * void foo(float a, double b, float c) * * We pass a in s0 and b in d1. That leaves us * with s1 being unused. The armhf ABI recognizes * this and requires register assignment to then * use that for the next single-precision arg, * i.e. c in this example. So float_spare either * tells us which reg to use for the next single- * precision arg, or it's -1, meaning use *fpr. * * Note that even though most of the JIT speaks * double-precision, fpr represents single- * precision registers. * * See parts 5.5 and 6.1.2 of the AAPCS for how * this all works. */ if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) { ainfo->storage = RegTypeFP; if (is_double) { /* * If we're passing a double-precision value * and *fpr is odd (e.g. it's s1, s3, ...) * we need to use the next even register. So * we mark the current *fpr as a spare that * can be used for the next single-precision * value. */ if (*fpr % 2) { *float_spare = *fpr; (*fpr)++; } /* * At this point, we have an even register * so we assign that and move along. */ ainfo->reg = *fpr; *fpr += 2; } else if (*float_spare >= 0) { /* * We're passing a single-precision value * and it looks like a spare single- * precision register is available. Let's * use it. */ ainfo->reg = *float_spare; *float_spare = -1; } else { /* * If we hit this branch, we're passing a * single-precision value and we can simply * use the next available register. */ ainfo->reg = *fpr; (*fpr)++; } } else { /* * We've exhausted available floating point * regs, so pass the rest on the stack. */ if (is_double) { *stack_size += 7; *stack_size &= ~7; } ainfo->offset = *stack_size; ainfo->reg = ARMREG_SP; ainfo->storage = RegTypeBase; *stack_size += is_double ? 
8 : 4; } } static gboolean is_hfa (MonoType *t, int *out_nfields, int *out_esize) { MonoClass *klass; gpointer iter; MonoClassField *field; MonoType *ftype, *prev_ftype = NULL; int nfields = 0; klass = mono_class_from_mono_type_internal (t); iter = NULL; while ((field = mono_class_get_fields_internal (klass, &iter))) { if (field->type->attrs & FIELD_ATTRIBUTE_STATIC) continue; ftype = mono_field_get_type_internal (field); ftype = mini_get_underlying_type (ftype); if (MONO_TYPE_ISSTRUCT (ftype)) { int nested_nfields, nested_esize; if (!is_hfa (ftype, &nested_nfields, &nested_esize)) return FALSE; if (nested_esize == 4) ftype = m_class_get_byval_arg (mono_defaults.single_class); else ftype = m_class_get_byval_arg (mono_defaults.double_class); if (prev_ftype && prev_ftype->type != ftype->type) return FALSE; prev_ftype = ftype; nfields += nested_nfields; } else { if (!(!m_type_is_byref (ftype) && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8))) return FALSE; if (prev_ftype && prev_ftype->type != ftype->type) return FALSE; prev_ftype = ftype; nfields ++; } } if (nfields == 0 || nfields > 4) return FALSE; *out_nfields = nfields; *out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8; return TRUE; } static CallInfo* get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { guint i, gr, fpr, pstart; gint float_spare; int n = sig->hasthis + sig->param_count; int nfields, esize; guint32 align; MonoType *t; guint32 stack_size = 0; CallInfo *cinfo; gboolean is_pinvoke = sig->pinvoke; gboolean vtype_retaddr = FALSE; if (mp) cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n)); else cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n)); cinfo->nargs = n; gr = ARMREG_R0; fpr = ARM_VFP_F0; float_spare = -1; t = mini_get_underlying_type (sig->ret); switch (t->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: cinfo->ret.storage = RegTypeGeneral; cinfo->ret.reg = ARMREG_R0; break; case MONO_TYPE_U8: case MONO_TYPE_I8: cinfo->ret.storage = RegTypeIRegPair; cinfo->ret.reg = ARMREG_R0; break; case MONO_TYPE_R4: case MONO_TYPE_R8: cinfo->ret.storage = RegTypeFP; if (t->type == MONO_TYPE_R4) cinfo->ret.size = 4; else cinfo->ret.size = 8; if (IS_HARD_FLOAT) { cinfo->ret.reg = ARM_VFP_F0; } else { cinfo->ret.reg = ARMREG_R0; } break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (t)) { cinfo->ret.storage = RegTypeGeneral; cinfo->ret.reg = ARMREG_R0; break; } if (mini_is_gsharedvt_variable_type (t)) { cinfo->ret.storage = RegTypeStructByAddr; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) { cinfo->ret.storage = RegTypeHFA; cinfo->ret.reg = 0; cinfo->ret.nregs = nfields; cinfo->ret.esize = esize; } else { if (sig->pinvoke && !sig->marshalling_disabled) { int native_size = mono_class_native_size (mono_class_from_mono_type_internal (t), &align); int max_size; #ifdef TARGET_WATCHOS max_size = 16; #else max_size = 4; #endif if (native_size <= max_size) { cinfo->ret.storage = RegTypeStructByVal; cinfo->ret.struct_size = native_size; cinfo->ret.nregs = ALIGN_TO (native_size, 4) / 4; } else { cinfo->ret.storage = RegTypeStructByAddr; } } else { cinfo->ret.storage = RegTypeStructByAddr; } } break; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: g_assert (mini_is_gsharedvt_type (t)); 
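/* Editorial sketch, not Mono source: the float_spare logic in add_float
 * above implements the AAPCS VFP back-filling rule. fpr counts
 * single-precision slots s0..s15; aligning a double may skip an odd
 * slot, which is remembered and handed to the next single-precision
 * argument. NUM_SREGS and the -1 convention here are illustrative: */
#include <stdio.h>

#define NUM_SREGS 16

static int alloc_vfp (int *fpr, int *float_spare, int is_double)
{
        if (!(*fpr < NUM_SREGS || (!is_double && *float_spare >= 0)))
                return -1;                 /* would go on the stack */
        if (is_double) {
                if (*fpr % 2) {            /* remember the skipped odd slot */
                        *float_spare = *fpr;
                        (*fpr)++;
                }
                int reg = *fpr;            /* even slot = a d-register */
                *fpr += 2;
                return reg;
        }
        if (*float_spare >= 0) {           /* back-fill the spare slot */
                int reg = *float_spare;
                *float_spare = -1;
                return reg;
        }
        return (*fpr)++;
}

int main (void)
{
        int fpr = 0, spare = -1;
        /* foo(float a, double b, float c): a->s0, b->d1 (s2/s3), c->s1 */
        printf ("a=s%d ", alloc_vfp (&fpr, &spare, 0));
        printf ("b=s%d ", alloc_vfp (&fpr, &spare, 1));
        printf ("c=s%d\n", alloc_vfp (&fpr, &spare, 0));
        return 0;
}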
cinfo->ret.storage = RegTypeStructByAddr; break; case MONO_TYPE_VOID: break; default: g_error ("Can't handle as return value 0x%x", sig->ret->type); } vtype_retaddr = cinfo->ret.storage == RegTypeStructByAddr; pstart = 0; n = 0; /* * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after * the first argument, allowing 'this' to be always passed in the first arg reg. * Also do this if the first argument is a reference type, since virtual calls * are sometimes made using calli without sig->hasthis set, like in the delegate * invoke wrappers. */ if (vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) { if (sig->hasthis) { add_general (&gr, &stack_size, cinfo->args + 0, TRUE); } else { add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE); pstart = 1; } n ++; cinfo->ret.reg = gr; gr ++; cinfo->vret_arg_index = 1; } else { /* this */ if (sig->hasthis) { add_general (&gr, &stack_size, cinfo->args + 0, TRUE); n ++; } if (vtype_retaddr) { cinfo->ret.reg = gr; gr ++; } } DEBUG(g_print("params: %d\n", sig->param_count)); for (i = pstart; i < sig->param_count; ++i) { ArgInfo *ainfo = &cinfo->args [n]; if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ gr = ARMREG_R3 + 1; fpr = ARM_VFP_F16; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE); } DEBUG(g_print("param %d: ", i)); if (m_type_is_byref (sig->params [i])) { DEBUG(g_print("byref\n")); add_general (&gr, &stack_size, ainfo, TRUE); n++; continue; } t = mini_get_underlying_type (sig->params [i]); switch (t->type) { case MONO_TYPE_I1: cinfo->args [n].is_signed = 1; case MONO_TYPE_U1: cinfo->args [n].size = 1; add_general (&gr, &stack_size, ainfo, TRUE); break; case MONO_TYPE_I2: cinfo->args [n].is_signed = 1; case MONO_TYPE_U2: cinfo->args [n].size = 2; add_general (&gr, &stack_size, ainfo, TRUE); break; case MONO_TYPE_I4: case MONO_TYPE_U4: cinfo->args [n].size = 4; add_general (&gr, &stack_size, ainfo, TRUE); break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: cinfo->args [n].size = sizeof (target_mgreg_t); add_general (&gr, &stack_size, ainfo, TRUE); break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (t)) { cinfo->args [n].size = sizeof (target_mgreg_t); add_general (&gr, &stack_size, ainfo, TRUE); break; } if (mini_is_gsharedvt_variable_type (t)) { /* gsharedvt arguments are passed by ref */ g_assert (mini_is_gsharedvt_type (t)); add_general (&gr, &stack_size, ainfo, TRUE); switch (ainfo->storage) { case RegTypeGeneral: ainfo->storage = RegTypeGSharedVtInReg; break; case RegTypeBase: ainfo->storage = RegTypeGSharedVtOnStack; break; default: g_assert_not_reached (); } break; } /* Fall through */ case MONO_TYPE_TYPEDBYREF: case MONO_TYPE_VALUETYPE: { gint size; int align_size; int nwords, nfields, esize; guint32 align; if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) { if (fpr + nfields < ARM_VFP_F16) { ainfo->storage = RegTypeHFA; ainfo->reg = fpr; ainfo->nregs = nfields; ainfo->esize = esize; if (esize == 4) fpr += nfields; else fpr += nfields * 2; break; } else { fpr = ARM_VFP_F16; } } if (t->type == MONO_TYPE_TYPEDBYREF) { size = MONO_ABI_SIZEOF (MonoTypedRef); align = sizeof (target_mgreg_t); } else { MonoClass *klass = 
mono_class_from_mono_type_internal (sig->params [i]); if (sig->pinvoke && !sig->marshalling_disabled) size = mono_class_native_size (klass, &align); else size = mini_type_stack_size_full (t, &align, FALSE); } DEBUG(g_print ("load %d bytes struct\n", size)); #ifdef TARGET_WATCHOS /* Watchos pass large structures by ref */ /* We only do this for pinvoke to make gsharedvt/dyncall simpler */ if (sig->pinvoke && size > 16) { add_general (&gr, &stack_size, ainfo, TRUE); switch (ainfo->storage) { case RegTypeGeneral: ainfo->storage = RegTypeStructByAddr; break; case RegTypeBase: ainfo->storage = RegTypeStructByAddrOnStack; break; default: g_assert_not_reached (); break; } break; } #endif align_size = size; nwords = 0; align_size += (sizeof (target_mgreg_t) - 1); align_size &= ~(sizeof (target_mgreg_t) - 1); nwords = (align_size + sizeof (target_mgreg_t) -1 ) / sizeof (target_mgreg_t); ainfo->storage = RegTypeStructByVal; ainfo->struct_size = size; ainfo->align = align; if (eabi_supported) { if (align >= 8 && (gr & 1)) gr ++; } if (gr > ARMREG_R3) { ainfo->size = 0; ainfo->vtsize = nwords; } else { int rest = ARMREG_R3 - gr + 1; int n_in_regs = rest >= nwords? nwords: rest; ainfo->size = n_in_regs; ainfo->vtsize = nwords - n_in_regs; ainfo->reg = gr; gr += n_in_regs; nwords -= n_in_regs; } stack_size = ALIGN_TO (stack_size, align); ainfo->offset = stack_size; /*g_print ("offset for arg %d at %d\n", n, stack_size);*/ stack_size += nwords * sizeof (target_mgreg_t); break; } case MONO_TYPE_U8: case MONO_TYPE_I8: ainfo->size = 8; add_general (&gr, &stack_size, ainfo, FALSE); break; case MONO_TYPE_R4: ainfo->size = 4; if (IS_HARD_FLOAT) add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare); else add_general (&gr, &stack_size, ainfo, TRUE); break; case MONO_TYPE_R8: ainfo->size = 8; if (IS_HARD_FLOAT) add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare); else add_general (&gr, &stack_size, ainfo, FALSE); break; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: /* gsharedvt arguments are passed by ref */ g_assert (mini_is_gsharedvt_type (t)); add_general (&gr, &stack_size, ainfo, TRUE); switch (ainfo->storage) { case RegTypeGeneral: ainfo->storage = RegTypeGSharedVtInReg; break; case RegTypeBase: ainfo->storage = RegTypeGSharedVtOnStack; break; default: g_assert_not_reached (); } break; default: g_error ("Can't handle 0x%x", sig->params [i]->type); } n ++; } /* Handle the case where there are no implicit arguments */ if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ gr = ARMREG_R3 + 1; fpr = ARM_VFP_F16; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE); } DEBUG (g_print (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size)); stack_size = ALIGN_TO (stack_size, MONO_ARCH_FRAME_ALIGNMENT); cinfo->stack_usage = stack_size; return cinfo; } /* * We need to create a temporary value if the argument is not stored in * a linear memory range in the ccontext (this normally happens for * value types if they are passed both by stack and regs). 
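/* Editorial sketch, not Mono source: the RegTypeStructByVal case above
 * splits a by-value struct into nwords words, putting the first
 * n_in_regs in r(gr)..r3 and the remainder on the stack. The same
 * computation in isolation; macro names are illustrative: */
#include <stdio.h>

#define WORD 4
#define LAST_ARG_REG 3

static void split_struct (int size, int gr, int *n_in_regs, int *n_on_stack)
{
        int nwords = (size + WORD - 1) / WORD;   /* round up to words */
        int rest = LAST_ARG_REG - gr + 1;        /* argument regs left */
        if (rest < 0)
                rest = 0;
        *n_in_regs = rest >= nwords ? nwords : rest;
        *n_on_stack = nwords - *n_in_regs;
}

int main (void)
{
        int in_regs, on_stack;
        split_struct (12, 2, &in_regs, &on_stack); /* 3 words, r2..r3 free */
        printf ("regs=%d stack=%d\n", in_regs, on_stack); /* regs=2 stack=1 */
        return 0;
}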
 */
static int
arg_need_temp (ArgInfo *ainfo)
{
	if (ainfo->storage == RegTypeStructByVal && ainfo->vtsize)
		return ainfo->struct_size;
	return 0;
}

static gpointer
arg_get_storage (CallContext *ccontext, ArgInfo *ainfo)
{
	switch (ainfo->storage) {
	case RegTypeIRegPair:
	case RegTypeGeneral:
	case RegTypeStructByVal:
		return &ccontext->gregs [ainfo->reg];
	case RegTypeHFA:
	case RegTypeFP:
		if (IS_HARD_FLOAT)
			return &ccontext->fregs [ainfo->reg];
		else
			return &ccontext->gregs [ainfo->reg];
	case RegTypeBase:
		return ccontext->stack + ainfo->offset;
	default:
		g_error ("Arg storage type not yet supported");
	}
}

static void
arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest)
{
	int reg_size = ainfo->size * sizeof (host_mgreg_t);
	g_assert (arg_need_temp (ainfo));
	memcpy (dest, &ccontext->gregs [ainfo->reg], reg_size);
	memcpy ((host_mgreg_t*)dest + ainfo->size, ccontext->stack + ainfo->offset, ainfo->struct_size - reg_size);
}

static void
arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src)
{
	int reg_size = ainfo->size * sizeof (host_mgreg_t);
	g_assert (arg_need_temp (ainfo));
	memcpy (&ccontext->gregs [ainfo->reg], src, reg_size);
	memcpy (ccontext->stack + ainfo->offset, (host_mgreg_t*)src + ainfo->size, ainfo->struct_size - reg_size);
}

/* Set arguments in the ccontext (for i2n entry) */
void
mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
	const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
	CallInfo *cinfo = get_call_info (NULL, sig);
	gpointer storage;
	ArgInfo *ainfo;

	memset (ccontext, 0, sizeof (CallContext));

	ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);
	if (ccontext->stack_size)
		ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size);

	if (sig->ret->type != MONO_TYPE_VOID) {
		ainfo = &cinfo->ret;
		if (ainfo->storage == RegTypeStructByAddr) {
			storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1);
			ccontext->gregs [cinfo->ret.reg] = (host_mgreg_t)(gsize)storage;
		}
	}

	g_assert (!sig->hasthis);

	for (int i = 0; i < sig->param_count; i++) {
		ainfo = &cinfo->args [i];
		int temp_size = arg_need_temp (ainfo);

		if (temp_size)
			storage = alloca (temp_size); // FIXME? alloca in a loop
		else
			storage = arg_get_storage (ccontext, ainfo);

		interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage);
		if (temp_size)
			arg_set_val (ccontext, ainfo, storage);
	}

	g_free (cinfo);
}

/* Set return value in the ccontext (for n2i return) */
void
mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp)
{
	const MonoEECallbacks *interp_cb;
	CallInfo *cinfo;
	gpointer storage;
	ArgInfo *ainfo;

	if (sig->ret->type == MONO_TYPE_VOID)
		return;

	interp_cb = mini_get_interp_callbacks ();
	cinfo = get_call_info (NULL, sig);
	ainfo = &cinfo->ret;

	if (retp) {
		g_assert (ainfo->storage == RegTypeStructByAddr);
		interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, retp);
	} else {
		g_assert (ainfo->storage != RegTypeStructByAddr);
		g_assert (!arg_need_temp (ainfo));
		storage = arg_get_storage (ccontext, ainfo);
		memset (ccontext, 0, sizeof (CallContext)); // FIXME
		interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage);
	}

	g_free (cinfo);
}

/* Gets the arguments from ccontext (for n2i entry) */
gpointer
mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
	const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
	CallInfo *cinfo = get_call_info (NULL, sig);
	gpointer storage;
	ArgInfo *ainfo;

	for (int i = 0; i < sig->param_count + sig->hasthis; i++) {
		ainfo = &cinfo->args [i];
		int temp_size = arg_need_temp (ainfo);

		if (temp_size) {
			storage = alloca (temp_size); // FIXME? alloca in a loop
			arg_get_val (ccontext, ainfo, storage);
		} else {
			storage = arg_get_storage (ccontext, ainfo);
		}
		interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage);
	}

	storage = NULL;
	if (sig->ret->type != MONO_TYPE_VOID) {
		ainfo = &cinfo->ret;
		if (ainfo->storage == RegTypeStructByAddr)
			storage = (gpointer)(gsize)ccontext->gregs [cinfo->ret.reg];
	}
	g_free (cinfo);
	return storage;
}

/* Gets the return value from ccontext (for i2n exit) */
void
mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
	const MonoEECallbacks *interp_cb;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	gpointer storage;

	if (sig->ret->type == MONO_TYPE_VOID)
		return;

	interp_cb = mini_get_interp_callbacks ();
	cinfo = get_call_info (NULL, sig);
	ainfo = &cinfo->ret;

	if (ainfo->storage != RegTypeStructByAddr) {
		g_assert (!arg_need_temp (ainfo));
		storage = arg_get_storage (ccontext, ainfo);
		interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage);
	}

	g_free (cinfo);
}

#ifndef DISABLE_JIT

gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
	g_assert (caller_sig);
	g_assert (callee_sig);

	CallInfo *caller_info = get_call_info (NULL, caller_sig);
	CallInfo *callee_info = get_call_info (NULL, callee_sig);

	/*
	 * Tailcalls with more callee stack usage than the caller cannot be supported, since
	 * the extra stack space would be left on the stack after the tailcall.
	 */
	gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage)
		&& IS_SUPPORTED_TAILCALL (caller_info->ret.storage == callee_info->ret.storage);

	// FIXME The limit here is that moving the parameters requires addressing the parameters
	// with 12bit (4K) immediate offsets. - 4 for TAILCALL_REG/MEMBASE
	res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (4096 - 4));
	res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (4096 - 4));

	g_free (caller_info);
	g_free (callee_info);

	return res;
}

static gboolean
debug_omit_fp (void)
{
#if 0
	return mono_debug_count ();
#else
	return TRUE;
#endif
}

/**
 * mono_arch_compute_omit_fp:
 * Determine whether the frame pointer can be eliminated.
 */
static void
mono_arch_compute_omit_fp (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	int i, locals_size;
	CallInfo *cinfo;

	if (cfg->arch.omit_fp_computed)
		return;

	header = cfg->header;

	sig = mono_method_signature_internal (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	/*
	 * FIXME: Remove some of the restrictions.
	 */
	cfg->arch.omit_fp = TRUE;
	cfg->arch.omit_fp_computed = TRUE;

	if (cfg->disable_omit_fp)
		cfg->arch.omit_fp = FALSE;
	if (!debug_omit_fp ())
		cfg->arch.omit_fp = FALSE;
	/*
	if (cfg->method->save_lmf)
		cfg->arch.omit_fp = FALSE;
	*/
	if (cfg->flags & MONO_CFG_HAS_ALLOCA)
		cfg->arch.omit_fp = FALSE;
	if (header->num_clauses)
		cfg->arch.omit_fp = FALSE;
	if (cfg->param_area)
		cfg->arch.omit_fp = FALSE;
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
		cfg->arch.omit_fp = FALSE;
	if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)))
		cfg->arch.omit_fp = FALSE;
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = &cinfo->args [i];

		if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
			/*
			 * The stack offset can only be determined when the frame
			 * size is known.
			 */
			cfg->arch.omit_fp = FALSE;
		}
	}

	locals_size = 0;
	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		int ialign;

		locals_size += mono_type_size (ins->inst_vtype, &ialign);
	}
}

/*
 * Set var information according to the calling convention. arm version.
 * The locals var stuff should most likely be split in another method.
 */
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	MonoInst *ins;
	MonoType *sig_ret;
	int i, offset, size, align, curinst;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	guint32 ualign;

	sig = mono_method_signature_internal (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;
	sig_ret = mini_get_underlying_type (sig->ret);

	mono_arch_compute_omit_fp (cfg);

	if (cfg->arch.omit_fp)
		cfg->frame_reg = ARMREG_SP;
	else
		cfg->frame_reg = ARMREG_FP;

	cfg->flags |= MONO_CFG_HAS_SPILLUP;

	/* allow room for the vararg method args: void* and long/double */
	if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
		cfg->param_area = MAX (cfg->param_area, sizeof (target_mgreg_t)*8);

	header = cfg->header;

	/* See mono_arch_get_global_int_regs () */
	if (cfg->flags & MONO_CFG_HAS_CALLS)
		cfg->uses_rgctx_reg = TRUE;

	if (cfg->frame_reg != ARMREG_SP)
		cfg->used_int_regs |= 1 << cfg->frame_reg;

	if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
		/* V5 is reserved for passing the vtable/rgctx/IMT method */
		cfg->used_int_regs |= (1 << MONO_ARCH_IMT_REG);

	offset = 0;
	curinst = 0;
	if (!MONO_TYPE_ISSTRUCT (sig_ret) && cinfo->ret.storage != RegTypeStructByAddr) {
		if (sig_ret->type != MONO_TYPE_VOID) {
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = ARMREG_R0;
		}
	}
	/* local vars are at a positive offset from the stack pointer */
	/*
	 * also note that if the function uses alloca, we use FP
	 * to point at the local variables.
	 */
	offset = 0; /* linkage area */
	/* align the offset to 16 bytes: not sure this is needed here */
	//offset += 8 - 1;
	//offset &= ~(8 - 1);

	/* add parameter area size for called functions */
	offset += cfg->param_area;
	offset += 8 - 1;
	offset &= ~(8 - 1);
	if (cfg->flags & MONO_CFG_HAS_FPOUT)
		offset += 8;

	/* allow room to save the return value */
	if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
		offset += 8;

	switch (cinfo->ret.storage) {
	case RegTypeStructByVal:
	case RegTypeHFA:
		/* Allocate a local to hold the result, the epilog will copy it to the correct place */
		offset = ALIGN_TO (offset, 8);
		cfg->ret->opcode = OP_REGOFFSET;
		cfg->ret->inst_basereg = cfg->frame_reg;
		cfg->ret->inst_offset = offset;
		if (cinfo->ret.storage == RegTypeStructByVal)
			offset += cinfo->ret.nregs * sizeof (target_mgreg_t);
		else
			offset += 32;
		break;
	case RegTypeStructByAddr:
		ins = cfg->vret_addr;
		offset += sizeof (target_mgreg_t) - 1;
		offset &= ~(sizeof (target_mgreg_t) - 1);
		ins->inst_offset = offset;
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			g_print ("vret_addr =");
			mono_print_ins (cfg->vret_addr);
		}
		offset += sizeof (target_mgreg_t);
		break;
	default:
		break;
	}

	/* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
	if (cfg->arch.seq_point_info_var) {
		MonoInst *ins;

		ins = cfg->arch.seq_point_info_var;

		size = 4;
		align = 4;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}
	if (cfg->arch.ss_trigger_page_var) {
		MonoInst *ins;

		ins = cfg->arch.ss_trigger_page_var;
		size = 4;
		align = 4;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}
	if (cfg->arch.seq_point_ss_method_var) {
		MonoInst *ins;

		ins = cfg->arch.seq_point_ss_method_var;
		size = 4;
		align = 4;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}
	if (cfg->arch.seq_point_bp_method_var) {
		MonoInst *ins;

		ins = cfg->arch.seq_point_bp_method_var;
		size = 4;
		align = 4;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}

	if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
		/* Allocate a temporary used by the atomic ops */
		size = 4;
		align = 4;

		/* Align the slot for the atomic temporary */
		offset += align - 1;
		offset &= ~(align - 1);
		cfg->arch.atomic_tmp_offset = offset;
		offset += size;
	} else {
		cfg->arch.atomic_tmp_offset = -1;
	}

	cfg->locals_min_stack_offset = offset;

	curinst = cfg->locals_start;
	for (i = curinst; i < cfg->num_varinfo; ++i) {
		MonoType *t;

		ins = cfg->varinfo [i];
		if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
			continue;

		t = ins->inst_vtype;
		if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
			continue;

		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
		 * pinvoke wrappers when they call functions returning structure */
		if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
			size = mono_class_native_size (mono_class_from_mono_type_internal (t), &ualign);
			align = ualign;
		} else
			size = mono_type_size (t, &align);

		/* FIXME: if a structure is misaligned, our memcpy doesn't work,
		 * since it loads/stores misaligned words, which don't do the right thing.
		 */
		if (align < 4 && size >= 4)
			align = 4;
		if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
			mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_offset = offset;
		ins->inst_basereg = cfg->frame_reg;
		offset += size;
		//g_print ("allocating local %d to %d\n", i, inst->inst_offset);
	}

	cfg->locals_max_stack_offset = offset;

	curinst = 0;
	if (sig->hasthis) {
		ins = cfg->args [curinst];
		if (ins->opcode != OP_REGVAR) {
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			offset += sizeof (target_mgreg_t) - 1;
			offset &= ~(sizeof (target_mgreg_t) - 1);
			ins->inst_offset = offset;
			offset += sizeof (target_mgreg_t);
		}
		curinst++;
	}

	if (sig->call_convention == MONO_CALL_VARARG) {
		size = 4;
		align = 4;

		/* Allocate a local slot to hold the sig cookie address */
		offset += align - 1;
		offset &= ~(align - 1);
		cfg->sig_cookie = offset;
		offset += size;
	}

	for (i = 0; i < sig->param_count; ++i) {
		ainfo = cinfo->args + i;

		ins = cfg->args [curinst];

		switch (ainfo->storage) {
		case RegTypeHFA:
			offset = ALIGN_TO (offset, 8);
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			/* These arguments are saved to the stack in the prolog */
			ins->inst_offset = offset;
			if (cfg->verbose_level >= 2)
				g_print ("arg %d allocated to %s+0x%0x.\n", i, mono_arch_regname (ins->inst_basereg), (int)ins->inst_offset);
			// FIXME:
			offset += 32;
			break;
		default:
			break;
		}

		if (ins->opcode != OP_REGVAR) {
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			size = mini_type_stack_size_full (sig->params [i], &ualign, sig->pinvoke && !sig->marshalling_disabled);
			align = ualign;
			/* FIXME: if a structure is misaligned, our memcpy doesn't work,
			 * since it loads/stores misaligned words, which don't do the right thing.
			 */
			if (align < 4 && size >= 4)
				align = 4;
			/* The code in the prolog () stores words when storing vtypes received in a register */
			if (MONO_TYPE_ISSTRUCT (sig->params [i]))
				align = 4;
			if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
				mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
			offset += align - 1;
			offset &= ~(align - 1);
			ins->inst_offset = offset;
			offset += size;
		}
		curinst++;
	}

	/* align the offset to 8 bytes */
	if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
		mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
	offset += 8 - 1;
	offset &= ~(8 - 1);

	/* change sign? */
	cfg->stack_offset = offset;
}

void
mono_arch_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	CallInfo *cinfo;
	int i;

	sig = mono_method_signature_internal (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	if (IS_HARD_FLOAT) {
		for (i = 0; i < 2; i++) {
			MonoInst *inst = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL);
			inst->flags |= MONO_INST_VOLATILE;

			cfg->arch.vfp_scratch_slots [i] = inst;
		}
	}

	if (cinfo->ret.storage == RegTypeStructByVal)
		cfg->ret_var_is_local = TRUE;

	if (cinfo->ret.storage == RegTypeStructByAddr) {
		cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			g_print ("vret_addr = ");
			mono_print_ins (cfg->vret_addr);
		}
	}

	if (cfg->gen_sdb_seq_points) {
		if (cfg->compile_aot) {
			MonoInst *ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.seq_point_info_var = ins;

			if (!cfg->soft_breakpoints) {
				/* Allocate a separate variable for this to save 1 load per seq point */
				ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
				ins->flags |= MONO_INST_VOLATILE;
				cfg->arch.ss_trigger_page_var = ins;
			}
		}
		if (cfg->soft_breakpoints) {
			MonoInst *ins;

			ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.seq_point_ss_method_var = ins;

			ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.seq_point_bp_method_var = ins;
		}
	}
}

static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
	MonoMethodSignature *tmp_sig;
	int sig_reg;

	if (MONO_IS_TAILCALL_OPCODE (call))
		NOT_IMPLEMENTED;

	g_assert (cinfo->sig_cookie.storage == RegTypeBase);

	/*
	 * mono_ArgIterator_Setup assumes the signature cookie is
	 * passed first and all the arguments which were before it are
	 * passed on the stack after the signature. So compensate by
	 * passing a different signature.
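	 * E.g. for a hypothetical call foo (1, __arglist (2.0)) with
	 * sentinelpos == 1, tmp_sig below would describe only the trailing
	 * double, so the iterator sees the cookie first, followed by the
	 * vararg portion.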
	 */
	tmp_sig = mono_metadata_signature_dup (call->signature);
	tmp_sig->param_count -= call->signature->sentinelpos;
	tmp_sig->sentinelpos = 0;
	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

	sig_reg = mono_alloc_ireg (cfg);
	MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);

	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
}

#ifdef ENABLE_LLVM
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
	int i, n;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	LLVMCallInfo *linfo;

	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg->mempool, sig);

	linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));

	/*
	 * LLVM always uses the native ABI while we use our own ABI, the
	 * only difference is the handling of vtypes:
	 * - we only pass/receive them in registers in some cases, and only
	 *   in 1 or 2 integer registers.
	 */
	switch (cinfo->ret.storage) {
	case RegTypeNone:
		linfo->ret.storage = LLVMArgNone;
		break;
	case RegTypeGeneral:
	case RegTypeFP:
	case RegTypeIRegPair:
		linfo->ret.storage = LLVMArgNormal;
		break;
	case RegTypeStructByAddr:
		if (sig->pinvoke) {
			linfo->ret.storage = LLVMArgVtypeByRef;
		} else {
			/* Vtype returned using a hidden argument */
			linfo->ret.storage = LLVMArgVtypeRetAddr;
			linfo->vret_arg_index = cinfo->vret_arg_index;
		}
		break;
#if TARGET_WATCHOS
	case RegTypeStructByVal:
		/* LLVM models this by returning an int array */
		linfo->ret.storage = LLVMArgAsIArgs;
		linfo->ret.nslots = cinfo->ret.nregs;
		break;
#endif
	case RegTypeHFA:
		linfo->ret.storage = LLVMArgFpStruct;
		linfo->ret.nslots = cinfo->ret.nregs;
		linfo->ret.esize = cinfo->ret.esize;
		break;
	default:
		cfg->exception_message = g_strdup_printf ("unknown ret conv (%d)", cinfo->ret.storage);
		cfg->disable_llvm = TRUE;
		return linfo;
	}

	for (i = 0; i < n; ++i) {
		LLVMArgInfo *lainfo = &linfo->args [i];
		ainfo = cinfo->args + i;

		lainfo->storage = LLVMArgNone;

		switch (ainfo->storage) {
		case RegTypeGeneral:
		case RegTypeIRegPair:
		case RegTypeBase:
		case RegTypeBaseGen:
		case RegTypeFP:
			lainfo->storage = LLVMArgNormal;
			break;
		case RegTypeStructByVal: {
			lainfo->storage = LLVMArgAsIArgs;
			int slotsize = eabi_supported && ainfo->align == 8 ? 8 : 4;
			lainfo->nslots = ALIGN_TO (ainfo->struct_size, slotsize) / slotsize;
			lainfo->esize = slotsize;
			break;
		}
		case RegTypeStructByAddr:
		case RegTypeStructByAddrOnStack:
			lainfo->storage = LLVMArgVtypeByRef;
			break;
		case RegTypeHFA: {
			int j;

			lainfo->storage = LLVMArgAsFpArgs;
			lainfo->nslots = ainfo->nregs;
			lainfo->esize = ainfo->esize;
			for (j = 0; j < ainfo->nregs; ++j)
				lainfo->pair_storage [j] = LLVMArgInFPReg;
			break;
		}
		default:
			cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
			cfg->disable_llvm = TRUE;
			break;
		}
	}

	return linfo;
}
#endif

void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
	MonoInst *in, *ins;
	MonoMethodSignature *sig;
	int i, n;
	CallInfo *cinfo;

	sig = call->signature;
	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg->mempool, sig);

	switch (cinfo->ret.storage) {
	case RegTypeStructByVal:
	case RegTypeHFA:
		if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) {
			/* The JIT will transform this into a normal call */
			call->vret_in_reg = TRUE;
			break;
		}
		if (MONO_IS_TAILCALL_OPCODE (call))
			break;
		/*
		 * The vtype is returned in registers, save the return area address in a local, and save the vtype into
		 * the location pointed to by it after call in emit_move_return_value ().
		 */
		if (!cfg->arch.vret_addr_loc) {
			cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
			/* Prevent it from being register allocated or optimized away */
			cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE;
		}

		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg);
		break;
	case RegTypeStructByAddr: {
		MonoInst *vtarg;
		MONO_INST_NEW (cfg, vtarg, OP_MOVE);
		vtarg->sreg1 = call->vret_var->dreg;
		vtarg->dreg = mono_alloc_preg (cfg);
		MONO_ADD_INS (cfg->cbb, vtarg);

		mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
		break;
	}
	default:
		break;
	}

	for (i = 0; i < n; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		MonoType *t;

		if (i >= sig->hasthis)
			t = sig->params [i - sig->hasthis];
		else
			t = mono_get_int_type ();
		t = mini_get_underlying_type (t);

		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Emit the signature cookie just before the implicit arguments */
			emit_sig_cookie (cfg, call, cinfo);
		}

		in = call->args [i];

		switch (ainfo->storage) {
		case RegTypeGeneral:
		case RegTypeIRegPair:
			if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = MONO_LVREG_LS (in->dreg);
				MONO_ADD_INS (cfg->cbb, ins);
				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);

				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = MONO_LVREG_MS (in->dreg);
				MONO_ADD_INS (cfg->cbb, ins);
				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
			} else if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
				if (ainfo->size == 4) {
					if (IS_SOFT_FLOAT) {
						/* mono_emit_call_args () has already done the r8->r4 conversion */
						/* The converted value is in an int vreg */
						MONO_INST_NEW (cfg, ins, OP_MOVE);
						ins->dreg = mono_alloc_ireg (cfg);
						ins->sreg1 = in->dreg;
						MONO_ADD_INS (cfg->cbb, ins);
						mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
					} else {
						int creg;

						cfg->param_area = MAX (cfg->param_area, 8);
						MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
						creg = mono_alloc_ireg (cfg);
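						/* Reload the spilled r4 from the param area into a core
						 * register, since this storage class passes the value in an
						 * integer register rather than a VFP one. */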
						MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
						mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
					}
				} else {
					if (IS_SOFT_FLOAT) {
						MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
						ins->dreg = mono_alloc_ireg (cfg);
						ins->sreg1 = in->dreg;
						MONO_ADD_INS (cfg->cbb, ins);
						mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);

						MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
						ins->dreg = mono_alloc_ireg (cfg);
						ins->sreg1 = in->dreg;
						MONO_ADD_INS (cfg->cbb, ins);
						mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
					} else {
						int creg;

						cfg->param_area = MAX (cfg->param_area, 8);
						MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
						creg = mono_alloc_ireg (cfg);
						MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
						mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
						creg = mono_alloc_ireg (cfg);
						MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
						mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
					}
				}
				cfg->flags |= MONO_CFG_HAS_FPOUT;
			} else {
				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = in->dreg;
				MONO_ADD_INS (cfg->cbb, ins);

				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
			}
			break;
		case RegTypeStructByVal:
		case RegTypeGSharedVtInReg:
		case RegTypeGSharedVtOnStack:
		case RegTypeHFA:
		case RegTypeStructByAddr:
		case RegTypeStructByAddrOnStack:
			MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
			ins->opcode = OP_OUTARG_VT;
			ins->sreg1 = in->dreg;
			ins->klass = in->klass;
			ins->inst_p0 = call;
			ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
			memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
			mono_call_inst_add_outarg_vt (cfg, call, ins);
			MONO_ADD_INS (cfg->cbb, ins);
			break;
		case RegTypeBase:
			if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
			} else if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
				if (t->type == MONO_TYPE_R8) {
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
				} else {
					if (IS_SOFT_FLOAT)
						MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
					else
						MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
				}
			} else {
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
			}
			break;
		case RegTypeBaseGen:
			if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? MONO_LVREG_LS (in->dreg) : MONO_LVREG_MS (in->dreg));
				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? MONO_LVREG_MS (in->dreg) : MONO_LVREG_LS (in->dreg);
				MONO_ADD_INS (cfg->cbb, ins);
				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
			} else if (!m_type_is_byref (t) && (t->type == MONO_TYPE_R8)) {
				int creg;

				/* This should work for soft-float as well */

				cfg->param_area = MAX (cfg->param_area, 8);
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
				creg = mono_alloc_ireg (cfg);
				mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
				MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
				creg = mono_alloc_ireg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
				cfg->flags |= MONO_CFG_HAS_FPOUT;
			} else {
				g_assert_not_reached ();
			}
			break;
		case RegTypeFP: {
			int fdreg = mono_alloc_freg (cfg);

			if (ainfo->size == 8) {
				MONO_INST_NEW (cfg, ins, OP_FMOVE);
				ins->sreg1 = in->dreg;
				ins->dreg = fdreg;
				MONO_ADD_INS (cfg->cbb, ins);

				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
			} else {
				FloatArgData *fad;

				/*
				 * Mono's register allocator doesn't speak single-precision registers that
				 * overlap double-precision registers (i.e. armhf). So we have to work around
				 * the register allocator and load the value from memory manually.
				 *
				 * So we create a variable for the float argument and an instruction to store
				 * the argument into the variable. We then store the list of these arguments
				 * in call->float_args. This list is then used by emit_float_args later to
				 * pass the arguments in the various call opcodes.
				 *
				 * This is not very nice, and we should really try to fix the allocator.
				 */
				MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL);

				/* Make sure the instruction isn't seen as pointless and removed. */
				float_arg->flags |= MONO_INST_VOLATILE;

				MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);

				/* We use the dreg to look up the instruction later. The hreg is used to
				 * emit the instruction that loads the value into the FP reg.
				 */
				fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
				fad->vreg = float_arg->dreg;
				fad->hreg = ainfo->reg;

				call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
			}

			call->used_iregs |= 1 << ainfo->reg;
			cfg->flags |= MONO_CFG_HAS_FPOUT;
			break;
		}
		default:
			g_assert_not_reached ();
		}
	}

	/* Handle the case where there are no implicit arguments */
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
		emit_sig_cookie (cfg, call, cinfo);

	call->call_info = cinfo;
	call->stack_usage = cinfo->stack_usage;
}

static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *arg)
{
	MonoInst *ins;

	switch (storage) {
	case RegTypeFP:
		MONO_INST_NEW (cfg, ins, OP_FMOVE);
		ins->dreg = mono_alloc_freg (cfg);
		ins->sreg1 = arg->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
		break;
	default:
		g_assert_not_reached ();
		break;
	}
}

void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
	MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
	MonoInst *load;
	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
	int ovf_size = ainfo->vtsize;
	int doffset = ainfo->offset;
	int struct_size = ainfo->struct_size;
	int i, soffset, dreg, tmpreg;

	switch (ainfo->storage) {
	case RegTypeGSharedVtInReg:
	case RegTypeStructByAddr:
		/* Pass by addr */
		mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
		break;
	case RegTypeGSharedVtOnStack:
	case RegTypeStructByAddrOnStack:
		/* Pass by addr on stack */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
		break;
	case RegTypeHFA:
		for (i = 0; i < ainfo->nregs; ++i) {
			if (ainfo->esize == 4)
				MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
			else
				MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
			load->dreg = mono_alloc_freg (cfg);
			load->inst_basereg = src->dreg;
			load->inst_offset = i * ainfo->esize;
			MONO_ADD_INS (cfg->cbb, load);

			if (ainfo->esize == 4) {
				FloatArgData *fad;

				/* See RegTypeFP in mono_arch_emit_call () */
				MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL);
				float_arg->flags |= MONO_INST_VOLATILE;
				MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, load->dreg);

				fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
				fad->vreg = float_arg->dreg;
				fad->hreg = ainfo->reg + i;

				call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
			} else {
				add_outarg_reg (cfg, call, RegTypeFP, ainfo->reg + (i * 2), load);
			}
		}
		break;
	default:
		soffset = 0;
		for (i = 0; i < ainfo->size; ++i) {
			dreg = mono_alloc_ireg (cfg);
			switch (struct_size) {
			case 1:
				MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
				break;
			case 2:
				MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
				break;
			case 3:
				tmpreg = mono_alloc_ireg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
				MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
				MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
				MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
				MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
				break;
			default:
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
				break;
			}
			mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
			soffset += sizeof (target_mgreg_t);
			struct_size -= sizeof (target_mgreg_t);
		}
		//g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
		if (ovf_size != 0)
			mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (target_mgreg_t), struct_size), struct_size < 4 ? 1 : 4);
		break;
	}
}

void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
	MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);

	if (!m_type_is_byref (ret)) {
		if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
			MonoInst *ins;

			if (COMPILE_LLVM (cfg)) {
				MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
			} else {
				MONO_INST_NEW (cfg, ins, OP_SETLRET);
				ins->sreg1 = MONO_LVREG_LS (val->dreg);
				ins->sreg2 = MONO_LVREG_MS (val->dreg);
				MONO_ADD_INS (cfg->cbb, ins);
			}
			return;
		}
		switch (arm_fpu) {
		case MONO_ARM_FPU_NONE:
			if (ret->type == MONO_TYPE_R8) {
				MonoInst *ins;

				MONO_INST_NEW (cfg, ins, OP_SETFRET);
				ins->dreg = cfg->ret->dreg;
				ins->sreg1 = val->dreg;
				MONO_ADD_INS (cfg->cbb, ins);
				return;
			}
			if (ret->type == MONO_TYPE_R4) {
				/* Already converted to an int in method_to_ir () */
				MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
				return;
			}
			break;
		case MONO_ARM_FPU_VFP:
		case MONO_ARM_FPU_VFP_HARD:
			if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
				MonoInst *ins;

				MONO_INST_NEW (cfg, ins, OP_SETFRET);
				ins->dreg = cfg->ret->dreg;
				ins->sreg1 = val->dreg;
				MONO_ADD_INS (cfg->cbb, ins);
				return;
			}
			break;
		default:
			g_assert_not_reached ();
		}
	}

	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}

#endif /* #ifndef DISABLE_JIT */

gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
	return TRUE;
}

typedef struct {
	MonoMethodSignature *sig;
	CallInfo *cinfo;
	MonoType *rtype;
	MonoType **param_types;
} ArchDynCallInfo;

static gboolean
dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
{
	int i;

	switch (cinfo->ret.storage) {
	case RegTypeNone:
	case RegTypeGeneral:
	case RegTypeIRegPair:
	case RegTypeStructByAddr:
		break;
	case RegTypeFP:
		if (IS_VFP)
			break;
		else
			return FALSE;
	default:
		return FALSE;
	}

	for (i = 0; i < cinfo->nargs; ++i) {
		ArgInfo *ainfo = &cinfo->args [i];
		int last_slot;

		switch (ainfo->storage) {
		case RegTypeGeneral:
		case RegTypeIRegPair:
		case RegTypeBaseGen:
		case RegTypeFP:
			break;
		case RegTypeBase:
			break;
		case RegTypeStructByVal:
			if (ainfo->size == 0)
				last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
			else
				last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
			break;
		default:
			return FALSE;
		}
	}

	// FIXME: Can't use cinfo only as it doesn't contain info about I8/float
	for (i = 0; i < sig->param_count; ++i) {
		MonoType *t = sig->params [i];

		if (m_type_is_byref (t))
			continue;

		t = mini_get_underlying_type (t);

		switch (t->type) {
		case MONO_TYPE_R4:
		case MONO_TYPE_R8:
			if (IS_SOFT_FLOAT)
				return FALSE;
			else
				break;
			/*
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			return FALSE;
			*/
		default:
			break;
		}
	}

	return TRUE;
}

MonoDynCallInfo*
mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
{
	ArchDynCallInfo *info;
	CallInfo *cinfo;
	int i;

	cinfo = get_call_info (NULL, sig);

	if (!dyn_call_supported (cinfo, sig)) {
		g_free (cinfo);
		return NULL;
	}

	info = g_new0 (ArchDynCallInfo, 1);
	// FIXME: Preprocess the info to speed up start_dyn_call ()
	info->sig = sig;
	info->cinfo = cinfo;
	info->rtype = mini_get_underlying_type (sig->ret);
	info->param_types = g_new0 (MonoType*, sig->param_count);
	for (i = 0; i < sig->param_count; ++i)
		info->param_types [i] = mini_get_underlying_type (sig->params [i]);

	return (MonoDynCallInfo*)info;
}

void
mono_arch_dyn_call_free (MonoDynCallInfo *info)
{
	ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;

	g_free (ainfo->cinfo);
	g_free (ainfo);
}

int
mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info)
{
	ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;

	g_assert (ainfo->cinfo->stack_usage % MONO_ARCH_FRAME_ALIGNMENT == 0);
	return sizeof (DynCallArgs) + ainfo->cinfo->stack_usage;
}

void
mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf)
{
	ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
	CallInfo *cinfo = dinfo->cinfo;
	DynCallArgs *p = (DynCallArgs*)buf;
	int arg_index, greg, i, j, pindex;
	MonoMethodSignature *sig = dinfo->sig;

	p->res = 0;
	p->ret = ret;
	p->has_fpregs = 0;
	p->n_stackargs = cinfo->stack_usage / sizeof (host_mgreg_t);

	arg_index = 0;
	greg = 0;
	pindex = 0;

	if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
		p->regs [greg ++] = (host_mgreg_t)(gsize)*(args [arg_index ++]);
		if (!sig->hasthis)
			pindex = 1;
	}

	if (dinfo->cinfo->ret.storage == RegTypeStructByAddr)
		p->regs [greg ++] = (host_mgreg_t)(gsize)ret;

	for (i = pindex; i < sig->param_count; i++) {
		MonoType *t = dinfo->param_types [i];
		gpointer *arg = args [arg_index ++];
		ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
		int slot = -1;

		if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal) {
			slot = ainfo->reg;
		} else if (ainfo->storage == RegTypeFP) {
		} else if (ainfo->storage == RegTypeBase) {
			slot = PARAM_REGS + (ainfo->offset / 4);
		} else if (ainfo->storage == RegTypeBaseGen) {
			/* slot + 1 is the first stack slot, so the code below will work */
			slot = 3;
		} else {
			g_assert_not_reached ();
		}

		if (m_type_is_byref (t)) {
			p->regs [slot] = (host_mgreg_t)(gsize)*arg;
			continue;
		}

		switch (t->type) {
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_PTR:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
			p->regs [slot] = (host_mgreg_t)(gsize)*arg;
			break;
		case MONO_TYPE_U1:
			p->regs [slot] = *(guint8*)arg;
			break;
		case MONO_TYPE_I1:
			p->regs [slot] = *(gint8*)arg;
			break;
		case MONO_TYPE_I2:
			p->regs [slot] = *(gint16*)arg;
			break;
		case MONO_TYPE_U2:
			p->regs [slot] = *(guint16*)arg;
			break;
		case MONO_TYPE_I4:
			p->regs [slot] = *(gint32*)arg;
			break;
		case MONO_TYPE_U4:
			p->regs [slot] = *(guint32*)arg;
			break;
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
			p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
			break;
		case MONO_TYPE_R4:
			if (ainfo->storage == RegTypeFP) {
				float f = *(float*)arg;

				p->fpregs [ainfo->reg / 2] = *(double*)&f;
				p->has_fpregs = 1;
			} else {
				p->regs [slot] = *(host_mgreg_t*)arg;
			}
			break;
		case MONO_TYPE_R8:
			if (ainfo->storage == RegTypeFP) {
				p->fpregs [ainfo->reg / 2] = *(double*)arg;
				p->has_fpregs = 1;
			} else {
				p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
				p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
			}
			break;
		case MONO_TYPE_GENERICINST:
			if (MONO_TYPE_IS_REFERENCE (t)) {
				p->regs [slot] = (host_mgreg_t)(gsize)*arg;
				break;
			} else {
				if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
					MonoClass *klass = mono_class_from_mono_type_internal (t);
					guint8 *nullable_buf;
					int size;

					size = mono_class_value_size (klass, NULL);
					nullable_buf = g_alloca (size);
					g_assert (nullable_buf);

					/* The argument pointed to by arg is either a boxed vtype or null */
					mono_nullable_init (nullable_buf, (MonoObject*)arg, klass);

					arg = (gpointer*)nullable_buf;
					/* Fall through */
				} else {
					/* Fall through */
				}
			}
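			/* For a plain vtype the words of the value are copied straight into
			 * the register/stack image below; e.g. a hypothetical
			 * struct { gpointer a; gpointer b; } with ainfo->size == 2 fills
			 * p->regs [ainfo->reg] and p->regs [ainfo->reg + 1]. */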
		case MONO_TYPE_VALUETYPE:
			g_assert (ainfo->storage == RegTypeStructByVal);

			if (ainfo->size == 0)
				slot = PARAM_REGS + (ainfo->offset / 4);
			else
				slot = ainfo->reg;

			for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
				p->regs [slot ++] = ((host_mgreg_t*)arg) [j];
			break;
		default:
			g_assert_not_reached ();
		}
	}
}

void
mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
{
	ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
	DynCallArgs *p = (DynCallArgs*)buf;
	MonoType *ptype = ainfo->rtype;
	guint8 *ret = p->ret;
	host_mgreg_t res = p->res;
	host_mgreg_t res2 = p->res2;

	switch (ptype->type) {
	case MONO_TYPE_VOID:
		*(gpointer*)ret = NULL;
		break;
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
		*(gpointer*)ret = (gpointer)(gsize)res;
		break;
	case MONO_TYPE_I1:
		*(gint8*)ret = res;
		break;
	case MONO_TYPE_U1:
		*(guint8*)ret = res;
		break;
	case MONO_TYPE_I2:
		*(gint16*)ret = res;
		break;
	case MONO_TYPE_U2:
		*(guint16*)ret = res;
		break;
	case MONO_TYPE_I4:
		*(gint32*)ret = res;
		break;
	case MONO_TYPE_U4:
		*(guint32*)ret = res;
		break;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		/* This handles endianness as well */
		((gint32*)ret) [0] = res;
		((gint32*)ret) [1] = res2;
		break;
	case MONO_TYPE_GENERICINST:
		if (MONO_TYPE_IS_REFERENCE (ptype)) {
			*(gpointer*)ret = (gpointer)res;
			break;
		} else {
			/* Fall through */
		}
	case MONO_TYPE_VALUETYPE:
		g_assert (ainfo->cinfo->ret.storage == RegTypeStructByAddr);
		/* Nothing to do */
		break;
	case MONO_TYPE_R4:
		g_assert (IS_VFP);
		if (IS_HARD_FLOAT)
			*(float*)ret = *(float*)&p->fpregs [0];
		else
			*(float*)ret = *(float*)&res;
		break;
	case MONO_TYPE_R8: {
		host_mgreg_t regs [2];

		g_assert (IS_VFP);
		if (IS_HARD_FLOAT) {
			*(double*)ret = p->fpregs [0];
		} else {
			regs [0] = res;
			regs [1] = res2;

			*(double*)ret = *(double*)&regs;
		}
		break;
	}
	default:
		g_assert_not_reached ();
	}
}

#ifndef DISABLE_JIT

/*
 * The immediate field for cond branches is big enough for all reasonable methods
 */
#define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
if (0 && ins->inst_true_bb->native_offset) { \
	ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
} else { \
	mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
	ARM_B_COND (code, (condcode), 0); \
}

#define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])

/* emit an exception if the condition fails
 *
 * We assign the extra code used to throw the implicit exceptions
 * to cfg->bb_exit as far as the big branch handling is concerned
 */
#define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
	do { \
		mono_add_patch_info (cfg, code - cfg->native_code, \
				     MONO_PATCH_INFO_EXC, exc_name); \
		ARM_BL_COND (code, (condcode), 0); \
	} while (0);

#define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))

void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}

void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		MonoInst *last_ins = mono_inst_prev (ins, FILTER_IL_SEQ_POINT);

		switch (ins->opcode) {
		case OP_MUL_IMM:
		case OP_IMUL_IMM:
			/* Already done by an arch-independent pass */
			break;
		case OP_LOAD_MEMBASE:
		case OP_LOADI4_MEMBASE:
			/*
			 * OP_STORE_MEMBASE_REG reg, offset(basereg)
			 * OP_LOAD_MEMBASE offset(basereg), reg
			 */
			if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
					 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
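 * (EMIT_COND_BRANCH above indexes branch_cc_table directly with the computed
 * condition, so the entries below must stay in this CEE_B* order.)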
case CEE_BEQ: case CEE_BGE: case CEE_BGT: case CEE_BLE: case CEE_BLT: case CEE_BNE_UN: case CEE_BGE_UN: case CEE_BGT_UN: case CEE_BLE_UN: case CEE_BLT_UN: */ static const guchar branch_cc_table [] = { ARMCOND_EQ, ARMCOND_GE, ARMCOND_GT, ARMCOND_LE, ARMCOND_LT, ARMCOND_NE, ARMCOND_HS, ARMCOND_HI, ARMCOND_LS, ARMCOND_LO }; #define ADD_NEW_INS(cfg,dest,op) do { \ MONO_INST_NEW ((cfg), (dest), (op)); \ mono_bblock_insert_before_ins (bb, ins, (dest)); \ } while (0) static int map_to_reg_reg_op (int op) { switch (op) { case OP_ADD_IMM: return OP_IADD; case OP_SUB_IMM: return OP_ISUB; case OP_AND_IMM: return OP_IAND; case OP_COMPARE_IMM: return OP_COMPARE; case OP_ICOMPARE_IMM: return OP_ICOMPARE; case OP_ADDCC_IMM: return OP_ADDCC; case OP_ADC_IMM: return OP_ADC; case OP_SUBCC_IMM: return OP_SUBCC; case OP_SBB_IMM: return OP_SBB; case OP_OR_IMM: return OP_IOR; case OP_XOR_IMM: return OP_IXOR; case OP_LOAD_MEMBASE: return OP_LOAD_MEMINDEX; case OP_LOADI4_MEMBASE: return OP_LOADI4_MEMINDEX; case OP_LOADU4_MEMBASE: return OP_LOADU4_MEMINDEX; case OP_LOADU1_MEMBASE: return OP_LOADU1_MEMINDEX; case OP_LOADI2_MEMBASE: return OP_LOADI2_MEMINDEX; case OP_LOADU2_MEMBASE: return OP_LOADU2_MEMINDEX; case OP_LOADI1_MEMBASE: return OP_LOADI1_MEMINDEX; case OP_STOREI1_MEMBASE_REG: return OP_STOREI1_MEMINDEX; case OP_STOREI2_MEMBASE_REG: return OP_STOREI2_MEMINDEX; case OP_STOREI4_MEMBASE_REG: return OP_STOREI4_MEMINDEX; case OP_STORE_MEMBASE_REG: return OP_STORE_MEMINDEX; case OP_STORER4_MEMBASE_REG: return OP_STORER4_MEMINDEX; case OP_STORER8_MEMBASE_REG: return OP_STORER8_MEMINDEX; case OP_STORE_MEMBASE_IMM: return OP_STORE_MEMBASE_REG; case OP_STOREI1_MEMBASE_IMM: return OP_STOREI1_MEMBASE_REG; case OP_STOREI2_MEMBASE_IMM: return OP_STOREI2_MEMBASE_REG; case OP_STOREI4_MEMBASE_IMM: return OP_STOREI4_MEMBASE_REG; } g_assert_not_reached (); } /* * Remove from the instruction list the instructions that can't be * represented with very simple instructions with no register * requirements. */ void mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *temp, *last_ins = NULL; int rot_amount, imm8, low_imm; MONO_BB_FOR_EACH_INS (bb, ins) { loop_start: switch (ins->opcode) { case OP_ADD_IMM: case OP_SUB_IMM: case OP_AND_IMM: case OP_COMPARE_IMM: case OP_ICOMPARE_IMM: case OP_ADDCC_IMM: case OP_ADC_IMM: case OP_SUBCC_IMM: case OP_SBB_IMM: case OP_OR_IMM: case OP_XOR_IMM: case OP_IADD_IMM: case OP_ISUB_IMM: case OP_IAND_IMM: case OP_IADC_IMM: case OP_ISBB_IMM: case OP_IOR_IMM: case OP_IXOR_IMM: if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) { int opcode2 = mono_op_imm_to_op (ins->opcode); ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; if (opcode2 == -1) g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode)); ins->opcode = opcode2; } if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC) goto loop_start; else break; case OP_MUL_IMM: case OP_IMUL_IMM: if (ins->inst_imm == 1) { ins->opcode = OP_MOVE; break; } if (ins->inst_imm == 0) { ins->opcode = OP_ICONST; ins->inst_c0 = 0; break; } imm8 = (ins->inst_imm > 0) ? 
mono_is_power_of_two (ins->inst_imm) : -1; if (imm8 > 0) { ins->opcode = OP_SHL_IMM; ins->inst_imm = imm8; break; } ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = OP_IMUL; break; case OP_SBB: case OP_ISBB: case OP_SUBCC: case OP_ISUBCC: { int try_count = 2; MonoInst *current = ins; /* may require a look-ahead of a couple instructions due to spilling */ while (try_count-- && current->next) { if (current->next->opcode == OP_COND_EXC_C || current->next->opcode == OP_COND_EXC_IC) { /* ARM sets the C flag to 1 if there was _no_ overflow */ current->next->opcode = OP_COND_EXC_NC; break; } current = current->next; } break; } case OP_IDIV_IMM: case OP_IDIV_UN_IMM: case OP_IREM_IMM: case OP_IREM_UN_IMM: { int opcode2 = mono_op_imm_to_op (ins->opcode); ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; if (opcode2 == -1) g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode)); ins->opcode = opcode2; break; } case OP_LOCALLOC_IMM: ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg1 = temp->dreg; ins->opcode = OP_LOCALLOC; break; case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: case OP_LOADU4_MEMBASE: case OP_LOADU1_MEMBASE: /* we can do two things: load the immed in a register * and use an indexed load, or see if the immed can be * represented as an ad_imm + a load with a smaller offset * that fits. We just do the first for now, optimize later. */ if (arm_is_imm12 (ins->inst_offset)) break; ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_LOADI2_MEMBASE: case OP_LOADU2_MEMBASE: case OP_LOADI1_MEMBASE: if (arm_is_imm8 (ins->inst_offset)) break; ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_LOADR4_MEMBASE: case OP_LOADR8_MEMBASE: if (arm_is_fpimm8 (ins->inst_offset)) break; low_imm = ins->inst_offset & 0x1ff; if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) { ADD_NEW_INS (cfg, temp, OP_ADD_IMM); temp->inst_imm = ins->inst_offset & ~0x1ff; temp->sreg1 = ins->inst_basereg; temp->dreg = mono_alloc_ireg (cfg); ins->inst_basereg = temp->dreg; ins->inst_offset = low_imm; } else { MonoInst *add_ins; ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ADD_NEW_INS (cfg, add_ins, OP_IADD); add_ins->sreg1 = ins->inst_basereg; add_ins->sreg2 = temp->dreg; add_ins->dreg = mono_alloc_ireg (cfg); ins->inst_basereg = add_ins->dreg; ins->inst_offset = 0; } break; case OP_STORE_MEMBASE_REG: case OP_STOREI4_MEMBASE_REG: case OP_STOREI1_MEMBASE_REG: if (arm_is_imm12 (ins->inst_offset)) break; ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_STOREI2_MEMBASE_REG: if (arm_is_imm8 (ins->inst_offset)) break; ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_STORER4_MEMBASE_REG: case OP_STORER8_MEMBASE_REG: if (arm_is_fpimm8 
(ins->inst_offset)) break; low_imm = ins->inst_offset & 0x1ff; if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) { ADD_NEW_INS (cfg, temp, OP_ADD_IMM); temp->inst_imm = ins->inst_offset & ~0x1ff; temp->sreg1 = ins->inst_destbasereg; temp->dreg = mono_alloc_ireg (cfg); ins->inst_destbasereg = temp->dreg; ins->inst_offset = low_imm; } else { MonoInst *add_ins; ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ADD_NEW_INS (cfg, add_ins, OP_IADD); add_ins->sreg1 = ins->inst_destbasereg; add_ins->sreg2 = temp->dreg; add_ins->dreg = mono_alloc_ireg (cfg); ins->inst_destbasereg = add_ins->dreg; ins->inst_offset = 0; } break; case OP_STORE_MEMBASE_IMM: case OP_STOREI1_MEMBASE_IMM: case OP_STOREI2_MEMBASE_IMM: case OP_STOREI4_MEMBASE_IMM: ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg1 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); last_ins = temp; goto loop_start; /* make it handle the possibly big ins->inst_offset */ case OP_FCOMPARE: case OP_RCOMPARE: { gboolean swap = FALSE; int reg; if (!ins->next) { /* Optimized away */ NULLIFY_INS (ins); break; } /* Some fp compares require swapped operands */ switch (ins->next->opcode) { case OP_FBGT: ins->next->opcode = OP_FBLT; swap = TRUE; break; case OP_FBGT_UN: ins->next->opcode = OP_FBLT_UN; swap = TRUE; break; case OP_FBLE: ins->next->opcode = OP_FBGE; swap = TRUE; break; case OP_FBLE_UN: ins->next->opcode = OP_FBGE_UN; swap = TRUE; break; default: break; } if (swap) { reg = ins->sreg1; ins->sreg1 = ins->sreg2; ins->sreg2 = reg; } break; } } last_ins = ins; } bb->last_ins = last_ins; bb->max_vreg = cfg->next_vreg; } void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins) { MonoInst *ins; if (long_ins->opcode == OP_LNEG) { ins = long_ins; MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), 0); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), 0); NULLIFY_INS (ins); } } static guchar* emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed) { /* sreg is a float, dreg is an integer reg */ if (IS_VFP) { code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1); if (is_signed) ARM_TOSIZD (code, vfp_scratch1, sreg); else ARM_TOUIZD (code, vfp_scratch1, sreg); ARM_FMRS (code, dreg, vfp_scratch1); code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1); } if (!is_signed) { if (size == 1) ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff); else if (size == 2) { ARM_SHL_IMM (code, dreg, dreg, 16); ARM_SHR_IMM (code, dreg, dreg, 16); } } else { if (size == 1) { ARM_SHL_IMM (code, dreg, dreg, 24); ARM_SAR_IMM (code, dreg, dreg, 24); } else if (size == 2) { ARM_SHL_IMM (code, dreg, dreg, 16); ARM_SAR_IMM (code, dreg, dreg, 16); } } return code; } static guchar* emit_r4_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed) { /* sreg is a float, dreg is an integer reg */ g_assert (IS_VFP); code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1); if (is_signed) ARM_TOSIZS (code, vfp_scratch1, sreg); else ARM_TOUIZS (code, vfp_scratch1, sreg); ARM_FMRS (code, dreg, vfp_scratch1); code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1); if (!is_signed) { if (size == 1) ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff); else if (size == 2) { ARM_SHL_IMM (code, 
dreg, dreg, 16); ARM_SHR_IMM (code, dreg, dreg, 16); } } else { if (size == 1) { ARM_SHL_IMM (code, dreg, dreg, 24); ARM_SAR_IMM (code, dreg, dreg, 24); } else if (size == 2) { ARM_SHL_IMM (code, dreg, dreg, 16); ARM_SAR_IMM (code, dreg, dreg, 16); } } return code; } #endif /* #ifndef DISABLE_JIT */ #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431) static void emit_thunk (guint8 *code, gconstpointer target) { guint8 *p = code; ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0); if (thumb_supported) ARM_BX (code, ARMREG_IP); else ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP); *(guint32*)code = (guint32)(gsize)target; code += 4; mono_arch_flush_icache (p, code - p); } static void handle_thunk (MonoCompile *cfg, guchar *code, const guchar *target) { MonoJitInfo *ji = NULL; MonoThunkJitInfo *info; guint8 *thunks, *p; int thunks_size; guint8 *orig_target; guint8 *target_thunk; if (cfg) { /* * This can be called multiple times during JITting, * save the current position in cfg->arch to avoid * doing a O(n^2) search. */ if (!cfg->arch.thunks) { cfg->arch.thunks = cfg->thunks; cfg->arch.thunks_size = cfg->thunk_area; } thunks = cfg->arch.thunks; thunks_size = cfg->arch.thunks_size; if (!thunks_size) { g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE)); g_assert_not_reached (); } g_assert (*(guint32*)thunks == 0); emit_thunk (thunks, target); arm_patch (code, thunks); cfg->arch.thunks += THUNK_SIZE; cfg->arch.thunks_size -= THUNK_SIZE; } else { ji = mini_jit_info_table_find (code); g_assert (ji); info = mono_jit_info_get_thunk_info (ji); g_assert (info); thunks = (guint8*)ji->code_start + info->thunks_offset; thunks_size = info->thunks_size; orig_target = mono_arch_get_call_target (code + 4); mono_mini_arch_lock (); target_thunk = NULL; if (orig_target >= thunks && orig_target < thunks + thunks_size) { /* The call already points to a thunk, because of trampolines etc. */ target_thunk = orig_target; } else { for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) { if (((guint32*)p) [0] == 0) { /* Free entry */ target_thunk = p; break; } else if (((guint32*)p) [2] == (guint32)(gsize)target) { /* Thunk already points to target */ target_thunk = p; break; } } } //g_print ("THUNK: %p %p %p\n", code, target, target_thunk); if (!target_thunk) { mono_mini_arch_unlock (); g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? 
mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE)); g_assert_not_reached (); } emit_thunk (target_thunk, target); arm_patch (code, target_thunk); mono_arch_flush_icache (code, 4); mono_mini_arch_unlock (); } } static void arm_patch_general (MonoCompile *cfg, guchar *code, const guchar *target) { guint32 *code32 = (guint32*)code; guint32 ins = *code32; guint32 prim = (ins >> 25) & 7; guint32 tval = GPOINTER_TO_UINT (target); //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target); if (prim == 5) { /* 101b */ /* the diff starts 8 bytes from the branch opcode */ gint diff = target - code - 8; gint tbits; gint tmask = 0xffffffff; if (tval & 1) { /* entering thumb mode */ diff = target - 1 - code - 8; g_assert (thumb_supported); tbits = 0xf << 28; /* bl->blx bit pattern */ g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */ /* this low bit of the displacement is moved to bit 24 in the instruction encoding */ if (diff & 2) { tbits |= 1 << 24; } tmask = ~(1 << 24); /* clear the link bit */ /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/ } else { tbits = 0; } if (diff >= 0) { if (diff <= 33554431) { diff >>= 2; ins = (ins & 0xff000000) | diff; ins &= tmask; *code32 = ins | tbits; return; } } else { /* diff between 0 and -33554432 */ if (diff >= -33554432) { diff >>= 2; ins = (ins & 0xff000000) | (diff & ~0xff000000); ins &= tmask; *code32 = ins | tbits; return; } } handle_thunk (cfg, code, target); return; } /* * The alternative call sequences looks like this: * * ldr ip, [pc] // loads the address constant * b 1f // jumps around the constant * address constant embedded in the code * 1f: * mov lr, pc * mov pc, ip * * There are two cases for patching: * a) at the end of method emission: in this case code points to the start * of the call sequence * b) during runtime patching of the call site: in this case code points * to the mov pc, ip instruction * * We have to handle also the thunk jump code sequence: * * ldr ip, [pc] * mov pc, ip * address constant // execution never reaches here */ if ((ins & 0x0ffffff0) == 0x12fff10) { /* Branch and exchange: the address is constructed in a reg * We can patch BX when the code sequence is the following: * ldr ip, [pc, #0] ; 0x8 * b 0xc * .word code_ptr * mov lr, pc * bx ips * */ guint32 ccode [4]; guint8 *emit = (guint8*)ccode; ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0); ARM_B (emit, 0); ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC); ARM_BX (emit, ARMREG_IP); /*patching from magic trampoline*/ if (ins == ccode [3]) { g_assert (code32 [-4] == ccode [0]); g_assert (code32 [-3] == ccode [1]); g_assert (code32 [-1] == ccode [2]); code32 [-2] = (guint32)(gsize)target; return; } /*patching from JIT*/ if (ins == ccode [0]) { g_assert (code32 [1] == ccode [1]); g_assert (code32 [3] == ccode [2]); g_assert (code32 [4] == ccode [3]); code32 [2] = (guint32)(gsize)target; return; } g_assert_not_reached (); } else if ((ins & 0x0ffffff0) == 0x12fff30) { /* * ldr ip, [pc, #0] * b 0xc * .word code_ptr * blx ip */ guint32 ccode [4]; guint8 *emit = (guint8*)ccode; ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0); ARM_B (emit, 0); ARM_BLX_REG (emit, ARMREG_IP); g_assert (code32 [-3] == ccode [0]); g_assert (code32 [-2] == ccode [1]); g_assert (code32 [0] == ccode [2]); code32 [-1] = (guint32)(gsize)target; } else { guint32 ccode [4]; guint32 *tmp = ccode; guint8 *emit = (guint8*)tmp; ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0); ARM_MOV_REG_REG 
(emit, ARMREG_LR, ARMREG_PC);
		ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
		ARM_BX (emit, ARMREG_IP);
		if (ins == ccode [2]) {
			g_assert_not_reached (); // should be -2 ...
			code32 [-1] = (guint32)(gsize)target;
			return;
		}
		if (ins == ccode [0]) {
			/* handles both thunk jump code and the far call sequence */
			code32 [2] = (guint32)(gsize)target;
			return;
		}
		g_assert_not_reached ();
	}
//	g_print ("patched with 0x%08x\n", ins);
}

void
arm_patch (guchar *code, const guchar *target)
{
	arm_patch_general (NULL, code, target);
}

/*
 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
 * (with the rotation amount in *rot_amount; rot_amount is already adjusted
 * to be used with the emit macros).
 * E.g. val 0x3fc00 is accepted as imm8 0xff with *rot_amount 22,
 * since 0xff ROR 22 == 0x3fc00.
 * Return -1 otherwise.
 */
int
mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
{
	guint32 res, i;
	for (i = 0; i < 31; i += 2) {
		if (i == 0)
			res = val;
		else
			res = (val << (32 - i)) | (val >> i);
		if (res & ~0xff)
			continue;
		*rot_amount = i ? 32 - i : 0;
		return res;
	}
	return -1;
}

/*
 * Emits in code a sequence of instructions that load the value 'val'
 * into the dreg register. Uses at most 4 instructions.
 */
guint8*
mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
{
	int imm8, rot_amount;
#if 0
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	/* skip the constant pool */
	ARM_B (code, 0);
	*(int*)code = val;
	code += 4;
	return code;
#endif
	if (mini_debug_options.single_imm_size && v7_supported) {
		ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
		ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
		return code;
	}
	if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
		ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
	} else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
		ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
	} else {
		if (v7_supported) {
			ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
			if (val >> 16)
				ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
			return code;
		}
		if (val & 0xFF) {
			ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
			if (val & 0xFF00) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
			}
			if (val & 0xFF0000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
			}
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		} else if (val & 0xFF00) {
			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
			if (val & 0xFF0000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
			}
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		} else if (val & 0xFF0000) {
			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		}
		//g_assert_not_reached ();
	}
	return code;
}

gboolean
mono_arm_thumb_supported (void)
{
	return thumb_supported;
}

gboolean
mono_arm_eabi_supported (void)
{
	return eabi_supported;
}

int
mono_arm_i8_align (void)
{
	return i8_align;
}

#ifndef DISABLE_JIT

static guint8*
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
{
	CallInfo *cinfo;
	MonoCallInst *call;

	call = (MonoCallInst*)ins;
	cinfo = call->call_info;

	switch (cinfo->ret.storage) {
	case RegTypeStructByVal:
	case RegTypeHFA: {
		MonoInst *loc = cfg->arch.vret_addr_loc;
		int i;

		if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) {
			/* The JIT treats this as a normal call */
			break;
		}

		/* Load the destination address */
		g_assert (loc && loc->opcode == OP_REGOFFSET);

		if (arm_is_imm12 (loc->inst_offset)) {
			ARM_LDR_IMM (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
		}
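		/* The vret address offset did not fit in the 12-bit LDR immediate
		 * above, so the else branch materializes it in LR and uses a
		 * register-offset load instead. */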
else { code = mono_arm_emit_load_imm (code, ARMREG_LR, loc->inst_offset); ARM_LDR_REG_REG (code, ARMREG_LR, loc->inst_basereg, ARMREG_LR); } if (cinfo->ret.storage == RegTypeStructByVal) { int rsize = cinfo->ret.struct_size; for (i = 0; i < cinfo->ret.nregs; ++i) { g_assert (rsize >= 0); switch (rsize) { case 0: break; case 1: ARM_STRB_IMM (code, i, ARMREG_LR, i * 4); break; case 2: ARM_STRH_IMM (code, i, ARMREG_LR, i * 4); break; default: ARM_STR_IMM (code, i, ARMREG_LR, i * 4); break; } rsize -= 4; } } else { for (i = 0; i < cinfo->ret.nregs; ++i) { if (cinfo->ret.esize == 4) ARM_FSTS (code, cinfo->ret.reg + i, ARMREG_LR, i * 4); else ARM_FSTD (code, cinfo->ret.reg + (i * 2), ARMREG_LR, i * 8); } } return code; } default: break; } switch (ins->opcode) { case OP_FCALL: case OP_FCALL_REG: case OP_FCALL_MEMBASE: if (IS_VFP) { MonoType *sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret); if (sig_ret->type == MONO_TYPE_R4) { if (IS_HARD_FLOAT) { ARM_CVTS (code, ins->dreg, ARM_VFP_F0); } else { ARM_FMSR (code, ins->dreg, ARMREG_R0); ARM_CVTS (code, ins->dreg, ins->dreg); } } else { if (IS_HARD_FLOAT) { ARM_CPYD (code, ins->dreg, ARM_VFP_D0); } else { ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg); } } } break; case OP_RCALL: case OP_RCALL_REG: case OP_RCALL_MEMBASE: { MonoType *sig_ret; g_assert (IS_VFP); sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret); g_assert (sig_ret->type == MONO_TYPE_R4); if (IS_HARD_FLOAT) { ARM_CPYS (code, ins->dreg, ARM_VFP_F0); } else { ARM_FMSR (code, ins->dreg, ARMREG_R0); ARM_CPYS (code, ins->dreg, ins->dreg); } break; } default: break; } return code; } void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins; MonoCallInst *call; guint8 *code = cfg->native_code + cfg->code_len; MonoInst *last_ins = NULL; int max_len, cpos; int imm8, rot_amount; /* we don't align basic blocks of loops on arm */ if (cfg->verbose_level > 2) g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset); cpos = bb->max_offset; if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) { mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break)); code = emit_call_seq (cfg, code); } MONO_BB_FOR_EACH_INS (bb, ins) { guint offset = code - cfg->native_code; set_code_cursor (cfg, code); max_len = ins_get_size (ins->opcode); code = realloc_code (cfg, max_len); // if (ins->cil_code) // g_print ("cil code\n"); mono_debug_record_line_number (cfg, ins, offset); switch (ins->opcode) { case OP_MEMORY_BARRIER: if (v7_supported) { ARM_DMB (code, ARM_DMB_ISH); } else if (v6_supported) { ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0); ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5); } break; case OP_TLS_GET: code = emit_tls_get (code, ins->dreg, ins->inst_offset); break; case OP_TLS_SET: code = emit_tls_set (code, ins->sreg1, ins->inst_offset); break; case OP_ATOMIC_EXCHANGE_I4: case OP_ATOMIC_CAS_I4: case OP_ATOMIC_ADD_I4: { int tmpreg; guint8 *buf [16]; g_assert (v7_supported); /* Free up a reg */ if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP) tmpreg = ARMREG_IP; else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0) tmpreg = ARMREG_R0; else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1) tmpreg = ARMREG_R1; else tmpreg = ARMREG_R2; g_assert 
(cfg->arch.atomic_tmp_offset != -1); ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset); switch (ins->opcode) { case OP_ATOMIC_EXCHANGE_I4: buf [0] = code; ARM_DMB (code, ARM_DMB_ISH); ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1); ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1); ARM_CMP_REG_IMM (code, tmpreg, 0, 0); buf [1] = code; ARM_B_COND (code, ARMCOND_NE, 0); arm_patch (buf [1], buf [0]); break; case OP_ATOMIC_CAS_I4: ARM_DMB (code, ARM_DMB_ISH); buf [0] = code; ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1); ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3); buf [1] = code; ARM_B_COND (code, ARMCOND_NE, 0); ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1); ARM_CMP_REG_IMM (code, tmpreg, 0, 0); buf [2] = code; ARM_B_COND (code, ARMCOND_NE, 0); arm_patch (buf [2], buf [0]); arm_patch (buf [1], code); break; case OP_ATOMIC_ADD_I4: buf [0] = code; ARM_DMB (code, ARM_DMB_ISH); ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1); ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2); ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1); ARM_CMP_REG_IMM (code, tmpreg, 0, 0); buf [1] = code; ARM_B_COND (code, ARMCOND_NE, 0); arm_patch (buf [1], buf [0]); break; default: g_assert_not_reached (); } ARM_DMB (code, ARM_DMB_ISH); if (tmpreg != ins->dreg) ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset); ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR); break; } case OP_ATOMIC_LOAD_I1: case OP_ATOMIC_LOAD_U1: case OP_ATOMIC_LOAD_I2: case OP_ATOMIC_LOAD_U2: case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_LOAD_U4: case OP_ATOMIC_LOAD_R4: case OP_ATOMIC_LOAD_R8: { if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) ARM_DMB (code, ARM_DMB_ISH); code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset); switch (ins->opcode) { case OP_ATOMIC_LOAD_I1: ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR); break; case OP_ATOMIC_LOAD_U1: ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR); break; case OP_ATOMIC_LOAD_I2: ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR); break; case OP_ATOMIC_LOAD_U2: ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR); break; case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_LOAD_U4: ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR); break; case OP_ATOMIC_LOAD_R4: if (cfg->r4fp) { ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR); ARM_FLDS (code, ins->dreg, ARMREG_LR, 0); } else { code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1); ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR); ARM_FLDS (code, vfp_scratch1, ARMREG_LR, 0); ARM_CVTS (code, ins->dreg, vfp_scratch1); code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1); } break; case OP_ATOMIC_LOAD_R8: ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR); ARM_FLDD (code, ins->dreg, ARMREG_LR, 0); break; } if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE) ARM_DMB (code, ARM_DMB_ISH); break; } case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_U1: case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_U2: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U4: case OP_ATOMIC_STORE_R4: case OP_ATOMIC_STORE_R8: { if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE) ARM_DMB (code, ARM_DMB_ISH); code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset); switch (ins->opcode) { case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_U1: ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR); break; case OP_ATOMIC_STORE_I2: case 
OP_ATOMIC_STORE_U2: ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR); break; case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U4: ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR); break; case OP_ATOMIC_STORE_R4: if (cfg->r4fp) { ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR); ARM_FSTS (code, ins->sreg1, ARMREG_LR, 0); } else { code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1); ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR); ARM_CVTD (code, vfp_scratch1, ins->sreg1); ARM_FSTS (code, vfp_scratch1, ARMREG_LR, 0); code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1); } break; case OP_ATOMIC_STORE_R8: ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR); ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0); break; } if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) ARM_DMB (code, ARM_DMB_ISH); break; } case OP_BIGMUL: ARM_SMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_BIGMUL_UN: ARM_UMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_STOREI1_MEMBASE_IMM: code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF); g_assert (arm_is_imm12 (ins->inst_offset)); ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset); break; case OP_STOREI2_MEMBASE_IMM: code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF); g_assert (arm_is_imm8 (ins->inst_offset)); ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset); break; case OP_STORE_MEMBASE_IMM: case OP_STOREI4_MEMBASE_IMM: code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm); g_assert (arm_is_imm12 (ins->inst_offset)); ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset); break; case OP_STOREI1_MEMBASE_REG: g_assert (arm_is_imm12 (ins->inst_offset)); ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); break; case OP_STOREI2_MEMBASE_REG: g_assert (arm_is_imm8 (ins->inst_offset)); ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); break; case OP_STORE_MEMBASE_REG: case OP_STOREI4_MEMBASE_REG: /* this case is special, since it happens for spill code after lowering has been called */ if (arm_is_imm12 (ins->inst_offset)) { ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset); ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR); } break; case OP_STOREI1_MEMINDEX: ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case OP_STOREI2_MEMINDEX: ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case OP_STORE_MEMINDEX: case OP_STOREI4_MEMINDEX: ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case OP_LOADU4_MEM: g_assert_not_reached (); break; case OP_LOAD_MEMINDEX: case OP_LOADI4_MEMINDEX: case OP_LOADU4_MEMINDEX: ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADI1_MEMINDEX: ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADU1_MEMINDEX: ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADI2_MEMINDEX: ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADU2_MEMINDEX: ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: case 
OP_LOADU4_MEMBASE:
		/* this case is special, since it happens for spill code after lowering has been called */
		if (arm_is_imm12 (ins->inst_offset)) {
			ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
			ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
		}
		break;
	case OP_LOADI1_MEMBASE:
		g_assert (arm_is_imm8 (ins->inst_offset));
		ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
		break;
	case OP_LOADU1_MEMBASE:
		g_assert (arm_is_imm12 (ins->inst_offset));
		ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
		break;
	case OP_LOADU2_MEMBASE:
		g_assert (arm_is_imm8 (ins->inst_offset));
		ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
		break;
	case OP_LOADI2_MEMBASE:
		g_assert (arm_is_imm8 (ins->inst_offset));
		ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
		break;
	case OP_ICONV_TO_I1:
		ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
		ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
		break;
	case OP_ICONV_TO_I2:
		ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
		ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
		break;
	case OP_ICONV_TO_U1:
		ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
		break;
	case OP_ICONV_TO_U2:
		ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
		ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
		break;
	case OP_COMPARE:
	case OP_ICOMPARE:
		ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
		break;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
		g_assert (imm8 >= 0);
		ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
		break;
	case OP_BREAK:
		/*
		 * gdb does not like encountering the hw breakpoint ins in the debugged code.
		 * So instead of emitting a trap, we emit a call to a C function and place a
		 * breakpoint there.
		 */
		//*(int*)code = 0xef9f0001;
		//code += 4;
		//ARM_DBRK (code);
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
							 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
		code = emit_call_seq (cfg, code);
		break;
	case OP_RELAXED_NOP:
		ARM_NOP (code);
		break;
	case OP_NOP:
	case OP_DUMMY_USE:
	case OP_DUMMY_ICONST:
	case OP_DUMMY_R8CONST:
	case OP_DUMMY_R4CONST:
	case OP_NOT_REACHED:
	case OP_NOT_NULL:
		break;
	case OP_IL_SEQ_POINT:
		mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
		break;
	case OP_SEQ_POINT: {
		int i;
		MonoInst *info_var = cfg->arch.seq_point_info_var;
		MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
		MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
		MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
		MonoInst *var;
		int dreg = ARMREG_LR;

#if 0
		if (cfg->soft_breakpoints) {
			g_assert (!cfg->compile_aot);
		}
#endif

		/*
		 * For AOT, we use one got slot per method, which will point to a
		 * SeqPointInfo structure, containing all the information required
		 * by the code below.
		 */
		if (cfg->compile_aot) {
			g_assert (info_var);
			g_assert (info_var->opcode == OP_REGOFFSET);
		}

		if (!cfg->soft_breakpoints && !cfg->compile_aot) {
			/*
			 * Read from the single stepping trigger page. This will cause a
			 * SIGSEGV when single stepping is enabled.
			 * We do this _before_ the breakpoint, so single stepping after
			 * a breakpoint is hit will step to the next IL offset.
			 */
			g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
		}

		/* Single step check */
		if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
			if (cfg->soft_breakpoints) {
				/* Load the address of the sequence point method variable.
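				 * The variable points at the slot holding the single-step
				 * helper: the slot is NULL while single stepping is disabled,
				 * so the conditional BLX emitted below only fires once the
				 * debugger has armed it.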
*/ var = ss_method_var; g_assert (var); g_assert (var->opcode == OP_REGOFFSET); code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset); /* Read the value and check whether it is non-zero. */ ARM_LDR_IMM (code, dreg, dreg, 0); ARM_CMP_REG_IMM (code, dreg, 0, 0); /* Call it conditionally. */ ARM_BLX_REG_COND (code, ARMCOND_NE, dreg); } else { if (cfg->compile_aot) { /* Load the trigger page addr from the variable initialized in the prolog */ var = ss_trigger_page_var; g_assert (var); g_assert (var->opcode == OP_REGOFFSET); code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset); } else { ARM_LDR_IMM (code, dreg, ARMREG_PC, 0); ARM_B (code, 0); *(int*)code = (int)(gsize)ss_trigger_page; code += 4; } ARM_LDR_IMM (code, dreg, dreg, 0); } } mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); /* Breakpoint check */ if (cfg->compile_aot) { const guint32 offset = code - cfg->native_code; guint32 val; var = info_var; code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset); /* Add the offset */ val = ((offset / 4) * sizeof (target_mgreg_t)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs); /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */ if (arm_is_imm12 ((int)val)) { ARM_LDR_IMM (code, dreg, dreg, val); } else { ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0); if (val & 0xFF00) ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24); if (val & 0xFF0000) ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16); g_assert (!(val & 0xFF000000)); ARM_LDR_IMM (code, dreg, dreg, 0); } /* What is faster, a branch or a load ? */ ARM_CMP_REG_IMM (code, dreg, 0, 0); /* The breakpoint instruction */ if (cfg->soft_breakpoints) ARM_BLX_REG_COND (code, ARMCOND_NE, dreg); else ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE); } else if (cfg->soft_breakpoints) { /* Load the address of the breakpoint method into ip. */ var = bp_method_var; g_assert (var); g_assert (var->opcode == OP_REGOFFSET); g_assert (arm_is_imm12 (var->inst_offset)); ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset); /* * A placeholder for a possible breakpoint inserted by * mono_arch_set_breakpoint (). */ ARM_NOP (code); } else { /* * A placeholder for a possible breakpoint inserted by * mono_arch_set_breakpoint (). */ for (i = 0; i < 4; ++i) ARM_NOP (code); } /* * Add an additional nop so skipping the bp doesn't cause the ip to point * to another IL offset. 
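	 * (Skipping a triggered breakpoint advances the ip by a single
	 * instruction, so it has to land on this padding nop rather than on
	 * the first instruction belonging to the next sequence point.)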
*/ ARM_NOP (code); break; } case OP_ADDCC: case OP_IADDCC: ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IADD: ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_ADC: case OP_IADC: ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_ADDCC_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_ADD_IMM: case OP_IADD_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_ADC_IMM: case OP_IADC_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_IADD_OVF: ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_IADD_OVF_UN: ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_ISUB_OVF: ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_ISUB_OVF_UN: ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException"); break; case OP_ADD_OVF_CARRY: ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_ADD_OVF_UN_CARRY: ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_SUB_OVF_CARRY: ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_SUB_OVF_UN_CARRY: ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException"); break; case OP_SUBCC: case OP_ISUBCC: ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SUBCC_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_ISUB: ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SBB: case OP_ISBB: ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SUB_IMM: case OP_ISUB_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_SBB_IMM: case OP_ISBB_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_ARM_RSBS_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_ARM_RSC_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_IAND: ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_AND_IMM: case OP_IAND_IMM: imm8 = 
mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_IDIV: g_assert (v7s_supported || v7k_supported); ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IDIV_UN: g_assert (v7s_supported || v7k_supported); ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IREM: g_assert (v7s_supported || v7k_supported); ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2); ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1); break; case OP_IREM_UN: g_assert (v7s_supported || v7k_supported); ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2); ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1); break; case OP_DIV_IMM: case OP_REM_IMM: g_assert_not_reached (); case OP_IOR: ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_OR_IMM: case OP_IOR_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_IXOR: ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_XOR_IMM: case OP_IXOR_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_ISHL: ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SHL_IMM: case OP_ISHL_IMM: if (ins->inst_imm) ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f)); else if (ins->dreg != ins->sreg1) ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1); break; case OP_ISHR: ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SHR_IMM: case OP_ISHR_IMM: if (ins->inst_imm) ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f)); else if (ins->dreg != ins->sreg1) ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1); break; case OP_SHR_UN_IMM: case OP_ISHR_UN_IMM: if (ins->inst_imm) ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f)); else if (ins->dreg != ins->sreg1) ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1); break; case OP_ISHR_UN: ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_INOT: ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1); break; case OP_INEG: ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0); break; case OP_IMUL: if (ins->dreg == ins->sreg2) ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); else ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1); break; case OP_MUL_IMM: g_assert_not_reached (); break; case OP_IMUL_OVF: /* FIXME: handle ovf/ sreg2 != dreg */ ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); /* FIXME: MUL doesn't set the C/O flags on ARM */ break; case OP_IMUL_OVF_UN: /* FIXME: handle ovf/ sreg2 != dreg */ ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); /* FIXME: MUL doesn't set the C/O flags on ARM */ break; case OP_ICONST: code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0); break; case OP_AOTCONST: /* Load the GOT offset */ mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0); ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0); ARM_B (code, 0); *(gpointer*)code = NULL; code += 4; /* Load the value from the GOT */ ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg); break; case OP_OBJC_GET_SELECTOR: mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0); ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0); ARM_B (code, 0); *(gpointer*)code = NULL; code += 4; ARM_LDR_REG_REG 
(code, ins->dreg, ARMREG_PC, ins->dreg); break; case OP_ICONV_TO_I4: case OP_ICONV_TO_U4: case OP_MOVE: if (ins->dreg != ins->sreg1) ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1); break; case OP_SETLRET: { int saved = ins->sreg2; if (ins->sreg2 == ARM_LSW_REG) { ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2); saved = ARMREG_LR; } if (ins->sreg1 != ARM_LSW_REG) ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1); if (saved != ARM_MSW_REG) ARM_MOV_REG_REG (code, ARM_MSW_REG, saved); break; } case OP_FMOVE: if (IS_VFP && ins->dreg != ins->sreg1) ARM_CPYD (code, ins->dreg, ins->sreg1); break; case OP_RMOVE: if (IS_VFP && ins->dreg != ins->sreg1) ARM_CPYS (code, ins->dreg, ins->sreg1); break; case OP_MOVE_F_TO_I4: if (cfg->r4fp) { ARM_FMRS (code, ins->dreg, ins->sreg1); } else { code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1); ARM_CVTD (code, vfp_scratch1, ins->sreg1); ARM_FMRS (code, ins->dreg, vfp_scratch1); code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1); } break; case OP_MOVE_I4_TO_F: if (cfg->r4fp) { ARM_FMSR (code, ins->dreg, ins->sreg1); } else { ARM_FMSR (code, ins->dreg, ins->sreg1); ARM_CVTS (code, ins->dreg, ins->dreg); } break; case OP_FCONV_TO_R4: if (IS_VFP) { if (cfg->r4fp) { ARM_CVTD (code, ins->dreg, ins->sreg1); } else { ARM_CVTD (code, ins->dreg, ins->sreg1); ARM_CVTS (code, ins->dreg, ins->dreg); } } break; case OP_TAILCALL_PARAMETER: // This opcode helps compute sizes, i.e. // of the subsequent OP_TAILCALL, but contributes no code. g_assert (ins->next); break; case OP_TAILCALL: case OP_TAILCALL_MEMBASE: case OP_TAILCALL_REG: { gboolean const tailcall_membase = ins->opcode == OP_TAILCALL_MEMBASE; gboolean const tailcall_reg = ins->opcode == OP_TAILCALL_REG; MonoCallInst *call = (MonoCallInst*)ins; max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER); if (IS_HARD_FLOAT) code = emit_float_args (cfg, call, code, &max_len, &offset); code = realloc_code (cfg, max_len); // For reg and membase, get destination in IP. if (tailcall_reg) { g_assert (ins->sreg1 > -1); if (ins->sreg1 != ARMREG_IP) ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg1); } else if (tailcall_membase) { g_assert (ins->sreg1 > -1); if (!arm_is_imm12 (ins->inst_offset)) { g_assert (ins->sreg1 != ARMREG_IP); // temp in emit_big_add code = emit_big_add (code, ARMREG_IP, ins->sreg1, ins->inst_offset); ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0); } else { ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, ins->inst_offset); } } /* * The stack looks like the following: * <caller argument area> * <saved regs etc> * <rest of frame> * <callee argument area> * <optionally saved IP> (about to be) * Need to copy the arguments from the callee argument area to * the caller argument area, and pop the frame. */ if (call->stack_usage) { int i, prev_sp_offset = 0; // When we get here, the parameters to the tailcall are already formed, // in registers and at the bottom of the grow-down stack. // // Our goal is generally preserve parameters, and trim the stack, // and, before trimming stack, move parameters from the bottom of the // frame to the bottom of the trimmed frame. // For the case of large frames, and presently therefore always, // IP is used as an adjusted frame_reg. // Be conservative and save IP around the movement // of parameters from the bottom of frame to top of the frame. 
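			// A minimal illustration (assuming save_ip and cfg->frame_reg == ARMREG_SP,
			// with two stacked argument words): the copy loop below expands to
			//   ldr lr, [sp, #4] ; str lr, [ip, #4]
			//   ldr lr, [sp, #8] ; str lr, [ip, #8]
			// i.e. both offsets are biased by 4 to step over the saved IP.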
const gboolean save_ip = tailcall_membase || tailcall_reg; if (save_ip) ARM_PUSH (code, 1 << ARMREG_IP); // When moving stacked parameters from the bottom // of the frame (sp) to the top of the frame (ip), // account, 0 or 4, for the conditional save of IP. const int offset_sp = save_ip ? 4 : 0; const int offset_ip = (save_ip && (cfg->frame_reg == ARMREG_SP)) ? 4 : 0; /* Compute size of saved registers restored below */ if (iphone_abi) prev_sp_offset = 2 * 4; else prev_sp_offset = 1 * 4; for (i = 0; i < 16; ++i) { if (cfg->used_int_regs & (1 << i)) prev_sp_offset += 4; } // Point IP at the start of where the parameters will go after trimming stack. // After locals and saved registers. code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset); /* Copy arguments on the stack to our argument area */ // FIXME a fixed size memcpy is desirable here, // at least for larger values of stack_usage. // // FIXME For most functions, with frames < 4K, we can use frame_reg directly here instead of IP. // See https://github.com/mono/mono/pull/12079 // See https://github.com/mono/mono/pull/12079/commits/93e7007a9567b78fa8152ce404b372b26e735516 for (i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) { ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i + offset_sp); ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i + offset_ip); } if (save_ip) ARM_POP (code, 1 << ARMREG_IP); } /* * Keep in sync with mono_arch_emit_epilog */ g_assert (!cfg->method->save_lmf); code = emit_big_add_temp (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage, ARMREG_LR); if (iphone_abi) { if (cfg->used_int_regs) ARM_POP (code, cfg->used_int_regs); ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR)); } else { ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR)); } if (tailcall_reg || tailcall_membase) { code = emit_jmp_reg (code, ARMREG_IP); } else { mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method); if (cfg->compile_aot) { ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0); ARM_B (code, 0); *(gpointer*)code = NULL; code += 4; ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP); } else { code = mono_arm_patchable_b (code, ARMCOND_AL); cfg->thunk_area += THUNK_SIZE; } } break; } case OP_CHECK_THIS: /* ensure ins->sreg1 is not NULL */ ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0); break; case OP_ARGLIST: { g_assert (cfg->sig_cookie < 128); ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie); ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0); break; } case OP_FCALL: case OP_RCALL: case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: case OP_CALL: call = (MonoCallInst*)ins; if (IS_HARD_FLOAT) code = emit_float_args (cfg, call, code, &max_len, &offset); mono_call_add_patch_info (cfg, call, code - cfg->native_code); code = emit_call_seq (cfg, code); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; code = emit_move_return_value (cfg, ins, code); break; case OP_FCALL_REG: case OP_RCALL_REG: case OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: if (IS_HARD_FLOAT) code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset); code = emit_call_reg (code, ins->sreg1); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; code = emit_move_return_value (cfg, ins, code); break; case OP_FCALL_MEMBASE: case OP_RCALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case 
OP_CALL_MEMBASE: { g_assert (ins->sreg1 != ARMREG_LR); call = (MonoCallInst*)ins; if (IS_HARD_FLOAT) code = emit_float_args (cfg, call, code, &max_len, &offset); if (!arm_is_imm12 (ins->inst_offset)) { /* sreg1 might be IP */ ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1); code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset); ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_LR); ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC); ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, 0); } else { ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC); ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset); } ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; code = emit_move_return_value (cfg, ins, code); break; } case OP_GENERIC_CLASS_INIT: { int byte_offset; guint8 *jump; byte_offset = MONO_STRUCT_OFFSET (MonoVTable, initialized); g_assert (arm_is_imm8 (byte_offset)); ARM_LDRSB_IMM (code, ARMREG_IP, ins->sreg1, byte_offset); ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0); jump = code; ARM_B_COND (code, ARMCOND_NE, 0); /* Uninitialized case */ g_assert (ins->sreg1 == ARMREG_R0); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init)); code = emit_call_seq (cfg, code); /* Initialized case */ arm_patch (jump, code); break; } case OP_LOCALLOC: { /* round the size to 8 bytes */ ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, (MONO_ARCH_FRAME_ALIGNMENT - 1)); ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, (MONO_ARCH_FRAME_ALIGNMENT - 1)); ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg); /* memzero the area: dreg holds the size, sp is the pointer */ if (ins->flags & MONO_INST_INIT) { guint8 *start_loop, *branch_to_cond; ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0); branch_to_cond = code; ARM_B (code, 0); start_loop = code; ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg); arm_patch (branch_to_cond, code); /* decrement by 4 and set flags */ ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (target_mgreg_t)); ARM_B_COND (code, ARMCOND_GE, 0); arm_patch (code - 4, start_loop); } ARM_MOV_REG_REG (code, ins->dreg, ARMREG_SP); if (cfg->param_area) code = emit_sub_imm (code, ARMREG_SP, ARMREG_SP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT)); break; } case OP_DYN_CALL: { int i; MonoInst *var = cfg->dyn_call_var; guint8 *labels [16]; g_assert (var->opcode == OP_REGOFFSET); g_assert (arm_is_imm12 (var->inst_offset)); /* lr = args buffer filled by mono_arch_get_dyn_call_args () */ ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1); /* ip = ftn */ ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg2); /* Save args buffer */ ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset); /* Set fp argument registers */ if (IS_HARD_FLOAT) { ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, has_fpregs)); ARM_CMP_REG_IMM (code, ARMREG_R0, 0, 0); labels [0] = code; ARM_B_COND (code, ARMCOND_EQ, 0); for (i = 0; i < FP_PARAM_REGS; ++i) { const int offset = MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * sizeof (double)); g_assert (arm_is_fpimm8 (offset)); ARM_FLDD (code, i * 2, ARMREG_LR, offset); } arm_patch (labels [0], code); } /* Allocate callee area */ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs)); ARM_SHL_IMM (code, ARMREG_R1, ARMREG_R1, 2); ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_R1); /* Set stack args */ /* R1 = limit */ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs)); /* R2 = 
pointer into regs */ code = emit_big_add (code, ARMREG_R2, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (PARAM_REGS * sizeof (target_mgreg_t))); /* R3 = pointer to stack */ ARM_MOV_REG_REG (code, ARMREG_R3, ARMREG_SP); /* Loop */ labels [0] = code; ARM_B_COND (code, ARMCOND_AL, 0); labels [1] = code; ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R2, 0); ARM_STR_IMM (code, ARMREG_R0, ARMREG_R3, 0); ARM_ADD_REG_IMM (code, ARMREG_R2, ARMREG_R2, sizeof (target_mgreg_t), 0); ARM_ADD_REG_IMM (code, ARMREG_R3, ARMREG_R3, sizeof (target_mgreg_t), 0); ARM_SUB_REG_IMM (code, ARMREG_R1, ARMREG_R1, 1, 0); arm_patch (labels [0], code); ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0); labels [2] = code; ARM_B_COND (code, ARMCOND_GT, 0); arm_patch (labels [2], labels [1]); /* Set argument registers */ for (i = 0; i < PARAM_REGS; ++i) ARM_LDR_IMM (code, i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (i * sizeof (target_mgreg_t))); /* Make the call */ ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC); ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP); /* Save result */ ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset); ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res)); ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2)); if (IS_HARD_FLOAT) ARM_FSTD (code, ARM_VFP_D0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, fpregs)); break; } case OP_THROW: { if (ins->sreg1 != ARMREG_R0) ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception)); code = emit_call_seq (cfg, code); break; } case OP_RETHROW: { if (ins->sreg1 != ARMREG_R0) ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception)); code = emit_call_seq (cfg, code); break; } case OP_START_HANDLER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT); int i, rot_amount; /* Reserve a param area, see filter-stack.exe */ if (param_area) { if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) { ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area); ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP); } } if (arm_is_imm12 (spvar->inst_offset)) { ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset); ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP); } break; } case OP_ENDFILTER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT); int i, rot_amount; /* Free the param area */ if (param_area) { if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) { ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area); ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP); } } if (ins->sreg1 != ARMREG_R0) ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1); if (arm_is_imm12 (spvar->inst_offset)) { ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset); } else { g_assert (ARMREG_IP != spvar->inst_basereg); code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset); ARM_LDR_REG_REG (code, 
ARMREG_IP, spvar->inst_basereg, ARMREG_IP); } ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP); break; } case OP_ENDFINALLY: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT); int i, rot_amount; /* Free the param area */ if (param_area) { if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) { ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area); ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP); } } if (arm_is_imm12 (spvar->inst_offset)) { ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset); } else { g_assert (ARMREG_IP != spvar->inst_basereg); code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset); ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP); } ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP); break; } case OP_CALL_HANDLER: mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb); code = mono_arm_patchable_bl (code, ARMCOND_AL); cfg->thunk_area += THUNK_SIZE; for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev) mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb); break; case OP_GET_EX_OBJ: if (ins->dreg != ARMREG_R0) ARM_MOV_REG_REG (code, ins->dreg, ARMREG_R0); break; case OP_LABEL: ins->inst_c0 = code - cfg->native_code; break; case OP_BR: /*if (ins->inst_target_bb->native_offset) { ARM_B (code, 0); //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset); } else*/ { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb); code = mono_arm_patchable_b (code, ARMCOND_AL); } break; case OP_BR_REG: ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1); break; case OP_SWITCH: /* * In the normal case we have: * ldr pc, [pc, ins->sreg1 << 2] * nop * If aot, we have: * ldr lr, [pc, ins->sreg1 << 2] * add pc, pc, lr * After follows the data. * FIXME: add aot support. 
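		 * The single nop keeps the jump table aligned with the pc-relative
		 * load: when the ldr executes, pc reads back as its own address + 8,
		 * which is exactly the first data word, so entry i is fetched from
		 * pc + (i << 2).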
*/ mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0); max_len += 4 * GPOINTER_TO_INT (ins->klass); code = realloc_code (cfg, max_len); ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2); ARM_NOP (code); code += 4 * GPOINTER_TO_INT (ins->klass); break; case OP_CEQ: case OP_ICEQ: ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE); ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ); break; case OP_CLT: case OP_ICLT: ARM_MOV_REG_IMM8 (code, ins->dreg, 0); ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT); break; case OP_CLT_UN: case OP_ICLT_UN: ARM_MOV_REG_IMM8 (code, ins->dreg, 0); ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO); break; case OP_CGT: case OP_ICGT: ARM_MOV_REG_IMM8 (code, ins->dreg, 0); ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT); break; case OP_CGT_UN: case OP_ICGT_UN: ARM_MOV_REG_IMM8 (code, ins->dreg, 0); ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI); break; case OP_ICNEQ: ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE); ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ); break; case OP_ICGE: ARM_MOV_REG_IMM8 (code, ins->dreg, 1); ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT); break; case OP_ICLE: ARM_MOV_REG_IMM8 (code, ins->dreg, 1); ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT); break; case OP_ICGE_UN: ARM_MOV_REG_IMM8 (code, ins->dreg, 1); ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO); break; case OP_ICLE_UN: ARM_MOV_REG_IMM8 (code, ins->dreg, 1); ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_HI); break; case OP_COND_EXC_EQ: case OP_COND_EXC_NE_UN: case OP_COND_EXC_LT: case OP_COND_EXC_LT_UN: case OP_COND_EXC_GT: case OP_COND_EXC_GT_UN: case OP_COND_EXC_GE: case OP_COND_EXC_GE_UN: case OP_COND_EXC_LE: case OP_COND_EXC_LE_UN: EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1); break; case OP_COND_EXC_IEQ: case OP_COND_EXC_INE_UN: case OP_COND_EXC_ILT: case OP_COND_EXC_ILT_UN: case OP_COND_EXC_IGT: case OP_COND_EXC_IGT_UN: case OP_COND_EXC_IGE: case OP_COND_EXC_IGE_UN: case OP_COND_EXC_ILE: case OP_COND_EXC_ILE_UN: EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1); break; case OP_COND_EXC_C: case OP_COND_EXC_IC: EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1); break; case OP_COND_EXC_OV: case OP_COND_EXC_IOV: EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1); break; case OP_COND_EXC_NC: case OP_COND_EXC_INC: EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1); break; case OP_COND_EXC_NO: case OP_COND_EXC_INO: EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1); break; case OP_IBEQ: case OP_IBNE_UN: case OP_IBLT: case OP_IBLT_UN: case OP_IBGT: case OP_IBGT_UN: case OP_IBGE: case OP_IBGE_UN: case OP_IBLE: case OP_IBLE_UN: EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ); break; /* floating point opcodes */ case OP_R8CONST: if (cfg->compile_aot) { ARM_FLDD (code, ins->dreg, ARMREG_PC, 0); ARM_B (code, 1); *(guint32*)code = ((guint32*)(ins->inst_p0))[0]; code += 4; *(guint32*)code = ((guint32*)(ins->inst_p0))[1]; code += 4; } else { /* FIXME: we can optimize the imm load by dealing with part of * the displacement in LDFD (aligning to 512). 
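		 * (arm_is_fpimm8 accepts word-scaled 8-bit offsets, so a low chunk of
		 * the displacement could ride along in the load itself, as the
		 * lowering pass already arranges for stores.)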
*/ code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0); ARM_FLDD (code, ins->dreg, ARMREG_LR, 0); } break; case OP_R4CONST: if (cfg->compile_aot) { ARM_FLDS (code, ins->dreg, ARMREG_PC, 0); ARM_B (code, 0); *(guint32*)code = ((guint32*)(ins->inst_p0))[0]; code += 4; if (!cfg->r4fp) ARM_CVTS (code, ins->dreg, ins->dreg); } else { code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0); ARM_FLDS (code, ins->dreg, ARMREG_LR, 0); if (!cfg->r4fp) ARM_CVTS (code, ins->dreg, ins->dreg); } break; case OP_STORER8_MEMBASE_REG: /* This is generated by the local regalloc pass which runs after the lowering pass */ if (!arm_is_fpimm8 (ins->inst_offset)) { code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset); ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg); ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0); } else { ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); } break; case OP_LOADR8_MEMBASE: /* This is generated by the local regalloc pass which runs after the lowering pass */ if (!arm_is_fpimm8 (ins->inst_offset)) { code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset); ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg); ARM_FLDD (code, ins->dreg, ARMREG_LR, 0); } else { ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } break; case OP_STORER4_MEMBASE_REG: g_assert (arm_is_fpimm8 (ins->inst_offset)); if (cfg->r4fp) { ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); } else { code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1); ARM_CVTD (code, vfp_scratch1, ins->sreg1); ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset); code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1); } break; case OP_LOADR4_MEMBASE: if (cfg->r4fp) { ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { g_assert (arm_is_fpimm8 (ins->inst_offset)); code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1); ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset); ARM_CVTS (code, ins->dreg, vfp_scratch1); code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1); } break; case OP_ICONV_TO_R_UN: { g_assert_not_reached (); break; } case OP_ICONV_TO_R4: if (cfg->r4fp) { ARM_FMSR (code, ins->dreg, ins->sreg1); ARM_FSITOS (code, ins->dreg, ins->dreg); } else { code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1); ARM_FMSR (code, vfp_scratch1, ins->sreg1); ARM_FSITOS (code, vfp_scratch1, vfp_scratch1); ARM_CVTS (code, ins->dreg, vfp_scratch1); code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1); } break; case OP_ICONV_TO_R8: code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1); ARM_FMSR (code, vfp_scratch1, ins->sreg1); ARM_FSITOD (code, ins->dreg, vfp_scratch1); code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1); break; case OP_SETFRET: { MonoType *sig_ret = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret); if (sig_ret->type == MONO_TYPE_R4) { if (cfg->r4fp) { if (IS_HARD_FLOAT) { if (ins->sreg1 != ARM_VFP_D0) ARM_CPYS (code, ARM_VFP_D0, ins->sreg1); } else { ARM_FMRS (code, ARMREG_R0, ins->sreg1); } } else { ARM_CVTD (code, ARM_VFP_F0, ins->sreg1); if (!IS_HARD_FLOAT) ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0); } } else { if (IS_HARD_FLOAT) ARM_CPYD (code, ARM_VFP_D0, ins->sreg1); else ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1); } break; } case OP_FCONV_TO_I1: code = emit_float_to_int (cfg, code, ins->dreg, 
ins->sreg1, 1, TRUE);
		break;
	case OP_FCONV_TO_U1:
		code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
		break;
	case OP_FCONV_TO_I2:
		code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
		break;
	case OP_FCONV_TO_U2:
		code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
		break;
	case OP_FCONV_TO_I4:
	case OP_FCONV_TO_I:
		code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
		break;
	case OP_FCONV_TO_U4:
		code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
		break;
	case OP_FCONV_TO_I8:
	case OP_FCONV_TO_U8:
		g_assert_not_reached (); /* Implemented as helper calls */
		break;
	case OP_LCONV_TO_R_UN:
		g_assert_not_reached (); /* Implemented as helper calls */
		break;
	case OP_LCONV_TO_OVF_I4_2: {
		guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
		/*
		 * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
		 */
		ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
		high_bit_not_set = code;
		ARM_B_COND (code, ARMCOND_GE, 0); /* branch if bit 31 of the lower part is not set */

		ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /* This has the same effect as CMP reg, 0xFFFFFFFF */
		valid_negative = code;
		ARM_B_COND (code, ARMCOND_EQ, 0); /* branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
		invalid_negative = code;
		ARM_B_COND (code, ARMCOND_AL, 0);

		arm_patch (high_bit_not_set, code);
		ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
		valid_positive = code;
		ARM_B_COND (code, ARMCOND_EQ, 0); /* branch if upper part == 0 (lower part has bit 31 clear) */

		arm_patch (invalid_negative, code);
		EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");

		arm_patch (valid_negative, code);
		arm_patch (valid_positive, code);

		if (ins->dreg != ins->sreg1)
			ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
		break;
	}
	case OP_FADD:
		ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
		break;
	case OP_FSUB:
		ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
		break;
	case OP_FMUL:
		ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
		break;
	case OP_FDIV:
		ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
		break;
	case OP_FNEG:
		ARM_NEGD (code, ins->dreg, ins->sreg1);
		break;
	case OP_FREM:
		/* emulated */
		g_assert_not_reached ();
		break;
	case OP_FCOMPARE:
		if (IS_VFP) {
			ARM_CMPD (code, ins->sreg1, ins->sreg2);
			ARM_FMSTAT (code);
		}
		break;
	case OP_RCOMPARE:
		g_assert (IS_VFP);
		ARM_CMPS (code, ins->sreg1, ins->sreg2);
		ARM_FMSTAT (code);
		break;
	case OP_FCEQ:
		if (IS_VFP) {
			ARM_CMPD (code, ins->sreg1, ins->sreg2);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
		break;
	case OP_FCLT:
		if (IS_VFP) {
			ARM_CMPD (code, ins->sreg1, ins->sreg2);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
		break;
	case OP_FCLT_UN:
		if (IS_VFP) {
			ARM_CMPD (code, ins->sreg1, ins->sreg2);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
		break;
	case OP_FCGT:
		if (IS_VFP) {
			ARM_CMPD (code, ins->sreg2, ins->sreg1);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
		break;
	case OP_FCGT_UN:
		if (IS_VFP) {
			ARM_CMPD (code, ins->sreg2, ins->sreg1);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
		break;
	case OP_FCNEQ:
		if (IS_VFP) {
			ARM_CMPD (code, ins->sreg1, ins->sreg2);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
		break;
	case OP_FCGE:
		if (IS_VFP) {
			ARM_CMPD (code, ins->sreg1, ins->sreg2);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
		break;
	case OP_FCLE:
		if (IS_VFP) {
			ARM_CMPD (code, ins->sreg2, ins->sreg1);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
		break;

	/* ARM FPA flags table:
	 * N	Less than		ARMCOND_MI
	 * Z	Equal			ARMCOND_EQ
	 * C	Greater Than or Equal	ARMCOND_CS
	 * V	Unordered		ARMCOND_VS
	 */
	case OP_FBEQ:
		EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
		break;
	case OP_FBNE_UN:
		EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
		break;
	case OP_FBLT:
		EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
		break;
	case OP_FBLT_UN:
		EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
		EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
		break;
	case OP_FBGT:
	case OP_FBGT_UN:
	case OP_FBLE:
	case OP_FBLE_UN:
		g_assert_not_reached ();
		break;
	case OP_FBGE:
		if (IS_VFP) {
			EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
		} else {
			/* FPA requires EQ even though the docs suggest that just CS is enough */
			EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
			EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
		}
		break;
	case OP_FBGE_UN:
		EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
		EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
		break;
	case OP_CKFINITE: {
		if (IS_VFP) {
			code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
			code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);

			ARM_ABSD (code, vfp_scratch2, ins->sreg1);
			ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
			ARM_B (code, 1);
			*(guint32*)code = 0xffffffff;
			code += 4;
			*(guint32*)code = 0x7fefffff;
			code += 4;
			ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
			ARM_FMSTAT (code);
			EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "OverflowException");
			ARM_CMPD (code, ins->sreg1, ins->sreg1);
			ARM_FMSTAT (code);
			EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "OverflowException");
			ARM_CPYD (code, ins->dreg, ins->sreg1);

			code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
			code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
		}
		break;
	}
	case OP_RCONV_TO_I1:
		code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
		break;
	case OP_RCONV_TO_U1:
		code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
		break;
	case OP_RCONV_TO_I2:
		code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
		break;
	case OP_RCONV_TO_U2:
		code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
		break;
	case OP_RCONV_TO_I4:
	case OP_RCONV_TO_I:
		code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
		break;
	case OP_RCONV_TO_U4:
		code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
		break;
	case OP_RCONV_TO_R4:
		g_assert (IS_VFP);
		if (ins->dreg != ins->sreg1)
			ARM_CPYS (code, ins->dreg, ins->sreg1);
		break;
	case OP_RCONV_TO_R8:
		g_assert (IS_VFP);
		ARM_CVTS (code, ins->dreg, ins->sreg1);
		break;
	case OP_RADD:
		ARM_VFP_ADDS (code, ins->dreg, ins->sreg1, ins->sreg2);
		break;
	case OP_RSUB:
		ARM_VFP_SUBS (code, ins->dreg, ins->sreg1, ins->sreg2);
		break;
	case OP_RMUL:
		ARM_VFP_MULS (code, ins->dreg, ins->sreg1, ins->sreg2);
		break;
	case OP_RDIV:
		ARM_VFP_DIVS (code, ins->dreg, ins->sreg1, ins->sreg2);
		break;
	case OP_RNEG:
		ARM_NEGS (code, ins->dreg, ins->sreg1);
		break;
	case OP_RCEQ:
		if (IS_VFP) {
			ARM_CMPS (code, ins->sreg1, ins->sreg2);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
		break;
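	/*
	 * A note on the compare/set pattern used by the OP_RC* opcodes around
	 * here (a recap of the code above and below, not new behaviour): after
	 * ARM_CMPS + ARM_FMSTAT, "less than" maps to N set (ARMCOND_MI) and
	 * "unordered" maps to V set (ARMCOND_VS). So e.g. OP_RCLT_UN, which must
	 * yield 1 when sreg1 < sreg2 or when either operand is NaN, is emitted as:
	 *
	 *   ARM_MOV_REG_IMM8 (code, ins->dreg, 0);                  // assume false
	 *   ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI); // less than
	 *   ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS); // unordered
	 */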
	case OP_RCLT:
		if (IS_VFP) {
			ARM_CMPS (code, ins->sreg1, ins->sreg2);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
		break;
	case OP_RCLT_UN:
		if (IS_VFP) {
			ARM_CMPS (code, ins->sreg1, ins->sreg2);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
		break;
	case OP_RCGT:
		if (IS_VFP) {
			ARM_CMPS (code, ins->sreg2, ins->sreg1);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
		break;
	case OP_RCGT_UN:
		if (IS_VFP) {
			ARM_CMPS (code, ins->sreg2, ins->sreg1);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
		break;
	case OP_RCNEQ:
		if (IS_VFP) {
			ARM_CMPS (code, ins->sreg1, ins->sreg2);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
		break;
	case OP_RCGE:
		if (IS_VFP) {
			ARM_CMPS (code, ins->sreg1, ins->sreg2);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
		break;
	case OP_RCLE:
		if (IS_VFP) {
			ARM_CMPS (code, ins->sreg2, ins->sreg1);
			ARM_FMSTAT (code);
		}
		ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
		ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
		break;
	case OP_GC_LIVENESS_DEF:
	case OP_GC_LIVENESS_USE:
	case OP_GC_PARAM_SLOT_LIVENESS_DEF:
		ins->backend.pc_offset = code - cfg->native_code;
		break;
	case OP_GC_SPILL_SLOT_LIVENESS_DEF:
		ins->backend.pc_offset = code - cfg->native_code;
		bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
		break;
	case OP_LIVERANGE_START: {
		if (cfg->verbose_level > 1)
			printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
		MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
		break;
	}
	case OP_LIVERANGE_END: {
		if (cfg->verbose_level > 1)
			printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
		MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
		break;
	}
	case OP_GC_SAFE_POINT: {
		guint8 *buf [1];

		ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, 0);
		ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
		buf [0] = code;
		ARM_B_COND (code, ARMCOND_EQ, 0);
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
		code = emit_call_seq (cfg, code);
		arm_patch (buf [0], code);
		break;
	}
	case OP_FILL_PROF_CALL_CTX:
		for (int i = 0; i < ARMREG_MAX; i++)
			if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) || i == ARMREG_SP || i == ARMREG_FP)
				ARM_STR_IMM (code, i, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, regs) + i * sizeof (target_mgreg_t));
		break;
	default:
		g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
		g_assert_not_reached ();
	}

	if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
		g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
			   mono_inst_name (ins->opcode), max_len, (int)(code - cfg->native_code - offset));
		g_assert_not_reached ();
	}

	cpos += max_len;

	last_ins = ins;
	}

	set_code_cursor (cfg, code);
}

#endif /* DISABLE_JIT */

void
mono_arch_register_lowlevel_calls (void) { /* The signature doesn't matter */ mono_register_jit_icall (mono_arm_throw_exception, mono_icall_sig_void, TRUE); mono_register_jit_icall (mono_arm_throw_exception_by_token, mono_icall_sig_void, TRUE); mono_register_jit_icall (mono_arm_unaligned_stack, mono_icall_sig_void, TRUE); } #define patch_lis_ori(ip,val) do {\ guint16 *__lis_ori = (guint16*)(ip); \ __lis_ori [1] = (((guint32)(gsize)(val)) >> 16) & 0xffff; \ __lis_ori [3] = ((guint32)(gsize)(val)) & 0xffff; \ } while (0) void mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target) { unsigned char *ip = ji->ip.i + code; switch (ji->type) { case MONO_PATCH_INFO_SWITCH: { gpointer *jt = (gpointer*)(ip + 8); int i; /* jt is the inlined jump table, 2 instructions after ip * In the normal case we store the absolute addresses, * otherwise the displacements. */ for (i = 0; i < ji->data.table->table_size; i++) jt [i] = code + (int)(gsize)ji->data.table->table [i]; break; } case MONO_PATCH_INFO_IP: g_assert_not_reached (); patch_lis_ori (ip, ip); break; case MONO_PATCH_INFO_METHODCONST: case MONO_PATCH_INFO_CLASS: case MONO_PATCH_INFO_IMAGE: case MONO_PATCH_INFO_FIELD: case MONO_PATCH_INFO_VTABLE: case MONO_PATCH_INFO_IID: case MONO_PATCH_INFO_SFLDA: case MONO_PATCH_INFO_LDSTR: case MONO_PATCH_INFO_TYPE_FROM_HANDLE: case MONO_PATCH_INFO_LDTOKEN: g_assert_not_reached (); /* from OP_AOTCONST : lis + ori */ patch_lis_ori (ip, target); break; case MONO_PATCH_INFO_R4: case MONO_PATCH_INFO_R8: g_assert_not_reached (); *((gconstpointer *)(ip + 2)) = target; break; case MONO_PATCH_INFO_EXC_NAME: g_assert_not_reached (); *((gconstpointer *)(ip + 1)) = target; break; case MONO_PATCH_INFO_NONE: case MONO_PATCH_INFO_BB_OVF: case MONO_PATCH_INFO_EXC_OVF: /* everything is dealt with at epilog output time */ break; default: arm_patch_general (cfg, ip, (const guchar*)target); break; } } void mono_arm_unaligned_stack (MonoMethod *method) { g_assert_not_reached (); } #ifndef DISABLE_JIT /* * Stack frame layout: * * ------------------- fp * MonoLMF structure or saved registers * ------------------- * locals * ------------------- * spilled regs * ------------------- * param area size is cfg->param_area * ------------------- sp */ guint8 * mono_arch_emit_prolog (MonoCompile *cfg) { MonoMethod *method = cfg->method; MonoBasicBlock *bb; MonoMethodSignature *sig; MonoInst *inst; int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount, part; guint8 *code; CallInfo *cinfo; int lmf_offset = 0; int prev_sp_offset, reg_offset; sig = mono_method_signature_internal (method); cfg->code_size = 256 + sig->param_count * 64; code = cfg->native_code = g_malloc (cfg->code_size); mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0); alloc_size = cfg->stack_offset; pos = 0; prev_sp_offset = 0; if (iphone_abi) { /* * The iphone uses R7 as the frame pointer, and it points at the saved * r7+lr: * <lr> * r7 -> <r7> * <rest of frame> * We can't use r7 as a frame pointer since it points into the middle of * the frame, so we keep using our own frame pointer. * FIXME: Optimize this. 
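	 *
	 * To make the first two pushes concrete (an illustrative sketch of what
	 * the ARM_PUSH/ARM_MOV below emit, not an authoritative ABI diagram):
	 *
	 *   push {r7, lr}   ; sp -= 8, [sp] = r7, [sp + 4] = lr
	 *   mov  r7, sp     ; r7 now points at the saved r7/lr pair
	 *
	 * which is why prev_sp_offset starts out at 8 (the CFA is sp + 8 here).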
	 */
		ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
		prev_sp_offset += 8; /* r7 and lr */
		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
		mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
		ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
	}

	if (!method->save_lmf) {
		if (iphone_abi) {
			/* No need to push LR again */
			if (cfg->used_int_regs)
				ARM_PUSH (code, cfg->used_int_regs);
		} else {
			ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
			prev_sp_offset += 4;
		}
		for (i = 0; i < 16; ++i) {
			if (cfg->used_int_regs & (1 << i))
				prev_sp_offset += 4;
		}
		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
		reg_offset = 0;
		for (i = 0; i < 16; ++i) {
			if ((cfg->used_int_regs & (1 << i))) {
				mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
				mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
				reg_offset += 4;
			}
		}
		mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
		mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
	} else {
		ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
		ARM_PUSH (code, 0x5ff0);
		prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
		reg_offset = 0;
		for (i = 0; i < 16; ++i) {
			if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
				/* The original r7 is saved at the start */
				if (!(iphone_abi && i == ARMREG_R7))
					mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
				reg_offset += 4;
			}
		}
		g_assert (reg_offset == 4 * 10);
		pos += MONO_ABI_SIZEOF (MonoLMF) - (4 * 10);
		lmf_offset = pos;
	}
	alloc_size += pos;
	orig_alloc_size = alloc_size;
	// align to MONO_ARCH_FRAME_ALIGNMENT bytes
	if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
		alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
		alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
	}

	/* the stack used in the pushed regs */
	alloc_size += ALIGN_TO (prev_sp_offset, MONO_ARCH_FRAME_ALIGNMENT) - prev_sp_offset;
	cfg->stack_usage = alloc_size;
	if (alloc_size) {
		if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
			ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
			ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
		}
		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
	}
	if (cfg->frame_reg != ARMREG_SP) {
		ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
		mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
	}
	//g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
	prev_sp_offset += alloc_size;

	for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
		mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);

	/* compute max_offset in order to use short forward jumps.
	 * We could skip doing it on arm because the immediate displacement
	 * for jumps is large enough; it may be useful later for constant pools.
	 */
	max_offset = 0;
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins = bb->code;
		bb->max_offset = max_offset;

		MONO_BB_FOR_EACH_INS (bb, ins)
			max_offset += ins_get_size (ins->opcode);
	}

	/* stack alignment check */
	/*
	{
		guint8 *buf [16];
		ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_SP);
		code = mono_arm_emit_load_imm (code, ARMREG_IP, MONO_ARCH_FRAME_ALIGNMENT -1);
		ARM_AND_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
		ARM_CMP_REG_IMM (code, ARMREG_LR, 0, 0);
		buf [0] = code;
		ARM_B_COND (code, ARMCOND_EQ, 0);
		if (cfg->compile_aot)
			ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
		else
			code =
mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arm_unaligned_stack)); code = emit_call_seq (cfg, code); arm_patch (buf [0], code); } */ /* store runtime generic context */ if (cfg->rgctx_var) { MonoInst *ins = cfg->rgctx_var; g_assert (ins->opcode == OP_REGOFFSET); if (arm_is_imm12 (ins->inst_offset)) { ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset); ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR); } mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code); mono_add_var_location (cfg, cfg->rgctx_var, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0); } /* load arguments allocated to register from the stack */ cinfo = get_call_info (NULL, sig); if (cinfo->ret.storage == RegTypeStructByAddr) { ArgInfo *ainfo = &cinfo->ret; inst = cfg->vret_addr; g_assert (arm_is_imm12 (inst->inst_offset)); ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); } if (sig->call_convention == MONO_CALL_VARARG) { ArgInfo *cookie = &cinfo->sig_cookie; /* Save the sig cookie address */ g_assert (cookie->storage == RegTypeBase); g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset)); g_assert (arm_is_imm12 (cfg->sig_cookie)); ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset); ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie); } for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; inst = cfg->args [i]; if (cfg->verbose_level > 2) g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage); if (inst->opcode == OP_REGVAR) { if (ainfo->storage == RegTypeGeneral) ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg); else if (ainfo->storage == RegTypeFP) { g_assert_not_reached (); } else if (ainfo->storage == RegTypeBase) { if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) { ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset)); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset); ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP); } } else g_assert_not_reached (); if (i == 0 && sig->hasthis) { g_assert (ainfo->storage == RegTypeGeneral); mono_add_var_location (cfg, inst, TRUE, ainfo->reg, 0, 0, code - cfg->native_code); mono_add_var_location (cfg, inst, TRUE, inst->dreg, 0, code - cfg->native_code, 0); } if (cfg->verbose_level > 2) g_print ("Argument %d assigned to register %s\n", i, mono_arch_regname (inst->dreg)); } else { switch (ainfo->storage) { case RegTypeHFA: for (part = 0; part < ainfo->nregs; part ++) { if (ainfo->esize == 4) ARM_FSTS (code, ainfo->reg + part, inst->inst_basereg, inst->inst_offset + (part * ainfo->esize)); else ARM_FSTD (code, ainfo->reg + (part * 2), inst->inst_basereg, inst->inst_offset + (part * ainfo->esize)); } break; case RegTypeGeneral: case RegTypeIRegPair: case RegTypeGSharedVtInReg: case RegTypeStructByAddr: switch (ainfo->size) { case 1: if (arm_is_imm12 (inst->inst_offset)) ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); else { code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset); ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP); } break; case 2: if (arm_is_imm8 (inst->inst_offset)) { ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, 
inst->inst_offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset); ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP); } break; case 8: if (arm_is_imm12 (inst->inst_offset)) { ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset); ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP); } if (arm_is_imm12 (inst->inst_offset + 4)) { ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4); ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP); } break; default: if (arm_is_imm12 (inst->inst_offset)) { ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset); ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP); } break; } if (i == 0 && sig->hasthis) { g_assert (ainfo->storage == RegTypeGeneral); mono_add_var_location (cfg, inst, TRUE, ainfo->reg, 0, 0, code - cfg->native_code); mono_add_var_location (cfg, inst, FALSE, inst->inst_basereg, inst->inst_offset, code - cfg->native_code, 0); } break; case RegTypeBaseGen: if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) { ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset)); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset); ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP); } if (arm_is_imm12 (inst->inst_offset + 4)) { ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4); ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4); ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP); code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset); ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP); } break; case RegTypeBase: case RegTypeGSharedVtOnStack: case RegTypeStructByAddrOnStack: if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) { ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset)); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset); ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP); } switch (ainfo->size) { case 1: if (arm_is_imm8 (inst->inst_offset)) { ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset); ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP); } break; case 2: if (arm_is_imm8 (inst->inst_offset)) { ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset); ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP); } break; case 8: if (arm_is_imm12 (inst->inst_offset)) { ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset); ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP); } if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) { ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4)); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4); ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP); } if (arm_is_imm12 
(inst->inst_offset + 4)) { ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4); ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP); } break; default: if (arm_is_imm12 (inst->inst_offset)) { ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset); ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP); } break; } break; case RegTypeFP: { int imm8, rot_amount; if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) { code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset); ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg); } else ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount); if (ainfo->size == 8) ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0); else ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0); break; } case RegTypeStructByVal: { int doffset = inst->inst_offset; int soffset = 0; int cur_reg; int size = 0; size = mini_type_stack_size_full (inst->inst_vtype, NULL, sig->pinvoke && !sig->marshalling_disabled); for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) { if (arm_is_imm12 (doffset)) { ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset); ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP); } soffset += sizeof (target_mgreg_t); doffset += sizeof (target_mgreg_t); } if (ainfo->vtsize) { /* FIXME: handle overrun! with struct sizes not multiple of 4 */ //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset); code = emit_memcpy (code, ainfo->vtsize * sizeof (target_mgreg_t), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset); } break; } default: g_assert_not_reached (); break; } } } if (method->save_lmf) code = emit_save_lmf (cfg, code, alloc_size - lmf_offset); if (cfg->arch.seq_point_info_var) { MonoInst *ins = cfg->arch.seq_point_info_var; /* Initialize the variable from a GOT slot */ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method); ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0); ARM_B (code, 0); *(gpointer*)code = NULL; code += 4; ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0); g_assert (ins->opcode == OP_REGOFFSET); if (arm_is_imm12 (ins->inst_offset)) { ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset); ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR); } } /* Initialize ss_trigger_page_var */ if (!cfg->soft_breakpoints) { MonoInst *info_var = cfg->arch.seq_point_info_var; MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var; int dreg = ARMREG_LR; if (info_var) { g_assert (info_var->opcode == OP_REGOFFSET); code = emit_ldr_imm (code, dreg, info_var->inst_basereg, info_var->inst_offset); /* Load the trigger page addr */ ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page)); ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset); } } if (cfg->arch.seq_point_ss_method_var) { MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var; MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var; g_assert (ss_method_ins->opcode == OP_REGOFFSET); g_assert (arm_is_imm12 
(ss_method_ins->inst_offset)); if (cfg->compile_aot) { MonoInst *info_var = cfg->arch.seq_point_info_var; int dreg = ARMREG_LR; g_assert (info_var->opcode == OP_REGOFFSET); g_assert (arm_is_imm12 (info_var->inst_offset)); ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset); ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr)); ARM_STR_IMM (code, dreg, ss_method_ins->inst_basereg, ss_method_ins->inst_offset); } else { g_assert (bp_method_ins->opcode == OP_REGOFFSET); g_assert (arm_is_imm12 (bp_method_ins->inst_offset)); ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC); ARM_B (code, 1); *(gpointer*)code = &single_step_tramp; code += 4; *(gpointer*)code = breakpoint_tramp; code += 4; ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0); ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset); ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4); ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset); } } set_code_cursor (cfg, code); g_free (cinfo); return code; } void mono_arch_emit_epilog (MonoCompile *cfg) { MonoMethod *method = cfg->method; int pos, i, rot_amount; int max_epilog_size = 16 + 20*4; guint8 *code; CallInfo *cinfo; if (cfg->method->save_lmf) max_epilog_size += 128; code = realloc_code (cfg, max_epilog_size); /* Save the uwind state which is needed by the out-of-line code */ mono_emit_unwind_op_remember_state (cfg, code); pos = 0; /* Load returned vtypes into registers if needed */ cinfo = cfg->arch.cinfo; switch (cinfo->ret.storage) { case RegTypeStructByVal: { MonoInst *ins = cfg->ret; if (cinfo->ret.nregs == 1) { if (arm_is_imm12 (ins->inst_offset)) { ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset); ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR); } } else { for (i = 0; i < cinfo->ret.nregs; ++i) { int offset = ins->inst_offset + (i * 4); if (arm_is_imm12 (offset)) { ARM_LDR_IMM (code, i, ins->inst_basereg, offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_LR, offset); ARM_LDR_REG_REG (code, i, ins->inst_basereg, ARMREG_LR); } } } break; } case RegTypeHFA: { MonoInst *ins = cfg->ret; for (i = 0; i < cinfo->ret.nregs; ++i) { if (cinfo->ret.esize == 4) ARM_FLDS (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize)); else ARM_FLDD (code, cinfo->ret.reg + (i * 2), ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize)); } break; } default: break; } if (method->save_lmf) { int lmf_offset, reg, sp_adj, regmask, nused_int_regs = 0; /* all but r0-r3, sp and pc */ pos += MONO_ABI_SIZEOF (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t)); lmf_offset = pos; code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset); /* This points to r4 inside MonoLMF->iregs */ sp_adj = (MONO_ABI_SIZEOF (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t)); reg = ARMREG_R4; regmask = 0x9ff0; /* restore lr to pc */ /* Skip caller saved registers not used by the method */ while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) { regmask &= ~(1 << reg); sp_adj += 4; reg ++; } if (iphone_abi) /* Restored later */ regmask &= ~(1 << ARMREG_PC); /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */ code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj); for (i = 0; i < 16; i++) { if (regmask & (1 << i)) nused_int_regs ++; } mono_emit_unwind_op_def_cfa (cfg, code, 
ARMREG_SP, ((iphone_abi ? 3 : 0) + nused_int_regs) * 4); /* restore iregs */ ARM_POP (code, regmask); if (iphone_abi) { for (i = 0; i < 16; i++) { if (regmask & (1 << i)) mono_emit_unwind_op_same_value (cfg, code, i); } /* Restore saved r7, restore LR to PC */ /* Skip lr from the lmf */ mono_emit_unwind_op_def_cfa_offset (cfg, code, 3 * 4); ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (target_mgreg_t), 0); mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4); ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC)); } } else { int i, nused_int_regs = 0; for (i = 0; i < 16; i++) { if (cfg->used_int_regs & (1 << i)) nused_int_regs ++; } if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) { ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount); } else { code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage); ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP); } if (cfg->frame_reg != ARMREG_SP) { mono_emit_unwind_op_def_cfa_reg (cfg, code, ARMREG_SP); } if (iphone_abi) { /* Restore saved gregs */ if (cfg->used_int_regs) { mono_emit_unwind_op_def_cfa_offset (cfg, code, (2 + nused_int_regs) * 4); ARM_POP (code, cfg->used_int_regs); for (i = 0; i < 16; i++) { if (cfg->used_int_regs & (1 << i)) mono_emit_unwind_op_same_value (cfg, code, i); } } mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4); /* Restore saved r7, restore LR to PC */ ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC)); } else { mono_emit_unwind_op_def_cfa_offset (cfg, code, (nused_int_regs + 1) * 4); ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC)); } } /* Restore the unwind state to be the same as before the epilog */ mono_emit_unwind_op_restore_state (cfg, code); set_code_cursor (cfg, code); } void mono_arch_emit_exceptions (MonoCompile *cfg) { MonoJumpInfo *patch_info; int i; guint8 *code; guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM]; guint8 exc_throw_found [MONO_EXC_INTRINS_NUM]; int max_epilog_size = 50; for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) { exc_throw_pos [i] = NULL; exc_throw_found [i] = 0; } /* count the number of exception infos */ /* * make sure we have enough space for exceptions */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { if (patch_info->type == MONO_PATCH_INFO_EXC) { i = mini_exception_id_by_name ((const char*)patch_info->data.target); if (!exc_throw_found [i]) { max_epilog_size += 32; exc_throw_found [i] = TRUE; } } } code = realloc_code (cfg, max_epilog_size); /* add code to raise exceptions */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { switch (patch_info->type) { case MONO_PATCH_INFO_EXC: { MonoClass *exc_class; unsigned char *ip = patch_info->ip.i + cfg->native_code; i = mini_exception_id_by_name ((const char*)patch_info->data.target); if (exc_throw_pos [i]) { arm_patch (ip, exc_throw_pos [i]); patch_info->type = MONO_PATCH_INFO_NONE; break; } else { exc_throw_pos [i] = code; } arm_patch (ip, code); exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name); ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR); ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0); patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID; patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception; patch_info->ip.i = code - cfg->native_code; ARM_BL (code, 0); cfg->thunk_area += THUNK_SIZE; *(guint32*)(gpointer)code = m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF; code += 4; break; } default: /* do nothing */ break; } } set_code_cursor (cfg, code); 
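	/*
	 * For reference, each distinct corlib exception throw sequence emitted
	 * above has this shape (an illustrative sketch; the exact encodings come
	 * from the ARM_* macros):
	 *
	 *   mov r1, lr         ; LR holds the IP of the faulting check
	 *   ldr r0, [pc, #0]   ; loads the data word below (pc reads as . + 8)
	 *   bl  <mono_arch_throw_corlib_exception thunk>
	 *   .word <type token - MONO_TOKEN_TYPE_DEF>
	 */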
}

#endif /* #ifndef DISABLE_JIT */

void
mono_arch_finish_init (void)
{
}

MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	/* FIXME: */
	return NULL;
}

#ifndef DISABLE_JIT
#endif

guint32
mono_arch_get_patch_offset (guint8 *code)
{
	/* OP_AOTCONST */
	return 8;
}

void
mono_arch_flush_register_windows (void)
{
}

MonoMethod*
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
	return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
}

MonoVTable*
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
	return (MonoVTable*)(gsize)regs [MONO_ARCH_RGCTX_REG];
}

GSList*
mono_arch_get_cie_program (void)
{
	GSList *l = NULL;

	mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ARMREG_SP, 0);

	return l;
}

/* #define ENABLE_WRONG_METHOD_CHECK 1 */
#define BASE_SIZE (6 * 4)
#define BSEARCH_ENTRY_SIZE (4 * 4)
#define CMP_SIZE (3 * 4)
#define BRANCH_SIZE (1 * 4)
#define CALL_SIZE (2 * 4)
#define WMC_SIZE (8 * 4)
#define DISTANCE(A, B) (((gint32)(gssize)(B)) - ((gint32)(gssize)(A)))

static arminstr_t *
arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
{
	/* Signed, so that the range assert below can actually catch a negative delta */
	gint32 delta = DISTANCE (target, code);
	delta -= 8;
	g_assert (delta >= 0 && delta <= 0xFFF);
	*target = *target | delta;
	*code = value;
	return code + 1;
}

#ifdef ENABLE_WRONG_METHOD_CHECK
static void
mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
{
	g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
	g_assert (0);
}
#endif

gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp)
{
	int size, i;
	arminstr_t *code, *start;
	gboolean large_offsets = FALSE;
	guint32 **constant_pool_starts;
	arminstr_t *vtable_target = NULL;
	int extra_space = 0;
#ifdef ENABLE_WRONG_METHOD_CHECK
	char * cond;
#endif
	GSList *unwind_ops;
	MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass);

	size = BASE_SIZE;
	constant_pool_starts = g_new0 (guint32*, count);

	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			gboolean fail_case = !item->check_target_idx && fail_tramp;

			if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
				item->chunk_size += 32;
				large_offsets = TRUE;
			}

			if (item->check_target_idx || fail_case) {
				if (!item->compare_done || fail_case)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BRANCH_SIZE;
			} else {
#ifdef ENABLE_WRONG_METHOD_CHECK
				item->chunk_size += WMC_SIZE;
#endif
			}
			if (fail_case) {
				item->chunk_size += 16;
				large_offsets = TRUE;
			}
			item->chunk_size += CALL_SIZE;
		} else {
			item->chunk_size += BSEARCH_ENTRY_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		}
		size += item->chunk_size;
	}

	if (large_offsets)
		size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */

	if (fail_tramp) {
		code = (arminstr_t *)mini_alloc_generic_virtual_trampoline (vtable, size);
	} else {
		code = mono_mem_manager_code_reserve (mem_manager, size);
	}
	start = code;

	unwind_ops = mono_arch_get_cie_program ();

#ifdef DEBUG_IMT
	g_print ("Building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i,
item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size); } #endif if (large_offsets) { ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC); mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 4 * sizeof (target_mgreg_t)); } else { ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1); mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (target_mgreg_t)); } ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4); vtable_target = code; ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0); ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL; gint32 vtable_offset; item->code_target = (guint8*)code; if (item->is_equals) { gboolean fail_case = !item->check_target_idx && fail_tramp; if (item->check_target_idx || fail_case) { if (!item->compare_done || fail_case) { imt_method = code; ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1); } item->jmp_code = (guint8*)code; ARM_B_COND (code, ARMCOND_NE, 0); } else { /*Enable the commented code to assert on wrong method*/ #ifdef ENABLE_WRONG_METHOD_CHECK imt_method = code; ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1); cond = code; ARM_B_COND (code, ARMCOND_EQ, 0); /* Define this if your system is so bad that gdb is failing. */ #ifdef BROKEN_DEV_ENV ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC); ARM_BL (code, 0); arm_patch (code - 1, mini_dump_bad_imt); #else ARM_DBRK (code); #endif arm_patch (cond, code); #endif } if (item->has_target_code) { /* Load target address */ target_code_ins = code; ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); /* Save it to the fourth slot */ ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t)); /* Restore registers and branch */ ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC); code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code); } else { vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]); if (!arm_is_imm12 (vtable_offset)) { /* * We need to branch to a computed address but we don't have * a free register to store it, since IP must contain the * vtable address. So we push the two values to the stack, and * load them both using LDM. 
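					 * As a concrete illustration (the numbers are hypothetical): with
					 * 4-byte slots, vtable slot 2048 lives at a byte offset of roughly
					 * 8192, which does not fit in the 12-bit LDR immediate (max 4095),
					 * so this slower two-value sequence is used instead of a single
					 * "ldr pc, [ip, #offset]".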
*/ /* Compute target address */ vtable_offset_ins = code; ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1); /* Save it to the fourth slot */ ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t)); /* Restore registers and branch */ ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC); code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset); } else { ARM_POP2 (code, ARMREG_R0, ARMREG_R1); if (large_offsets) { mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (target_mgreg_t)); ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (target_mgreg_t)); } mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 0); ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset); } } if (fail_case) { arm_patch (item->jmp_code, (guchar*)code); target_code_ins = code; /* Load target address */ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); /* Save it to the fourth slot */ ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t)); /* Restore registers and branch */ ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC); code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp); item->jmp_code = NULL; } if (imt_method) code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)(gsize)item->key); /*must emit after unconditional branch*/ if (vtable_target) { code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)(gsize)vtable); item->chunk_size += 4; vtable_target = NULL; } /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/ constant_pool_starts [i] = code; if (extra_space) { code += extra_space; extra_space = 0; } } else { ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1); item->jmp_code = (guint8*)code; ARM_B_COND (code, ARMCOND_HS, 0); ++extra_space; } } for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->jmp_code) { if (item->check_target_idx) arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target); } if (i > 0 && item->is_equals) { int j; arminstr_t *space_start = constant_pool_starts [i]; for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) { space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)(gsize)imt_entries [j]->key); } } } #ifdef DEBUG_IMT { char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count); mono_disassemble_code (NULL, (guint8*)start, size, buff); g_free (buff); } #endif g_free (constant_pool_starts); mono_arch_flush_icache ((guint8*)start, size); MONO_PROFILER_RAISE (jit_code_buffer, ((guint8*)start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL)); UnlockedAdd (&mono_stats.imt_trampolines_size, code - start); g_assert (DISTANCE (start, code) <= size); mono_tramp_info_register (mono_tramp_info_create (NULL, (guint8*)start, DISTANCE (start, code), NULL, unwind_ops), mem_manager); return start; } host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg) { return ctx->regs [reg]; } host_mgreg_t* mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg) { return &ctx->regs [reg]; } void mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val) { ctx->regs [reg] = val; } /* * mono_arch_get_trampolines: * * Return a list of MonoTrampInfo structures describing arch specific trampolines * for 
AOT.
 */
GSList *
mono_arch_get_trampolines (gboolean aot)
{
	return mono_arm_get_exception_trampolines (aot);
}

#if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 */
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	guint32 native_offset = ip - (guint8*)ji->code_start;

	if (ji->from_aot) {
		SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start);

		if (!breakpoint_tramp)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();

		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == 0);
		info->bp_addrs [native_offset / 4] = (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page);
	} else if (mini_debug_options.soft_breakpoints) {
		code += 4;
		ARM_BLX_REG (code, ARMREG_LR);
		mono_arch_flush_icache (code - 4, 4);
	} else {
		int dreg = ARMREG_LR;

		/* Read from another trigger page */
		ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(int*)code = (int)(gssize)bp_trigger_page;
		code += 4;
		ARM_LDR_IMM (code, dreg, dreg, 0);

		mono_arch_flush_icache (code - 16, 16);

#if 0
		/* This is currently implemented by emitting an SWI instruction, which
		 * qemu/linux seems to convert to a SIGILL.
		 */
		*(int*)code = (0xef << 24) | 8;
		code += 4;
		mono_arch_flush_icache (code - 4, 4);
#endif
	}
}

/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP.
 */
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	int i;

	if (ji->from_aot) {
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start);

		if (!breakpoint_tramp)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();

		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page));
		info->bp_addrs [native_offset / 4] = 0;
	} else if (mini_debug_options.soft_breakpoints) {
		code += 4;
		ARM_NOP (code);
		mono_arch_flush_icache (code - 4, 4);
	} else {
		for (i = 0; i < 4; ++i)
			ARM_NOP (code);

		mono_arch_flush_icache (ip, code - ip);
	}
}

/*
 * mono_arch_start_single_stepping:
 *
 *   Start single stepping.
 */
void
mono_arch_start_single_stepping (void)
{
	if (ss_trigger_page)
		mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
	else
		single_step_tramp = mini_get_single_step_trampoline ();
}

/*
 * mono_arch_stop_single_stepping:
 *
 *   Stop single stepping.
 */
void
mono_arch_stop_single_stepping (void)
{
	if (ss_trigger_page)
		mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
	else
		single_step_tramp = NULL;
}

#if __APPLE__
#define DBG_SIGNAL SIGBUS
#else
#define DBG_SIGNAL SIGSEGV
#endif

/*
 * mono_arch_is_single_step_event:
 *
 *   Return whether the machine state in SIGCTX corresponds to a single
 * step event.
 */
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
	siginfo_t *sinfo = (siginfo_t*)info;

	if (!ss_trigger_page)
		return FALSE;

	/* Sometimes the address is off by 4 */
	if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
		return TRUE;
	else
		return FALSE;
}

/*
 * mono_arch_is_breakpoint_event:
 *
 *   Return whether the machine state in SIGCTX corresponds to a breakpoint event.
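 * (As with the single-step check above, the reported si_addr may be off by a
 * few bytes, which is why the checks accept a small window past the start of
 * the trigger page.)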
*/ gboolean mono_arch_is_breakpoint_event (void *info, void *sigctx) { siginfo_t *sinfo = (siginfo_t*)info; if (!ss_trigger_page) return FALSE; if (sinfo->si_signo == DBG_SIGNAL) { /* Sometimes the address is off by 4 */ if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128) return TRUE; else return FALSE; } else { return FALSE; } } /* * mono_arch_skip_breakpoint: * * See mini-amd64.c for docs. */ void mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji) { MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4); } /* * mono_arch_skip_single_step: * * See mini-amd64.c for docs. */ void mono_arch_skip_single_step (MonoContext *ctx) { MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4); } /* * mono_arch_get_seq_point_info: * * See mini-amd64.c for docs. */ SeqPointInfo* mono_arch_get_seq_point_info (guint8 *code) { SeqPointInfo *info; MonoJitInfo *ji; MonoJitMemoryManager *jit_mm; jit_mm = get_default_jit_mm (); // FIXME: Add a free function jit_mm_lock (jit_mm); info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code); jit_mm_unlock (jit_mm); if (!info) { ji = mini_jit_info_table_find (code); g_assert (ji); info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size); info->ss_trigger_page = ss_trigger_page; info->bp_trigger_page = bp_trigger_page; info->ss_tramp_addr = &single_step_tramp; jit_mm_lock (jit_mm); g_hash_table_insert (jit_mm->arch_seq_points, code, info); jit_mm_unlock (jit_mm); } return info; } #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */ /* * mono_arch_set_target: * * Set the target architecture the JIT backend should generate code for, in the form * of a GNU target triplet. Only used in AOT mode. */ void mono_arch_set_target (char *mtriple) { /* The GNU target triple format is not very well documented */ if (strstr (mtriple, "armv7")) { v5_supported = TRUE; v6_supported = TRUE; v7_supported = TRUE; } if (strstr (mtriple, "armv6")) { v5_supported = TRUE; v6_supported = TRUE; } if (strstr (mtriple, "armv7s")) { v7s_supported = TRUE; } if (strstr (mtriple, "armv7k")) { v7k_supported = TRUE; } if (strstr (mtriple, "thumbv7s")) { v5_supported = TRUE; v6_supported = TRUE; v7_supported = TRUE; v7s_supported = TRUE; thumb_supported = TRUE; thumb2_supported = TRUE; } if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) { v5_supported = TRUE; v6_supported = TRUE; thumb_supported = TRUE; iphone_abi = TRUE; } if (strstr (mtriple, "gnueabi")) eabi_supported = TRUE; } gboolean mono_arch_opcode_supported (int opcode) { switch (opcode) { case OP_ATOMIC_ADD_I4: case OP_ATOMIC_EXCHANGE_I4: case OP_ATOMIC_CAS_I4: case OP_ATOMIC_LOAD_I1: case OP_ATOMIC_LOAD_I2: case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_LOAD_U1: case OP_ATOMIC_LOAD_U2: case OP_ATOMIC_LOAD_U4: case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U1: case OP_ATOMIC_STORE_U2: case OP_ATOMIC_STORE_U4: return v7_supported; case OP_ATOMIC_LOAD_R4: case OP_ATOMIC_LOAD_R8: case OP_ATOMIC_STORE_R4: case OP_ATOMIC_STORE_R8: return v7_supported && IS_VFP; default: return FALSE; } } CallInfo* mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { return get_call_info (mp, sig); } gpointer mono_arch_get_get_tls_tramp (void) { return NULL; } static G_GNUC_UNUSED guint8* emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data) { /* OP_AOTCONST */ mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data); ARM_LDR_IMM (code, dreg, 
ARMREG_PC, 0); ARM_B (code, 0); *(gpointer*)code = NULL; code += 4; /* Load the value from the GOT */ ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg); return code; } guint8* mono_arm_emit_aotconst (gpointer ji_list, guint8 *code, guint8 *buf, int dreg, int patch_type, gconstpointer data) { MonoJumpInfo **ji = (MonoJumpInfo**)ji_list; *ji = mono_patch_info_list_prepend (*ji, code - buf, (MonoJumpInfoType)patch_type, data); ARM_LDR_IMM (code, dreg, ARMREG_PC, 0); ARM_B (code, 0); *(gpointer*)code = NULL; code += 4; ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg); return code; } gpointer mono_arch_load_function (MonoJitICallId jit_icall_id) { gpointer target = NULL; switch (jit_icall_id) { #undef MONO_AOT_ICALL #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break; MONO_AOT_ICALL (mono_arm_resume_unwind) MONO_AOT_ICALL (mono_arm_start_gsharedvt_call) MONO_AOT_ICALL (mono_arm_throw_exception) MONO_AOT_ICALL (mono_arm_throw_exception_by_token) MONO_AOT_ICALL (mono_arm_unaligned_stack) } return target; }
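
/*
 * For reference, the PC-relative constant/GOT pattern produced by the two
 * aotconst helpers above looks like this (an illustrative sketch; the inline
 * word is patched later by the AOT infrastructure):
 *
 *   ldr rd, [pc, #0]   ; rd = the inline word below (pc reads as . + 8)
 *   b   . + 4          ; skip over the inline data word
 *   .word 0            ; filled in with the GOT slot offset
 *   ldr rd, [pc, rd]   ; load the final value relative to pc
 */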
/** * \file * ARM backend for the Mono code generator * * Authors: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * * (C) 2003 Ximian, Inc. * Copyright 2003-2011 Novell, Inc (http://www.novell.com) * Copyright 2011 Xamarin, Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include "mini.h" #include <string.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/tokentype.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-hwcap.h> #include <mono/utils/mono-memory-model.h> #include <mono/utils/mono-threads-coop.h> #include <mono/utils/unlocked.h> #include "interp/interp.h" #include "mini-arm.h" #include "cpu-arm.h" #include "ir-emit.h" #include "mini-gc.h" #include "mini-runtime.h" #include "aot-runtime.h" #include "mono/arch/arm/arm-vfp-codegen.h" #include "mono/utils/mono-tls-inline.h" /* Sanity check: This makes no sense */ #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD)) #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined" #endif /* * IS_SOFT_FLOAT: Is full software floating point used? * IS_HARD_FLOAT: Is full hardware floating point used? * IS_VFP: Is hardware floating point with software ABI used? * * These are not necessarily constants, e.g. IS_SOFT_FLOAT and * IS_VFP may delegate to mono_arch_is_soft_float (). */ #if defined(ARM_FPU_VFP_HARD) #define IS_SOFT_FLOAT (FALSE) #define IS_HARD_FLOAT (TRUE) #define IS_VFP (TRUE) #elif defined(ARM_FPU_NONE) #define IS_SOFT_FLOAT (mono_arch_is_soft_float ()) #define IS_HARD_FLOAT (FALSE) #define IS_VFP (!mono_arch_is_soft_float ()) #else #define IS_SOFT_FLOAT (FALSE) #define IS_HARD_FLOAT (FALSE) #define IS_VFP (TRUE) #endif #define THUNK_SIZE (3 * 4) #if __APPLE__ G_BEGIN_DECLS void sys_icache_invalidate (void *start, size_t len); G_END_DECLS #endif /* This mutex protects architecture specific caches */ #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex) #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex) static mono_mutex_t mini_arch_mutex; static gboolean v5_supported = FALSE; static gboolean v6_supported = FALSE; static gboolean v7_supported = FALSE; static gboolean v7s_supported = FALSE; static gboolean v7k_supported = FALSE; static gboolean thumb_supported = FALSE; static gboolean thumb2_supported = FALSE; /* * Whenever to use the ARM EABI */ static gboolean eabi_supported = FALSE; /* * Whenever to use the iphone ABI extensions: * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr. * This is required for debugging/profiling tools to work, but it has some overhead so it should * only be turned on in debug builds. */ static gboolean iphone_abi = FALSE; /* * The FPU we are generating code for. This is NOT runtime configurable right now, * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines. */ static MonoArmFPU arm_fpu; #if defined(ARM_FPU_VFP_HARD) /* * On armhf, d0-d7 are used for argument passing and d8-d15 * must be preserved across calls, which leaves us no room * for scratch registers. So we use d14-d15 but back up their * previous contents to a stack slot before using them - see * mono_arm_emit_vfp_scratch_save/_restore (). 
*/ static int vfp_scratch1 = ARM_VFP_D14; static int vfp_scratch2 = ARM_VFP_D15; #else /* * On armel, d0-d7 do not need to be preserved, so we can * freely make use of them as scratch registers. */ static int vfp_scratch1 = ARM_VFP_D0; static int vfp_scratch2 = ARM_VFP_D1; #endif static int i8_align; static gpointer single_step_tramp, breakpoint_tramp; /* * The code generated for sequence points reads from this location, which is * made read-only when single stepping is enabled. */ static gpointer ss_trigger_page; /* Enabled breakpoints read from this trigger page */ static gpointer bp_trigger_page; /* * TODO: * floating point support: on ARM it is a mess, there are at least 3 * different setups, each of which binary incompat with the other. * 1) FPA: old and ugly, but unfortunately what current distros use * the double binary format has the two words swapped. 8 double registers. * Implemented usually by kernel emulation. * 2) softfloat: the compiler emulates all the fp ops. Usually uses the * ugly swapped double format (I guess a softfloat-vfp exists, too, though). * 3) VFP: the new and actually sensible and useful FP support. Implemented * in HW or kernel-emulated, requires new tools. I think this is what symbian uses. * * We do not care about FPA. We will support soft float and VFP. */ #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096) #define arm_is_imm8(v) ((v) > -256 && (v) < 256) #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020) #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12)) #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12)) #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL) //#define DEBUG_IMT 0 #ifndef DISABLE_JIT static void mono_arch_compute_omit_fp (MonoCompile *cfg); #endif static guint8* emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data); const char* mono_arch_regname (int reg) { static const char * rnames[] = { "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1", "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6", "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr", "arm_pc" }; if (reg >= 0 && reg < 16) return rnames [reg]; return "unknown"; } const char* mono_arch_fregname (int reg) { static const char * rnames[] = { "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4", "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9", "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14", "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19", "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24", "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29", "arm_f30", "arm_f31" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } #ifndef DISABLE_JIT static guint8* emit_big_add_temp (guint8 *code, int dreg, int sreg, int imm, int temp) { int imm8, rot_amount; g_assert (temp == ARMREG_IP || temp == ARMREG_LR); if (imm == 0) { if (sreg != dreg) ARM_MOV_REG_REG (code, dreg, sreg); } else if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) { ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount); return code; } if (dreg == sreg) { code = mono_arm_emit_load_imm (code, temp, imm); ARM_ADD_REG_REG (code, dreg, sreg, temp); } else { code = mono_arm_emit_load_imm (code, dreg, imm); ARM_ADD_REG_REG (code, dreg, dreg, sreg); } return code; } static guint8* emit_big_add (guint8 *code, int dreg, int sreg, int imm) { return emit_big_add_temp (code, dreg, sreg, imm, ARMREG_IP); } static guint8* emit_ldr_imm (guint8 *code, int dreg, int sreg, int imm) { if 
(!arm_is_imm12 (imm)) { g_assert (dreg != sreg); code = emit_big_add (code, dreg, sreg, imm); ARM_LDR_IMM (code, dreg, dreg, 0); } else { ARM_LDR_IMM (code, dreg, sreg, imm); } return code; } /* If dreg == sreg, this clobbers IP */ static guint8* emit_sub_imm (guint8 *code, int dreg, int sreg, int imm) { int imm8, rot_amount; if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) { ARM_SUB_REG_IMM (code, dreg, sreg, imm8, rot_amount); return code; } if (dreg == sreg) { code = mono_arm_emit_load_imm (code, ARMREG_IP, imm); ARM_SUB_REG_REG (code, dreg, sreg, ARMREG_IP); } else { code = mono_arm_emit_load_imm (code, dreg, imm); ARM_SUB_REG_REG (code, dreg, dreg, sreg); } return code; } static guint8* emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset) { /* we can use r0-r3, since this is called only for incoming args on the stack */ if (size > sizeof (target_mgreg_t) * 4) { guint8 *start_loop; code = emit_big_add (code, ARMREG_R0, sreg, soffset); code = emit_big_add (code, ARMREG_R1, dreg, doffset); start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size); ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0); ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0); ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4); ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4); ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4); ARM_B_COND (code, ARMCOND_NE, 0); arm_patch (code - 4, start_loop); return code; } if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) && arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) { while (size >= 4) { ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset); ARM_STR_IMM (code, ARMREG_LR, dreg, doffset); doffset += 4; soffset += 4; size -= 4; } } else if (size) { code = emit_big_add (code, ARMREG_R0, sreg, soffset); code = emit_big_add (code, ARMREG_R1, dreg, doffset); doffset = soffset = 0; while (size >= 4) { ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset); ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset); doffset += 4; soffset += 4; size -= 4; } } g_assert (size == 0); return code; } static guint8* emit_jmp_reg (guint8 *code, int reg) { if (thumb_supported) ARM_BX (code, reg); else ARM_MOV_REG_REG (code, ARMREG_PC, reg); return code; } static guint8* emit_call_reg (guint8 *code, int reg) { if (v5_supported) { ARM_BLX_REG (code, reg); } else { ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC); return emit_jmp_reg (code, reg); } return code; } static guint8* emit_call_seq (MonoCompile *cfg, guint8 *code) { if (cfg->method->dynamic) { ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0); ARM_B (code, 0); *(gpointer*)code = NULL; code += 4; code = emit_call_reg (code, ARMREG_IP); } else { ARM_BL (code, 0); } cfg->thunk_area += THUNK_SIZE; return code; } guint8* mono_arm_patchable_b (guint8 *code, int cond) { ARM_B_COND (code, cond, 0); return code; } guint8* mono_arm_patchable_bl (guint8 *code, int cond) { ARM_BL_COND (code, cond, 0); return code; } #if defined(__ARM_EABI__) && defined(__linux__) && !defined(HOST_ANDROID) && !defined(MONO_CROSS_COMPILE) #define HAVE_AEABI_READ_TP 1 #endif #ifdef HAVE_AEABI_READ_TP G_BEGIN_DECLS gpointer __aeabi_read_tp (void); G_END_DECLS #endif gboolean mono_arch_have_fast_tls (void) { #ifdef HAVE_AEABI_READ_TP static gboolean have_fast_tls = FALSE; static gboolean inited = FALSE; if (mini_debug_options.use_fallback_tls) return FALSE; if (inited) return have_fast_tls; if (v7_supported) { gpointer tp1, tp2; tp1 = __aeabi_read_tp (); asm volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp2)); have_fast_tls = tp1 && tp1 == tp2; 
} inited = TRUE; return have_fast_tls; #else return FALSE; #endif } static guint8* emit_tls_get (guint8 *code, int dreg, int tls_offset) { g_assert (v7_supported); ARM_MRC (code, 15, 0, dreg, 13, 0, 3); ARM_LDR_IMM (code, dreg, dreg, tls_offset); return code; } static guint8* emit_tls_set (guint8 *code, int sreg, int tls_offset) { int tp_reg = (sreg != ARMREG_R0) ? ARMREG_R0 : ARMREG_R1; g_assert (v7_supported); ARM_MRC (code, 15, 0, tp_reg, 13, 0, 3); ARM_STR_IMM (code, sreg, tp_reg, tls_offset); return code; } /* * emit_save_lmf: * * Emit code to push an LMF structure on the LMF stack. * On arm, this is intermixed with the initialization of other fields of the structure. */ static guint8* emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset) { int i; if (mono_arch_have_fast_tls () && mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR) != -1) { code = emit_tls_get (code, ARMREG_R0, mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR)); } else { mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern)); code = emit_call_seq (cfg, code); } /* we build the MonoLMF structure on the stack - see mini-arm.h */ /* lmf_offset is the offset from the previous stack pointer, * alloc_size is the total stack space allocated, so the offset * of MonoLMF from the current stack ptr is alloc_size - lmf_offset. * The pointer to the struct is put in r1 (new_lmf). * ip is used as scratch * The callee-saved registers are already in the MonoLMF structure */ code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset); /* r0 is the result from mono_get_lmf_addr () */ ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr)); /* new_lmf->previous_lmf = *lmf_addr */ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf)); ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf)); /* *(lmf_addr) = r1 */ ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf)); /* Skip method (only needed for trampoline LMF frames) */ ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp)); ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp)); /* save the current IP */ ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC); ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip)); for (i = 0; i < MONO_ABI_SIZEOF (MonoLMF); i += sizeof (target_mgreg_t)) mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF); return code; } typedef struct { gint32 vreg; gint32 hreg; } FloatArgData; static guint8 * emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset) { GSList *list; set_code_cursor (cfg, code); for (list = inst->float_args; list; list = list->next) { FloatArgData *fad = (FloatArgData*)list->data; MonoInst *var = get_vreg_to_inst (cfg, fad->vreg); gboolean imm = arm_is_fpimm8 (var->inst_offset); /* 4+1 insns for emit_big_add () and 1 for FLDS. 
		 */
		if (!imm)
			*max_len += 20 + 4;

		*max_len += 4;

		code = realloc_code (cfg, *max_len);

		if (!imm) {
			code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
			ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
		} else
			ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);

		set_code_cursor (cfg, code);
		*offset = code - cfg->native_code;
	}

	return code;
}

static guint8 *
mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
{
	MonoInst *inst;

	g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);

	inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];

	if (IS_HARD_FLOAT) {
		if (!arm_is_fpimm8 (inst->inst_offset)) {
			code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
			ARM_FSTD (code, reg, ARMREG_LR, 0);
		} else
			ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
	}

	return code;
}

static guint8 *
mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
{
	MonoInst *inst;

	g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);

	inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];

	if (IS_HARD_FLOAT) {
		if (!arm_is_fpimm8 (inst->inst_offset)) {
			code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
			ARM_FLDD (code, reg, ARMREG_LR, 0);
		} else
			ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
	}

	return code;
}

/*
 * emit_restore_lmf:
 *
 * Emit code to pop an LMF structure from the LMF stack.
 */
static guint8*
emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
{
	int basereg, offset;

	if (lmf_offset < 32) {
		basereg = cfg->frame_reg;
		offset = lmf_offset;
	} else {
		basereg = ARMREG_R2;
		offset = 0;
		code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
	}

	/* ip = previous_lmf */
	ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
	/* lr = lmf_addr */
	ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
	/* *(lmf_addr) = previous_lmf */
	ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));

	return code;
}

#endif /* #ifndef DISABLE_JIT */

/*
 * mono_arch_get_argument_info:
 * @csig: a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the activation frame.
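 *
 * Worked example (illustrative, not part of the original comment): for
 * "int f (int a, long b)" with no this pointer, the loop below assigns
 * a 4 bytes and b 8 bytes with no padding (alignment is currently
 * ignored), so arg_info [1].offset is 8, arg_info [2].offset is 12, and
 * the returned frame size is 12 rounded up to MONO_ARCH_FRAME_ALIGNMENT.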
*/ int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) { int k, frame_size = 0; guint32 size, align, pad; int offset = 8; MonoType *t; t = mini_get_underlying_type (csig->ret); if (MONO_TYPE_ISSTRUCT (t)) { frame_size += sizeof (target_mgreg_t); offset += 4; } arg_info [0].offset = offset; if (csig->hasthis) { frame_size += sizeof (target_mgreg_t); offset += 4; } arg_info [0].size = frame_size; for (k = 0; k < param_count; k++) { size = mini_type_stack_size_full (csig->params [k], &align, csig->pinvoke && !csig->marshalling_disabled); /* ignore alignment for now */ align = 1; frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; frame_size += size; arg_info [k + 1].pad = 0; arg_info [k + 1].size = size; offset += pad; arg_info [k + 1].offset = offset; offset += size; } align = MONO_ARCH_FRAME_ALIGNMENT; frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; return frame_size; } #define MAX_ARCH_DELEGATE_PARAMS 3 static guint8* get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, gboolean param_count) { guint8 *code, *start; GSList *unwind_ops = mono_arch_get_cie_program (); if (has_target) { start = code = mono_global_codeman_reserve (12); /* Replace the this argument with the target */ ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target)); ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP); g_assert ((code - start) <= 12); mono_arch_flush_icache (start, 12); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); } else { int size, i; size = 8 + param_count * 4; start = code = mono_global_codeman_reserve (size); ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); /* slide down the arguments */ for (i = 0; i < param_count; ++i) { ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1)); } ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP); g_assert ((code - start) <= size); mono_arch_flush_icache (start, size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); } if (has_target) { *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops); } else { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count); *info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops); g_free (name); } MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); return start; } /* * mono_arch_get_delegate_invoke_impls: * * Return a list of MonoAotTrampInfo structures for the delegate invoke impl * trampolines. 
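 *
 * Sketch of the result (inferred from the code below, not part of the
 * original comment): one "has target" variant plus one variant for each
 * parameter count from 0 to MAX_ARCH_DELEGATE_PARAMS; every trampoline
 * loads method_ptr from the MonoDelegate, fixes up the argument
 * registers and branches to it.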
 */
GSList*
mono_arch_get_delegate_invoke_impls (void)
{
	GSList *res = NULL;
	MonoTrampInfo *info;
	int i;

	get_delegate_invoke_impl (&info, TRUE, 0);
	res = g_slist_prepend (res, info);

	for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
		get_delegate_invoke_impl (&info, FALSE, i);
		res = g_slist_prepend (res, info);
	}

	return res;
}

gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
	guint8 *code, *start;
	MonoType *sig_ret;

	/* FIXME: Support more cases */
	sig_ret = mini_get_underlying_type (sig->ret);
	if (MONO_TYPE_ISSTRUCT (sig_ret))
		return NULL;

	if (has_target) {
		static guint8* cached = NULL;
		mono_mini_arch_lock ();
		if (cached) {
			mono_mini_arch_unlock ();
			return cached;
		}

		if (mono_ee_features.use_aot_trampolines) {
			start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
		} else {
			MonoTrampInfo *info;
			start = get_delegate_invoke_impl (&info, TRUE, 0);
			mono_tramp_info_register (info, NULL);
		}
		cached = start;
		mono_mini_arch_unlock ();
		return cached;
	} else {
		static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
		int i;

		if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
			return NULL;
		for (i = 0; i < sig->param_count; ++i)
			if (!mono_is_regsize_var (sig->params [i]))
				return NULL;

		mono_mini_arch_lock ();
		code = cache [sig->param_count];
		if (code) {
			mono_mini_arch_unlock ();
			return code;
		}

		if (mono_ee_features.use_aot_trampolines) {
			char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
			start = (guint8*)mono_aot_get_trampoline (name);
			g_free (name);
		} else {
			MonoTrampInfo *info;
			start = get_delegate_invoke_impl (&info, FALSE, sig->param_count);
			mono_tramp_info_register (info, NULL);
		}
		cache [sig->param_count] = start;
		mono_mini_arch_unlock ();
		return start;
	}

	return NULL;
}

gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
	return NULL;
}

gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
	return (gpointer)regs [ARMREG_R0];
}

/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
	i8_align = MONO_ABI_ALIGNOF (gint64);
#ifdef MONO_CROSS_COMPILE
	/* Need to set the alignment of i8 since it can be different on the target */
#ifdef TARGET_ANDROID
	/* linux gnueabi */
	mono_type_set_alignment (MONO_TYPE_I8, i8_align);
#endif
#endif
}

/*
 * Initialize architecture specific code.
 */
void
mono_arch_init (void)
{
	char *cpu_arch;

#ifdef TARGET_WATCHOS
	mini_debug_options.soft_breakpoints = TRUE;
#endif

	mono_os_mutex_init_recursive (&mini_arch_mutex);
	if (mini_debug_options.soft_breakpoints) {
		if (!mono_aot_only)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();
	} else {
		ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER);
		bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER);
		mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
	}

#if defined(__ARM_EABI__)
	eabi_supported = TRUE;
#endif

#if defined(ARM_FPU_VFP_HARD)
	arm_fpu = MONO_ARM_FPU_VFP_HARD;
#else
	arm_fpu = MONO_ARM_FPU_VFP;

#if defined(ARM_FPU_NONE) && !defined(TARGET_IOS)
	/*
	 * If we're compiling with a soft float fallback and it
	 * turns out that no VFP unit is available, we need to
	 * switch to soft float. We don't do this for iOS, since
	 * iOS devices always have a VFP unit.
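	 *
	 * Usage sketch (an illustration, not part of the original comment):
	 * the fallback can be forced from a shell for testing, e.g.
	 *
	 *   MONO_ARM_FORCE_SOFT_FLOAT=1 mono app.exe
	 *
	 * which is what the g_getenv () check below implements.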
*/ if (!mono_hwcap_arm_has_vfp) arm_fpu = MONO_ARM_FPU_NONE; /* * This environment variable can be useful in testing * environments to make sure the soft float fallback * works. Most ARM devices have VFP units these days, so * normally soft float code would not be exercised much. */ char *soft = g_getenv ("MONO_ARM_FORCE_SOFT_FLOAT"); if (soft && !strncmp (soft, "1", 1)) arm_fpu = MONO_ARM_FPU_NONE; g_free (soft); #endif #endif v5_supported = mono_hwcap_arm_is_v5; v6_supported = mono_hwcap_arm_is_v6; v7_supported = mono_hwcap_arm_is_v7; /* * On weird devices, the hwcap code may fail to detect * the ARM version. In that case, we can at least safely * assume the version the runtime was compiled for. */ #ifdef HAVE_ARMV5 v5_supported = TRUE; #endif #ifdef HAVE_ARMV6 v6_supported = TRUE; #endif #ifdef HAVE_ARMV7 v7_supported = TRUE; #endif #if defined(TARGET_IOS) /* iOS is special-cased here because we don't yet have a way to properly detect CPU features on it. */ thumb_supported = TRUE; iphone_abi = TRUE; #elif defined(TARGET_ANDROID) thumb_supported = TRUE; #else thumb_supported = mono_hwcap_arm_has_thumb; thumb2_supported = mono_hwcap_arm_has_thumb2; #endif /* Format: armv(5|6|7[s])[-thumb[2]] */ cpu_arch = g_getenv ("MONO_CPU_ARCH"); /* Do this here so it overrides any detection. */ if (cpu_arch) { if (strncmp (cpu_arch, "armv", 4) == 0) { v5_supported = cpu_arch [4] >= '5'; v6_supported = cpu_arch [4] >= '6'; v7_supported = cpu_arch [4] >= '7'; v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0; v7k_supported = strncmp (cpu_arch, "armv7k", 6) == 0; } thumb_supported = strstr (cpu_arch, "thumb") != NULL; thumb2_supported = strstr (cpu_arch, "thumb2") != NULL; g_free (cpu_arch); } } /* * Cleanup architecture specific code. */ void mono_arch_cleanup (void) { } /* * This function returns the optimizations supported on this cpu. 
*/ guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask) { /* no arm-specific optimizations yet */ *exclude_mask = 0; return 0; } gboolean mono_arm_is_hard_float (void) { return arm_fpu == MONO_ARM_FPU_VFP_HARD; } #ifndef DISABLE_JIT gboolean mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode) { if (v7s_supported || v7k_supported) { switch (opcode) { case OP_IDIV: case OP_IREM: case OP_IDIV_UN: case OP_IREM_UN: return FALSE; default: break; } } return TRUE; } #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK gboolean mono_arch_is_soft_float (void) { return arm_fpu == MONO_ARM_FPU_NONE; } #endif static gboolean is_regsize_var (MonoType *t) { if (m_type_is_byref (t)) return TRUE; t = mini_get_underlying_type (t); switch (t->type) { case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: return TRUE; case MONO_TYPE_OBJECT: return TRUE; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (t)) return TRUE; return FALSE; case MONO_TYPE_VALUETYPE: return FALSE; } return FALSE; } GList * mono_arch_get_allocatable_int_vars (MonoCompile *cfg) { GList *vars = NULL; int i; for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); /* unused vars */ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos) continue; if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG)) continue; /* we can only allocate 32 bit values */ if (is_regsize_var (ins->inst_vtype)) { g_assert (MONO_VARINFO (cfg, i)->reg == -1); g_assert (i == vmv->idx); vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE); } } return vars; } GList * mono_arch_get_global_int_regs (MonoCompile *cfg) { GList *regs = NULL; mono_arch_compute_omit_fp (cfg); /* * FIXME: Interface calls might go through a static rgctx trampoline which * sets V5, but it doesn't save it, so we need to save it ourselves, and * avoid using it. */ if (cfg->flags & MONO_CFG_HAS_CALLS) cfg->uses_rgctx_reg = TRUE; if (cfg->arch.omit_fp) regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP)); regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1)); regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2)); regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3)); if (iphone_abi) /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */ regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7)); else regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4)); if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))) /* V5 is reserved for passing the vtable/rgctx/IMT method */ regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5)); /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/ /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/ return regs; } /* * mono_arch_regalloc_cost: * * Return the cost, in number of memory references, of the action of * allocating the variable VMV into a register during global register * allocation. 
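 *
 * Reading of the constant below (an interpretation, not from the
 * original comment): keeping the variable in memory costs roughly one
 * load and one store per use, hence the flat cost of 2 until a real
 * heuristic replaces the FIXME.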
*/ guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv) { /* FIXME: */ return 2; } #endif /* #ifndef DISABLE_JIT */ void mono_arch_flush_icache (guint8 *code, gint size) { #if defined(MONO_CROSS_COMPILE) #elif __APPLE__ sys_icache_invalidate (code, size); #else __builtin___clear_cache ((char*)code, (char*)code + size); #endif } #define DEBUG(a) static void inline add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple) { if (simple) { if (*gr > ARMREG_R3) { ainfo->size = 4; ainfo->offset = *stack_size; ainfo->reg = ARMREG_SP; /* in the caller */ ainfo->storage = RegTypeBase; *stack_size += 4; } else { ainfo->storage = RegTypeGeneral; ainfo->reg = *gr; } } else { gboolean split; if (eabi_supported) split = i8_align == 4; else split = TRUE; ainfo->size = 8; if (*gr == ARMREG_R3 && split) { /* first word in r3 and the second on the stack */ ainfo->offset = *stack_size; ainfo->reg = ARMREG_SP; /* in the caller */ ainfo->storage = RegTypeBaseGen; *stack_size += 4; } else if (*gr >= ARMREG_R3) { if (eabi_supported) { /* darwin aligns longs to 4 byte only */ if (i8_align == 8) { *stack_size += 7; *stack_size &= ~7; } } ainfo->offset = *stack_size; ainfo->reg = ARMREG_SP; /* in the caller */ ainfo->storage = RegTypeBase; *stack_size += 8; } else { if (eabi_supported) { if (i8_align == 8 && ((*gr) & 1)) (*gr) ++; } ainfo->storage = RegTypeIRegPair; ainfo->reg = *gr; } (*gr) ++; } (*gr) ++; } static void inline add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare) { /* * If we're calling a function like this: * * void foo(float a, double b, float c) * * We pass a in s0 and b in d1. That leaves us * with s1 being unused. The armhf ABI recognizes * this and requires register assignment to then * use that for the next single-precision arg, * i.e. c in this example. So float_spare either * tells us which reg to use for the next single- * precision arg, or it's -1, meaning use *fpr. * * Note that even though most of the JIT speaks * double-precision, fpr represents single- * precision registers. * * See parts 5.5 and 6.1.2 of the AAPCS for how * this all works. */ if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) { ainfo->storage = RegTypeFP; if (is_double) { /* * If we're passing a double-precision value * and *fpr is odd (e.g. it's s1, s3, ...) * we need to use the next even register. So * we mark the current *fpr as a spare that * can be used for the next single-precision * value. */ if (*fpr % 2) { *float_spare = *fpr; (*fpr)++; } /* * At this point, we have an even register * so we assign that and move along. */ ainfo->reg = *fpr; *fpr += 2; } else if (*float_spare >= 0) { /* * We're passing a single-precision value * and it looks like a spare single- * precision register is available. Let's * use it. */ ainfo->reg = *float_spare; *float_spare = -1; } else { /* * If we hit this branch, we're passing a * single-precision value and we can simply * use the next available register. */ ainfo->reg = *fpr; (*fpr)++; } } else { /* * We've exhausted available floating point * regs, so pass the rest on the stack. */ if (is_double) { *stack_size += 7; *stack_size &= ~7; } ainfo->offset = *stack_size; ainfo->reg = ARMREG_SP; ainfo->storage = RegTypeBase; *stack_size += is_double ? 
8 : 4; } } static gboolean is_hfa (MonoType *t, int *out_nfields, int *out_esize) { MonoClass *klass; gpointer iter; MonoClassField *field; MonoType *ftype, *prev_ftype = NULL; int nfields = 0; klass = mono_class_from_mono_type_internal (t); iter = NULL; while ((field = mono_class_get_fields_internal (klass, &iter))) { if (field->type->attrs & FIELD_ATTRIBUTE_STATIC) continue; ftype = mono_field_get_type_internal (field); ftype = mini_get_underlying_type (ftype); if (MONO_TYPE_ISSTRUCT (ftype)) { int nested_nfields, nested_esize; if (!is_hfa (ftype, &nested_nfields, &nested_esize)) return FALSE; if (nested_esize == 4) ftype = m_class_get_byval_arg (mono_defaults.single_class); else ftype = m_class_get_byval_arg (mono_defaults.double_class); if (prev_ftype && prev_ftype->type != ftype->type) return FALSE; prev_ftype = ftype; nfields += nested_nfields; } else { if (!(!m_type_is_byref (ftype) && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8))) return FALSE; if (prev_ftype && prev_ftype->type != ftype->type) return FALSE; prev_ftype = ftype; nfields ++; } } if (nfields == 0 || nfields > 4) return FALSE; *out_nfields = nfields; *out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8; return TRUE; } static CallInfo* get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { guint i, gr, fpr, pstart; gint float_spare; int n = sig->hasthis + sig->param_count; int nfields, esize; guint32 align; MonoType *t; guint32 stack_size = 0; CallInfo *cinfo; gboolean is_pinvoke = sig->pinvoke; gboolean vtype_retaddr = FALSE; if (mp) cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n)); else cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n)); cinfo->nargs = n; gr = ARMREG_R0; fpr = ARM_VFP_F0; float_spare = -1; t = mini_get_underlying_type (sig->ret); switch (t->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: cinfo->ret.storage = RegTypeGeneral; cinfo->ret.reg = ARMREG_R0; break; case MONO_TYPE_U8: case MONO_TYPE_I8: cinfo->ret.storage = RegTypeIRegPair; cinfo->ret.reg = ARMREG_R0; break; case MONO_TYPE_R4: case MONO_TYPE_R8: cinfo->ret.storage = RegTypeFP; if (t->type == MONO_TYPE_R4) cinfo->ret.size = 4; else cinfo->ret.size = 8; if (IS_HARD_FLOAT) { cinfo->ret.reg = ARM_VFP_F0; } else { cinfo->ret.reg = ARMREG_R0; } break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (t)) { cinfo->ret.storage = RegTypeGeneral; cinfo->ret.reg = ARMREG_R0; break; } if (mini_is_gsharedvt_variable_type (t)) { cinfo->ret.storage = RegTypeStructByAddr; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) { cinfo->ret.storage = RegTypeHFA; cinfo->ret.reg = 0; cinfo->ret.nregs = nfields; cinfo->ret.esize = esize; } else { if (sig->pinvoke && !sig->marshalling_disabled) { int native_size = mono_class_native_size (mono_class_from_mono_type_internal (t), &align); int max_size; #ifdef TARGET_WATCHOS max_size = 16; #else max_size = 4; #endif if (native_size <= max_size) { cinfo->ret.storage = RegTypeStructByVal; cinfo->ret.struct_size = native_size; cinfo->ret.nregs = ALIGN_TO (native_size, 4) / 4; } else { cinfo->ret.storage = RegTypeStructByAddr; } } else { cinfo->ret.storage = RegTypeStructByAddr; } } break; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: g_assert (mini_is_gsharedvt_type (t)); 
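		/*
		 * Illustrative note (not in the original source): gsharedvt return
		 * values have a size that is unknown until runtime, so they are
		 * always returned through a hidden address argument, like large
		 * value types above.
		 */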
cinfo->ret.storage = RegTypeStructByAddr; break; case MONO_TYPE_VOID: break; default: g_error ("Can't handle as return value 0x%x", sig->ret->type); } vtype_retaddr = cinfo->ret.storage == RegTypeStructByAddr; pstart = 0; n = 0; /* * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after * the first argument, allowing 'this' to be always passed in the first arg reg. * Also do this if the first argument is a reference type, since virtual calls * are sometimes made using calli without sig->hasthis set, like in the delegate * invoke wrappers. */ if (vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) { if (sig->hasthis) { add_general (&gr, &stack_size, cinfo->args + 0, TRUE); } else { add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE); pstart = 1; } n ++; cinfo->ret.reg = gr; gr ++; cinfo->vret_arg_index = 1; } else { /* this */ if (sig->hasthis) { add_general (&gr, &stack_size, cinfo->args + 0, TRUE); n ++; } if (vtype_retaddr) { cinfo->ret.reg = gr; gr ++; } } DEBUG(g_print("params: %d\n", sig->param_count)); for (i = pstart; i < sig->param_count; ++i) { ArgInfo *ainfo = &cinfo->args [n]; if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ gr = ARMREG_R3 + 1; fpr = ARM_VFP_F16; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE); } DEBUG(g_print("param %d: ", i)); if (m_type_is_byref (sig->params [i])) { DEBUG(g_print("byref\n")); add_general (&gr, &stack_size, ainfo, TRUE); n++; continue; } t = mini_get_underlying_type (sig->params [i]); switch (t->type) { case MONO_TYPE_I1: cinfo->args [n].is_signed = 1; case MONO_TYPE_U1: cinfo->args [n].size = 1; add_general (&gr, &stack_size, ainfo, TRUE); break; case MONO_TYPE_I2: cinfo->args [n].is_signed = 1; case MONO_TYPE_U2: cinfo->args [n].size = 2; add_general (&gr, &stack_size, ainfo, TRUE); break; case MONO_TYPE_I4: case MONO_TYPE_U4: cinfo->args [n].size = 4; add_general (&gr, &stack_size, ainfo, TRUE); break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: cinfo->args [n].size = sizeof (target_mgreg_t); add_general (&gr, &stack_size, ainfo, TRUE); break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (t)) { cinfo->args [n].size = sizeof (target_mgreg_t); add_general (&gr, &stack_size, ainfo, TRUE); break; } if (mini_is_gsharedvt_variable_type (t)) { /* gsharedvt arguments are passed by ref */ g_assert (mini_is_gsharedvt_type (t)); add_general (&gr, &stack_size, ainfo, TRUE); switch (ainfo->storage) { case RegTypeGeneral: ainfo->storage = RegTypeGSharedVtInReg; break; case RegTypeBase: ainfo->storage = RegTypeGSharedVtOnStack; break; default: g_assert_not_reached (); } break; } /* Fall through */ case MONO_TYPE_TYPEDBYREF: case MONO_TYPE_VALUETYPE: { gint size; int align_size; int nwords, nfields, esize; guint32 align; if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) { if (fpr + nfields < ARM_VFP_F16) { ainfo->storage = RegTypeHFA; ainfo->reg = fpr; ainfo->nregs = nfields; ainfo->esize = esize; if (esize == 4) fpr += nfields; else fpr += nfields * 2; break; } else { fpr = ARM_VFP_F16; } } if (t->type == MONO_TYPE_TYPEDBYREF) { size = MONO_ABI_SIZEOF (MonoTypedRef); align = sizeof (target_mgreg_t); } else { MonoClass *klass = 
mono_class_from_mono_type_internal (sig->params [i]); if (sig->pinvoke && !sig->marshalling_disabled) size = mono_class_native_size (klass, &align); else size = mini_type_stack_size_full (t, &align, FALSE); } DEBUG(g_print ("load %d bytes struct\n", size)); #ifdef TARGET_WATCHOS /* Watchos pass large structures by ref */ /* We only do this for pinvoke to make gsharedvt/dyncall simpler */ if (sig->pinvoke && size > 16) { add_general (&gr, &stack_size, ainfo, TRUE); switch (ainfo->storage) { case RegTypeGeneral: ainfo->storage = RegTypeStructByAddr; break; case RegTypeBase: ainfo->storage = RegTypeStructByAddrOnStack; break; default: g_assert_not_reached (); break; } break; } #endif align_size = size; nwords = 0; align_size += (sizeof (target_mgreg_t) - 1); align_size &= ~(sizeof (target_mgreg_t) - 1); nwords = (align_size + sizeof (target_mgreg_t) -1 ) / sizeof (target_mgreg_t); ainfo->storage = RegTypeStructByVal; ainfo->struct_size = size; ainfo->align = align; if (eabi_supported) { if (align >= 8 && (gr & 1)) gr ++; } if (gr > ARMREG_R3) { ainfo->size = 0; ainfo->vtsize = nwords; } else { int rest = ARMREG_R3 - gr + 1; int n_in_regs = rest >= nwords? nwords: rest; ainfo->size = n_in_regs; ainfo->vtsize = nwords - n_in_regs; ainfo->reg = gr; gr += n_in_regs; nwords -= n_in_regs; } stack_size = ALIGN_TO (stack_size, align); ainfo->offset = stack_size; /*g_print ("offset for arg %d at %d\n", n, stack_size);*/ stack_size += nwords * sizeof (target_mgreg_t); break; } case MONO_TYPE_U8: case MONO_TYPE_I8: ainfo->size = 8; add_general (&gr, &stack_size, ainfo, FALSE); break; case MONO_TYPE_R4: ainfo->size = 4; if (IS_HARD_FLOAT) add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare); else add_general (&gr, &stack_size, ainfo, TRUE); break; case MONO_TYPE_R8: ainfo->size = 8; if (IS_HARD_FLOAT) add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare); else add_general (&gr, &stack_size, ainfo, FALSE); break; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: /* gsharedvt arguments are passed by ref */ g_assert (mini_is_gsharedvt_type (t)); add_general (&gr, &stack_size, ainfo, TRUE); switch (ainfo->storage) { case RegTypeGeneral: ainfo->storage = RegTypeGSharedVtInReg; break; case RegTypeBase: ainfo->storage = RegTypeGSharedVtOnStack; break; default: g_assert_not_reached (); } break; default: g_error ("Can't handle 0x%x", sig->params [i]->type); } n ++; } /* Handle the case where there are no implicit arguments */ if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ gr = ARMREG_R3 + 1; fpr = ARM_VFP_F16; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE); } DEBUG (g_print (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size)); stack_size = ALIGN_TO (stack_size, MONO_ARCH_FRAME_ALIGNMENT); cinfo->stack_usage = stack_size; return cinfo; } /* * We need to create a temporary value if the argument is not stored in * a linear memory range in the ccontext (this normally happens for * value types if they are passed both by stack and regs). 
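 *
 * Concrete case (an illustration, not from the original comment): a
 * 12-byte struct passed when only r2 and r3 are still free gets 8 bytes
 * in ccontext->gregs [] and 4 bytes in ccontext->stack; since those are
 * two separate buffers, a contiguous temporary of struct_size bytes is
 * needed to hand the value over in one piece.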
*/ static int arg_need_temp (ArgInfo *ainfo) { if (ainfo->storage == RegTypeStructByVal && ainfo->vtsize) return ainfo->struct_size; return 0; } static gpointer arg_get_storage (CallContext *ccontext, ArgInfo *ainfo) { switch (ainfo->storage) { case RegTypeIRegPair: case RegTypeGeneral: case RegTypeStructByVal: return &ccontext->gregs [ainfo->reg]; case RegTypeHFA: case RegTypeFP: if (IS_HARD_FLOAT) return &ccontext->fregs [ainfo->reg]; else return &ccontext->gregs [ainfo->reg]; case RegTypeBase: return ccontext->stack + ainfo->offset; default: g_error ("Arg storage type not yet supported"); } } static void arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest) { int reg_size = ainfo->size * sizeof (host_mgreg_t); g_assert (arg_need_temp (ainfo)); memcpy (dest, &ccontext->gregs [ainfo->reg], reg_size); memcpy ((host_mgreg_t*)dest + ainfo->size, ccontext->stack + ainfo->offset, ainfo->struct_size - reg_size); } static void arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src) { int reg_size = ainfo->size * sizeof (host_mgreg_t); g_assert (arg_need_temp (ainfo)); memcpy (&ccontext->gregs [ainfo->reg], src, reg_size); memcpy (ccontext->stack + ainfo->offset, (host_mgreg_t*)src + ainfo->size, ainfo->struct_size - reg_size); } /* Set arguments in the ccontext (for i2n entry) */ void mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { const MonoEECallbacks *interp_cb = mini_get_interp_callbacks (); CallInfo *cinfo = get_call_info (NULL, sig); gpointer storage; ArgInfo *ainfo; memset (ccontext, 0, sizeof (CallContext)); ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT); if (ccontext->stack_size) ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size); if (sig->ret->type != MONO_TYPE_VOID) { ainfo = &cinfo->ret; if (ainfo->storage == RegTypeStructByAddr) { storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1); ccontext->gregs [cinfo->ret.reg] = (host_mgreg_t)(gsize)storage; } } g_assert (!sig->hasthis); for (int i = 0; i < sig->param_count; i++) { ainfo = &cinfo->args [i]; int temp_size = arg_need_temp (ainfo); if (temp_size) storage = alloca (temp_size); // FIXME? 
alloca in a loop else storage = arg_get_storage (ccontext, ainfo); interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage); if (temp_size) arg_set_val (ccontext, ainfo, storage); } g_free (cinfo); } /* Set return value in the ccontext (for n2i return) */ void mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp) { const MonoEECallbacks *interp_cb; CallInfo *cinfo; gpointer storage; ArgInfo *ainfo; if (sig->ret->type == MONO_TYPE_VOID) return; interp_cb = mini_get_interp_callbacks (); cinfo = get_call_info (NULL, sig); ainfo = &cinfo->ret; if (retp) { g_assert (ainfo->storage == RegTypeStructByAddr); interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, retp); } else { g_assert (ainfo->storage != RegTypeStructByAddr); g_assert (!arg_need_temp (ainfo)); storage = arg_get_storage (ccontext, ainfo); memset (ccontext, 0, sizeof (CallContext)); // FIXME interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage); } g_free (cinfo); } /* Gets the arguments from ccontext (for n2i entry) */ gpointer mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { const MonoEECallbacks *interp_cb = mini_get_interp_callbacks (); CallInfo *cinfo = get_call_info (NULL, sig); gpointer storage; ArgInfo *ainfo; for (int i = 0; i < sig->param_count + sig->hasthis; i++) { ainfo = &cinfo->args [i]; int temp_size = arg_need_temp (ainfo); if (temp_size) { storage = alloca (temp_size); // FIXME? alloca in a loop arg_get_val (ccontext, ainfo, storage); } else { storage = arg_get_storage (ccontext, ainfo); } interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage); } storage = NULL; if (sig->ret->type != MONO_TYPE_VOID) { ainfo = &cinfo->ret; if (ainfo->storage == RegTypeStructByAddr) storage = (gpointer)(gsize)ccontext->gregs [cinfo->ret.reg]; } g_free (cinfo); return storage; } /* Gets the return value from ccontext (for i2n exit) */ void mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { const MonoEECallbacks *interp_cb; CallInfo *cinfo; ArgInfo *ainfo; gpointer storage; if (sig->ret->type == MONO_TYPE_VOID) return; interp_cb = mini_get_interp_callbacks (); cinfo = get_call_info (NULL, sig); ainfo = &cinfo->ret; if (ainfo->storage != RegTypeStructByAddr) { g_assert (!arg_need_temp (ainfo)); storage = arg_get_storage (ccontext, ainfo); interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage); } g_free (cinfo); } #ifndef DISABLE_JIT gboolean mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_) { g_assert (caller_sig); g_assert (callee_sig); CallInfo *caller_info = get_call_info (NULL, caller_sig); CallInfo *callee_info = get_call_info (NULL, callee_sig); /* * Tailcalls with more callee stack usage than the caller cannot be supported, since * the extra stack space would be left on the stack after the tailcall. */ gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage) && IS_SUPPORTED_TAILCALL (caller_info->ret.storage == callee_info->ret.storage); // FIXME The limit here is that moving the parameters requires addressing the parameters // with 12bit (4K) immediate offsets. 
- 4 for TAILCALL_REG/MEMBASE res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (4096 - 4)); res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (4096 - 4)); g_free (caller_info); g_free (callee_info); return res; } static gboolean debug_omit_fp (void) { #if 0 return mono_debug_count (); #else return TRUE; #endif } /** * mono_arch_compute_omit_fp: * Determine whether the frame pointer can be eliminated. */ static void mono_arch_compute_omit_fp (MonoCompile *cfg) { MonoMethodSignature *sig; MonoMethodHeader *header; int i, locals_size; CallInfo *cinfo; if (cfg->arch.omit_fp_computed) return; header = cfg->header; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; /* * FIXME: Remove some of the restrictions. */ cfg->arch.omit_fp = TRUE; cfg->arch.omit_fp_computed = TRUE; if (cfg->disable_omit_fp) cfg->arch.omit_fp = FALSE; if (!debug_omit_fp ()) cfg->arch.omit_fp = FALSE; /* if (cfg->method->save_lmf) cfg->arch.omit_fp = FALSE; */ if (cfg->flags & MONO_CFG_HAS_ALLOCA) cfg->arch.omit_fp = FALSE; if (header->num_clauses) cfg->arch.omit_fp = FALSE; if (cfg->param_area) cfg->arch.omit_fp = FALSE; if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) cfg->arch.omit_fp = FALSE; if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))) cfg->arch.omit_fp = FALSE; for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = &cinfo->args [i]; if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) { /* * The stack offset can only be determined when the frame * size is known. */ cfg->arch.omit_fp = FALSE; } } locals_size = 0; for (i = cfg->locals_start; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; int ialign; locals_size += mono_type_size (ins->inst_vtype, &ialign); } } /* * Set var information according to the calling convention. arm version. * The locals var stuff should most likely be split in another method. 
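 *
 * Layout sketch (inferred from the code below, not part of the original
 * comment): offsets grow upwards from the frame register
 * (MONO_CFG_HAS_SPILLUP), starting with the callee parameter area,
 * followed by the return-value, seq-point and atomic temporaries, then
 * the locals, and finally incoming arguments that live on the stack.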
*/ void mono_arch_allocate_vars (MonoCompile *cfg) { MonoMethodSignature *sig; MonoMethodHeader *header; MonoInst *ins; MonoType *sig_ret; int i, offset, size, align, curinst; CallInfo *cinfo; ArgInfo *ainfo; guint32 ualign; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; sig_ret = mini_get_underlying_type (sig->ret); mono_arch_compute_omit_fp (cfg); if (cfg->arch.omit_fp) cfg->frame_reg = ARMREG_SP; else cfg->frame_reg = ARMREG_FP; cfg->flags |= MONO_CFG_HAS_SPILLUP; /* allow room for the vararg method args: void* and long/double */ if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) cfg->param_area = MAX (cfg->param_area, sizeof (target_mgreg_t)*8); header = cfg->header; /* See mono_arch_get_global_int_regs () */ if (cfg->flags & MONO_CFG_HAS_CALLS) cfg->uses_rgctx_reg = TRUE; if (cfg->frame_reg != ARMREG_SP) cfg->used_int_regs |= 1 << cfg->frame_reg; if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)) /* V5 is reserved for passing the vtable/rgctx/IMT method */ cfg->used_int_regs |= (1 << MONO_ARCH_IMT_REG); offset = 0; curinst = 0; if (!MONO_TYPE_ISSTRUCT (sig_ret) && cinfo->ret.storage != RegTypeStructByAddr) { if (sig_ret->type != MONO_TYPE_VOID) { cfg->ret->opcode = OP_REGVAR; cfg->ret->inst_c0 = ARMREG_R0; } } /* local vars are at a positive offset from the stack pointer */ /* * also note that if the function uses alloca, we use FP * to point at the local variables. */ offset = 0; /* linkage area */ /* align the offset to 16 bytes: not sure this is needed here */ //offset += 8 - 1; //offset &= ~(8 - 1); /* add parameter area size for called functions */ offset += cfg->param_area; offset += 8 - 1; offset &= ~(8 - 1); if (cfg->flags & MONO_CFG_HAS_FPOUT) offset += 8; /* allow room to save the return value */ if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) offset += 8; switch (cinfo->ret.storage) { case RegTypeStructByVal: case RegTypeHFA: /* Allocate a local to hold the result, the epilog will copy it to the correct place */ offset = ALIGN_TO (offset, 8); cfg->ret->opcode = OP_REGOFFSET; cfg->ret->inst_basereg = cfg->frame_reg; cfg->ret->inst_offset = offset; if (cinfo->ret.storage == RegTypeStructByVal) offset += cinfo->ret.nregs * sizeof (target_mgreg_t); else offset += 32; break; case RegTypeStructByAddr: ins = cfg->vret_addr; offset += sizeof (target_mgreg_t) - 1; offset &= ~(sizeof (target_mgreg_t) - 1); ins->inst_offset = offset; ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; if (G_UNLIKELY (cfg->verbose_level > 1)) { g_print ("vret_addr ="); mono_print_ins (cfg->vret_addr); } offset += sizeof (target_mgreg_t); break; default: break; } /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */ if (cfg->arch.seq_point_info_var) { MonoInst *ins; ins = cfg->arch.seq_point_info_var; size = 4; align = 4; offset += align - 1; offset &= ~(align - 1); ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; ins->inst_offset = offset; offset += size; } if (cfg->arch.ss_trigger_page_var) { MonoInst *ins; ins = cfg->arch.ss_trigger_page_var; size = 4; align = 4; offset += align - 1; offset &= ~(align - 1); ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; ins->inst_offset = offset; offset += size; } if (cfg->arch.seq_point_ss_method_var) { MonoInst *ins; ins = cfg->arch.seq_point_ss_method_var; size = 4; align = 4; offset += align - 1; offset &= ~(align - 1); ins->opcode = 
OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}

	if (cfg->arch.seq_point_bp_method_var) {
		MonoInst *ins;

		ins = cfg->arch.seq_point_bp_method_var;
		size = 4;
		align = 4;

		offset += align - 1;
		offset &= ~(align - 1);

		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}

	if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
		/* Allocate a temporary used by the atomic ops */
		size = 4;
		align = 4;

		offset += align - 1;
		offset &= ~(align - 1);
		cfg->arch.atomic_tmp_offset = offset;
		offset += size;
	} else {
		cfg->arch.atomic_tmp_offset = -1;
	}

	cfg->locals_min_stack_offset = offset;

	curinst = cfg->locals_start;
	for (i = curinst; i < cfg->num_varinfo; ++i) {
		MonoType *t;

		ins = cfg->varinfo [i];
		if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
			continue;
		t = ins->inst_vtype;
		if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
			continue;

		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
		 * pinvoke wrappers when they call functions returning structures */
		if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
			size = mono_class_native_size (mono_class_from_mono_type_internal (t), &ualign);
			align = ualign;
		} else
			size = mono_type_size (t, &align);

		/* FIXME: if a structure is misaligned, our memcpy doesn't work,
		 * since it loads/stores misaligned words, which don't do the right thing.
		 */
		if (align < 4 && size >= 4)
			align = 4;
		if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
			mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_offset = offset;
		ins->inst_basereg = cfg->frame_reg;
		offset += size;
		//g_print ("allocating local %d to %d\n", i, inst->inst_offset);
	}

	cfg->locals_max_stack_offset = offset;

	curinst = 0;
	if (sig->hasthis) {
		ins = cfg->args [curinst];
		if (ins->opcode != OP_REGVAR) {
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			offset += sizeof (target_mgreg_t) - 1;
			offset &= ~(sizeof (target_mgreg_t) - 1);
			ins->inst_offset = offset;
			offset += sizeof (target_mgreg_t);
		}
		curinst++;
	}

	if (sig->call_convention == MONO_CALL_VARARG) {
		size = 4;
		align = 4;

		/* Allocate a local slot to hold the sig cookie address */
		offset += align - 1;
		offset &= ~(align - 1);
		cfg->sig_cookie = offset;
		offset += size;
	}

	for (i = 0; i < sig->param_count; ++i) {
		ainfo = cinfo->args + i;

		ins = cfg->args [curinst];

		switch (ainfo->storage) {
		case RegTypeHFA:
			offset = ALIGN_TO (offset, 8);
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			/* These arguments are saved to the stack in the prolog */
			ins->inst_offset = offset;
			if (cfg->verbose_level >= 2)
				g_print ("arg %d allocated to %s+0x%0x.\n", i, mono_arch_regname (ins->inst_basereg), (int)ins->inst_offset);
			// FIXME:
			offset += 32;
			break;
		default:
			break;
		}

		if (ins->opcode != OP_REGVAR) {
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			size = mini_type_stack_size_full (sig->params [i], &ualign, sig->pinvoke && !sig->marshalling_disabled);
			align = ualign;
			/* FIXME: if a structure is misaligned, our memcpy doesn't work,
			 * since it loads/stores misaligned words, which don't do the right thing.
*/ if (align < 4 && size >= 4) align = 4; /* The code in the prolog () stores words when storing vtypes received in a register */ if (MONO_TYPE_ISSTRUCT (sig->params [i])) align = 4; if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4)) mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF); offset += align - 1; offset &= ~(align - 1); ins->inst_offset = offset; offset += size; } curinst++; } /* align the offset to 8 bytes */ if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4)) mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF); offset += 8 - 1; offset &= ~(8 - 1); /* change sign? */ cfg->stack_offset = offset; } void mono_arch_create_vars (MonoCompile *cfg) { MonoMethodSignature *sig; CallInfo *cinfo; int i; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; if (IS_HARD_FLOAT) { for (i = 0; i < 2; i++) { MonoInst *inst = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL); inst->flags |= MONO_INST_VOLATILE; cfg->arch.vfp_scratch_slots [i] = inst; } } if (cinfo->ret.storage == RegTypeStructByVal) cfg->ret_var_is_local = TRUE; if (cinfo->ret.storage == RegTypeStructByAddr) { cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG); if (G_UNLIKELY (cfg->verbose_level > 1)) { g_print ("vret_addr = "); mono_print_ins (cfg->vret_addr); } } if (cfg->gen_sdb_seq_points) { if (cfg->compile_aot) { MonoInst *ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.seq_point_info_var = ins; if (!cfg->soft_breakpoints) { /* Allocate a separate variable for this to save 1 load per seq point */ ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.ss_trigger_page_var = ins; } } if (cfg->soft_breakpoints) { MonoInst *ins; ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.seq_point_ss_method_var = ins; ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.seq_point_bp_method_var = ins; } } } static void emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo) { MonoMethodSignature *tmp_sig; int sig_reg; if (MONO_IS_TAILCALL_OPCODE (call)) NOT_IMPLEMENTED; g_assert (cinfo->sig_cookie.storage == RegTypeBase); /* * mono_ArgIterator_Setup assumes the signature cookie is * passed first and all the arguments which were before it are * passed on the stack after the signature. So compensate by * passing a different signature. 
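	 *
	 * Worked example (illustrative, not from the original comment): for
	 * "void f (int a, ...)" called as f (1, 2.5), sentinelpos is 1, so the
	 * tmp_sig built below describes only the trailing 2.5 argument, and
	 * the cookie itself is stored at sig_cookie.offset relative to SP.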
*/ tmp_sig = mono_metadata_signature_dup (call->signature); tmp_sig->param_count -= call->signature->sentinelpos; tmp_sig->sentinelpos = 0; memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*)); sig_reg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg); } #ifdef ENABLE_LLVM LLVMCallInfo* mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig) { int i, n; CallInfo *cinfo; ArgInfo *ainfo; LLVMCallInfo *linfo; n = sig->param_count + sig->hasthis; cinfo = get_call_info (cfg->mempool, sig); linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n)); /* * LLVM always uses the native ABI while we use our own ABI, the * only difference is the handling of vtypes: * - we only pass/receive them in registers in some cases, and only * in 1 or 2 integer registers. */ switch (cinfo->ret.storage) { case RegTypeNone: linfo->ret.storage = LLVMArgNone; break; case RegTypeGeneral: case RegTypeFP: case RegTypeIRegPair: linfo->ret.storage = LLVMArgNormal; break; case RegTypeStructByAddr: if (sig->pinvoke) { linfo->ret.storage = LLVMArgVtypeByRef; } else { /* Vtype returned using a hidden argument */ linfo->ret.storage = LLVMArgVtypeRetAddr; linfo->vret_arg_index = cinfo->vret_arg_index; } break; #if TARGET_WATCHOS case RegTypeStructByVal: /* LLVM models this by returning an int array */ linfo->ret.storage = LLVMArgAsIArgs; linfo->ret.nslots = cinfo->ret.nregs; break; #endif case RegTypeHFA: linfo->ret.storage = LLVMArgFpStruct; linfo->ret.nslots = cinfo->ret.nregs; linfo->ret.esize = cinfo->ret.esize; break; default: cfg->exception_message = g_strdup_printf ("unknown ret conv (%d)", cinfo->ret.storage); cfg->disable_llvm = TRUE; return linfo; } for (i = 0; i < n; ++i) { LLVMArgInfo *lainfo = &linfo->args [i]; ainfo = cinfo->args + i; lainfo->storage = LLVMArgNone; switch (ainfo->storage) { case RegTypeGeneral: case RegTypeIRegPair: case RegTypeBase: case RegTypeBaseGen: case RegTypeFP: lainfo->storage = LLVMArgNormal; break; case RegTypeStructByVal: { lainfo->storage = LLVMArgAsIArgs; int slotsize = eabi_supported && ainfo->align == 8 ? 
8 : 4; lainfo->nslots = ALIGN_TO (ainfo->struct_size, slotsize) / slotsize; lainfo->esize = slotsize; break; } case RegTypeStructByAddr: case RegTypeStructByAddrOnStack: lainfo->storage = LLVMArgVtypeByRef; break; case RegTypeHFA: { int j; lainfo->storage = LLVMArgAsFpArgs; lainfo->nslots = ainfo->nregs; lainfo->esize = ainfo->esize; for (j = 0; j < ainfo->nregs; ++j) lainfo->pair_storage [j] = LLVMArgInFPReg; break; } default: cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage); cfg->disable_llvm = TRUE; break; } } return linfo; } #endif void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call) { MonoInst *in, *ins; MonoMethodSignature *sig; int i, n; CallInfo *cinfo; sig = call->signature; n = sig->param_count + sig->hasthis; cinfo = get_call_info (cfg->mempool, sig); switch (cinfo->ret.storage) { case RegTypeStructByVal: case RegTypeHFA: if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) { /* The JIT will transform this into a normal call */ call->vret_in_reg = TRUE; break; } if (MONO_IS_TAILCALL_OPCODE (call)) break; /* * The vtype is returned in registers, save the return area address in a local, and save the vtype into * the location pointed to by it after call in emit_move_return_value (). */ if (!cfg->arch.vret_addr_loc) { cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); /* Prevent it from being register allocated or optimized away */ cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE; } MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg); break; case RegTypeStructByAddr: { MonoInst *vtarg; MONO_INST_NEW (cfg, vtarg, OP_MOVE); vtarg->sreg1 = call->vret_var->dreg; vtarg->dreg = mono_alloc_preg (cfg); MONO_ADD_INS (cfg->cbb, vtarg); mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE); break; } default: break; } for (i = 0; i < n; ++i) { ArgInfo *ainfo = cinfo->args + i; MonoType *t; if (i >= sig->hasthis) t = sig->params [i - sig->hasthis]; else t = mono_get_int_type (); t = mini_get_underlying_type (t); if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Emit the signature cookie just before the implicit arguments */ emit_sig_cookie (cfg, call, cinfo); } in = call->args [i]; switch (ainfo->storage) { case RegTypeGeneral: case RegTypeIRegPair: if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) { MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = MONO_LVREG_LS (in->dreg); MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE); MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = MONO_LVREG_MS (in->dreg); MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE); } else if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) { if (ainfo->size == 4) { if (IS_SOFT_FLOAT) { /* mono_emit_call_args () have already done the r8->r4 conversion */ /* The converted value is in an int vreg */ MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE); } else { int creg; cfg->param_area = MAX (cfg->param_area, 8); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg); creg = mono_alloc_ireg (cfg); 
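					/*
					 * Illustrative note (not in the original source): here the
					 * value lives in a VFP register, but the softfp convention
					 * passes it in a core register, so it is spilled to the
					 * param area as an R4 above and reloaded below as a plain
					 * word into creg.
					 */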
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8)); mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE); } } else { if (IS_SOFT_FLOAT) { MONO_INST_NEW (cfg, ins, OP_FGETLOW32); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE); MONO_INST_NEW (cfg, ins, OP_FGETHIGH32); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE); } else { int creg; cfg->param_area = MAX (cfg->param_area, 8); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg); creg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8)); mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE); creg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4)); mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE); } } cfg->flags |= MONO_CFG_HAS_FPOUT; } else { MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE); } break; case RegTypeStructByVal: case RegTypeGSharedVtInReg: case RegTypeGSharedVtOnStack: case RegTypeHFA: case RegTypeStructByAddr: case RegTypeStructByAddrOnStack: MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->opcode = OP_OUTARG_VT; ins->sreg1 = in->dreg; ins->klass = in->klass; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); mono_call_inst_add_outarg_vt (cfg, call, ins); MONO_ADD_INS (cfg->cbb, ins); break; case RegTypeBase: if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg); } else if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) { if (t->type == MONO_TYPE_R8) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg); } else { if (IS_SOFT_FLOAT) MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg); else MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg); } } else { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg); } break; case RegTypeBaseGen: if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? MONO_LVREG_LS (in->dreg) : MONO_LVREG_MS (in->dreg)); MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? 
MONO_LVREG_MS (in->dreg) : MONO_LVREG_LS (in->dreg); MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE); } else if (!m_type_is_byref (t) && (t->type == MONO_TYPE_R8)) { int creg; /* This should work for soft-float as well */ cfg->param_area = MAX (cfg->param_area, 8); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg); creg = mono_alloc_ireg (cfg); mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8)); creg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4)); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg); cfg->flags |= MONO_CFG_HAS_FPOUT; } else { g_assert_not_reached (); } break; case RegTypeFP: { int fdreg = mono_alloc_freg (cfg); if (ainfo->size == 8) { MONO_INST_NEW (cfg, ins, OP_FMOVE); ins->sreg1 = in->dreg; ins->dreg = fdreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE); } else { FloatArgData *fad; /* * Mono's register allocator doesn't speak single-precision registers that * overlap double-precision registers (i.e. armhf). So we have to work around * the register allocator and load the value from memory manually. * * So we create a variable for the float argument and an instruction to store * the argument into the variable. We then store the list of these arguments * in call->float_args. This list is then used by emit_float_args later to * pass the arguments in the various call opcodes. * * This is not very nice, and we should really try to fix the allocator. */ MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL); /* Make sure the instruction isn't seen as pointless and removed. */ float_arg->flags |= MONO_INST_VOLATILE; MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg); /* We use the dreg to look up the instruction later. The hreg is used to * emit the instruction that loads the value into the FP reg. 
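				 *
				 * Data-flow sketch (an illustration, not part of the
				 * original comment):
				 *
				 *   in->dreg --FMOVE--> float_arg->dreg --FloatArgData--> s<hreg>
				 *
				 * emit_float_args () later walks call->float_args and emits
				 * the FLDS that moves each value from the variable's stack
				 * slot into the hreg single-precision register.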
*/ fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData)); fad->vreg = float_arg->dreg; fad->hreg = ainfo->reg; call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad); } call->used_iregs |= 1 << ainfo->reg; cfg->flags |= MONO_CFG_HAS_FPOUT; break; } default: g_assert_not_reached (); } } /* Handle the case where there are no implicit arguments */ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) emit_sig_cookie (cfg, call, cinfo); call->call_info = cinfo; call->stack_usage = cinfo->stack_usage; } static void add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *arg) { MonoInst *ins; switch (storage) { case RegTypeFP: MONO_INST_NEW (cfg, ins, OP_FMOVE); ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = arg->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE); break; default: g_assert_not_reached (); break; } } void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src) { MonoCallInst *call = (MonoCallInst*)ins->inst_p0; MonoInst *load; ArgInfo *ainfo = (ArgInfo*)ins->inst_p1; int ovf_size = ainfo->vtsize; int doffset = ainfo->offset; int struct_size = ainfo->struct_size; int i, soffset, dreg, tmpreg; switch (ainfo->storage) { case RegTypeGSharedVtInReg: case RegTypeStructByAddr: /* Pass by addr */ mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE); break; case RegTypeGSharedVtOnStack: case RegTypeStructByAddrOnStack: /* Pass by addr on stack */ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg); break; case RegTypeHFA: for (i = 0; i < ainfo->nregs; ++i) { if (ainfo->esize == 4) MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE); else MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE); load->dreg = mono_alloc_freg (cfg); load->inst_basereg = src->dreg; load->inst_offset = i * ainfo->esize; MONO_ADD_INS (cfg->cbb, load); if (ainfo->esize == 4) { FloatArgData *fad; /* See RegTypeFP in mono_arch_emit_call () */ MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL); float_arg->flags |= MONO_INST_VOLATILE; MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, load->dreg); fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData)); fad->vreg = float_arg->dreg; fad->hreg = ainfo->reg + i; call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad); } else { add_outarg_reg (cfg, call, RegTypeFP, ainfo->reg + (i * 2), load); } } break; default: soffset = 0; for (i = 0; i < ainfo->size; ++i) { dreg = mono_alloc_ireg (cfg); switch (struct_size) { case 1: MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset); break; case 2: MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset); break; case 3: tmpreg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8); MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16); MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg); break; default: MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset); break; } mono_call_inst_add_outarg_reg (cfg, call, dreg, 
ainfo->reg + i, FALSE); soffset += sizeof (target_mgreg_t); struct_size -= sizeof (target_mgreg_t); } //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset); if (ovf_size != 0) mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (target_mgreg_t), struct_size), struct_size < 4 ? 1 : 4); break; } } void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val) { MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret); if (!m_type_is_byref (ret)) { if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) { MonoInst *ins; if (COMPILE_LLVM (cfg)) { MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg); } else { MONO_INST_NEW (cfg, ins, OP_SETLRET); ins->sreg1 = MONO_LVREG_LS (val->dreg); ins->sreg2 = MONO_LVREG_MS (val->dreg); MONO_ADD_INS (cfg->cbb, ins); } return; } switch (arm_fpu) { case MONO_ARM_FPU_NONE: if (ret->type == MONO_TYPE_R8) { MonoInst *ins; MONO_INST_NEW (cfg, ins, OP_SETFRET); ins->dreg = cfg->ret->dreg; ins->sreg1 = val->dreg; MONO_ADD_INS (cfg->cbb, ins); return; } if (ret->type == MONO_TYPE_R4) { /* Already converted to an int in method_to_ir () */ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg); return; } break; case MONO_ARM_FPU_VFP: case MONO_ARM_FPU_VFP_HARD: if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) { MonoInst *ins; MONO_INST_NEW (cfg, ins, OP_SETFRET); ins->dreg = cfg->ret->dreg; ins->sreg1 = val->dreg; MONO_ADD_INS (cfg->cbb, ins); return; } break; default: g_assert_not_reached (); } } MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg); } #endif /* #ifndef DISABLE_JIT */ gboolean mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm) { return TRUE; } typedef struct { MonoMethodSignature *sig; CallInfo *cinfo; MonoType *rtype; MonoType **param_types; } ArchDynCallInfo; static gboolean dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig) { int i; switch (cinfo->ret.storage) { case RegTypeNone: case RegTypeGeneral: case RegTypeIRegPair: case RegTypeStructByAddr: break; case RegTypeFP: if (IS_VFP) break; else return FALSE; default: return FALSE; } for (i = 0; i < cinfo->nargs; ++i) { ArgInfo *ainfo = &cinfo->args [i]; int last_slot; switch (ainfo->storage) { case RegTypeGeneral: case RegTypeIRegPair: case RegTypeBaseGen: case RegTypeFP: break; case RegTypeBase: break; case RegTypeStructByVal: if (ainfo->size == 0) last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize; else last_slot = ainfo->reg + ainfo->size + ainfo->vtsize; break; default: return FALSE; } } // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */ for (i = 0; i < sig->param_count; ++i) { MonoType *t = sig->params [i]; if (m_type_is_byref (t)) continue; t = mini_get_underlying_type (t); switch (t->type) { case MONO_TYPE_R4: case MONO_TYPE_R8: if (IS_SOFT_FLOAT) return FALSE; else break; /* case MONO_TYPE_I8: case MONO_TYPE_U8: return FALSE; */ default: break; } } return TRUE; } MonoDynCallInfo* mono_arch_dyn_call_prepare (MonoMethodSignature *sig) { ArchDynCallInfo *info; CallInfo *cinfo; int i; cinfo = get_call_info (NULL, sig); if (!dyn_call_supported (cinfo, sig)) { g_free (cinfo); return NULL; } info = g_new0 (ArchDynCallInfo, 1); // FIXME: Preprocess the info to speed up start_dyn_call () info->sig = sig; info->cinfo = cinfo; info->rtype = mini_get_underlying_type (sig->ret); info->param_types = g_new0 (MonoType*, sig->param_count); for (i = 0; i < sig->param_count; ++i) info->param_types [i] 
= mini_get_underlying_type (sig->params [i]);

	return (MonoDynCallInfo*)info;
}

void
mono_arch_dyn_call_free (MonoDynCallInfo *info)
{
	ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;

	g_free (ainfo->cinfo);
	g_free (ainfo);
}

int
mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info)
{
	ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;

	g_assert (ainfo->cinfo->stack_usage % MONO_ARCH_FRAME_ALIGNMENT == 0);
	return sizeof (DynCallArgs) + ainfo->cinfo->stack_usage;
}

void
mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf)
{
	ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
	CallInfo *cinfo = dinfo->cinfo;
	DynCallArgs *p = (DynCallArgs*)buf;
	int arg_index, greg, i, j, pindex;
	MonoMethodSignature *sig = dinfo->sig;

	p->res = 0;
	p->ret = ret;
	p->has_fpregs = 0;
	p->n_stackargs = cinfo->stack_usage / sizeof (host_mgreg_t);

	arg_index = 0;
	greg = 0;
	pindex = 0;

	if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
		p->regs [greg ++] = (host_mgreg_t)(gsize)*(args [arg_index ++]);
		if (!sig->hasthis)
			pindex = 1;
	}

	if (dinfo->cinfo->ret.storage == RegTypeStructByAddr)
		p->regs [greg ++] = (host_mgreg_t)(gsize)ret;

	for (i = pindex; i < sig->param_count; i++) {
		MonoType *t = dinfo->param_types [i];
		gpointer *arg = args [arg_index ++];
		ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
		int slot = -1;

		if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal) {
			slot = ainfo->reg;
		} else if (ainfo->storage == RegTypeFP) {
		} else if (ainfo->storage == RegTypeBase) {
			slot = PARAM_REGS + (ainfo->offset / 4);
		} else if (ainfo->storage == RegTypeBaseGen) {
			/* slot + 1 is the first stack slot, so the code below will work */
			slot = 3;
		} else {
			g_assert_not_reached ();
		}

		if (m_type_is_byref (t)) {
			p->regs [slot] = (host_mgreg_t)(gsize)*arg;
			continue;
		}

		switch (t->type) {
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_PTR:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
			p->regs [slot] = (host_mgreg_t)(gsize)*arg;
			break;
		case MONO_TYPE_U1:
			p->regs [slot] = *(guint8*)arg;
			break;
		case MONO_TYPE_I1:
			p->regs [slot] = *(gint8*)arg;
			break;
		case MONO_TYPE_I2:
			p->regs [slot] = *(gint16*)arg;
			break;
		case MONO_TYPE_U2:
			p->regs [slot] = *(guint16*)arg;
			break;
		case MONO_TYPE_I4:
			p->regs [slot] = *(gint32*)arg;
			break;
		case MONO_TYPE_U4:
			p->regs [slot] = *(guint32*)arg;
			break;
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
			p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
			break;
		case MONO_TYPE_R4:
			if (ainfo->storage == RegTypeFP) {
				float f = *(float*)arg;

				p->fpregs [ainfo->reg / 2] = *(double*)&f;
				p->has_fpregs = 1;
			} else {
				p->regs [slot] = *(host_mgreg_t*)arg;
			}
			break;
		case MONO_TYPE_R8:
			if (ainfo->storage == RegTypeFP) {
				p->fpregs [ainfo->reg / 2] = *(double*)arg;
				p->has_fpregs = 1;
			} else {
				p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
				p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
			}
			break;
		case MONO_TYPE_GENERICINST:
			if (MONO_TYPE_IS_REFERENCE (t)) {
				p->regs [slot] = (host_mgreg_t)(gsize)*arg;
				break;
			} else {
				if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
					MonoClass *klass = mono_class_from_mono_type_internal (t);
					guint8 *nullable_buf;
					int size;

					size = mono_class_value_size (klass, NULL);
					nullable_buf = g_alloca (size);
					g_assert (nullable_buf);

					/* The argument pointed to by arg is either a boxed vtype or null */
					mono_nullable_init (nullable_buf, (MonoObject*)arg, klass);

					arg = (gpointer*)nullable_buf;
					/* Fall through */
				} else {
					/* Fall through */
				}
			}
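			/* Both branches above fall through: the (possibly unwrapped
			   nullable) value type is then passed by the value type case below. */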
case MONO_TYPE_VALUETYPE: g_assert (ainfo->storage == RegTypeStructByVal); if (ainfo->size == 0) slot = PARAM_REGS + (ainfo->offset / 4); else slot = ainfo->reg; for (j = 0; j < ainfo->size + ainfo->vtsize; ++j) p->regs [slot ++] = ((host_mgreg_t*)arg) [j]; break; default: g_assert_not_reached (); } } } void mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf) { ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info; DynCallArgs *p = (DynCallArgs*)buf; MonoType *ptype = ainfo->rtype; guint8 *ret = p->ret; host_mgreg_t res = p->res; host_mgreg_t res2 = p->res2; switch (ptype->type) { case MONO_TYPE_VOID: *(gpointer*)ret = NULL; break; case MONO_TYPE_OBJECT: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: *(gpointer*)ret = (gpointer)(gsize)res; break; case MONO_TYPE_I1: *(gint8*)ret = res; break; case MONO_TYPE_U1: *(guint8*)ret = res; break; case MONO_TYPE_I2: *(gint16*)ret = res; break; case MONO_TYPE_U2: *(guint16*)ret = res; break; case MONO_TYPE_I4: *(gint32*)ret = res; break; case MONO_TYPE_U4: *(guint32*)ret = res; break; case MONO_TYPE_I8: case MONO_TYPE_U8: /* This handles endianness as well */ ((gint32*)ret) [0] = res; ((gint32*)ret) [1] = res2; break; case MONO_TYPE_GENERICINST: if (MONO_TYPE_IS_REFERENCE (ptype)) { *(gpointer*)ret = (gpointer)res; break; } else { /* Fall though */ } case MONO_TYPE_VALUETYPE: g_assert (ainfo->cinfo->ret.storage == RegTypeStructByAddr); /* Nothing to do */ break; case MONO_TYPE_R4: g_assert (IS_VFP); if (IS_HARD_FLOAT) *(float*)ret = *(float*)&p->fpregs [0]; else *(float*)ret = *(float*)&res; break; case MONO_TYPE_R8: { host_mgreg_t regs [2]; g_assert (IS_VFP); if (IS_HARD_FLOAT) { *(double*)ret = p->fpregs [0]; } else { regs [0] = res; regs [1] = res2; *(double*)ret = *(double*)&regs; } break; } default: g_assert_not_reached (); } } #ifndef DISABLE_JIT /* * The immediate field for cond branches is big enough for all reasonable methods */ #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \ if (0 && ins->inst_true_bb->native_offset) { \ ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \ } else { \ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \ ARM_B_COND (code, (condcode), 0); \ } #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)]) /* emit an exception if condition is fail * * We assign the extra code used to throw the implicit exceptions * to cfg->bb_exit as far as the big branch handling is concerned */ #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \ do { \ mono_add_patch_info (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_EXC, exc_name); \ ARM_BL_COND (code, (condcode), 0); \ } while (0); #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name)) void mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb) { } void mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *n; MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) { MonoInst *last_ins = mono_inst_prev (ins, FILTER_IL_SEQ_POINT); switch (ins->opcode) { case OP_MUL_IMM: case OP_IMUL_IMM: /* Already done by an arch-independent pass */ break; case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: /* * OP_STORE_MEMBASE_REG reg, offset(basereg) * OP_LOAD_MEMBASE offset(basereg), reg */ if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG || last_ins->opcode == OP_STORE_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == 
last_ins->inst_offset) {
				if (ins->dreg == last_ins->sreg1) {
					MONO_DELETE_INS (bb, ins);
					continue;
				} else {
					//static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
					ins->opcode = OP_MOVE;
					ins->sreg1 = last_ins->sreg1;
				}

			/*
			 * Note: reg1 must be different from the basereg in the second load
			 * OP_LOAD_MEMBASE offset(basereg), reg1
			 * OP_LOAD_MEMBASE offset(basereg), reg2
			 * -->
			 * OP_LOAD_MEMBASE offset(basereg), reg1
			 * OP_MOVE reg1, reg2
			 */
			}
			if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE || last_ins->opcode == OP_LOAD_MEMBASE) &&
			    ins->inst_basereg != last_ins->dreg &&
			    ins->inst_basereg == last_ins->inst_basereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				if (ins->dreg == last_ins->dreg) {
					MONO_DELETE_INS (bb, ins);
					continue;
				} else {
					ins->opcode = OP_MOVE;
					ins->sreg1 = last_ins->dreg;
				}

				//g_assert_not_reached ();

#if 0
			/*
			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
			 * OP_LOAD_MEMBASE offset(basereg), reg
			 * -->
			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
			 * OP_ICONST reg, imm
			 */
			} else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
				   ins->inst_basereg == last_ins->inst_destbasereg &&
				   ins->inst_offset == last_ins->inst_offset) {
				//static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
				ins->opcode = OP_ICONST;
				ins->inst_c0 = last_ins->inst_imm;
				g_assert_not_reached (); // check this rule
#endif
			}
			break;
		case OP_LOADU1_MEMBASE:
		case OP_LOADI1_MEMBASE:
			if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
					ins->inst_basereg == last_ins->inst_destbasereg &&
					ins->inst_offset == last_ins->inst_offset) {
				ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
				ins->sreg1 = last_ins->sreg1;
			}
			break;
		case OP_LOADU2_MEMBASE:
		case OP_LOADI2_MEMBASE:
			if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
					ins->inst_basereg == last_ins->inst_destbasereg &&
					ins->inst_offset == last_ins->inst_offset) {
				ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
				ins->sreg1 = last_ins->sreg1;
			}
			break;
		case OP_MOVE:
			ins->opcode = OP_MOVE;
			/*
			 * OP_MOVE reg, reg
			 */
			if (ins->dreg == ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			/*
			 * OP_MOVE sreg, dreg
			 * OP_MOVE dreg, sreg
			 */
			if (last_ins && last_ins->opcode == OP_MOVE &&
			    ins->sreg1 == last_ins->dreg &&
			    ins->dreg == last_ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			break;
		}
	}
}

/*
 * the branch_cc_table should maintain the order of these
 * opcodes.
case CEE_BEQ: case CEE_BGE: case CEE_BGT: case CEE_BLE: case CEE_BLT: case CEE_BNE_UN: case CEE_BGE_UN: case CEE_BGT_UN: case CEE_BLE_UN: case CEE_BLT_UN: */ static const guchar branch_cc_table [] = { ARMCOND_EQ, ARMCOND_GE, ARMCOND_GT, ARMCOND_LE, ARMCOND_LT, ARMCOND_NE, ARMCOND_HS, ARMCOND_HI, ARMCOND_LS, ARMCOND_LO }; #define ADD_NEW_INS(cfg,dest,op) do { \ MONO_INST_NEW ((cfg), (dest), (op)); \ mono_bblock_insert_before_ins (bb, ins, (dest)); \ } while (0) static int map_to_reg_reg_op (int op) { switch (op) { case OP_ADD_IMM: return OP_IADD; case OP_SUB_IMM: return OP_ISUB; case OP_AND_IMM: return OP_IAND; case OP_COMPARE_IMM: return OP_COMPARE; case OP_ICOMPARE_IMM: return OP_ICOMPARE; case OP_ADDCC_IMM: return OP_ADDCC; case OP_ADC_IMM: return OP_ADC; case OP_SUBCC_IMM: return OP_SUBCC; case OP_SBB_IMM: return OP_SBB; case OP_OR_IMM: return OP_IOR; case OP_XOR_IMM: return OP_IXOR; case OP_LOAD_MEMBASE: return OP_LOAD_MEMINDEX; case OP_LOADI4_MEMBASE: return OP_LOADI4_MEMINDEX; case OP_LOADU4_MEMBASE: return OP_LOADU4_MEMINDEX; case OP_LOADU1_MEMBASE: return OP_LOADU1_MEMINDEX; case OP_LOADI2_MEMBASE: return OP_LOADI2_MEMINDEX; case OP_LOADU2_MEMBASE: return OP_LOADU2_MEMINDEX; case OP_LOADI1_MEMBASE: return OP_LOADI1_MEMINDEX; case OP_STOREI1_MEMBASE_REG: return OP_STOREI1_MEMINDEX; case OP_STOREI2_MEMBASE_REG: return OP_STOREI2_MEMINDEX; case OP_STOREI4_MEMBASE_REG: return OP_STOREI4_MEMINDEX; case OP_STORE_MEMBASE_REG: return OP_STORE_MEMINDEX; case OP_STORER4_MEMBASE_REG: return OP_STORER4_MEMINDEX; case OP_STORER8_MEMBASE_REG: return OP_STORER8_MEMINDEX; case OP_STORE_MEMBASE_IMM: return OP_STORE_MEMBASE_REG; case OP_STOREI1_MEMBASE_IMM: return OP_STOREI1_MEMBASE_REG; case OP_STOREI2_MEMBASE_IMM: return OP_STOREI2_MEMBASE_REG; case OP_STOREI4_MEMBASE_IMM: return OP_STOREI4_MEMBASE_REG; } g_assert_not_reached (); } /* * Remove from the instruction list the instructions that can't be * represented with very simple instructions with no register * requirements. */ void mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *temp, *last_ins = NULL; int rot_amount, imm8, low_imm; MONO_BB_FOR_EACH_INS (bb, ins) { loop_start: switch (ins->opcode) { case OP_ADD_IMM: case OP_SUB_IMM: case OP_AND_IMM: case OP_COMPARE_IMM: case OP_ICOMPARE_IMM: case OP_ADDCC_IMM: case OP_ADC_IMM: case OP_SUBCC_IMM: case OP_SBB_IMM: case OP_OR_IMM: case OP_XOR_IMM: case OP_IADD_IMM: case OP_ISUB_IMM: case OP_IAND_IMM: case OP_IADC_IMM: case OP_ISBB_IMM: case OP_IOR_IMM: case OP_IXOR_IMM: if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) { int opcode2 = mono_op_imm_to_op (ins->opcode); ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; if (opcode2 == -1) g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode)); ins->opcode = opcode2; } if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC) goto loop_start; else break; case OP_MUL_IMM: case OP_IMUL_IMM: if (ins->inst_imm == 1) { ins->opcode = OP_MOVE; break; } if (ins->inst_imm == 0) { ins->opcode = OP_ICONST; ins->inst_c0 = 0; break; } imm8 = (ins->inst_imm > 0) ? 
mono_is_power_of_two (ins->inst_imm) : -1; if (imm8 > 0) { ins->opcode = OP_SHL_IMM; ins->inst_imm = imm8; break; } ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = OP_IMUL; break; case OP_SBB: case OP_ISBB: case OP_SUBCC: case OP_ISUBCC: { int try_count = 2; MonoInst *current = ins; /* may require a look-ahead of a couple instructions due to spilling */ while (try_count-- && current->next) { if (current->next->opcode == OP_COND_EXC_C || current->next->opcode == OP_COND_EXC_IC) { /* ARM sets the C flag to 1 if there was _no_ overflow */ current->next->opcode = OP_COND_EXC_NC; break; } current = current->next; } break; } case OP_IDIV_IMM: case OP_IDIV_UN_IMM: case OP_IREM_IMM: case OP_IREM_UN_IMM: { int opcode2 = mono_op_imm_to_op (ins->opcode); ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; if (opcode2 == -1) g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode)); ins->opcode = opcode2; break; } case OP_LOCALLOC_IMM: ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg1 = temp->dreg; ins->opcode = OP_LOCALLOC; break; case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: case OP_LOADU4_MEMBASE: case OP_LOADU1_MEMBASE: /* we can do two things: load the immed in a register * and use an indexed load, or see if the immed can be * represented as an ad_imm + a load with a smaller offset * that fits. We just do the first for now, optimize later. */ if (arm_is_imm12 (ins->inst_offset)) break; ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_LOADI2_MEMBASE: case OP_LOADU2_MEMBASE: case OP_LOADI1_MEMBASE: if (arm_is_imm8 (ins->inst_offset)) break; ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_LOADR4_MEMBASE: case OP_LOADR8_MEMBASE: if (arm_is_fpimm8 (ins->inst_offset)) break; low_imm = ins->inst_offset & 0x1ff; if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) { ADD_NEW_INS (cfg, temp, OP_ADD_IMM); temp->inst_imm = ins->inst_offset & ~0x1ff; temp->sreg1 = ins->inst_basereg; temp->dreg = mono_alloc_ireg (cfg); ins->inst_basereg = temp->dreg; ins->inst_offset = low_imm; } else { MonoInst *add_ins; ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ADD_NEW_INS (cfg, add_ins, OP_IADD); add_ins->sreg1 = ins->inst_basereg; add_ins->sreg2 = temp->dreg; add_ins->dreg = mono_alloc_ireg (cfg); ins->inst_basereg = add_ins->dreg; ins->inst_offset = 0; } break; case OP_STORE_MEMBASE_REG: case OP_STOREI4_MEMBASE_REG: case OP_STOREI1_MEMBASE_REG: if (arm_is_imm12 (ins->inst_offset)) break; ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_STOREI2_MEMBASE_REG: if (arm_is_imm8 (ins->inst_offset)) break; ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_STORER4_MEMBASE_REG: case OP_STORER8_MEMBASE_REG: if (arm_is_fpimm8 
(ins->inst_offset)) break; low_imm = ins->inst_offset & 0x1ff; if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) { ADD_NEW_INS (cfg, temp, OP_ADD_IMM); temp->inst_imm = ins->inst_offset & ~0x1ff; temp->sreg1 = ins->inst_destbasereg; temp->dreg = mono_alloc_ireg (cfg); ins->inst_destbasereg = temp->dreg; ins->inst_offset = low_imm; } else { MonoInst *add_ins; ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ADD_NEW_INS (cfg, add_ins, OP_IADD); add_ins->sreg1 = ins->inst_destbasereg; add_ins->sreg2 = temp->dreg; add_ins->dreg = mono_alloc_ireg (cfg); ins->inst_destbasereg = add_ins->dreg; ins->inst_offset = 0; } break; case OP_STORE_MEMBASE_IMM: case OP_STOREI1_MEMBASE_IMM: case OP_STOREI2_MEMBASE_IMM: case OP_STOREI4_MEMBASE_IMM: ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg1 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); last_ins = temp; goto loop_start; /* make it handle the possibly big ins->inst_offset */ case OP_FCOMPARE: case OP_RCOMPARE: { gboolean swap = FALSE; int reg; if (!ins->next) { /* Optimized away */ NULLIFY_INS (ins); break; } /* Some fp compares require swapped operands */ switch (ins->next->opcode) { case OP_FBGT: ins->next->opcode = OP_FBLT; swap = TRUE; break; case OP_FBGT_UN: ins->next->opcode = OP_FBLT_UN; swap = TRUE; break; case OP_FBLE: ins->next->opcode = OP_FBGE; swap = TRUE; break; case OP_FBLE_UN: ins->next->opcode = OP_FBGE_UN; swap = TRUE; break; default: break; } if (swap) { reg = ins->sreg1; ins->sreg1 = ins->sreg2; ins->sreg2 = reg; } break; } } last_ins = ins; } bb->last_ins = last_ins; bb->max_vreg = cfg->next_vreg; } void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins) { MonoInst *ins; if (long_ins->opcode == OP_LNEG) { ins = long_ins; MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), 0); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), 0); NULLIFY_INS (ins); } } static guchar* emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed) { /* sreg is a float, dreg is an integer reg */ if (IS_VFP) { code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1); if (is_signed) ARM_TOSIZD (code, vfp_scratch1, sreg); else ARM_TOUIZD (code, vfp_scratch1, sreg); ARM_FMRS (code, dreg, vfp_scratch1); code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1); } if (!is_signed) { if (size == 1) ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff); else if (size == 2) { ARM_SHL_IMM (code, dreg, dreg, 16); ARM_SHR_IMM (code, dreg, dreg, 16); } } else { if (size == 1) { ARM_SHL_IMM (code, dreg, dreg, 24); ARM_SAR_IMM (code, dreg, dreg, 24); } else if (size == 2) { ARM_SHL_IMM (code, dreg, dreg, 16); ARM_SAR_IMM (code, dreg, dreg, 16); } } return code; } static guchar* emit_r4_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed) { /* sreg is a float, dreg is an integer reg */ g_assert (IS_VFP); code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1); if (is_signed) ARM_TOSIZS (code, vfp_scratch1, sreg); else ARM_TOUIZS (code, vfp_scratch1, sreg); ARM_FMRS (code, dreg, vfp_scratch1); code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1); if (!is_signed) { if (size == 1) ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff); else if (size == 2) { ARM_SHL_IMM (code, 
dreg, dreg, 16); ARM_SHR_IMM (code, dreg, dreg, 16); } } else { if (size == 1) { ARM_SHL_IMM (code, dreg, dreg, 24); ARM_SAR_IMM (code, dreg, dreg, 24); } else if (size == 2) { ARM_SHL_IMM (code, dreg, dreg, 16); ARM_SAR_IMM (code, dreg, dreg, 16); } } return code; } #endif /* #ifndef DISABLE_JIT */ #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431) static void emit_thunk (guint8 *code, gconstpointer target) { guint8 *p = code; ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0); if (thumb_supported) ARM_BX (code, ARMREG_IP); else ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP); *(guint32*)code = (guint32)(gsize)target; code += 4; mono_arch_flush_icache (p, code - p); } static void handle_thunk (MonoCompile *cfg, guchar *code, const guchar *target) { MonoJitInfo *ji = NULL; MonoThunkJitInfo *info; guint8 *thunks, *p; int thunks_size; guint8 *orig_target; guint8 *target_thunk; if (cfg) { /* * This can be called multiple times during JITting, * save the current position in cfg->arch to avoid * doing a O(n^2) search. */ if (!cfg->arch.thunks) { cfg->arch.thunks = cfg->thunks; cfg->arch.thunks_size = cfg->thunk_area; } thunks = cfg->arch.thunks; thunks_size = cfg->arch.thunks_size; if (!thunks_size) { g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE)); g_assert_not_reached (); } g_assert (*(guint32*)thunks == 0); emit_thunk (thunks, target); arm_patch (code, thunks); cfg->arch.thunks += THUNK_SIZE; cfg->arch.thunks_size -= THUNK_SIZE; } else { ji = mini_jit_info_table_find (code); g_assert (ji); info = mono_jit_info_get_thunk_info (ji); g_assert (info); thunks = (guint8*)ji->code_start + info->thunks_offset; thunks_size = info->thunks_size; orig_target = mono_arch_get_call_target (code + 4); mono_mini_arch_lock (); target_thunk = NULL; if (orig_target >= thunks && orig_target < thunks + thunks_size) { /* The call already points to a thunk, because of trampolines etc. */ target_thunk = orig_target; } else { for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) { if (((guint32*)p) [0] == 0) { /* Free entry */ target_thunk = p; break; } else if (((guint32*)p) [2] == (guint32)(gsize)target) { /* Thunk already points to target */ target_thunk = p; break; } } } //g_print ("THUNK: %p %p %p\n", code, target, target_thunk); if (!target_thunk) { mono_mini_arch_unlock (); g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? 
mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE)); g_assert_not_reached (); } emit_thunk (target_thunk, target); arm_patch (code, target_thunk); mono_arch_flush_icache (code, 4); mono_mini_arch_unlock (); } } static void arm_patch_general (MonoCompile *cfg, guchar *code, const guchar *target) { guint32 *code32 = (guint32*)code; guint32 ins = *code32; guint32 prim = (ins >> 25) & 7; guint32 tval = GPOINTER_TO_UINT (target); //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target); if (prim == 5) { /* 101b */ /* the diff starts 8 bytes from the branch opcode */ gint diff = target - code - 8; gint tbits; gint tmask = 0xffffffff; if (tval & 1) { /* entering thumb mode */ diff = target - 1 - code - 8; g_assert (thumb_supported); tbits = 0xf << 28; /* bl->blx bit pattern */ g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */ /* this low bit of the displacement is moved to bit 24 in the instruction encoding */ if (diff & 2) { tbits |= 1 << 24; } tmask = ~(1 << 24); /* clear the link bit */ /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/ } else { tbits = 0; } if (diff >= 0) { if (diff <= 33554431) { diff >>= 2; ins = (ins & 0xff000000) | diff; ins &= tmask; *code32 = ins | tbits; return; } } else { /* diff between 0 and -33554432 */ if (diff >= -33554432) { diff >>= 2; ins = (ins & 0xff000000) | (diff & ~0xff000000); ins &= tmask; *code32 = ins | tbits; return; } } handle_thunk (cfg, code, target); return; } /* * The alternative call sequences looks like this: * * ldr ip, [pc] // loads the address constant * b 1f // jumps around the constant * address constant embedded in the code * 1f: * mov lr, pc * mov pc, ip * * There are two cases for patching: * a) at the end of method emission: in this case code points to the start * of the call sequence * b) during runtime patching of the call site: in this case code points * to the mov pc, ip instruction * * We have to handle also the thunk jump code sequence: * * ldr ip, [pc] * mov pc, ip * address constant // execution never reaches here */ if ((ins & 0x0ffffff0) == 0x12fff10) { /* Branch and exchange: the address is constructed in a reg * We can patch BX when the code sequence is the following: * ldr ip, [pc, #0] ; 0x8 * b 0xc * .word code_ptr * mov lr, pc * bx ips * */ guint32 ccode [4]; guint8 *emit = (guint8*)ccode; ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0); ARM_B (emit, 0); ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC); ARM_BX (emit, ARMREG_IP); /*patching from magic trampoline*/ if (ins == ccode [3]) { g_assert (code32 [-4] == ccode [0]); g_assert (code32 [-3] == ccode [1]); g_assert (code32 [-1] == ccode [2]); code32 [-2] = (guint32)(gsize)target; return; } /*patching from JIT*/ if (ins == ccode [0]) { g_assert (code32 [1] == ccode [1]); g_assert (code32 [3] == ccode [2]); g_assert (code32 [4] == ccode [3]); code32 [2] = (guint32)(gsize)target; return; } g_assert_not_reached (); } else if ((ins & 0x0ffffff0) == 0x12fff30) { /* * ldr ip, [pc, #0] * b 0xc * .word code_ptr * blx ip */ guint32 ccode [4]; guint8 *emit = (guint8*)ccode; ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0); ARM_B (emit, 0); ARM_BLX_REG (emit, ARMREG_IP); g_assert (code32 [-3] == ccode [0]); g_assert (code32 [-2] == ccode [1]); g_assert (code32 [0] == ccode [2]); code32 [-1] = (guint32)(gsize)target; } else { guint32 ccode [4]; guint32 *tmp = ccode; guint8 *emit = (guint8*)tmp; ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0); ARM_MOV_REG_REG 
(emit, ARMREG_LR, ARMREG_PC);
		ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
		ARM_BX (emit, ARMREG_IP);

		if (ins == ccode [2]) {
			g_assert_not_reached (); // should be -2 ...
			code32 [-1] = (guint32)(gsize)target;
			return;
		}
		if (ins == ccode [0]) {
			/* handles both thunk jump code and the far call sequence */
			code32 [2] = (guint32)(gsize)target;
			return;
		}
		g_assert_not_reached ();
	}
//	g_print ("patched with 0x%08x\n", ins);
}

void
arm_patch (guchar *code, const guchar *target)
{
	arm_patch_general (NULL, code, target);
}

/*
 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
 * (with the rotation amount in *rot_amount; rot_amount is already adjusted
 * to be used with the emit macros).
 * Return -1 otherwise.
 */
int
mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
{
	guint32 res, i;

	for (i = 0; i < 31; i+= 2) {
		if (i == 0)
			res = val;
		else
			res = (val << (32 - i)) | (val >> i);
		if (res & ~0xff)
			continue;
		*rot_amount = i? 32 - i: 0;
		return res;
	}
	return -1;
}

/*
 * Emits in code a sequence of instructions that load the value 'val'
 * into the dreg register. Uses at most 4 instructions.
 */
guint8*
mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
{
	int imm8, rot_amount;
#if 0
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	/* skip the constant pool */
	ARM_B (code, 0);
	*(int*)code = val;
	code += 4;
	return code;
#endif
	if (mini_debug_options.single_imm_size && v7_supported) {
		ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
		ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
		return code;
	}

	if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
		ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
	} else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
		ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
	} else {
		if (v7_supported) {
			ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
			if (val >> 16)
				ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
			return code;
		}
		if (val & 0xFF) {
			ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
			if (val & 0xFF00) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
			}
			if (val & 0xFF0000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
			}
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		} else if (val & 0xFF00) {
			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
			if (val & 0xFF0000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
			}
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		} else if (val & 0xFF0000) {
			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		}
		//g_assert_not_reached ();
	}
	return code;
}

gboolean
mono_arm_thumb_supported (void)
{
	return thumb_supported;
}

gboolean
mono_arm_eabi_supported (void)
{
	return eabi_supported;
}

int
mono_arm_i8_align (void)
{
	return i8_align;
}

#ifndef DISABLE_JIT

static guint8*
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
{
	CallInfo *cinfo;
	MonoCallInst *call;

	call = (MonoCallInst*)ins;
	cinfo = call->call_info;

	switch (cinfo->ret.storage) {
	case RegTypeStructByVal:
	case RegTypeHFA: {
		MonoInst *loc = cfg->arch.vret_addr_loc;
		int i;

		if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) {
			/* The JIT treats this as a normal call */
			break;
		}

		/* Load the destination address */
		g_assert (loc && loc->opcode == OP_REGOFFSET);

		if (arm_is_imm12 (loc->inst_offset)) {
			ARM_LDR_IMM (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
		}
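		/* Offsets too large for the 12-bit LDR immediate are materialized in LR
		   and loaded with a register-register LDR in the else branch below. */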
else { code = mono_arm_emit_load_imm (code, ARMREG_LR, loc->inst_offset); ARM_LDR_REG_REG (code, ARMREG_LR, loc->inst_basereg, ARMREG_LR); } if (cinfo->ret.storage == RegTypeStructByVal) { int rsize = cinfo->ret.struct_size; for (i = 0; i < cinfo->ret.nregs; ++i) { g_assert (rsize >= 0); switch (rsize) { case 0: break; case 1: ARM_STRB_IMM (code, i, ARMREG_LR, i * 4); break; case 2: ARM_STRH_IMM (code, i, ARMREG_LR, i * 4); break; default: ARM_STR_IMM (code, i, ARMREG_LR, i * 4); break; } rsize -= 4; } } else { for (i = 0; i < cinfo->ret.nregs; ++i) { if (cinfo->ret.esize == 4) ARM_FSTS (code, cinfo->ret.reg + i, ARMREG_LR, i * 4); else ARM_FSTD (code, cinfo->ret.reg + (i * 2), ARMREG_LR, i * 8); } } return code; } default: break; } switch (ins->opcode) { case OP_FCALL: case OP_FCALL_REG: case OP_FCALL_MEMBASE: if (IS_VFP) { MonoType *sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret); if (sig_ret->type == MONO_TYPE_R4) { if (IS_HARD_FLOAT) { ARM_CVTS (code, ins->dreg, ARM_VFP_F0); } else { ARM_FMSR (code, ins->dreg, ARMREG_R0); ARM_CVTS (code, ins->dreg, ins->dreg); } } else { if (IS_HARD_FLOAT) { ARM_CPYD (code, ins->dreg, ARM_VFP_D0); } else { ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg); } } } break; case OP_RCALL: case OP_RCALL_REG: case OP_RCALL_MEMBASE: { MonoType *sig_ret; g_assert (IS_VFP); sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret); g_assert (sig_ret->type == MONO_TYPE_R4); if (IS_HARD_FLOAT) { ARM_CPYS (code, ins->dreg, ARM_VFP_F0); } else { ARM_FMSR (code, ins->dreg, ARMREG_R0); ARM_CPYS (code, ins->dreg, ins->dreg); } break; } default: break; } return code; } void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins; MonoCallInst *call; guint8 *code = cfg->native_code + cfg->code_len; MonoInst *last_ins = NULL; int max_len, cpos; int imm8, rot_amount; /* we don't align basic blocks of loops on arm */ if (cfg->verbose_level > 2) g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset); cpos = bb->max_offset; if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) { mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break)); code = emit_call_seq (cfg, code); } MONO_BB_FOR_EACH_INS (bb, ins) { guint offset = code - cfg->native_code; set_code_cursor (cfg, code); max_len = ins_get_size (ins->opcode); code = realloc_code (cfg, max_len); // if (ins->cil_code) // g_print ("cil code\n"); mono_debug_record_line_number (cfg, ins, offset); switch (ins->opcode) { case OP_MEMORY_BARRIER: if (v7_supported) { ARM_DMB (code, ARM_DMB_ISH); } else if (v6_supported) { ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0); ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5); } break; case OP_TLS_GET: code = emit_tls_get (code, ins->dreg, ins->inst_offset); break; case OP_TLS_SET: code = emit_tls_set (code, ins->sreg1, ins->inst_offset); break; case OP_ATOMIC_EXCHANGE_I4: case OP_ATOMIC_CAS_I4: case OP_ATOMIC_ADD_I4: { int tmpreg; guint8 *buf [16]; g_assert (v7_supported); /* Free up a reg */ if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP) tmpreg = ARMREG_IP; else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0) tmpreg = ARMREG_R0; else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1) tmpreg = ARMREG_R1; else tmpreg = ARMREG_R2; g_assert 
(cfg->arch.atomic_tmp_offset != -1); ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset); switch (ins->opcode) { case OP_ATOMIC_EXCHANGE_I4: buf [0] = code; ARM_DMB (code, ARM_DMB_ISH); ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1); ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1); ARM_CMP_REG_IMM (code, tmpreg, 0, 0); buf [1] = code; ARM_B_COND (code, ARMCOND_NE, 0); arm_patch (buf [1], buf [0]); break; case OP_ATOMIC_CAS_I4: ARM_DMB (code, ARM_DMB_ISH); buf [0] = code; ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1); ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3); buf [1] = code; ARM_B_COND (code, ARMCOND_NE, 0); ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1); ARM_CMP_REG_IMM (code, tmpreg, 0, 0); buf [2] = code; ARM_B_COND (code, ARMCOND_NE, 0); arm_patch (buf [2], buf [0]); arm_patch (buf [1], code); break; case OP_ATOMIC_ADD_I4: buf [0] = code; ARM_DMB (code, ARM_DMB_ISH); ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1); ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2); ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1); ARM_CMP_REG_IMM (code, tmpreg, 0, 0); buf [1] = code; ARM_B_COND (code, ARMCOND_NE, 0); arm_patch (buf [1], buf [0]); break; default: g_assert_not_reached (); } ARM_DMB (code, ARM_DMB_ISH); if (tmpreg != ins->dreg) ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset); ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR); break; } case OP_ATOMIC_LOAD_I1: case OP_ATOMIC_LOAD_U1: case OP_ATOMIC_LOAD_I2: case OP_ATOMIC_LOAD_U2: case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_LOAD_U4: case OP_ATOMIC_LOAD_R4: case OP_ATOMIC_LOAD_R8: { if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) ARM_DMB (code, ARM_DMB_ISH); code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset); switch (ins->opcode) { case OP_ATOMIC_LOAD_I1: ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR); break; case OP_ATOMIC_LOAD_U1: ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR); break; case OP_ATOMIC_LOAD_I2: ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR); break; case OP_ATOMIC_LOAD_U2: ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR); break; case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_LOAD_U4: ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR); break; case OP_ATOMIC_LOAD_R4: if (cfg->r4fp) { ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR); ARM_FLDS (code, ins->dreg, ARMREG_LR, 0); } else { code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1); ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR); ARM_FLDS (code, vfp_scratch1, ARMREG_LR, 0); ARM_CVTS (code, ins->dreg, vfp_scratch1); code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1); } break; case OP_ATOMIC_LOAD_R8: ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR); ARM_FLDD (code, ins->dreg, ARMREG_LR, 0); break; } if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE) ARM_DMB (code, ARM_DMB_ISH); break; } case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_U1: case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_U2: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U4: case OP_ATOMIC_STORE_R4: case OP_ATOMIC_STORE_R8: { if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE) ARM_DMB (code, ARM_DMB_ISH); code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset); switch (ins->opcode) { case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_U1: ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR); break; case OP_ATOMIC_STORE_I2: case 
OP_ATOMIC_STORE_U2: ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR); break; case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U4: ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR); break; case OP_ATOMIC_STORE_R4: if (cfg->r4fp) { ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR); ARM_FSTS (code, ins->sreg1, ARMREG_LR, 0); } else { code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1); ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR); ARM_CVTD (code, vfp_scratch1, ins->sreg1); ARM_FSTS (code, vfp_scratch1, ARMREG_LR, 0); code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1); } break; case OP_ATOMIC_STORE_R8: ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR); ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0); break; } if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) ARM_DMB (code, ARM_DMB_ISH); break; } case OP_BIGMUL: ARM_SMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_BIGMUL_UN: ARM_UMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_STOREI1_MEMBASE_IMM: code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF); g_assert (arm_is_imm12 (ins->inst_offset)); ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset); break; case OP_STOREI2_MEMBASE_IMM: code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF); g_assert (arm_is_imm8 (ins->inst_offset)); ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset); break; case OP_STORE_MEMBASE_IMM: case OP_STOREI4_MEMBASE_IMM: code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm); g_assert (arm_is_imm12 (ins->inst_offset)); ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset); break; case OP_STOREI1_MEMBASE_REG: g_assert (arm_is_imm12 (ins->inst_offset)); ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); break; case OP_STOREI2_MEMBASE_REG: g_assert (arm_is_imm8 (ins->inst_offset)); ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); break; case OP_STORE_MEMBASE_REG: case OP_STOREI4_MEMBASE_REG: /* this case is special, since it happens for spill code after lowering has been called */ if (arm_is_imm12 (ins->inst_offset)) { ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset); ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR); } break; case OP_STOREI1_MEMINDEX: ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case OP_STOREI2_MEMINDEX: ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case OP_STORE_MEMINDEX: case OP_STOREI4_MEMINDEX: ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case OP_LOADU4_MEM: g_assert_not_reached (); break; case OP_LOAD_MEMINDEX: case OP_LOADI4_MEMINDEX: case OP_LOADU4_MEMINDEX: ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADI1_MEMINDEX: ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADU1_MEMINDEX: ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADI2_MEMINDEX: ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADU2_MEMINDEX: ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: case 
OP_LOADU4_MEMBASE: /* this case is special, since it happens for spill code after lowering has been called */ if (arm_is_imm12 (ins->inst_offset)) { ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset); ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR); } break; case OP_LOADI1_MEMBASE: g_assert (arm_is_imm8 (ins->inst_offset)); ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset); break; case OP_LOADU1_MEMBASE: g_assert (arm_is_imm12 (ins->inst_offset)); ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset); break; case OP_LOADU2_MEMBASE: g_assert (arm_is_imm8 (ins->inst_offset)); ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset); break; case OP_LOADI2_MEMBASE: g_assert (arm_is_imm8 (ins->inst_offset)); ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset); break; case OP_ICONV_TO_I1: ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24); ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24); break; case OP_ICONV_TO_I2: ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16); ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16); break; case OP_ICONV_TO_U1: ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff); break; case OP_ICONV_TO_U2: ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16); ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16); break; case OP_COMPARE: case OP_ICOMPARE: ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2); break; case OP_COMPARE_IMM: case OP_ICOMPARE_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount); break; case OP_BREAK: /* * gdb does not like encountering the hw breakpoint ins in the debugged code. * So instead of emitting a trap, we emit a call a C function and place a * breakpoint there. */ //*(int*)code = 0xef9f0001; //code += 4; //ARM_DBRK (code); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break)); code = emit_call_seq (cfg, code); break; case OP_RELAXED_NOP: ARM_NOP (code); break; case OP_NOP: case OP_DUMMY_USE: case OP_DUMMY_ICONST: case OP_DUMMY_R8CONST: case OP_DUMMY_R4CONST: case OP_NOT_REACHED: case OP_NOT_NULL: break; case OP_IL_SEQ_POINT: mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); break; case OP_SEQ_POINT: { int i; MonoInst *info_var = cfg->arch.seq_point_info_var; MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var; MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var; MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var; MonoInst *var; int dreg = ARMREG_LR; #if 0 if (cfg->soft_breakpoints) { g_assert (!cfg->compile_aot); } #endif /* * For AOT, we use one got slot per method, which will point to a * SeqPointInfo structure, containing all the information required * by the code below. */ if (cfg->compile_aot) { g_assert (info_var); g_assert (info_var->opcode == OP_REGOFFSET); } if (!cfg->soft_breakpoints && !cfg->compile_aot) { /* * Read from the single stepping trigger page. This will cause a * SIGSEGV when single stepping is enabled. * We do this _before_ the breakpoint, so single stepping after * a breakpoint is hit will step to the next IL offset. */ g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0); } /* Single step check */ if (ins->flags & MONO_INST_SINGLE_STEP_LOC) { if (cfg->soft_breakpoints) { /* Load the address of the sequence point method variable. 
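				 * (The method pointer stored there is non-NULL only while single
				 * stepping is active, so the conditional call below fires only then.)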
*/ var = ss_method_var; g_assert (var); g_assert (var->opcode == OP_REGOFFSET); code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset); /* Read the value and check whether it is non-zero. */ ARM_LDR_IMM (code, dreg, dreg, 0); ARM_CMP_REG_IMM (code, dreg, 0, 0); /* Call it conditionally. */ ARM_BLX_REG_COND (code, ARMCOND_NE, dreg); } else { if (cfg->compile_aot) { /* Load the trigger page addr from the variable initialized in the prolog */ var = ss_trigger_page_var; g_assert (var); g_assert (var->opcode == OP_REGOFFSET); code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset); } else { ARM_LDR_IMM (code, dreg, ARMREG_PC, 0); ARM_B (code, 0); *(int*)code = (int)(gsize)ss_trigger_page; code += 4; } ARM_LDR_IMM (code, dreg, dreg, 0); } } mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); /* Breakpoint check */ if (cfg->compile_aot) { const guint32 offset = code - cfg->native_code; guint32 val; var = info_var; code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset); /* Add the offset */ val = ((offset / 4) * sizeof (target_mgreg_t)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs); /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */ if (arm_is_imm12 ((int)val)) { ARM_LDR_IMM (code, dreg, dreg, val); } else { ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0); if (val & 0xFF00) ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24); if (val & 0xFF0000) ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16); g_assert (!(val & 0xFF000000)); ARM_LDR_IMM (code, dreg, dreg, 0); } /* What is faster, a branch or a load ? */ ARM_CMP_REG_IMM (code, dreg, 0, 0); /* The breakpoint instruction */ if (cfg->soft_breakpoints) ARM_BLX_REG_COND (code, ARMCOND_NE, dreg); else ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE); } else if (cfg->soft_breakpoints) { /* Load the address of the breakpoint method into ip. */ var = bp_method_var; g_assert (var); g_assert (var->opcode == OP_REGOFFSET); g_assert (arm_is_imm12 (var->inst_offset)); ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset); /* * A placeholder for a possible breakpoint inserted by * mono_arch_set_breakpoint (). */ ARM_NOP (code); } else { /* * A placeholder for a possible breakpoint inserted by * mono_arch_set_breakpoint (). */ for (i = 0; i < 4; ++i) ARM_NOP (code); } /* * Add an additional nop so skipping the bp doesn't cause the ip to point * to another IL offset. 
*/ ARM_NOP (code); break; } case OP_ADDCC: case OP_IADDCC: ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IADD: ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_ADC: case OP_IADC: ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_ADDCC_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_ADD_IMM: case OP_IADD_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_ADC_IMM: case OP_IADC_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_IADD_OVF: ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_IADD_OVF_UN: ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_ISUB_OVF: ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_ISUB_OVF_UN: ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException"); break; case OP_ADD_OVF_CARRY: ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_ADD_OVF_UN_CARRY: ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_SUB_OVF_CARRY: ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_SUB_OVF_UN_CARRY: ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException"); break; case OP_SUBCC: case OP_ISUBCC: ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SUBCC_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_ISUB: ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SBB: case OP_ISBB: ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SUB_IMM: case OP_ISUB_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_SBB_IMM: case OP_ISBB_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_ARM_RSBS_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_ARM_RSC_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_IAND: ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_AND_IMM: case OP_IAND_IMM: imm8 = 
mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_IDIV: g_assert (v7s_supported || v7k_supported); ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IDIV_UN: g_assert (v7s_supported || v7k_supported); ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IREM: g_assert (v7s_supported || v7k_supported); ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2); ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1); break; case OP_IREM_UN: g_assert (v7s_supported || v7k_supported); ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2); ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1); break; case OP_DIV_IMM: case OP_REM_IMM: g_assert_not_reached (); case OP_IOR: ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_OR_IMM: case OP_IOR_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_IXOR: ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_XOR_IMM: case OP_IXOR_IMM: imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount); g_assert (imm8 >= 0); ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount); break; case OP_ISHL: ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SHL_IMM: case OP_ISHL_IMM: if (ins->inst_imm) ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f)); else if (ins->dreg != ins->sreg1) ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1); break; case OP_ISHR: ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SHR_IMM: case OP_ISHR_IMM: if (ins->inst_imm) ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f)); else if (ins->dreg != ins->sreg1) ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1); break; case OP_SHR_UN_IMM: case OP_ISHR_UN_IMM: if (ins->inst_imm) ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f)); else if (ins->dreg != ins->sreg1) ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1); break; case OP_ISHR_UN: ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_INOT: ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1); break; case OP_INEG: ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0); break; case OP_IMUL: if (ins->dreg == ins->sreg2) ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); else ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1); break; case OP_MUL_IMM: g_assert_not_reached (); break; case OP_IMUL_OVF: /* FIXME: handle ovf/ sreg2 != dreg */ ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); /* FIXME: MUL doesn't set the C/O flags on ARM */ break; case OP_IMUL_OVF_UN: /* FIXME: handle ovf/ sreg2 != dreg */ ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2); /* FIXME: MUL doesn't set the C/O flags on ARM */ break; case OP_ICONST: code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0); break; case OP_AOTCONST: /* Load the GOT offset */ mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0); ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0); ARM_B (code, 0); *(gpointer*)code = NULL; code += 4; /* Load the value from the GOT */ ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg); break; case OP_OBJC_GET_SELECTOR: mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0); ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0); ARM_B (code, 0); *(gpointer*)code = NULL; code += 4; ARM_LDR_REG_REG 
		case OP_AOTCONST:
			/* Load the GOT offset */
			mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0);
			ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
			ARM_B (code, 0);
			*(gpointer*)code = NULL;
			code += 4;
			/* Load the value from the GOT */
			ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
			break;
		case OP_OBJC_GET_SELECTOR:
			mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
			ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
			ARM_B (code, 0);
			*(gpointer*)code = NULL;
			code += 4;
			ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
			break;
		case OP_ICONV_TO_I4:
		case OP_ICONV_TO_U4:
		case OP_MOVE:
			if (ins->dreg != ins->sreg1)
				ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
			break;
		case OP_SETLRET: {
			int saved = ins->sreg2;
			if (ins->sreg2 == ARM_LSW_REG) {
				ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
				saved = ARMREG_LR;
			}
			if (ins->sreg1 != ARM_LSW_REG)
				ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
			if (saved != ARM_MSW_REG)
				ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
			break;
		}
		case OP_FMOVE:
			if (IS_VFP && ins->dreg != ins->sreg1)
				ARM_CPYD (code, ins->dreg, ins->sreg1);
			break;
		case OP_RMOVE:
			if (IS_VFP && ins->dreg != ins->sreg1)
				ARM_CPYS (code, ins->dreg, ins->sreg1);
			break;
		case OP_MOVE_F_TO_I4:
			if (cfg->r4fp) {
				ARM_FMRS (code, ins->dreg, ins->sreg1);
			} else {
				code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
				ARM_CVTD (code, vfp_scratch1, ins->sreg1);
				ARM_FMRS (code, ins->dreg, vfp_scratch1);
				code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
			}
			break;
		case OP_MOVE_I4_TO_F:
			if (cfg->r4fp) {
				ARM_FMSR (code, ins->dreg, ins->sreg1);
			} else {
				ARM_FMSR (code, ins->dreg, ins->sreg1);
				ARM_CVTS (code, ins->dreg, ins->dreg);
			}
			break;
		case OP_FCONV_TO_R4:
			if (IS_VFP) {
				if (cfg->r4fp) {
					ARM_CVTD (code, ins->dreg, ins->sreg1);
				} else {
					ARM_CVTD (code, ins->dreg, ins->sreg1);
					ARM_CVTS (code, ins->dreg, ins->dreg);
				}
			}
			break;
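		/*
		 * Tail calls: the callee's outgoing arguments are copied over the
		 * caller's incoming argument area, the frame is popped, and control
		 * transfers with a jump instead of a call, so the callee returns
		 * directly to our caller.
		 */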
		case OP_TAILCALL_PARAMETER:
			// This opcode helps compute sizes, i.e.
			// of the subsequent OP_TAILCALL, but contributes no code.
			g_assert (ins->next);
			break;
		case OP_TAILCALL:
		case OP_TAILCALL_MEMBASE:
		case OP_TAILCALL_REG: {
			gboolean const tailcall_membase = ins->opcode == OP_TAILCALL_MEMBASE;
			gboolean const tailcall_reg = ins->opcode == OP_TAILCALL_REG;
			MonoCallInst *call = (MonoCallInst*)ins;

			max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER);

			if (IS_HARD_FLOAT)
				code = emit_float_args (cfg, call, code, &max_len, &offset);

			code = realloc_code (cfg, max_len);

			// For reg and membase, get destination in IP.
			if (tailcall_reg) {
				g_assert (ins->sreg1 > -1);
				if (ins->sreg1 != ARMREG_IP)
					ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg1);
			} else if (tailcall_membase) {
				g_assert (ins->sreg1 > -1);
				if (!arm_is_imm12 (ins->inst_offset)) {
					g_assert (ins->sreg1 != ARMREG_IP); // temp in emit_big_add
					code = emit_big_add (code, ARMREG_IP, ins->sreg1, ins->inst_offset);
					ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
				} else {
					ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, ins->inst_offset);
				}
			}

			/*
			 * The stack looks like the following:
			 * <caller argument area>
			 * <saved regs etc>
			 * <rest of frame>
			 * <callee argument area>
			 * <optionally saved IP> (about to be)
			 * Need to copy the arguments from the callee argument area to
			 * the caller argument area, and pop the frame.
			 */
			if (call->stack_usage) {
				int i, prev_sp_offset = 0;

				// When we get here, the parameters to the tailcall are already formed,
				// in registers and at the bottom of the grow-down stack.
				//
				// Our goal is generally preserve parameters, and trim the stack,
				// and, before trimming stack, move parameters from the bottom of the
				// frame to the bottom of the trimmed frame.

				// For the case of large frames, and presently therefore always,
				// IP is used as an adjusted frame_reg.
				// Be conservative and save IP around the movement
				// of parameters from the bottom of frame to top of the frame.
				const gboolean save_ip = tailcall_membase || tailcall_reg;
				if (save_ip)
					ARM_PUSH (code, 1 << ARMREG_IP);

				// When moving stacked parameters from the bottom
				// of the frame (sp) to the top of the frame (ip),
				// account, 0 or 4, for the conditional save of IP.
				const int offset_sp = save_ip ? 4 : 0;
				const int offset_ip = (save_ip && (cfg->frame_reg == ARMREG_SP)) ? 4 : 0;

				/* Compute size of saved registers restored below */
				if (iphone_abi)
					prev_sp_offset = 2 * 4;
				else
					prev_sp_offset = 1 * 4;
				for (i = 0; i < 16; ++i) {
					if (cfg->used_int_regs & (1 << i))
						prev_sp_offset += 4;
				}

				// Point IP at the start of where the parameters will go after trimming stack.
				// After locals and saved registers.
				code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);

				/* Copy arguments on the stack to our argument area */
				// FIXME a fixed size memcpy is desirable here,
				// at least for larger values of stack_usage.
				//
				// FIXME For most functions, with frames < 4K, we can use frame_reg directly here instead of IP.
				// See https://github.com/mono/mono/pull/12079
				// See https://github.com/mono/mono/pull/12079/commits/93e7007a9567b78fa8152ce404b372b26e735516
				for (i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) {
					ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i + offset_sp);
					ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i + offset_ip);
				}

				if (save_ip)
					ARM_POP (code, 1 << ARMREG_IP);
			}

			/*
			 * Keep in sync with mono_arch_emit_epilog
			 */
			g_assert (!cfg->method->save_lmf);

			code = emit_big_add_temp (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage, ARMREG_LR);
			if (iphone_abi) {
				if (cfg->used_int_regs)
					ARM_POP (code, cfg->used_int_regs);
				ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
			} else {
				ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
			}

			if (tailcall_reg || tailcall_membase) {
				code = emit_jmp_reg (code, ARMREG_IP);
			} else {
				mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);

				if (cfg->compile_aot) {
					ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
					ARM_B (code, 0);
					*(gpointer*)code = NULL;
					code += 4;
					ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
				} else {
					code = mono_arm_patchable_b (code, ARMCOND_AL);
					cfg->thunk_area += THUNK_SIZE;
				}
			}
			break;
		}
		case OP_CHECK_THIS:
			/* ensure ins->sreg1 is not NULL */
			ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
			break;
		case OP_ARGLIST: {
			g_assert (cfg->sig_cookie < 128);
			ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
			ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
			break;
		}
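		/*
		 * All the call variants below record a GC callsite (MONO_INST_GC_CALLSITE
		 * plus backend.pc_offset) so the GC can find live references at the
		 * call, and route the result through emit_move_return_value.
		 */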
		case OP_FCALL:
		case OP_RCALL:
		case OP_LCALL:
		case OP_VCALL:
		case OP_VCALL2:
		case OP_VOIDCALL:
		case OP_CALL:
			call = (MonoCallInst*)ins;

			if (IS_HARD_FLOAT)
				code = emit_float_args (cfg, call, code, &max_len, &offset);

			mono_call_add_patch_info (cfg, call, code - cfg->native_code);
			code = emit_call_seq (cfg, code);
			ins->flags |= MONO_INST_GC_CALLSITE;
			ins->backend.pc_offset = code - cfg->native_code;
			code = emit_move_return_value (cfg, ins, code);
			break;
		case OP_FCALL_REG:
		case OP_RCALL_REG:
		case OP_LCALL_REG:
		case OP_VCALL_REG:
		case OP_VCALL2_REG:
		case OP_VOIDCALL_REG:
		case OP_CALL_REG:
			if (IS_HARD_FLOAT)
				code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);

			code = emit_call_reg (code, ins->sreg1);
			ins->flags |= MONO_INST_GC_CALLSITE;
			ins->backend.pc_offset = code - cfg->native_code;
			code = emit_move_return_value (cfg, ins, code);
			break;
		case OP_FCALL_MEMBASE:
		case OP_RCALL_MEMBASE:
		case OP_LCALL_MEMBASE:
		case OP_VCALL_MEMBASE:
		case OP_VCALL2_MEMBASE:
		case OP_VOIDCALL_MEMBASE:
		case OP_CALL_MEMBASE: {
			g_assert (ins->sreg1 != ARMREG_LR);
			call = (MonoCallInst*)ins;

			if (IS_HARD_FLOAT)
				code = emit_float_args (cfg, call, code, &max_len, &offset);
			if (!arm_is_imm12 (ins->inst_offset)) {
				/* sreg1 might be IP */
				ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1);
				code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
				ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_LR);
				ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
				ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, 0);
			} else {
				ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
				ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
			}
			ins->flags |= MONO_INST_GC_CALLSITE;
			ins->backend.pc_offset = code - cfg->native_code;
			code = emit_move_return_value (cfg, ins, code);
			break;
		}
		case OP_GENERIC_CLASS_INIT: {
			int byte_offset;
			guint8 *jump;

			byte_offset = MONO_STRUCT_OFFSET (MonoVTable, initialized);

			g_assert (arm_is_imm8 (byte_offset));
			ARM_LDRSB_IMM (code, ARMREG_IP, ins->sreg1, byte_offset);
			ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
			jump = code;
			ARM_B_COND (code, ARMCOND_NE, 0);

			/* Uninitialized case */
			g_assert (ins->sreg1 == ARMREG_R0);

			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init));
			code = emit_call_seq (cfg, code);

			/* Initialized case */
			arm_patch (jump, code);
			break;
		}
		case OP_LOCALLOC: {
			/* round the size to 8 bytes */
			ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, (MONO_ARCH_FRAME_ALIGNMENT - 1));
			ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, (MONO_ARCH_FRAME_ALIGNMENT - 1));
			ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
			/* memzero the area: dreg holds the size, sp is the pointer */
			if (ins->flags & MONO_INST_INIT) {
				guint8 *start_loop, *branch_to_cond;
				ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
				branch_to_cond = code;
				ARM_B (code, 0);
				start_loop = code;
				ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
				arm_patch (branch_to_cond, code);
				/* decrement by 4 and set flags */
				ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (target_mgreg_t));
				ARM_B_COND (code, ARMCOND_GE, 0);
				arm_patch (code - 4, start_loop);
			}
			ARM_MOV_REG_REG (code, ins->dreg, ARMREG_SP);
			if (cfg->param_area)
				code = emit_sub_imm (code, ARMREG_SP, ARMREG_SP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
			break;
		}
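		/*
		 * OP_DYN_CALL invokes an arbitrary target whose arguments were
		 * marshalled into a DynCallArgs buffer (see the in-code reference to
		 * mono_arch_get_dyn_call_args ()): fp registers, stack slots and the
		 * core argument registers are reloaded from it before the indirect
		 * call, and the return value is written back into it.
		 */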
		case OP_DYN_CALL: {
			int i;
			MonoInst *var = cfg->dyn_call_var;
			guint8 *labels [16];

			g_assert (var->opcode == OP_REGOFFSET);
			g_assert (arm_is_imm12 (var->inst_offset));

			/* lr = args buffer filled by mono_arch_get_dyn_call_args () */
			ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1);
			/* ip = ftn */
			ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg2);

			/* Save args buffer */
			ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);

			/* Set fp argument registers */
			if (IS_HARD_FLOAT) {
				ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, has_fpregs));
				ARM_CMP_REG_IMM (code, ARMREG_R0, 0, 0);
				labels [0] = code;
				ARM_B_COND (code, ARMCOND_EQ, 0);
				for (i = 0; i < FP_PARAM_REGS; ++i) {
					const int offset = MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * sizeof (double));
					g_assert (arm_is_fpimm8 (offset));
					ARM_FLDD (code, i * 2, ARMREG_LR, offset);
				}
				arm_patch (labels [0], code);
			}

			/* Allocate callee area */
			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
			ARM_SHL_IMM (code, ARMREG_R1, ARMREG_R1, 2);
			ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_R1);

			/* Set stack args */
			/* R1 = limit */
			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
			/* R2 = pointer into regs */
			code = emit_big_add (code, ARMREG_R2, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (PARAM_REGS * sizeof (target_mgreg_t)));
			/* R3 = pointer to stack */
			ARM_MOV_REG_REG (code, ARMREG_R3, ARMREG_SP);
			/* Loop */
			labels [0] = code;
			ARM_B_COND (code, ARMCOND_AL, 0);
			labels [1] = code;
			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R2, 0);
			ARM_STR_IMM (code, ARMREG_R0, ARMREG_R3, 0);
			ARM_ADD_REG_IMM (code, ARMREG_R2, ARMREG_R2, sizeof (target_mgreg_t), 0);
			ARM_ADD_REG_IMM (code, ARMREG_R3, ARMREG_R3, sizeof (target_mgreg_t), 0);
			ARM_SUB_REG_IMM (code, ARMREG_R1, ARMREG_R1, 1, 0);
			arm_patch (labels [0], code);
			ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
			labels [2] = code;
			ARM_B_COND (code, ARMCOND_GT, 0);
			arm_patch (labels [2], labels [1]);

			/* Set argument registers */
			for (i = 0; i < PARAM_REGS; ++i)
				ARM_LDR_IMM (code, i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (i * sizeof (target_mgreg_t)));

			/* Make the call */
			ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
			ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);

			/* Save result */
			ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
			ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
			ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
			if (IS_HARD_FLOAT)
				ARM_FSTD (code, ARM_VFP_D0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, fpregs));
			break;
		}
		case OP_THROW: {
			if (ins->sreg1 != ARMREG_R0)
				ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception));
			code = emit_call_seq (cfg, code);
			break;
		}
		case OP_RETHROW: {
			if (ins->sreg1 != ARMREG_R0)
				ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception));
			code = emit_call_seq (cfg, code);
			break;
		}
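		/*
		 * Exception handler blocks: on entry LR holds the return address for
		 * the handler; it is spilled to the per-region spvar so that
		 * OP_ENDFILTER / OP_ENDFINALLY can reload it and jump back.
		 */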
		case OP_START_HANDLER: {
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
			int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
			int i, rot_amount;

			/* Reserve a param area, see filter-stack.exe */
			if (param_area) {
				if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
					ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
				} else {
					code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
					ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
				}
			}

			if (arm_is_imm12 (spvar->inst_offset)) {
				ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
			} else {
				code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
				ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
			}
			break;
		}
		case OP_ENDFILTER: {
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
			int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
			int i, rot_amount;

			/* Free the param area */
			if (param_area) {
				if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
					ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
				} else {
					code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
					ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
				}
			}

			if (ins->sreg1 != ARMREG_R0)
				ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
			if (arm_is_imm12 (spvar->inst_offset)) {
				ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
			} else {
				g_assert (ARMREG_IP != spvar->inst_basereg);
				code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
				ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
			}
			ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
			break;
		}
		case OP_ENDFINALLY: {
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
			int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
			int i, rot_amount;

			/* Free the param area */
			if (param_area) {
				if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
					ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
				} else {
					code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
					ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
				}
			}

			if (arm_is_imm12 (spvar->inst_offset)) {
				ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
			} else {
				g_assert (ARMREG_IP != spvar->inst_basereg);
				code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
				ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
			}
			ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
			break;
		}
		case OP_CALL_HANDLER:
			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
			code = mono_arm_patchable_bl (code, ARMCOND_AL);
			cfg->thunk_area += THUNK_SIZE;
			for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev)
				mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb);
			break;
		case OP_GET_EX_OBJ:
			if (ins->dreg != ARMREG_R0)
				ARM_MOV_REG_REG (code, ins->dreg, ARMREG_R0);
			break;
		case OP_LABEL:
			ins->inst_c0 = code - cfg->native_code;
			break;
		case OP_BR:
			/*if (ins->inst_target_bb->native_offset) {
				ARM_B (code, 0);
				//x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
			} else*/ {
				mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
				code = mono_arm_patchable_b (code, ARMCOND_AL);
			}
			break;
		case OP_BR_REG:
			ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
			break;
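		/*
		 * OP_SWITCH emits an inline jump table right after the load; the
		 * number of entries is stashed in ins->klass, and the slots are
		 * filled with absolute addresses when the MONO_PATCH_INFO_SWITCH
		 * patch is applied.
		 */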
		case OP_SWITCH:
			/*
			 * In the normal case we have:
			 * ldr pc, [pc, ins->sreg1 << 2]
			 * nop
			 * If aot, we have:
			 * ldr lr, [pc, ins->sreg1 << 2]
			 * add pc, pc, lr
			 * After follows the data.
			 * FIXME: add aot support.
			 */
			mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
			max_len += 4 * GPOINTER_TO_INT (ins->klass);
			code = realloc_code (cfg, max_len);
			ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
			ARM_NOP (code);
			code += 4 * GPOINTER_TO_INT (ins->klass);
			break;
		case OP_CEQ:
		case OP_ICEQ:
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
			break;
		case OP_CLT:
		case OP_ICLT:
			ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
			break;
		case OP_CLT_UN:
		case OP_ICLT_UN:
			ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
			break;
		case OP_CGT:
		case OP_ICGT:
			ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
			break;
		case OP_CGT_UN:
		case OP_ICGT_UN:
			ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
			break;
		case OP_ICNEQ:
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
			break;
		case OP_ICGE:
			ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
			break;
		case OP_ICLE:
			ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
			break;
		case OP_ICGE_UN:
			ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
			break;
		case OP_ICLE_UN:
			ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_HI);
			break;
		case OP_COND_EXC_EQ:
		case OP_COND_EXC_NE_UN:
		case OP_COND_EXC_LT:
		case OP_COND_EXC_LT_UN:
		case OP_COND_EXC_GT:
		case OP_COND_EXC_GT_UN:
		case OP_COND_EXC_GE:
		case OP_COND_EXC_GE_UN:
		case OP_COND_EXC_LE:
		case OP_COND_EXC_LE_UN:
			EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
			break;
		case OP_COND_EXC_IEQ:
		case OP_COND_EXC_INE_UN:
		case OP_COND_EXC_ILT:
		case OP_COND_EXC_ILT_UN:
		case OP_COND_EXC_IGT:
		case OP_COND_EXC_IGT_UN:
		case OP_COND_EXC_IGE:
		case OP_COND_EXC_IGE_UN:
		case OP_COND_EXC_ILE:
		case OP_COND_EXC_ILE_UN:
			EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
			break;
		case OP_COND_EXC_C:
		case OP_COND_EXC_IC:
			EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
			break;
		case OP_COND_EXC_OV:
		case OP_COND_EXC_IOV:
			EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
			break;
		case OP_COND_EXC_NC:
		case OP_COND_EXC_INC:
			EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
			break;
		case OP_COND_EXC_NO:
		case OP_COND_EXC_INO:
			EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
			break;
		case OP_IBEQ:
		case OP_IBNE_UN:
		case OP_IBLT:
		case OP_IBLT_UN:
		case OP_IBGT:
		case OP_IBGT_UN:
		case OP_IBGE:
		case OP_IBGE_UN:
		case OP_IBLE:
		case OP_IBLE_UN:
			EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
			break;
		/* floating point opcodes */
		case OP_R8CONST:
			if (cfg->compile_aot) {
				ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
				ARM_B (code, 1);
				*(guint32*)code = ((guint32*)(ins->inst_p0))[0];
				code += 4;
				*(guint32*)code = ((guint32*)(ins->inst_p0))[1];
				code += 4;
			} else {
				/* FIXME: we can optimize the imm load by dealing with part of
				 * the displacement in LDFD (aligning to 512).
				 */
				code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0);
				ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
			}
			break;
		case OP_R4CONST:
			if (cfg->compile_aot) {
				ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
				ARM_B (code, 0);
				*(guint32*)code = ((guint32*)(ins->inst_p0))[0];
				code += 4;
				if (!cfg->r4fp)
					ARM_CVTS (code, ins->dreg, ins->dreg);
			} else {
				code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0);
				ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
				if (!cfg->r4fp)
					ARM_CVTS (code, ins->dreg, ins->dreg);
			}
			break;
		case OP_STORER8_MEMBASE_REG:
			/* This is generated by the local regalloc pass which runs after the lowering pass */
			if (!arm_is_fpimm8 (ins->inst_offset)) {
				code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
				ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
				ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
			} else {
				ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
			}
			break;
		case OP_LOADR8_MEMBASE:
			/* This is generated by the local regalloc pass which runs after the lowering pass */
			if (!arm_is_fpimm8 (ins->inst_offset)) {
				code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
				ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
				ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
			} else {
				ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
			}
			break;
		case OP_STORER4_MEMBASE_REG:
			g_assert (arm_is_fpimm8 (ins->inst_offset));
			if (cfg->r4fp) {
				ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
			} else {
				code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
				ARM_CVTD (code, vfp_scratch1, ins->sreg1);
				ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
				code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
			}
			break;
		case OP_LOADR4_MEMBASE:
			if (cfg->r4fp) {
				ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
			} else {
				g_assert (arm_is_fpimm8 (ins->inst_offset));
				code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
				ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
				ARM_CVTS (code, ins->dreg, vfp_scratch1);
				code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
			}
			break;
		case OP_ICONV_TO_R_UN: {
			g_assert_not_reached ();
			break;
		}
		case OP_ICONV_TO_R4:
			if (cfg->r4fp) {
				ARM_FMSR (code, ins->dreg, ins->sreg1);
				ARM_FSITOS (code, ins->dreg, ins->dreg);
			} else {
				code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
				ARM_FMSR (code, vfp_scratch1, ins->sreg1);
				ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
				ARM_CVTS (code, ins->dreg, vfp_scratch1);
				code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
			}
			break;
		case OP_ICONV_TO_R8:
			code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
			ARM_FMSR (code, vfp_scratch1, ins->sreg1);
			ARM_FSITOD (code, ins->dreg, vfp_scratch1);
			code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
			break;
		case OP_SETFRET: {
			MonoType *sig_ret = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret);
			if (sig_ret->type == MONO_TYPE_R4) {
				if (cfg->r4fp) {
					if (IS_HARD_FLOAT) {
						if (ins->sreg1 != ARM_VFP_D0)
							ARM_CPYS (code, ARM_VFP_D0, ins->sreg1);
					} else {
						ARM_FMRS (code, ARMREG_R0, ins->sreg1);
					}
				} else {
					ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);

					if (!IS_HARD_FLOAT)
						ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
				}
			} else {
				if (IS_HARD_FLOAT)
					ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
				else
					ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
			}
			break;
		}
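		/*
		 * Narrowing double->int conversions all go through emit_float_to_int,
		 * parameterized by the destination width in bytes and its signedness.
		 */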
		case OP_FCONV_TO_I1:
			code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
			break;
		case OP_FCONV_TO_U1:
			code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
			break;
		case OP_FCONV_TO_I2:
			code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
			break;
		case OP_FCONV_TO_U2:
			code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
			break;
		case OP_FCONV_TO_I4:
			code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
			break;
		case OP_FCONV_TO_U4:
			code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
			break;
		case OP_FCONV_TO_I8:
		case OP_FCONV_TO_U8:
			g_assert_not_reached ();
			/* Implemented as helper calls */
			break;
		case OP_LCONV_TO_R_UN:
			g_assert_not_reached ();
			/* Implemented as helper calls */
			break;
		case OP_LCONV_TO_OVF_I4_2: {
			guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
			/*
			 * Valid ints: 0xFFFFFFFF:80000000 to 0x00000000:7FFFFFFF
			 */
			ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
			high_bit_not_set = code;
			ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/

			ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This has the same effect as CMP reg, 0xFFFFFFFF */
			valid_negative = code;
			ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
			invalid_negative = code;
			ARM_B_COND (code, ARMCOND_AL, 0);

			arm_patch (high_bit_not_set, code);

			ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
			valid_positive = code;
			ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/

			arm_patch (invalid_negative, code);
			EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");

			arm_patch (valid_negative, code);
			arm_patch (valid_positive, code);

			if (ins->dreg != ins->sreg1)
				ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
			break;
		}
		case OP_FADD:
			ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_FSUB:
			ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_FMUL:
			ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_FDIV:
			ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_FNEG:
			ARM_NEGD (code, ins->dreg, ins->sreg1);
			break;
		case OP_FREM:
			/* emulated */
			g_assert_not_reached ();
			break;
		case OP_FCOMPARE:
			if (IS_VFP) {
				ARM_CMPD (code, ins->sreg1, ins->sreg2);
				ARM_FMSTAT (code);
			}
			break;
		case OP_RCOMPARE:
			g_assert (IS_VFP);
			ARM_CMPS (code, ins->sreg1, ins->sreg2);
			ARM_FMSTAT (code);
			break;
		case OP_FCEQ:
			if (IS_VFP) {
				ARM_CMPD (code, ins->sreg1, ins->sreg2);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
			break;
		case OP_FCLT:
			if (IS_VFP) {
				ARM_CMPD (code, ins->sreg1, ins->sreg2);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
			break;
		case OP_FCLT_UN:
			if (IS_VFP) {
				ARM_CMPD (code, ins->sreg1, ins->sreg2);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
			break;
		case OP_FCGT:
			if (IS_VFP) {
				ARM_CMPD (code, ins->sreg2, ins->sreg1);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
			break;
		case OP_FCGT_UN:
			if (IS_VFP) {
				ARM_CMPD (code, ins->sreg2, ins->sreg1);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
			break;
		case OP_FCNEQ:
			if (IS_VFP) {
				ARM_CMPD (code, ins->sreg1, ins->sreg2);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
			break;
		case OP_FCGE:
			if (IS_VFP) {
				ARM_CMPD (code, ins->sreg1, ins->sreg2);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
			break;
		case OP_FCLE:
			if (IS_VFP) {
				ARM_CMPD (code, ins->sreg2, ins->sreg1);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
			break;

		/* ARM FPA flags table:
		 * N        Less than               ARMCOND_MI
		 * Z        Equal                   ARMCOND_EQ
		 * C        Greater Than or Equal   ARMCOND_CS
		 * V        Unordered               ARMCOND_VS
		 */
		case OP_FBEQ:
			EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
			break;
		case OP_FBNE_UN:
			EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
			break;
		case OP_FBLT:
			EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
			break;
		case OP_FBLT_UN:
			EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
			EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
			break;
		case OP_FBGT:
		case OP_FBGT_UN:
		case OP_FBLE:
		case OP_FBLE_UN:
			g_assert_not_reached ();
			break;
		case OP_FBGE:
			if (IS_VFP) {
				EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
			} else {
				/* FPA requires EQ even though the docs suggest that just CS is enough */
				EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
				EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
			}
			break;
		case OP_FBGE_UN:
			EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
			EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
			break;
		case OP_CKFINITE: {
			if (IS_VFP) {
				code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
				code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);

				ARM_ABSD (code, vfp_scratch2, ins->sreg1);
				ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
				ARM_B (code, 1);
				*(guint32*)code = 0xffffffff;
				code += 4;
				*(guint32*)code = 0x7fefffff;
				code += 4;
				ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
				ARM_FMSTAT (code);
				EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "OverflowException");
				ARM_CMPD (code, ins->sreg1, ins->sreg1);
				ARM_FMSTAT (code);
				EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "OverflowException");
				ARM_CPYD (code, ins->dreg, ins->sreg1);

				code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
				code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
			}
			break;
		}
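		/*
		 * The OP_R* opcodes below are the single-precision (float32)
		 * counterparts of the OP_F* double-precision ones above; they use
		 * the ARM_CMPS / ARM_VFP_*S instruction forms.
		 */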
		case OP_RCONV_TO_I1:
			code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
			break;
		case OP_RCONV_TO_U1:
			code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
			break;
		case OP_RCONV_TO_I2:
			code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
			break;
		case OP_RCONV_TO_U2:
			code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
			break;
		case OP_RCONV_TO_I4:
			code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
			break;
		case OP_RCONV_TO_U4:
			code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
			break;
		case OP_RCONV_TO_R4:
			g_assert (IS_VFP);
			if (ins->dreg != ins->sreg1)
				ARM_CPYS (code, ins->dreg, ins->sreg1);
			break;
		case OP_RCONV_TO_R8:
			g_assert (IS_VFP);
			ARM_CVTS (code, ins->dreg, ins->sreg1);
			break;
		case OP_RADD:
			ARM_VFP_ADDS (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_RSUB:
			ARM_VFP_SUBS (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_RMUL:
			ARM_VFP_MULS (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_RDIV:
			ARM_VFP_DIVS (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_RNEG:
			ARM_NEGS (code, ins->dreg, ins->sreg1);
			break;
		case OP_RCEQ:
			if (IS_VFP) {
				ARM_CMPS (code, ins->sreg1, ins->sreg2);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
			break;
		case OP_RCLT:
			if (IS_VFP) {
				ARM_CMPS (code, ins->sreg1, ins->sreg2);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
			break;
		case OP_RCLT_UN:
			if (IS_VFP) {
				ARM_CMPS (code, ins->sreg1, ins->sreg2);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
			break;
		case OP_RCGT:
			if (IS_VFP) {
				ARM_CMPS (code, ins->sreg2, ins->sreg1);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
			break;
		case OP_RCGT_UN:
			if (IS_VFP) {
				ARM_CMPS (code, ins->sreg2, ins->sreg1);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
			break;
		case OP_RCNEQ:
			if (IS_VFP) {
				ARM_CMPS (code, ins->sreg1, ins->sreg2);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
			break;
		case OP_RCGE:
			if (IS_VFP) {
				ARM_CMPS (code, ins->sreg1, ins->sreg2);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
			break;
		case OP_RCLE:
			if (IS_VFP) {
				ARM_CMPS (code, ins->sreg2, ins->sreg1);
				ARM_FMSTAT (code);
			}
			ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
			ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
			break;
		case OP_GC_LIVENESS_DEF:
		case OP_GC_LIVENESS_USE:
		case OP_GC_PARAM_SLOT_LIVENESS_DEF:
			ins->backend.pc_offset = code - cfg->native_code;
			break;
		case OP_GC_SPILL_SLOT_LIVENESS_DEF:
			ins->backend.pc_offset = code - cfg->native_code;
			bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
			break;
		case OP_LIVERANGE_START: {
			if (cfg->verbose_level > 1)
				printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
			MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
			break;
		}
		case OP_LIVERANGE_END: {
			if (cfg->verbose_level > 1)
				printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
			MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
			break;
		}
		case OP_GC_SAFE_POINT: {
			guint8 *buf [1];

			ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, 0);
			ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
			buf [0] = code;
			ARM_B_COND (code, ARMCOND_EQ, 0);
			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
			code = emit_call_seq (cfg, code);
			arm_patch (buf [0], code);
			break;
		}
		case OP_FILL_PROF_CALL_CTX:
			for (int i = 0; i < ARMREG_MAX; i++)
				if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) || i == ARMREG_SP || i == ARMREG_FP)
					ARM_STR_IMM (code, i, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, regs) + i * sizeof (target_mgreg_t));
			break;
		default:
			g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
			g_assert_not_reached ();
		}

		if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
			g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
				   mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
			g_assert_not_reached ();
		}

		cpos += max_len;

		last_ins = ins;
	}

	set_code_cursor (cfg, code);
}

#endif /* DISABLE_JIT */
void
mono_arch_register_lowlevel_calls (void)
{
	/* The signature doesn't matter */
	mono_register_jit_icall (mono_arm_throw_exception, mono_icall_sig_void, TRUE);
	mono_register_jit_icall (mono_arm_throw_exception_by_token, mono_icall_sig_void, TRUE);
	mono_register_jit_icall (mono_arm_unaligned_stack, mono_icall_sig_void, TRUE);
}

#define patch_lis_ori(ip,val) do {\
		guint16 *__lis_ori = (guint16*)(ip);	\
		__lis_ori [1] = (((guint32)(gsize)(val)) >> 16) & 0xffff;	\
		__lis_ori [3] = ((guint32)(gsize)(val)) & 0xffff;	\
	} while (0)

void
mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
	unsigned char *ip = ji->ip.i + code;

	switch (ji->type) {
	case MONO_PATCH_INFO_SWITCH: {
		gpointer *jt = (gpointer*)(ip + 8);
		int i;
		/* jt is the inlined jump table, 2 instructions after ip
		 * In the normal case we store the absolute addresses,
		 * otherwise the displacements.
		 */
		for (i = 0; i < ji->data.table->table_size; i++)
			jt [i] = code + (int)(gsize)ji->data.table->table [i];
		break;
	}
	case MONO_PATCH_INFO_IP:
		g_assert_not_reached ();
		patch_lis_ori (ip, ip);
		break;
	case MONO_PATCH_INFO_METHODCONST:
	case MONO_PATCH_INFO_CLASS:
	case MONO_PATCH_INFO_IMAGE:
	case MONO_PATCH_INFO_FIELD:
	case MONO_PATCH_INFO_VTABLE:
	case MONO_PATCH_INFO_IID:
	case MONO_PATCH_INFO_SFLDA:
	case MONO_PATCH_INFO_LDSTR:
	case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
	case MONO_PATCH_INFO_LDTOKEN:
		g_assert_not_reached ();
		/* from OP_AOTCONST : lis + ori */
		patch_lis_ori (ip, target);
		break;
	case MONO_PATCH_INFO_R4:
	case MONO_PATCH_INFO_R8:
		g_assert_not_reached ();
		*((gconstpointer *)(ip + 2)) = target;
		break;
	case MONO_PATCH_INFO_EXC_NAME:
		g_assert_not_reached ();
		*((gconstpointer *)(ip + 1)) = target;
		break;
	case MONO_PATCH_INFO_NONE:
	case MONO_PATCH_INFO_BB_OVF:
	case MONO_PATCH_INFO_EXC_OVF:
		/* everything is dealt with at epilog output time */
		break;
	default:
		arm_patch_general (cfg, ip, (const guchar*)target);
		break;
	}
}

void
mono_arm_unaligned_stack (MonoMethod *method)
{
	g_assert_not_reached ();
}

#ifndef DISABLE_JIT
/*
 * Stack frame layout:
 *
 * ------------------- fp
 *   MonoLMF structure or saved registers
 * -------------------
 *   locals
 * -------------------
 *   spilled regs
 * -------------------
 *   param area             size is cfg->param_area
 * ------------------- sp
 */
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	MonoBasicBlock *bb;
	MonoMethodSignature *sig;
	MonoInst *inst;
	int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount, part;
	guint8 *code;
	CallInfo *cinfo;
	int lmf_offset = 0;
	int prev_sp_offset, reg_offset;

	sig = mono_method_signature_internal (method);
	cfg->code_size = 256 + sig->param_count * 64;
	code = cfg->native_code = g_malloc (cfg->code_size);

	mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);

	alloc_size = cfg->stack_offset;
	pos = 0;
	prev_sp_offset = 0;

	if (iphone_abi) {
		/*
		 * The iphone uses R7 as the frame pointer, and it points at the saved
		 * r7+lr:
		 *         <lr>
		 * r7 ->   <r7>
		 *         <rest of frame>
		 * We can't use r7 as a frame pointer since it points into the middle of
		 * the frame, so we keep using our own frame pointer.
		 * FIXME: Optimize this.
		 */
		ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
		prev_sp_offset += 8; /* r7 and lr */
		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
		mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
		ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
	}

	if (!method->save_lmf) {
		if (iphone_abi) {
			/* No need to push LR again */
			if (cfg->used_int_regs)
				ARM_PUSH (code, cfg->used_int_regs);
		} else {
			ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
			prev_sp_offset += 4;
		}
		for (i = 0; i < 16; ++i) {
			if (cfg->used_int_regs & (1 << i))
				prev_sp_offset += 4;
		}
		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
		reg_offset = 0;
		for (i = 0; i < 16; ++i) {
			if ((cfg->used_int_regs & (1 << i))) {
				mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
				mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
				reg_offset += 4;
			}
		}
		mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
		mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
	} else {
		ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
		ARM_PUSH (code, 0x5ff0);
		prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
		reg_offset = 0;
		for (i = 0; i < 16; ++i) {
			if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
				/* The original r7 is saved at the start */
				if (!(iphone_abi && i == ARMREG_R7))
					mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
				reg_offset += 4;
			}
		}
		g_assert (reg_offset == 4 * 10);
		pos += MONO_ABI_SIZEOF (MonoLMF) - (4 * 10);
		lmf_offset = pos;
	}
	alloc_size += pos;
	orig_alloc_size = alloc_size;
	// align to MONO_ARCH_FRAME_ALIGNMENT bytes
	if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
		alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
		alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
	}

	/* the stack used in the pushed regs */
	alloc_size += ALIGN_TO (prev_sp_offset, MONO_ARCH_FRAME_ALIGNMENT) - prev_sp_offset;
	cfg->stack_usage = alloc_size;
	if (alloc_size) {
		if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
			ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
			ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
		}
		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
	}
	if (cfg->frame_reg != ARMREG_SP) {
		ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
		mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
	}
	//g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
	prev_sp_offset += alloc_size;

	for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
		mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);

	/* compute max_offset in order to use short forward jumps
	 * we could skip doing it on arm because the immediate displacement
	 * for jumps is large enough, it may be useful later for constant pools
	 */
	max_offset = 0;
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins = bb->code;
		bb->max_offset = max_offset;

		MONO_BB_FOR_EACH_INS (bb, ins)
			max_offset += ins_get_size (ins->opcode);
	}
	/* stack alignment check */
	/*
	{
		guint8 *buf [16];
		ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_SP);
		code = mono_arm_emit_load_imm (code, ARMREG_IP, MONO_ARCH_FRAME_ALIGNMENT -1);
		ARM_AND_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
		ARM_CMP_REG_IMM (code, ARMREG_LR, 0, 0);
		buf [0] = code;
		ARM_B_COND (code, ARMCOND_EQ, 0);
		if (cfg->compile_aot)
			ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
		else
			code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arm_unaligned_stack));
		code = emit_call_seq (cfg, code);
		arm_patch (buf [0], code);
	}
	*/

	/* store runtime generic context */
	if (cfg->rgctx_var) {
		MonoInst *ins = cfg->rgctx_var;

		g_assert (ins->opcode == OP_REGOFFSET);

		if (arm_is_imm12 (ins->inst_offset)) {
			ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
			ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
		}

		mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code);
		mono_add_var_location (cfg, cfg->rgctx_var, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
	}

	/* load arguments allocated to register from the stack */
	cinfo = get_call_info (NULL, sig);

	if (cinfo->ret.storage == RegTypeStructByAddr) {
		ArgInfo *ainfo = &cinfo->ret;
		inst = cfg->vret_addr;
		g_assert (arm_is_imm12 (inst->inst_offset));
		ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
	}

	if (sig->call_convention == MONO_CALL_VARARG) {
		ArgInfo *cookie = &cinfo->sig_cookie;

		/* Save the sig cookie address */
		g_assert (cookie->storage == RegTypeBase);

		g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
		g_assert (arm_is_imm12 (cfg->sig_cookie));
		ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
		ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
	}
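	/*
	 * Home the incoming arguments: values arriving in registers are either
	 * kept in their assigned register (OP_REGVAR) or spilled to their stack
	 * slot, according to the ArgInfo computed by get_call_info ().
	 */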
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		inst = cfg->args [i];

		if (cfg->verbose_level > 2)
			g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);

		if (inst->opcode == OP_REGVAR) {
			if (ainfo->storage == RegTypeGeneral)
				ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
			else if (ainfo->storage == RegTypeFP) {
				g_assert_not_reached ();
			} else if (ainfo->storage == RegTypeBase) {
				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
					ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				} else {
					code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
					ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
				}
			} else
				g_assert_not_reached ();

			if (i == 0 && sig->hasthis) {
				g_assert (ainfo->storage == RegTypeGeneral);
				mono_add_var_location (cfg, inst, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
				mono_add_var_location (cfg, inst, TRUE, inst->dreg, 0, code - cfg->native_code, 0);
			}

			if (cfg->verbose_level > 2)
				g_print ("Argument %d assigned to register %s\n", i, mono_arch_regname (inst->dreg));
		} else {
			switch (ainfo->storage) {
			case RegTypeHFA:
				for (part = 0; part < ainfo->nregs; part ++) {
					if (ainfo->esize == 4)
						ARM_FSTS (code, ainfo->reg + part, inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
					else
						ARM_FSTD (code, ainfo->reg + (part * 2), inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
				}
				break;
			case RegTypeGeneral:
			case RegTypeIRegPair:
			case RegTypeGSharedVtInReg:
			case RegTypeStructByAddr:
				switch (ainfo->size) {
				case 1:
					if (arm_is_imm12 (inst->inst_offset))
						ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					}
					break;
				case 2:
					if (arm_is_imm8 (inst->inst_offset)) {
						ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					}
					break;
				case 8:
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					}
					if (arm_is_imm12 (inst->inst_offset + 4)) {
						ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
						ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
					}
					break;
				default:
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					}
					break;
				}
				if (i == 0 && sig->hasthis) {
					g_assert (ainfo->storage == RegTypeGeneral);
					mono_add_var_location (cfg, inst, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
					mono_add_var_location (cfg, inst, FALSE, inst->inst_basereg, inst->inst_offset, code - cfg->native_code, 0);
				}
				break;
			case RegTypeBaseGen:
				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
					ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				} else {
					code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
					ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
				}
				if (arm_is_imm12 (inst->inst_offset + 4)) {
					ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
					ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
				} else {
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
					ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
				}
				break;
			case RegTypeBase:
			case RegTypeGSharedVtOnStack:
			case RegTypeStructByAddrOnStack:
				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
					ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				} else {
					code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
					ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
				}
				switch (ainfo->size) {
				case 1:
					if (arm_is_imm8 (inst->inst_offset)) {
						ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					}
					break;
				case 2:
					if (arm_is_imm8 (inst->inst_offset)) {
						ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					}
					break;
				case 8:
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					}
					if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
						ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
						ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
					}
					if (arm_is_imm12 (inst->inst_offset + 4)) {
						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					}
					break;
				default:
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					}
					break;
				}
				break;
			case RegTypeFP: {
				int imm8, rot_amount;

				if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
				} else
					ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);

				if (ainfo->size == 8)
					ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
				else
					ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
				break;
			}
			case RegTypeStructByVal: {
				int doffset = inst->inst_offset;
				int soffset = 0;
				int cur_reg;
				int size = 0;
				size = mini_type_stack_size_full (inst->inst_vtype, NULL, sig->pinvoke && !sig->marshalling_disabled);
				for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
					if (arm_is_imm12 (doffset)) {
						ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
						ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
					}
					soffset += sizeof (target_mgreg_t);
					doffset += sizeof (target_mgreg_t);
				}
				if (ainfo->vtsize) {
					/* FIXME: handle overrun! with struct sizes not multiple of 4 */
					//g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
					code = emit_memcpy (code, ainfo->vtsize * sizeof (target_mgreg_t), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
				}
				break;
			}
			default:
				g_assert_not_reached ();
				break;
			}
		}
	}

	if (method->save_lmf)
		code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);

	if (cfg->arch.seq_point_info_var) {
		MonoInst *ins = cfg->arch.seq_point_info_var;

		/* Initialize the variable from a GOT slot */
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
		ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(gpointer*)code = NULL;
		code += 4;
		ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);

		g_assert (ins->opcode == OP_REGOFFSET);

		if (arm_is_imm12 (ins->inst_offset)) {
			ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
			ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
		}
	}

	/* Initialize ss_trigger_page_var */
	if (!cfg->soft_breakpoints) {
		MonoInst *info_var = cfg->arch.seq_point_info_var;
		MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
		int dreg = ARMREG_LR;

		if (info_var) {
			g_assert (info_var->opcode == OP_REGOFFSET);

			code = emit_ldr_imm (code, dreg, info_var->inst_basereg, info_var->inst_offset);
			/* Load the trigger page addr */
			ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
			ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
		}
	}

	if (cfg->arch.seq_point_ss_method_var) {
		MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
		MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;

		g_assert (ss_method_ins->opcode == OP_REGOFFSET);
		g_assert (arm_is_imm12 (ss_method_ins->inst_offset));

		if (cfg->compile_aot) {
			MonoInst *info_var = cfg->arch.seq_point_info_var;
			int dreg = ARMREG_LR;

			g_assert (info_var->opcode == OP_REGOFFSET);
			g_assert (arm_is_imm12 (info_var->inst_offset));

			ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
			ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr));
			ARM_STR_IMM (code, dreg, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
		} else {
			g_assert (bp_method_ins->opcode == OP_REGOFFSET);
			g_assert (arm_is_imm12 (bp_method_ins->inst_offset));

			ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
			ARM_B (code, 1);
			*(gpointer*)code = &single_step_tramp;
			code += 4;
			*(gpointer*)code = breakpoint_tramp;
			code += 4;

			ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
			ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
			ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
			ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
		}
	}

	set_code_cursor (cfg, code);
	g_free (cinfo);

	return code;
}
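/*
 * The epilog mirrors the prolog: vtype/HFA return values are reloaded into
 * the ABI return registers, the frame is torn down and the callee-saved
 * registers are restored, returning by popping the saved LR directly into PC.
 */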
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	int pos, i, rot_amount;
	int max_epilog_size = 16 + 20*4;
	guint8 *code;
	CallInfo *cinfo;

	if (cfg->method->save_lmf)
		max_epilog_size += 128;

	code = realloc_code (cfg, max_epilog_size);

	/* Save the unwind state which is needed by the out-of-line code */
	mono_emit_unwind_op_remember_state (cfg, code);

	pos = 0;

	/* Load returned vtypes into registers if needed */
	cinfo = cfg->arch.cinfo;
	switch (cinfo->ret.storage) {
	case RegTypeStructByVal: {
		MonoInst *ins = cfg->ret;

		if (cinfo->ret.nregs == 1) {
			if (arm_is_imm12 (ins->inst_offset)) {
				ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
			} else {
				code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
				ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
			}
		} else {
			for (i = 0; i < cinfo->ret.nregs; ++i) {
				int offset = ins->inst_offset + (i * 4);
				if (arm_is_imm12 (offset)) {
					ARM_LDR_IMM (code, i, ins->inst_basereg, offset);
				} else {
					code = mono_arm_emit_load_imm (code, ARMREG_LR, offset);
					ARM_LDR_REG_REG (code, i, ins->inst_basereg, ARMREG_LR);
				}
			}
		}
		break;
	}
	case RegTypeHFA: {
		MonoInst *ins = cfg->ret;

		for (i = 0; i < cinfo->ret.nregs; ++i) {
			if (cinfo->ret.esize == 4)
				ARM_FLDS (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
			else
				ARM_FLDD (code, cinfo->ret.reg + (i * 2), ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
		}
		break;
	}
	default:
		break;
	}

	if (method->save_lmf) {
		int lmf_offset, reg, sp_adj, regmask, nused_int_regs = 0;
		/* all but r0-r3, sp and pc */
		pos += MONO_ABI_SIZEOF (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t));
		lmf_offset = pos;

		code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);

		/* This points to r4 inside MonoLMF->iregs */
		sp_adj = (MONO_ABI_SIZEOF (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t));
		reg = ARMREG_R4;
		regmask = 0x9ff0; /* restore lr to pc */
		/* Skip caller saved registers not used by the method */
		while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
			regmask &= ~(1 << reg);
			sp_adj += 4;
			reg ++;
		}
		if (iphone_abi)
			/* Restored later */
			regmask &= ~(1 << ARMREG_PC);
		/* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
		code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
		for (i = 0; i < 16; i++) {
			if (regmask & (1 << i))
				nused_int_regs ++;
		}
		mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, ((iphone_abi ? 3 : 0) + nused_int_regs) * 4);
		/* restore iregs */
		ARM_POP (code, regmask);
		if (iphone_abi) {
			for (i = 0; i < 16; i++) {
				if (regmask & (1 << i))
					mono_emit_unwind_op_same_value (cfg, code, i);
			}
			/* Restore saved r7, restore LR to PC */
			/* Skip lr from the lmf */
			mono_emit_unwind_op_def_cfa_offset (cfg, code, 3 * 4);
			ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (target_mgreg_t), 0);
			mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
			ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
		}
	} else {
		int i, nused_int_regs = 0;

		for (i = 0; i < 16; i++) {
			if (cfg->used_int_regs & (1 << i))
				nused_int_regs ++;
		}

		if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
			ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
			ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
		}

		if (cfg->frame_reg != ARMREG_SP) {
			mono_emit_unwind_op_def_cfa_reg (cfg, code, ARMREG_SP);
		}

		if (iphone_abi) {
			/* Restore saved gregs */
			if (cfg->used_int_regs) {
				mono_emit_unwind_op_def_cfa_offset (cfg, code, (2 + nused_int_regs) * 4);
				ARM_POP (code, cfg->used_int_regs);
				for (i = 0; i < 16; i++) {
					if (cfg->used_int_regs & (1 << i))
						mono_emit_unwind_op_same_value (cfg, code, i);
				}
			}
			mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
			/* Restore saved r7, restore LR to PC */
			ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
		} else {
			mono_emit_unwind_op_def_cfa_offset (cfg, code, (nused_int_regs + 1) * 4);
			ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
		}
	}

	/* Restore the unwind state to be the same as before the epilog */
	mono_emit_unwind_op_restore_state (cfg, code);

	set_code_cursor (cfg, code);
}
} #endif /* #ifndef DISABLE_JIT */ void mono_arch_finish_init (void) { } MonoInst* mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { /* FIXME: */ return NULL; } #ifndef DISABLE_JIT #endif guint32 mono_arch_get_patch_offset (guint8 *code) { /* OP_AOTCONST */ return 8; } void mono_arch_flush_register_windows (void) { } MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code) { return (MonoMethod*)regs [MONO_ARCH_IMT_REG]; } MonoVTable* mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code) { return (MonoVTable*)(gsize)regs [MONO_ARCH_RGCTX_REG]; } GSList* mono_arch_get_cie_program (void) { GSList *l = NULL; mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ARMREG_SP, 0); return l; } /* #define ENABLE_WRONG_METHOD_CHECK 1 */ #define BASE_SIZE (6 * 4) #define BSEARCH_ENTRY_SIZE (4 * 4) #define CMP_SIZE (3 * 4) #define BRANCH_SIZE (1 * 4) #define CALL_SIZE (2 * 4) #define WMC_SIZE (8 * 4) #define DISTANCE(A, B) (((gint32)(gssize)(B)) - ((gint32)(gssize)(A))) static arminstr_t * arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value) { guint32 delta = DISTANCE (target, code); delta -= 8; g_assert (delta >= 0 && delta <= 0xFFF); *target = *target | delta; *code = value; return code + 1; } #ifdef ENABLE_WRONG_METHOD_CHECK static void mini_dump_bad_imt (int input_imt, int compared_imt, int pc) { g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc); g_assert (0); } #endif gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp) { int size, i; arminstr_t *code, *start; gboolean large_offsets = FALSE; guint32 **constant_pool_starts; arminstr_t *vtable_target = NULL; int extra_space = 0; #ifdef ENABLE_WRONG_METHOD_CHECK char * cond; #endif GSList *unwind_ops; MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass); size = BASE_SIZE; constant_pool_starts = g_new0 (guint32*, count); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->is_equals) { gboolean fail_case = !item->check_target_idx && fail_tramp; if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) { item->chunk_size += 32; large_offsets = TRUE; } if (item->check_target_idx || fail_case) { if (!item->compare_done || fail_case) item->chunk_size += CMP_SIZE; item->chunk_size += BRANCH_SIZE; } else { #ifdef ENABLE_WRONG_METHOD_CHECK item->chunk_size += WMC_SIZE; #endif } if (fail_case) { item->chunk_size += 16; large_offsets = TRUE; } item->chunk_size += CALL_SIZE; } else { item->chunk_size += BSEARCH_ENTRY_SIZE; imt_entries [item->check_target_idx]->compare_done = TRUE; } size += item->chunk_size; } if (large_offsets) size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */ if (fail_tramp) { code = (arminstr_t *)mini_alloc_generic_virtual_trampoline (vtable, size); } else { code = mono_mem_manager_code_reserve (mem_manager, size); } start = code; unwind_ops = mono_arch_get_cie_program (); #ifdef DEBUG_IMT g_print ("Building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count, size, start, ((guint8*)start) + size, vtable, fail_tramp); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, 
item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size); } #endif if (large_offsets) { ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC); mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 4 * sizeof (target_mgreg_t)); } else { ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1); mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (target_mgreg_t)); } ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4); vtable_target = code; ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0); ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL; gint32 vtable_offset; item->code_target = (guint8*)code; if (item->is_equals) { gboolean fail_case = !item->check_target_idx && fail_tramp; if (item->check_target_idx || fail_case) { if (!item->compare_done || fail_case) { imt_method = code; ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1); } item->jmp_code = (guint8*)code; ARM_B_COND (code, ARMCOND_NE, 0); } else { /*Enable the commented code to assert on wrong method*/ #ifdef ENABLE_WRONG_METHOD_CHECK imt_method = code; ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1); cond = code; ARM_B_COND (code, ARMCOND_EQ, 0); /* Define this if your system is so bad that gdb is failing. */ #ifdef BROKEN_DEV_ENV ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC); ARM_BL (code, 0); arm_patch (code - 1, mini_dump_bad_imt); #else ARM_DBRK (code); #endif arm_patch (cond, code); #endif } if (item->has_target_code) { /* Load target address */ target_code_ins = code; ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); /* Save it to the fourth slot */ ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t)); /* Restore registers and branch */ ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC); code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code); } else { vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]); if (!arm_is_imm12 (vtable_offset)) { /* * We need to branch to a computed address but we don't have * a free register to store it, since IP must contain the * vtable address. So we push the two values to the stack, and * load them both using LDM. 
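 *
 * Added sketch (informational, not from the original source): the sequence
 * built below is roughly
 *   ldr r1, [pc]          ; vtable_offset, stored inline by arm_emit_value_and_patch_ldr
 *   ldr r1, [ip, r1]      ; ip holds the vtable, so this fetches the slot
 *   str r1, [sp, #12]     ; overwrite the saved pc slot
 *   pop {r0, r1, ip, pc}  ; a single ldm restores the regs and branches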
*/ /* Compute target address */ vtable_offset_ins = code; ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1); /* Save it to the fourth slot */ ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t)); /* Restore registers and branch */ ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC); code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset); } else { ARM_POP2 (code, ARMREG_R0, ARMREG_R1); if (large_offsets) { mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (target_mgreg_t)); ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (target_mgreg_t)); } mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 0); ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset); } } if (fail_case) { arm_patch (item->jmp_code, (guchar*)code); target_code_ins = code; /* Load target address */ ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); /* Save it to the fourth slot */ ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t)); /* Restore registers and branch */ ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC); code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp); item->jmp_code = NULL; } if (imt_method) code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)(gsize)item->key); /*must emit after unconditional branch*/ if (vtable_target) { code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)(gsize)vtable); item->chunk_size += 4; vtable_target = NULL; } /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/ constant_pool_starts [i] = code; if (extra_space) { code += extra_space; extra_space = 0; } } else { ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1); item->jmp_code = (guint8*)code; ARM_B_COND (code, ARMCOND_HS, 0); ++extra_space; } } for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->jmp_code) { if (item->check_target_idx) arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target); } if (i > 0 && item->is_equals) { int j; arminstr_t *space_start = constant_pool_starts [i]; for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) { space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)(gsize)imt_entries [j]->key); } } } #ifdef DEBUG_IMT { char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count); mono_disassemble_code (NULL, (guint8*)start, size, buff); g_free (buff); } #endif g_free (constant_pool_starts); mono_arch_flush_icache ((guint8*)start, size); MONO_PROFILER_RAISE (jit_code_buffer, ((guint8*)start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL)); UnlockedAdd (&mono_stats.imt_trampolines_size, code - start); g_assert (DISTANCE (start, code) <= size); mono_tramp_info_register (mono_tramp_info_create (NULL, (guint8*)start, DISTANCE (start, code), NULL, unwind_ops), mem_manager); return start; } host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg) { return ctx->regs [reg]; } host_mgreg_t* mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg) { return &ctx->regs [reg]; } void mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val) { ctx->regs [reg] = val; } /* * mono_arch_get_trampolines: * * Return a list of MonoTrampInfo structures describing arch specific trampolines * for 
AOT. */ GSList * mono_arch_get_trampolines (gboolean aot) { return mono_arm_get_exception_trampolines (aot); } #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED) /* * mono_arch_set_breakpoint: * * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET. * The location should contain code emitted by OP_SEQ_POINT. */ void mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip; guint32 native_offset = ip - (guint8*)ji->code_start; if (ji->from_aot) { SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start); if (!breakpoint_tramp) breakpoint_tramp = mini_get_breakpoint_trampoline (); g_assert (native_offset % 4 == 0); g_assert (info->bp_addrs [native_offset / 4] == 0); info->bp_addrs [native_offset / 4] = (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page); } else if (mini_debug_options.soft_breakpoints) { code += 4; ARM_BLX_REG (code, ARMREG_LR); mono_arch_flush_icache (code - 4, 4); } else { int dreg = ARMREG_LR; /* Read from another trigger page */ ARM_LDR_IMM (code, dreg, ARMREG_PC, 0); ARM_B (code, 0); *(int*)code = (int)(gssize)bp_trigger_page; code += 4; ARM_LDR_IMM (code, dreg, dreg, 0); mono_arch_flush_icache (code - 16, 16); #if 0 /* This is currently implemented by emitting an SWI instruction, which * qemu/linux seems to convert to a SIGILL. */ *(int*)code = (0xef << 24) | 8; code += 4; mono_arch_flush_icache (code - 4, 4); #endif } } /* * mono_arch_clear_breakpoint: * * Clear the breakpoint at IP. */ void mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip; int i; if (ji->from_aot) { guint32 native_offset = ip - (guint8*)ji->code_start; SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start); if (!breakpoint_tramp) breakpoint_tramp = mini_get_breakpoint_trampoline (); g_assert (native_offset % 4 == 0); g_assert (info->bp_addrs [native_offset / 4] == (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page)); info->bp_addrs [native_offset / 4] = 0; } else if (mini_debug_options.soft_breakpoints) { code += 4; ARM_NOP (code); mono_arch_flush_icache (code - 4, 4); } else { for (i = 0; i < 4; ++i) ARM_NOP (code); mono_arch_flush_icache (ip, code - ip); } } /* * mono_arch_start_single_stepping: * * Start single stepping. */ void mono_arch_start_single_stepping (void) { if (ss_trigger_page) mono_mprotect (ss_trigger_page, mono_pagesize (), 0); else single_step_tramp = mini_get_single_step_trampoline (); } /* * mono_arch_stop_single_stepping: * * Stop single stepping. */ void mono_arch_stop_single_stepping (void) { if (ss_trigger_page) mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ); else single_step_tramp = NULL; } #if __APPLE__ #define DBG_SIGNAL SIGBUS #else #define DBG_SIGNAL SIGSEGV #endif /* * mono_arch_is_single_step_event: * * Return whether the machine state in SIGCTX corresponds to a single * step event. */ gboolean mono_arch_is_single_step_event (void *info, void *sigctx) { siginfo_t *sinfo = (siginfo_t*)info; if (!ss_trigger_page) return FALSE; /* Sometimes the address is off by 4 */ if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128) return TRUE; else return FALSE; } /* * mono_arch_is_breakpoint_event: * * Return whether the machine state in SIGCTX corresponds to a breakpoint event. 
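 * (Added note, informational: the non-soft breakpoint path emits a load from
 * bp_trigger_page, which is kept unreadable, so the access raises DBG_SIGNAL;
 * the event is then classified purely by si_addr, with 128 bytes of slack
 * because the reported address can be off by a word.)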
*/ gboolean mono_arch_is_breakpoint_event (void *info, void *sigctx) { siginfo_t *sinfo = (siginfo_t*)info; if (!ss_trigger_page) return FALSE; if (sinfo->si_signo == DBG_SIGNAL) { /* Sometimes the address is off by 4 */ if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128) return TRUE; else return FALSE; } else { return FALSE; } } /* * mono_arch_skip_breakpoint: * * See mini-amd64.c for docs. */ void mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji) { MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4); } /* * mono_arch_skip_single_step: * * See mini-amd64.c for docs. */ void mono_arch_skip_single_step (MonoContext *ctx) { MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4); } /* * mono_arch_get_seq_point_info: * * See mini-amd64.c for docs. */ SeqPointInfo* mono_arch_get_seq_point_info (guint8 *code) { SeqPointInfo *info; MonoJitInfo *ji; MonoJitMemoryManager *jit_mm; jit_mm = get_default_jit_mm (); // FIXME: Add a free function jit_mm_lock (jit_mm); info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code); jit_mm_unlock (jit_mm); if (!info) { ji = mini_jit_info_table_find (code); g_assert (ji); info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size); info->ss_trigger_page = ss_trigger_page; info->bp_trigger_page = bp_trigger_page; info->ss_tramp_addr = &single_step_tramp; jit_mm_lock (jit_mm); g_hash_table_insert (jit_mm->arch_seq_points, code, info); jit_mm_unlock (jit_mm); } return info; } #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */ /* * mono_arch_set_target: * * Set the target architecture the JIT backend should generate code for, in the form * of a GNU target triplet. Only used in AOT mode. */ void mono_arch_set_target (char *mtriple) { /* The GNU target triple format is not very well documented */ if (strstr (mtriple, "armv7")) { v5_supported = TRUE; v6_supported = TRUE; v7_supported = TRUE; } if (strstr (mtriple, "armv6")) { v5_supported = TRUE; v6_supported = TRUE; } if (strstr (mtriple, "armv7s")) { v7s_supported = TRUE; } if (strstr (mtriple, "armv7k")) { v7k_supported = TRUE; } if (strstr (mtriple, "thumbv7s")) { v5_supported = TRUE; v6_supported = TRUE; v7_supported = TRUE; v7s_supported = TRUE; thumb_supported = TRUE; thumb2_supported = TRUE; } if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) { v5_supported = TRUE; v6_supported = TRUE; thumb_supported = TRUE; iphone_abi = TRUE; } if (strstr (mtriple, "gnueabi")) eabi_supported = TRUE; } gboolean mono_arch_opcode_supported (int opcode) { switch (opcode) { case OP_ATOMIC_ADD_I4: case OP_ATOMIC_EXCHANGE_I4: case OP_ATOMIC_CAS_I4: case OP_ATOMIC_LOAD_I1: case OP_ATOMIC_LOAD_I2: case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_LOAD_U1: case OP_ATOMIC_LOAD_U2: case OP_ATOMIC_LOAD_U4: case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U1: case OP_ATOMIC_STORE_U2: case OP_ATOMIC_STORE_U4: return v7_supported; case OP_ATOMIC_LOAD_R4: case OP_ATOMIC_LOAD_R8: case OP_ATOMIC_STORE_R4: case OP_ATOMIC_STORE_R8: return v7_supported && IS_VFP; default: return FALSE; } } CallInfo* mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { return get_call_info (mp, sig); } gpointer mono_arch_get_get_tls_tramp (void) { return NULL; } static G_GNUC_UNUSED guint8* emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data) { /* OP_AOTCONST */ mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data); ARM_LDR_IMM (code, dreg, 
ARMREG_PC, 0); ARM_B (code, 0); *(gpointer*)code = NULL; code += 4; /* Load the value from the GOT */ ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg); return code; } guint8* mono_arm_emit_aotconst (gpointer ji_list, guint8 *code, guint8 *buf, int dreg, int patch_type, gconstpointer data) { MonoJumpInfo **ji = (MonoJumpInfo**)ji_list; *ji = mono_patch_info_list_prepend (*ji, code - buf, (MonoJumpInfoType)patch_type, data); ARM_LDR_IMM (code, dreg, ARMREG_PC, 0); ARM_B (code, 0); *(gpointer*)code = NULL; code += 4; ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg); return code; } gpointer mono_arch_load_function (MonoJitICallId jit_icall_id) { gpointer target = NULL; switch (jit_icall_id) { #undef MONO_AOT_ICALL #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break; MONO_AOT_ICALL (mono_arm_resume_unwind) MONO_AOT_ICALL (mono_arm_start_gsharedvt_call) MONO_AOT_ICALL (mono_arm_throw_exception) MONO_AOT_ICALL (mono_arm_throw_exception_by_token) MONO_AOT_ICALL (mono_arm_unaligned_stack) } return target; }
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/mini-arm64.c
/** * \file * ARM64 backend for the Mono code generator * * Copyright 2013 Xamarin, Inc (http://www.xamarin.com) * * Based on mini-arm.c: * * Authors: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * * (C) 2003 Ximian, Inc. * Copyright 2003-2011 Novell, Inc (http://www.novell.com) * Copyright 2011 Xamarin, Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include "mini.h" #include "cpu-arm64.h" #include "ir-emit.h" #include "aot-runtime.h" #include "mini-runtime.h" #include <mono/arch/arm64/arm64-codegen.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-memory-model.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/tokentype.h> #include "interp/interp.h" /* * Documentation: * * - ARM(R) Architecture Reference Manual, ARMv8, for ARMv8-A architecture profile (DDI0487A_a_armv8_arm.pdf) * - Procedure Call Standard for the ARM 64-bit Architecture (AArch64) (IHI0055B_aapcs64.pdf) * - ELF for the ARM 64-bit Architecture (IHI0056B_aaelf64.pdf) * * Register usage: * - ip0/ip1/lr are used as temporary registers * - r27 is used as the rgctx/imt register * - r28 is used to access arguments passed on the stack * - d15/d16 are used as fp temporary registers */ #define FP_TEMP_REG ARMREG_D16 #define FP_TEMP_REG2 ARMREG_D17 #define THUNK_SIZE (4 * 4) /* The single step trampoline */ static gpointer ss_trampoline; /* The breakpoint trampoline */ static gpointer bp_trampoline; static gboolean ios_abi; static gboolean enable_ptrauth; #if defined(HOST_WIN32) #define WARN_UNUSED_RESULT _Check_return_ #else #define WARN_UNUSED_RESULT __attribute__ ((__warn_unused_result__)) #endif static WARN_UNUSED_RESULT guint8* emit_load_regset (guint8 *code, guint64 regs, int basereg, int offset); static guint8* emit_brx (guint8 *code, int reg); static guint8* emit_blrx (guint8 *code, int reg); const char* mono_arch_regname (int reg) { static const char * rnames[] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "fp", "lr", "sp" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } const char* mono_arch_fregname (int reg) { static const char * rnames[] = { "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown fp"; } const char * mono_arch_xregname (int reg) { static const char * rnames[] = { "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) { NOT_IMPLEMENTED; return 0; } #define MAX_ARCH_DELEGATE_PARAMS 7 static gpointer get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size) { guint8 *code, *start; MINI_BEGIN_CODEGEN (); if (has_target) { start = code = mono_global_codeman_reserve (12); /* Replace the this argument with the target */ arm_ldrx (code, ARMREG_IP0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); arm_ldrx (code, 
ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target)); code = mono_arm_emit_brx (code, ARMREG_IP0); g_assert ((code - start) <= 12); } else { int size, i; size = 8 + param_count * 4; start = code = mono_global_codeman_reserve (size); arm_ldrx (code, ARMREG_IP0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); /* slide down the arguments */ for (i = 0; i < param_count; ++i) arm_movx (code, i, i + 1); code = mono_arm_emit_brx (code, ARMREG_IP0); g_assert ((code - start) <= size); } MINI_END_CODEGEN (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL); if (code_size) *code_size = code - start; return MINI_ADDR_TO_FTNPTR (start); } /* * mono_arch_get_delegate_invoke_impls: * * Return a list of MonoAotTrampInfo structures for the delegate invoke impl * trampolines. */ GSList* mono_arch_get_delegate_invoke_impls (void) { GSList *res = NULL; guint8 *code; guint32 code_len; int i; char *tramp_name; code = (guint8*)get_delegate_invoke_impl (TRUE, 0, &code_len); res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL)); for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) { code = (guint8*)get_delegate_invoke_impl (FALSE, i, &code_len); tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i); res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL)); g_free (tramp_name); } return res; } gpointer mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target) { guint8 *code, *start; /* * vtypes are returned in registers, or using the dedicated r8 register, so * they can be supported by delegate invokes. */ if (has_target) { static guint8* cached = NULL; if (cached) return cached; if (mono_ee_features.use_aot_trampolines) start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target"); else start = (guint8*)get_delegate_invoke_impl (TRUE, 0, NULL); mono_memory_barrier (); cached = start; return cached; } else { static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL}; int i; if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS) return NULL; for (i = 0; i < sig->param_count; ++i) if (!mono_is_regsize_var (sig->params [i])) return NULL; code = cache [sig->param_count]; if (code) return code; if (mono_ee_features.use_aot_trampolines) { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count); start = (guint8*)mono_aot_get_trampoline (name); g_free (name); } else { start = (guint8*)get_delegate_invoke_impl (FALSE, sig->param_count, NULL); } mono_memory_barrier (); cache [sig->param_count] = start; return start; } return NULL; } gpointer mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg) { return NULL; } gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code) { return (gpointer)regs [ARMREG_R0]; } void mono_arch_cpu_init (void) { } void mono_arch_init (void) { #if defined(TARGET_IOS) || defined(TARGET_WATCHOS) || defined(TARGET_OSX) ios_abi = TRUE; #endif #ifdef MONO_ARCH_ENABLE_PTRAUTH enable_ptrauth = TRUE; #endif if (!mono_aot_only) bp_trampoline = mini_get_breakpoint_trampoline (); mono_arm_gsharedvt_init (); } void mono_arch_cleanup (void) { } guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask) { *exclude_mask = 0; return 0; } void mono_arch_register_lowlevel_calls (void) { } void mono_arch_finish_init (void) { } /* The maximum length is 2 instructions */ static guint8* emit_imm (guint8 *code, int dreg, int 
imm) { // FIXME: Optimize this if (imm < 0) { gint64 limm = imm; arm_movnx (code, dreg, (~limm) & 0xffff, 0); arm_movkx (code, dreg, (limm >> 16) & 0xffff, 16); } else { arm_movzx (code, dreg, imm & 0xffff, 0); if (imm >> 16) arm_movkx (code, dreg, (imm >> 16) & 0xffff, 16); } return code; } /* The maximum length is 4 instructions */ static guint8* emit_imm64 (guint8 *code, int dreg, guint64 imm) { // FIXME: Optimize this arm_movzx (code, dreg, imm & 0xffff, 0); if ((imm >> 16) & 0xffff) arm_movkx (code, dreg, (imm >> 16) & 0xffff, 16); if ((imm >> 32) & 0xffff) arm_movkx (code, dreg, (imm >> 32) & 0xffff, 32); if ((imm >> 48) & 0xffff) arm_movkx (code, dreg, (imm >> 48) & 0xffff, 48); return code; } guint8* mono_arm_emit_imm64 (guint8 *code, int dreg, gint64 imm) { return emit_imm64 (code, dreg, imm); } /* * emit_imm64_template: * * Emit a patchable code sequence for constructing a 64 bit immediate. */ static guint8* emit_imm64_template (guint8 *code, int dreg) { arm_movzx (code, dreg, 0, 0); arm_movkx (code, dreg, 0, 16); arm_movkx (code, dreg, 0, 32); arm_movkx (code, dreg, 0, 48); return code; } static WARN_UNUSED_RESULT guint8* emit_addw_imm (guint8 *code, int dreg, int sreg, int imm) { if (!arm_is_arith_imm (imm)) { code = emit_imm (code, ARMREG_LR, imm); arm_addw (code, dreg, sreg, ARMREG_LR); } else { arm_addw_imm (code, dreg, sreg, imm); } return code; } static WARN_UNUSED_RESULT guint8* emit_addx_imm (guint8 *code, int dreg, int sreg, int imm) { if (!arm_is_arith_imm (imm)) { code = emit_imm (code, ARMREG_LR, imm); arm_addx (code, dreg, sreg, ARMREG_LR); } else { arm_addx_imm (code, dreg, sreg, imm); } return code; } static WARN_UNUSED_RESULT guint8* emit_subw_imm (guint8 *code, int dreg, int sreg, int imm) { if (!arm_is_arith_imm (imm)) { code = emit_imm (code, ARMREG_LR, imm); arm_subw (code, dreg, sreg, ARMREG_LR); } else { arm_subw_imm (code, dreg, sreg, imm); } return code; } static WARN_UNUSED_RESULT guint8* emit_subx_imm (guint8 *code, int dreg, int sreg, int imm) { if (!arm_is_arith_imm (imm)) { code = emit_imm (code, ARMREG_LR, imm); arm_subx (code, dreg, sreg, ARMREG_LR); } else { arm_subx_imm (code, dreg, sreg, imm); } return code; } /* Emit sp+=imm. Clobbers ip0/ip1 */ static WARN_UNUSED_RESULT guint8* emit_addx_sp_imm (guint8 *code, int imm) { code = emit_imm (code, ARMREG_IP0, imm); arm_movspx (code, ARMREG_IP1, ARMREG_SP); arm_addx (code, ARMREG_IP1, ARMREG_IP1, ARMREG_IP0); arm_movspx (code, ARMREG_SP, ARMREG_IP1); return code; } /* Emit sp-=imm. 
Clobbers ip0/ip1 */ static WARN_UNUSED_RESULT guint8* emit_subx_sp_imm (guint8 *code, int imm) { code = emit_imm (code, ARMREG_IP0, imm); arm_movspx (code, ARMREG_IP1, ARMREG_SP); arm_subx (code, ARMREG_IP1, ARMREG_IP1, ARMREG_IP0); arm_movspx (code, ARMREG_SP, ARMREG_IP1); return code; } static WARN_UNUSED_RESULT guint8* emit_andw_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_andw (code, dreg, sreg, ARMREG_LR); return code; } static WARN_UNUSED_RESULT guint8* emit_andx_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_andx (code, dreg, sreg, ARMREG_LR); return code; } static WARN_UNUSED_RESULT guint8* emit_orrw_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_orrw (code, dreg, sreg, ARMREG_LR); return code; } static WARN_UNUSED_RESULT guint8* emit_orrx_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_orrx (code, dreg, sreg, ARMREG_LR); return code; } static WARN_UNUSED_RESULT guint8* emit_eorw_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_eorw (code, dreg, sreg, ARMREG_LR); return code; } static WARN_UNUSED_RESULT guint8* emit_eorx_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_eorx (code, dreg, sreg, ARMREG_LR); return code; } static WARN_UNUSED_RESULT guint8* emit_cmpw_imm (guint8 *code, int sreg, int imm) { if (imm == 0) { arm_cmpw (code, sreg, ARMREG_RZR); } else { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_cmpw (code, sreg, ARMREG_LR); } return code; } static WARN_UNUSED_RESULT guint8* emit_cmpx_imm (guint8 *code, int sreg, int imm) { if (imm == 0) { arm_cmpx (code, sreg, ARMREG_RZR); } else { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_cmpx (code, sreg, ARMREG_LR); } return code; } static WARN_UNUSED_RESULT guint8* emit_strb (guint8 *code, int rt, int rn, int imm) { if (arm_is_strb_imm (imm)) { arm_strb (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_strb_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_strh (guint8 *code, int rt, int rn, int imm) { if (arm_is_strh_imm (imm)) { arm_strh (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_strh_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_strw (guint8 *code, int rt, int rn, int imm) { if (arm_is_strw_imm (imm)) { arm_strw (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_strw_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_strfpw (guint8 *code, int rt, int rn, int imm) { if (arm_is_strw_imm (imm)) { arm_strfpw (code, rt, rn, imm); } else { g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0); arm_strfpw (code, rt, ARMREG_IP0, 0); } return code; } static WARN_UNUSED_RESULT guint8* emit_strfpx (guint8 *code, int rt, int rn, int imm) { if (arm_is_strx_imm (imm)) { arm_strfpx (code, rt, rn, imm); } else { g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0); arm_strfpx (code, rt, ARMREG_IP0, 0); } return 
code; } static WARN_UNUSED_RESULT guint8* emit_strx (guint8 *code, int rt, int rn, int imm) { if (arm_is_strx_imm (imm)) { arm_strx (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_strx_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrb (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 1)) { arm_ldrb (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_ldrb_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrsbx (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 1)) { arm_ldrsbx (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_ldrsbx_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrh (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 2)) { arm_ldrh (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_ldrh_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrshx (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 2)) { arm_ldrshx (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_ldrshx_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrswx (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 4)) { arm_ldrswx (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_ldrswx_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrw (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 4)) { arm_ldrw (code, rt, rn, imm); } else { g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_ldrw_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrx (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 8)) { arm_ldrx (code, rt, rn, imm); } else { g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_ldrx_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrfpw (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 4)) { arm_ldrfpw (code, rt, rn, imm); } else { g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0); arm_ldrfpw (code, rt, ARMREG_IP0, 0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrfpx (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 8)) { arm_ldrfpx (code, rt, rn, imm); } else { g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0); arm_ldrfpx (code, rt, ARMREG_IP0, 0); } return code; } guint8* mono_arm_emit_ldrx (guint8 *code, int rt, int rn, int imm) { return emit_ldrx (code, rt, rn, imm); } static guint8* emit_call (MonoCompile *cfg, guint8* code, MonoJumpInfoType patch_type, gconstpointer data) { /* mono_add_patch_info_rel (cfg, code - cfg->native_code, patch_type, data, MONO_R_ARM64_IMM); code = emit_imm64_template (code, ARMREG_LR); arm_blrx 
(code, ARMREG_LR); */ mono_add_patch_info_rel (cfg, code - cfg->native_code, patch_type, data, MONO_R_ARM64_BL); arm_bl (code, code); cfg->thunk_area += THUNK_SIZE; return code; } static guint8* emit_aotconst_full (MonoCompile *cfg, MonoJumpInfo **ji, guint8 *code, guint8 *start, int dreg, guint32 patch_type, gconstpointer data) { if (cfg) mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data); else *ji = mono_patch_info_list_prepend (*ji, code - start, (MonoJumpInfoType)patch_type, data); /* See arch_emit_got_access () in aot-compiler.c */ arm_ldrx_lit (code, dreg, 0); arm_nop (code); arm_nop (code); return code; } static guint8* emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, guint32 patch_type, gconstpointer data) { return emit_aotconst_full (cfg, NULL, code, NULL, dreg, patch_type, data); } /* * mono_arm_emit_aotconst: * * Emit code to load an AOT constant into DREG. Usable from trampolines. */ guint8* mono_arm_emit_aotconst (gpointer ji, guint8 *code, guint8 *code_start, int dreg, guint32 patch_type, gconstpointer data) { return emit_aotconst_full (NULL, (MonoJumpInfo**)ji, code, code_start, dreg, patch_type, data); } gboolean mono_arch_have_fast_tls (void) { #ifdef TARGET_IOS return FALSE; #else return TRUE; #endif } static guint8* emit_tls_get (guint8 *code, int dreg, int tls_offset) { arm_mrs (code, dreg, ARM_MRS_REG_TPIDR_EL0); if (tls_offset < 256) { arm_ldrx (code, dreg, dreg, tls_offset); } else { code = emit_addx_imm (code, dreg, dreg, tls_offset); arm_ldrx (code, dreg, dreg, 0); } return code; } static guint8* emit_tls_set (guint8 *code, int sreg, int tls_offset) { int tmpreg = ARMREG_IP0; g_assert (sreg != tmpreg); arm_mrs (code, tmpreg, ARM_MRS_REG_TPIDR_EL0); if (tls_offset < 256) { arm_strx (code, sreg, tmpreg, tls_offset); } else { code = emit_addx_imm (code, tmpreg, tmpreg, tls_offset); arm_strx (code, sreg, tmpreg, 0); } return code; } /* * Emits * - mov sp, fp * - ldp fp, lr, [sp], #stack_offset * Clobbers TEMP_REGS. */ WARN_UNUSED_RESULT guint8* mono_arm_emit_destroy_frame (guint8 *code, int stack_offset, guint64 temp_regs) { // At least one of these registers must be available, or both. gboolean const temp0 = (temp_regs & (1 << ARMREG_IP0)) != 0; gboolean const temp1 = (temp_regs & (1 << ARMREG_IP1)) != 0; g_assert (temp0 || temp1); int const temp = temp0 ? 
ARMREG_IP0 : ARMREG_IP1; arm_movspx (code, ARMREG_SP, ARMREG_FP); if (arm_is_ldpx_imm (stack_offset)) { arm_ldpx_post (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, stack_offset); } else { arm_ldpx (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, 0); /* sp += stack_offset */ if (temp0 && temp1) { code = emit_addx_sp_imm (code, stack_offset); } else { int imm = stack_offset; /* Can't use addx_sp_imm () since we can't clobber both ip0/ip1 */ arm_addx_imm (code, temp, ARMREG_SP, 0); while (imm > 256) { arm_addx_imm (code, temp, temp, 256); imm -= 256; } arm_addx_imm (code, ARMREG_SP, temp, imm); } } return code; } #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431) static guint8* emit_thunk (guint8 *code, gconstpointer target) { guint8 *p = code; arm_ldrx_lit (code, ARMREG_IP0, code + 8); arm_brx (code, ARMREG_IP0); *(guint64*)code = (guint64)target; code += sizeof (guint64); mono_arch_flush_icache (p, code - p); return code; } static gpointer create_thunk (MonoCompile *cfg, guchar *code, const guchar *target) { MonoJitInfo *ji; MonoThunkJitInfo *info; guint8 *thunks, *p; int thunks_size; guint8 *orig_target; guint8 *target_thunk; MonoJitMemoryManager* jit_mm; if (cfg) { /* * This can be called multiple times during JITting, * save the current position in cfg->arch to avoid * doing a O(n^2) search. */ if (!cfg->arch.thunks) { cfg->arch.thunks = cfg->thunks; cfg->arch.thunks_size = cfg->thunk_area; } thunks = cfg->arch.thunks; thunks_size = cfg->arch.thunks_size; if (!thunks_size) { g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE)); g_assert_not_reached (); } g_assert (*(guint32*)thunks == 0); emit_thunk (thunks, target); cfg->arch.thunks += THUNK_SIZE; cfg->arch.thunks_size -= THUNK_SIZE; return thunks; } else { ji = mini_jit_info_table_find (code); g_assert (ji); info = mono_jit_info_get_thunk_info (ji); g_assert (info); thunks = (guint8*)ji->code_start + info->thunks_offset; thunks_size = info->thunks_size; orig_target = mono_arch_get_call_target (code + 4); /* Arbitrary lock */ jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); target_thunk = NULL; if (orig_target >= thunks && orig_target < thunks + thunks_size) { /* The call already points to a thunk, because of trampolines etc. */ target_thunk = orig_target; } else { for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) { if (((guint32*)p) [0] == 0) { /* Free entry */ target_thunk = p; break; } else if (((guint64*)p) [1] == (guint64)target) { /* Thunk already points to target */ target_thunk = p; break; } } } //printf ("THUNK: %p %p %p\n", code, target, target_thunk); if (!target_thunk) { jit_mm_unlock (jit_mm); g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? 
mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE)); g_assert_not_reached (); } emit_thunk (target_thunk, target); jit_mm_unlock (jit_mm); return target_thunk; } } static void arm_patch_full (MonoCompile *cfg, guint8 *code, guint8 *target, int relocation) { switch (relocation) { case MONO_R_ARM64_B: target = MINI_FTNPTR_TO_ADDR (target); if (arm_is_bl_disp (code, target)) { arm_b (code, target); } else { gpointer thunk; thunk = create_thunk (cfg, code, target); g_assert (arm_is_bl_disp (code, thunk)); arm_b (code, thunk); } break; case MONO_R_ARM64_BCC: { int cond; cond = arm_get_bcc_cond (code); arm_bcc (code, cond, target); break; } case MONO_R_ARM64_CBZ: arm_set_cbz_target (code, target); break; case MONO_R_ARM64_IMM: { guint64 imm = (guint64)target; int dreg; /* emit_imm64_template () */ dreg = arm_get_movzx_rd (code); arm_movzx (code, dreg, imm & 0xffff, 0); arm_movkx (code, dreg, (imm >> 16) & 0xffff, 16); arm_movkx (code, dreg, (imm >> 32) & 0xffff, 32); arm_movkx (code, dreg, (imm >> 48) & 0xffff, 48); break; } case MONO_R_ARM64_BL: target = MINI_FTNPTR_TO_ADDR (target); if (arm_is_bl_disp (code, target)) { arm_bl (code, target); } else { gpointer thunk; thunk = create_thunk (cfg, code, target); g_assert (arm_is_bl_disp (code, thunk)); arm_bl (code, thunk); } break; default: g_assert_not_reached (); } } static void arm_patch_rel (guint8 *code, guint8 *target, int relocation) { arm_patch_full (NULL, code, target, relocation); } void mono_arm_patch (guint8 *code, guint8 *target, int relocation) { arm_patch_rel (code, target, relocation); } void mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target) { guint8 *ip; ip = ji->ip.i + code; switch (ji->type) { case MONO_PATCH_INFO_METHOD_JUMP: /* ji->relocation is not set by the caller */ arm_patch_full (cfg, ip, (guint8*)target, MONO_R_ARM64_B); mono_arch_flush_icache (ip, 8); break; default: arm_patch_full (cfg, ip, (guint8*)target, ji->relocation); break; case MONO_PATCH_INFO_NONE: break; } } void mono_arch_flush_register_windows (void) { } MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code) { return (MonoMethod*)regs [MONO_ARCH_RGCTX_REG]; } MonoVTable* mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code) { return (MonoVTable*)regs [MONO_ARCH_RGCTX_REG]; } GSList* mono_arch_get_cie_program (void) { GSList *l = NULL; mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ARMREG_SP, 0); return l; } host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg) { return ctx->regs [reg]; } host_mgreg_t* mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg) { return &ctx->regs [reg]; } void mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val) { ctx->regs [reg] = val; } /* * mono_arch_set_target: * * Set the target architecture the JIT backend should generate code for, in the form * of a GNU target triplet. Only used in AOT mode. */ void mono_arch_set_target (char *mtriple) { if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) { ios_abi = TRUE; } } static void add_general (CallInfo *cinfo, ArgInfo *ainfo, int size, gboolean sign) { if (cinfo->gr >= PARAM_REGS) { ainfo->storage = ArgOnStack; /* * FIXME: The vararg argument handling code in ves_icall_System_ArgIterator_IntGetNextArg * assumes every argument is allocated to a separate full size stack slot. 
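 * (Added note: this is why the else branch below, taken on non-iOS targets
 * and for varargs, widens every on-stack argument to an 8 byte slot instead
 * of packing by natural size.)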
*/ if (ios_abi && !cinfo->vararg) { /* Assume size == align */ } else { /* Put arguments into 8 byte aligned stack slots */ size = 8; sign = FALSE; } cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, size); ainfo->offset = cinfo->stack_usage; ainfo->slot_size = size; ainfo->sign = sign; cinfo->stack_usage += size; } else { ainfo->storage = ArgInIReg; ainfo->reg = cinfo->gr; cinfo->gr ++; } } static void add_fp (CallInfo *cinfo, ArgInfo *ainfo, gboolean single) { int size = single ? 4 : 8; if (cinfo->fr >= FP_PARAM_REGS) { ainfo->storage = single ? ArgOnStackR4 : ArgOnStackR8; if (ios_abi) { cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, size); ainfo->offset = cinfo->stack_usage; ainfo->slot_size = size; cinfo->stack_usage += size; } else { ainfo->offset = cinfo->stack_usage; ainfo->slot_size = 8; /* Put arguments into 8 byte aligned stack slots */ cinfo->stack_usage += 8; } } else { if (single) ainfo->storage = ArgInFRegR4; else ainfo->storage = ArgInFReg; ainfo->reg = cinfo->fr; cinfo->fr ++; } } static gboolean is_hfa (MonoType *t, int *out_nfields, int *out_esize, int *field_offsets) { MonoClass *klass; gpointer iter; MonoClassField *field; MonoType *ftype, *prev_ftype = NULL; int i, nfields = 0; klass = mono_class_from_mono_type_internal (t); iter = NULL; while ((field = mono_class_get_fields_internal (klass, &iter))) { if (field->type->attrs & FIELD_ATTRIBUTE_STATIC) continue; ftype = mono_field_get_type_internal (field); ftype = mini_get_underlying_type (ftype); if (MONO_TYPE_ISSTRUCT (ftype)) { int nested_nfields, nested_esize; int nested_field_offsets [16]; if (!is_hfa (ftype, &nested_nfields, &nested_esize, nested_field_offsets)) return FALSE; if (nested_esize == 4) ftype = m_class_get_byval_arg (mono_defaults.single_class); else ftype = m_class_get_byval_arg (mono_defaults.double_class); if (prev_ftype && prev_ftype->type != ftype->type) return FALSE; prev_ftype = ftype; for (i = 0; i < nested_nfields; ++i) { if (nfields + i < 4) field_offsets [nfields + i] = field->offset - MONO_ABI_SIZEOF (MonoObject) + nested_field_offsets [i]; } nfields += nested_nfields; } else { if (!(!m_type_is_byref (ftype) && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8))) return FALSE; if (prev_ftype && prev_ftype->type != ftype->type) return FALSE; prev_ftype = ftype; if (nfields < 4) field_offsets [nfields] = field->offset - MONO_ABI_SIZEOF (MonoObject); nfields ++; } } if (nfields == 0 || nfields > 4) return FALSE; *out_nfields = nfields; *out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8; return TRUE; } static void add_valuetype (CallInfo *cinfo, ArgInfo *ainfo, MonoType *t) { int i, size, align_size, nregs, nfields, esize; int field_offsets [16]; guint32 align; size = mini_type_stack_size_full (t, &align, cinfo->pinvoke); align_size = ALIGN_TO (size, 8); nregs = align_size / 8; if (is_hfa (t, &nfields, &esize, field_offsets)) { /* * The struct might include nested float structs aligned at 8, * so need to keep track of the offsets of the individual fields. */ if (cinfo->fr + nfields <= FP_PARAM_REGS) { ainfo->storage = ArgHFA; ainfo->reg = cinfo->fr; ainfo->nregs = nfields; ainfo->size = size; ainfo->esize = esize; for (i = 0; i < nfields; ++i) ainfo->foffsets [i] = field_offsets [i]; cinfo->fr += ainfo->nregs; } else { ainfo->nfregs_to_skip = FP_PARAM_REGS > cinfo->fr ? 
FP_PARAM_REGS - cinfo->fr : 0; cinfo->fr = FP_PARAM_REGS; size = ALIGN_TO (size, 8); ainfo->storage = ArgVtypeOnStack; cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align); ainfo->offset = cinfo->stack_usage; ainfo->size = size; ainfo->hfa = TRUE; ainfo->nregs = nfields; ainfo->esize = esize; cinfo->stack_usage += size; } return; } if (align_size > 16) { ainfo->storage = ArgVtypeByRef; ainfo->size = size; return; } if (cinfo->gr + nregs > PARAM_REGS) { size = ALIGN_TO (size, 8); ainfo->storage = ArgVtypeOnStack; cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align); ainfo->offset = cinfo->stack_usage; ainfo->size = size; cinfo->stack_usage += size; cinfo->gr = PARAM_REGS; } else { ainfo->storage = ArgVtypeInIRegs; ainfo->reg = cinfo->gr; ainfo->nregs = nregs; ainfo->size = size; cinfo->gr += nregs; } } static void add_param (CallInfo *cinfo, ArgInfo *ainfo, MonoType *t) { MonoType *ptype; ptype = mini_get_underlying_type (t); switch (ptype->type) { case MONO_TYPE_I1: add_general (cinfo, ainfo, 1, TRUE); break; case MONO_TYPE_U1: add_general (cinfo, ainfo, 1, FALSE); break; case MONO_TYPE_I2: add_general (cinfo, ainfo, 2, TRUE); break; case MONO_TYPE_U2: add_general (cinfo, ainfo, 2, FALSE); break; #ifdef MONO_ARCH_ILP32 case MONO_TYPE_I: #endif case MONO_TYPE_I4: add_general (cinfo, ainfo, 4, TRUE); break; #ifdef MONO_ARCH_ILP32 case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: #endif case MONO_TYPE_U4: add_general (cinfo, ainfo, 4, FALSE); break; #ifndef MONO_ARCH_ILP32 case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: #endif case MONO_TYPE_U8: case MONO_TYPE_I8: add_general (cinfo, ainfo, 8, FALSE); break; case MONO_TYPE_R8: add_fp (cinfo, ainfo, FALSE); break; case MONO_TYPE_R4: add_fp (cinfo, ainfo, TRUE); break; case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: add_valuetype (cinfo, ainfo, ptype); break; case MONO_TYPE_VOID: ainfo->storage = ArgNone; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ptype)) { add_general (cinfo, ainfo, 8, FALSE); } else if (mini_is_gsharedvt_variable_type (ptype)) { /* * Treat gsharedvt arguments as large vtypes */ ainfo->storage = ArgVtypeByRef; ainfo->gsharedvt = TRUE; } else { add_valuetype (cinfo, ainfo, ptype); } break; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: g_assert (mini_is_gsharedvt_type (ptype)); ainfo->storage = ArgVtypeByRef; ainfo->gsharedvt = TRUE; break; default: g_assert_not_reached (); break; } } /* * get_call_info: * * Obtain information about a call according to the calling convention. 
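 *
 * Worked example (added, assuming the AAPCS64 classification implemented here):
 * for `int f (int a, double b, struct { double x, y; } c)`, add_param () puts
 * a in r0 (ArgInIReg), b in d0 (ArgInFReg) and c, an HFA of two doubles, in
 * d1/d2 (ArgHFA); nothing goes to the stack until the r0-r7 and d0-d7 pools
 * are exhausted.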
*/ static CallInfo* get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { CallInfo *cinfo; ArgInfo *ainfo; int n, pstart, pindex; n = sig->hasthis + sig->param_count; if (mp) cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n)); else cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n)); cinfo->nargs = n; cinfo->pinvoke = sig->pinvoke; // Constrain this to OSX only for now #ifdef TARGET_OSX cinfo->vararg = sig->call_convention == MONO_CALL_VARARG; #endif /* Return value */ add_param (cinfo, &cinfo->ret, sig->ret); if (cinfo->ret.storage == ArgVtypeByRef) cinfo->ret.reg = ARMREG_R8; /* Reset state */ cinfo->gr = 0; cinfo->fr = 0; cinfo->stack_usage = 0; /* Parameters */ if (sig->hasthis) add_general (cinfo, cinfo->args + 0, 8, FALSE); pstart = 0; for (pindex = pstart; pindex < sig->param_count; ++pindex) { ainfo = cinfo->args + sig->hasthis + pindex; if ((sig->call_convention == MONO_CALL_VARARG) && (pindex == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ cinfo->gr = PARAM_REGS; cinfo->fr = FP_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_param (cinfo, &cinfo->sig_cookie, mono_get_int_type ()); } add_param (cinfo, ainfo, sig->params [pindex]); if (ainfo->storage == ArgVtypeByRef) { /* Pass the argument address in the next register */ if (cinfo->gr >= PARAM_REGS) { ainfo->storage = ArgVtypeByRefOnStack; cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, 8); ainfo->offset = cinfo->stack_usage; cinfo->stack_usage += 8; } else { ainfo->reg = cinfo->gr; cinfo->gr ++; } } } /* Handle the case where there are no implicit arguments */ if ((sig->call_convention == MONO_CALL_VARARG) && (pindex == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ cinfo->gr = PARAM_REGS; cinfo->fr = FP_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_param (cinfo, &cinfo->sig_cookie, mono_get_int_type ()); } cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT); return cinfo; } static int arg_need_temp (ArgInfo *ainfo) { if (ainfo->storage == ArgHFA && ainfo->esize == 4) return ainfo->size; return 0; } static gpointer arg_get_storage (CallContext *ccontext, ArgInfo *ainfo) { switch (ainfo->storage) { case ArgVtypeInIRegs: case ArgInIReg: return &ccontext->gregs [ainfo->reg]; case ArgInFReg: case ArgInFRegR4: case ArgHFA: return &ccontext->fregs [ainfo->reg]; case ArgOnStack: case ArgOnStackR4: case ArgOnStackR8: case ArgVtypeOnStack: return ccontext->stack + ainfo->offset; case ArgVtypeByRef: return (gpointer) ccontext->gregs [ainfo->reg]; default: g_error ("Arg storage type not yet supported"); } } static void arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest) { g_assert (arg_need_temp (ainfo)); float *dest_float = (float*)dest; for (int k = 0; k < ainfo->nregs; k++) { *dest_float = *(float*)&ccontext->fregs [ainfo->reg + k]; dest_float++; } } static void arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src) { g_assert (arg_need_temp (ainfo)); float *src_float = (float*)src; for (int k = 0; k < ainfo->nregs; k++) { *(float*)&ccontext->fregs [ainfo->reg + k] = *src_float; src_float++; } } /* Set arguments in the ccontext (for i2n entry) */ void mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { const MonoEECallbacks *interp_cb = mini_get_interp_callbacks (); CallInfo *cinfo = get_call_info (NULL, sig); 
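/*
 * Added note (informational): cinfo now describes the storage class of every
 * argument; the loop below copies each interp frame value into the matching
 * ccontext->gregs/fregs entry or stack slot, using a temporary buffer only
 * for single precision HFAs, which need repacking (see arg_need_temp ()).
 */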
gpointer storage; ArgInfo *ainfo; memset (ccontext, 0, sizeof (CallContext)); ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT); if (ccontext->stack_size) ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size); if (sig->ret->type != MONO_TYPE_VOID) { ainfo = &cinfo->ret; if (ainfo->storage == ArgVtypeByRef) { storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1); ccontext->gregs [cinfo->ret.reg] = (gsize)storage; } } g_assert (!sig->hasthis); for (int i = 0; i < sig->param_count; i++) { ainfo = &cinfo->args [i]; if (ainfo->storage == ArgVtypeByRef) { ccontext->gregs [ainfo->reg] = (host_mgreg_t)interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, i); continue; } int temp_size = arg_need_temp (ainfo); if (temp_size) storage = alloca (temp_size); // FIXME? alloca in a loop else storage = arg_get_storage (ccontext, ainfo); interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage); if (temp_size) arg_set_val (ccontext, ainfo, storage); } g_free (cinfo); } /* Set return value in the ccontext (for n2i return) */ void mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp) { const MonoEECallbacks *interp_cb; CallInfo *cinfo; gpointer storage; ArgInfo *ainfo; if (sig->ret->type == MONO_TYPE_VOID) return; interp_cb = mini_get_interp_callbacks (); cinfo = get_call_info (NULL, sig); ainfo = &cinfo->ret; if (retp) { g_assert (ainfo->storage == ArgVtypeByRef); interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, retp); } else { g_assert (ainfo->storage != ArgVtypeByRef); int temp_size = arg_need_temp (ainfo); if (temp_size) storage = alloca (temp_size); else storage = arg_get_storage (ccontext, ainfo); memset (ccontext, 0, sizeof (CallContext)); // FIXME interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage); if (temp_size) arg_set_val (ccontext, ainfo, storage); } g_free (cinfo); } /* Gets the arguments from ccontext (for n2i entry) */ gpointer mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { const MonoEECallbacks *interp_cb = mini_get_interp_callbacks (); CallInfo *cinfo = get_call_info (NULL, sig); gpointer storage; ArgInfo *ainfo; for (int i = 0; i < sig->param_count + sig->hasthis; i++) { ainfo = &cinfo->args [i]; int temp_size = arg_need_temp (ainfo); if (temp_size) { storage = alloca (temp_size); // FIXME? 
alloca in a loop arg_get_val (ccontext, ainfo, storage); } else { storage = arg_get_storage (ccontext, ainfo); } interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage); } storage = NULL; if (sig->ret->type != MONO_TYPE_VOID) { ainfo = &cinfo->ret; if (ainfo->storage == ArgVtypeByRef) storage = (gpointer) ccontext->gregs [cinfo->ret.reg]; } g_free (cinfo); return storage; } /* Gets the return value from ccontext (for i2n exit) */ void mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { const MonoEECallbacks *interp_cb; CallInfo *cinfo; ArgInfo *ainfo; gpointer storage; if (sig->ret->type == MONO_TYPE_VOID) return; interp_cb = mini_get_interp_callbacks (); cinfo = get_call_info (NULL, sig); ainfo = &cinfo->ret; if (ainfo->storage != ArgVtypeByRef) { int temp_size = arg_need_temp (ainfo); if (temp_size) { storage = alloca (temp_size); arg_get_val (ccontext, ainfo, storage); } else { storage = arg_get_storage (ccontext, ainfo); } interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage); } g_free (cinfo); } typedef struct { MonoMethodSignature *sig; CallInfo *cinfo; MonoType *rtype; MonoType **param_types; int n_fpargs, n_fpret, nullable_area; } ArchDynCallInfo; static gboolean dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig) { int i; // FIXME: Add more cases switch (cinfo->ret.storage) { case ArgNone: case ArgInIReg: case ArgInFReg: case ArgInFRegR4: case ArgVtypeByRef: break; case ArgVtypeInIRegs: if (cinfo->ret.nregs > 2) return FALSE; break; case ArgHFA: break; default: return FALSE; } for (i = 0; i < cinfo->nargs; ++i) { ArgInfo *ainfo = &cinfo->args [i]; switch (ainfo->storage) { case ArgInIReg: case ArgVtypeInIRegs: case ArgInFReg: case ArgInFRegR4: case ArgHFA: case ArgVtypeByRef: case ArgVtypeByRefOnStack: case ArgOnStack: case ArgVtypeOnStack: break; default: return FALSE; } } return TRUE; } MonoDynCallInfo* mono_arch_dyn_call_prepare (MonoMethodSignature *sig) { ArchDynCallInfo *info; CallInfo *cinfo; int i, aindex; cinfo = get_call_info (NULL, sig); if (!dyn_call_supported (cinfo, sig)) { g_free (cinfo); return NULL; } info = g_new0 (ArchDynCallInfo, 1); // FIXME: Preprocess the info to speed up start_dyn_call () info->sig = sig; info->cinfo = cinfo; info->rtype = mini_get_underlying_type (sig->ret); info->param_types = g_new0 (MonoType*, sig->param_count); for (i = 0; i < sig->param_count; ++i) info->param_types [i] = mini_get_underlying_type (sig->params [i]); switch (cinfo->ret.storage) { case ArgInFReg: case ArgInFRegR4: info->n_fpret = 1; break; case ArgHFA: info->n_fpret = cinfo->ret.nregs; break; default: break; } for (aindex = 0; aindex < sig->param_count; aindex++) { MonoType *t = info->param_types [aindex]; if (m_type_is_byref (t)) continue; switch (t->type) { case MONO_TYPE_GENERICINST: if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) { MonoClass *klass = mono_class_from_mono_type_internal (t); int size; /* Nullables need a temporary buffer, its stored at the end of DynCallArgs.regs after the stack args */ size = mono_class_value_size (klass, NULL); info->nullable_area += size; } break; default: break; } } return (MonoDynCallInfo*)info; } void mono_arch_dyn_call_free (MonoDynCallInfo *info) { ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info; g_free (ainfo->cinfo); g_free (ainfo->param_types); g_free (ainfo); } int mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info) { ArchDynCallInfo *ainfo = 
(ArchDynCallInfo*)info; g_assert (ainfo->cinfo->stack_usage % MONO_ARCH_FRAME_ALIGNMENT == 0); return sizeof (DynCallArgs) + ainfo->cinfo->stack_usage + ainfo->nullable_area; } static double bitcast_r4_to_r8 (float f) { float *p = &f; return *(double*)p; } static float bitcast_r8_to_r4 (double f) { double *p = &f; return *(float*)p; } void mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf) { ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info; DynCallArgs *p = (DynCallArgs*)buf; int aindex, arg_index, greg, i, pindex; MonoMethodSignature *sig = dinfo->sig; CallInfo *cinfo = dinfo->cinfo; int buffer_offset = 0; guint8 *nullable_buffer; p->res = 0; p->ret = ret; p->n_fpargs = dinfo->n_fpargs; p->n_fpret = dinfo->n_fpret; p->n_stackargs = cinfo->stack_usage / sizeof (host_mgreg_t); arg_index = 0; greg = 0; pindex = 0; /* Stored after the stack arguments */ nullable_buffer = (guint8*)&(p->regs [PARAM_REGS + 1 + (cinfo->stack_usage / sizeof (host_mgreg_t))]); if (sig->hasthis) p->regs [greg ++] = (host_mgreg_t)*(args [arg_index ++]); if (cinfo->ret.storage == ArgVtypeByRef) p->regs [ARMREG_R8] = (host_mgreg_t)ret; for (aindex = pindex; aindex < sig->param_count; aindex++) { MonoType *t = dinfo->param_types [aindex]; gpointer *arg = args [arg_index ++]; ArgInfo *ainfo = &cinfo->args [aindex + sig->hasthis]; int slot = -1; if (ainfo->storage == ArgOnStack || ainfo->storage == ArgVtypeOnStack || ainfo->storage == ArgVtypeByRefOnStack) { slot = PARAM_REGS + 1 + (ainfo->offset / sizeof (host_mgreg_t)); } else { slot = ainfo->reg; } if (m_type_is_byref (t)) { p->regs [slot] = (host_mgreg_t)*arg; continue; } if (ios_abi && ainfo->storage == ArgOnStack) { guint8 *stack_arg = (guint8*)&(p->regs [PARAM_REGS + 1]) + ainfo->offset; gboolean handled = TRUE; /* Special case arguments smaller than 1 machine word */ switch (t->type) { case MONO_TYPE_U1: *(guint8*)stack_arg = *(guint8*)arg; break; case MONO_TYPE_I1: *(gint8*)stack_arg = *(gint8*)arg; break; case MONO_TYPE_U2: *(guint16*)stack_arg = *(guint16*)arg; break; case MONO_TYPE_I2: *(gint16*)stack_arg = *(gint16*)arg; break; case MONO_TYPE_I4: *(gint32*)stack_arg = *(gint32*)arg; break; case MONO_TYPE_U4: *(guint32*)stack_arg = *(guint32*)arg; break; default: handled = FALSE; break; } if (handled) continue; } switch (t->type) { case MONO_TYPE_OBJECT: case MONO_TYPE_PTR: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_I8: case MONO_TYPE_U8: p->regs [slot] = (host_mgreg_t)*arg; break; case MONO_TYPE_U1: p->regs [slot] = *(guint8*)arg; break; case MONO_TYPE_I1: p->regs [slot] = *(gint8*)arg; break; case MONO_TYPE_I2: p->regs [slot] = *(gint16*)arg; break; case MONO_TYPE_U2: p->regs [slot] = *(guint16*)arg; break; case MONO_TYPE_I4: p->regs [slot] = *(gint32*)arg; break; case MONO_TYPE_U4: p->regs [slot] = *(guint32*)arg; break; case MONO_TYPE_R4: p->fpregs [ainfo->reg] = bitcast_r4_to_r8 (*(float*)arg); p->n_fpargs ++; break; case MONO_TYPE_R8: p->fpregs [ainfo->reg] = *(double*)arg; p->n_fpargs ++; break; case MONO_TYPE_GENERICINST: if (MONO_TYPE_IS_REFERENCE (t)) { p->regs [slot] = (host_mgreg_t)*arg; break; } else { if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) { MonoClass *klass = mono_class_from_mono_type_internal (t); guint8 *nullable_buf; int size; /* * Use p->buffer as a temporary buffer since the data needs to be available after this call * if the nullable param is passed by ref. 
*/ size = mono_class_value_size (klass, NULL); nullable_buf = nullable_buffer + buffer_offset; buffer_offset += size; g_assert (buffer_offset <= dinfo->nullable_area); /* The argument pointed to by arg is either a boxed vtype or null */ mono_nullable_init (nullable_buf, (MonoObject*)arg, klass); arg = (gpointer*)nullable_buf; /* Fall though */ } else { /* Fall though */ } } case MONO_TYPE_VALUETYPE: switch (ainfo->storage) { case ArgVtypeInIRegs: for (i = 0; i < ainfo->nregs; ++i) p->regs [slot ++] = ((host_mgreg_t*)arg) [i]; break; case ArgHFA: if (ainfo->esize == 4) { for (i = 0; i < ainfo->nregs; ++i) p->fpregs [ainfo->reg + i] = bitcast_r4_to_r8 (((float*)arg) [ainfo->foffsets [i] / 4]); } else { for (i = 0; i < ainfo->nregs; ++i) p->fpregs [ainfo->reg + i] = ((double*)arg) [ainfo->foffsets [i] / 8]; } p->n_fpargs += ainfo->nregs; break; case ArgVtypeByRef: case ArgVtypeByRefOnStack: p->regs [slot] = (host_mgreg_t)arg; break; case ArgVtypeOnStack: for (i = 0; i < ainfo->size / 8; ++i) p->regs [slot ++] = ((host_mgreg_t*)arg) [i]; break; default: g_assert_not_reached (); break; } break; default: g_assert_not_reached (); } } } void mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf) { ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info; CallInfo *cinfo = ainfo->cinfo; DynCallArgs *args = (DynCallArgs*)buf; MonoType *ptype = ainfo->rtype; guint8 *ret = args->ret; host_mgreg_t res = args->res; host_mgreg_t res2 = args->res2; int i; if (cinfo->ret.storage == ArgVtypeByRef) return; switch (ptype->type) { case MONO_TYPE_VOID: *(gpointer*)ret = NULL; break; case MONO_TYPE_OBJECT: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: *(gpointer*)ret = (gpointer)res; break; case MONO_TYPE_I1: *(gint8*)ret = res; break; case MONO_TYPE_U1: *(guint8*)ret = res; break; case MONO_TYPE_I2: *(gint16*)ret = res; break; case MONO_TYPE_U2: *(guint16*)ret = res; break; case MONO_TYPE_I4: *(gint32*)ret = res; break; case MONO_TYPE_U4: *(guint32*)ret = res; break; case MONO_TYPE_I8: case MONO_TYPE_U8: *(guint64*)ret = res; break; case MONO_TYPE_R4: *(float*)ret = bitcast_r8_to_r4 (args->fpregs [0]); break; case MONO_TYPE_R8: *(double*)ret = args->fpregs [0]; break; case MONO_TYPE_GENERICINST: if (MONO_TYPE_IS_REFERENCE (ptype)) { *(gpointer*)ret = (gpointer)res; break; } else { /* Fall though */ } case MONO_TYPE_VALUETYPE: switch (ainfo->cinfo->ret.storage) { case ArgVtypeInIRegs: *(host_mgreg_t*)ret = res; if (ainfo->cinfo->ret.nregs > 1) ((host_mgreg_t*)ret) [1] = res2; break; case ArgHFA: /* Use the same area for returning fp values */ if (cinfo->ret.esize == 4) { for (i = 0; i < cinfo->ret.nregs; ++i) ((float*)ret) [cinfo->ret.foffsets [i] / 4] = bitcast_r8_to_r4 (args->fpregs [i]); } else { for (i = 0; i < cinfo->ret.nregs; ++i) ((double*)ret) [cinfo->ret.foffsets [i] / 8] = args->fpregs [i]; } break; default: g_assert_not_reached (); break; } break; default: g_assert_not_reached (); } } #if __APPLE__ G_BEGIN_DECLS void sys_icache_invalidate (void *start, size_t len); G_END_DECLS #endif void mono_arch_flush_icache (guint8 *code, gint size) { #ifndef MONO_CROSS_COMPILE #if __APPLE__ sys_icache_invalidate (code, size); #else /* Don't rely on GCC's __clear_cache implementation, as it caches * icache/dcache cache line sizes, that can vary between cores on * big.LITTLE architectures. */ guint64 end = (guint64) (code + size); guint64 addr; /* always go with cacheline size of 4 bytes as this code isn't perf critical * anyway. 
Reading the cache line size from a machine register can be racy * on a big.LITTLE architecture if the cores don't have the same cache line * sizes. */ const size_t icache_line_size = 4; const size_t dcache_line_size = 4; addr = (guint64) code & ~(guint64) (dcache_line_size - 1); for (; addr < end; addr += dcache_line_size) asm volatile("dc civac, %0" : : "r" (addr) : "memory"); asm volatile("dsb ish" : : : "memory"); addr = (guint64) code & ~(guint64) (icache_line_size - 1); for (; addr < end; addr += icache_line_size) asm volatile("ic ivau, %0" : : "r" (addr) : "memory"); asm volatile ("dsb ish" : : : "memory"); asm volatile ("isb" : : : "memory"); #endif #endif } #ifndef DISABLE_JIT gboolean mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode) { NOT_IMPLEMENTED; return FALSE; } GList * mono_arch_get_allocatable_int_vars (MonoCompile *cfg) { GList *vars = NULL; int i; for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); /* unused vars */ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos) continue; if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG)) continue; if (mono_is_regsize_var (ins->inst_vtype)) { g_assert (MONO_VARINFO (cfg, i)->reg == -1); g_assert (i == vmv->idx); vars = g_list_prepend (vars, vmv); } } vars = mono_varlist_sort (cfg, vars, 0); return vars; } GList * mono_arch_get_global_int_regs (MonoCompile *cfg) { GList *regs = NULL; int i; /* r28 is reserved for cfg->arch.args_reg */ /* r27 is reserved for the imt argument */ for (i = ARMREG_R19; i <= ARMREG_R26; ++i) regs = g_list_prepend (regs, GUINT_TO_POINTER (i)); return regs; } guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv) { MonoInst *ins = cfg->varinfo [vmv->idx]; if (ins->opcode == OP_ARG) return 1; else return 2; } void mono_arch_create_vars (MonoCompile *cfg) { MonoMethodSignature *sig; CallInfo *cinfo; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; if (cinfo->ret.storage == ArgVtypeByRef) { cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); cfg->vret_addr->flags |= MONO_INST_VOLATILE; } if (cfg->gen_sdb_seq_points) { MonoInst *ins; if (cfg->compile_aot) { ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.seq_point_info_var = ins; } ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.ss_tramp_var = ins; ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.bp_tramp_var = ins; } if (cfg->method->save_lmf) { cfg->create_lmf_var = TRUE; cfg->lmf_ir = TRUE; } } void mono_arch_allocate_vars (MonoCompile *cfg) { MonoMethodSignature *sig; MonoInst *ins; CallInfo *cinfo; ArgInfo *ainfo; int i, offset, size, align; guint32 locals_stack_size, locals_stack_align; gint32 *offsets; /* * Allocate arguments and locals to either register (OP_REGVAR) or to a stack slot (OP_REGOFFSET). * Compute cfg->stack_offset and update cfg->used_int_regs. */ sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; /* * The ARM64 ABI always uses a frame pointer. 
 * The instruction set prefers positive offsets, so fp points to the bottom of the
 * frame, and stack slots are at positive offsets.
 * If some arguments are received on the stack, their offsets relative to fp can
 * not be computed right now because the stack frame might grow due to spilling
 * done by the local register allocator. To solve this, we reserve a register
 * which points to them.
 * The stack frame looks like this:
 * args_reg -> <bottom of parent frame>
 * <locals etc>
 * fp -> <saved fp+lr>
 * sp -> <localloc/params area>
 */
	cfg->frame_reg = ARMREG_FP;
	cfg->flags |= MONO_CFG_HAS_SPILLUP;

	offset = 0;

	/* Saved fp+lr */
	offset += 16;

	if (cinfo->stack_usage) {
		g_assert (!(cfg->used_int_regs & (1 << ARMREG_R28)));
		cfg->arch.args_reg = ARMREG_R28;
		cfg->used_int_regs |= 1 << ARMREG_R28;
	}

	if (cfg->method->save_lmf) {
		/* The LMF var is allocated normally */
	} else {
		/* Callee saved regs */
		cfg->arch.saved_gregs_offset = offset;
		for (i = 0; i < 32; ++i)
			if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) && (cfg->used_int_regs & (1 << i)))
				offset += 8;
	}

	/* Return value */
	switch (cinfo->ret.storage) {
	case ArgNone:
		break;
	case ArgInIReg:
	case ArgInFReg:
	case ArgInFRegR4:
		cfg->ret->opcode = OP_REGVAR;
		cfg->ret->dreg = cinfo->ret.reg;
		break;
	case ArgVtypeInIRegs:
	case ArgHFA:
		/* Allocate a local to hold the result, the epilog will copy it to the correct place */
		cfg->ret->opcode = OP_REGOFFSET;
		cfg->ret->inst_basereg = cfg->frame_reg;
		cfg->ret->inst_offset = offset;
		if (cinfo->ret.storage == ArgHFA)
			// FIXME:
			offset += 64;
		else
			offset += 16;
		break;
	case ArgVtypeByRef:
		/* This variable will be initialized in the prolog from R8 */
		cfg->vret_addr->opcode = OP_REGOFFSET;
		cfg->vret_addr->inst_basereg = cfg->frame_reg;
		cfg->vret_addr->inst_offset = offset;
		offset += 8;
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			printf ("vret_addr =");
			mono_print_ins (cfg->vret_addr);
		}
		break;
	default:
		g_assert_not_reached ();
		break;
	}
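	/*
	 * (Illustrative summary) The loop below gives each incoming argument a
	 * home: register arguments get a frame slot that the prolog spills them
	 * to, on-stack arguments stay in the parent frame and are addressed
	 * through cfg->arch.args_reg, and by-ref vtypes get an OP_VTARG_ADDR
	 * indirection through the stored address.
	 */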
	/* Arguments */
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ainfo = cinfo->args + i;

		ins = cfg->args [i];
		if (ins->opcode == OP_REGVAR)
			continue;

		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;

		switch (ainfo->storage) {
		case ArgInIReg:
		case ArgInFReg:
		case ArgInFRegR4:
			// FIXME: Use nregs/size
			/* These will be copied to the stack in the prolog */
			ins->inst_offset = offset;
			offset += 8;
			break;
		case ArgOnStack:
		case ArgOnStackR4:
		case ArgOnStackR8:
		case ArgVtypeOnStack:
			/* These are in the parent frame */
			g_assert (cfg->arch.args_reg);
			ins->inst_basereg = cfg->arch.args_reg;
			ins->inst_offset = ainfo->offset;
			break;
		case ArgVtypeInIRegs:
		case ArgHFA:
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			/* These arguments are saved to the stack in the prolog */
			ins->inst_offset = offset;
			if (cfg->verbose_level >= 2)
				printf ("arg %d allocated to %s+0x%0x.\n", i, mono_arch_regname (ins->inst_basereg), (int)ins->inst_offset);
			if (ainfo->storage == ArgHFA)
				// FIXME:
				offset += 64;
			else
				offset += 16;
			break;
		case ArgVtypeByRefOnStack: {
			MonoInst *vtaddr;

			if (ainfo->gsharedvt) {
				ins->opcode = OP_REGOFFSET;
				ins->inst_basereg = cfg->arch.args_reg;
				ins->inst_offset = ainfo->offset;
				break;
			}

			/* The vtype address is in the parent frame */
			g_assert (cfg->arch.args_reg);
			MONO_INST_NEW (cfg, vtaddr, 0);
			vtaddr->opcode = OP_REGOFFSET;
			vtaddr->inst_basereg = cfg->arch.args_reg;
			vtaddr->inst_offset = ainfo->offset;

			/* Need an indirection */
			ins->opcode = OP_VTARG_ADDR;
			ins->inst_left = vtaddr;
			break;
		}
		case ArgVtypeByRef: {
			MonoInst *vtaddr;

			if (ainfo->gsharedvt) {
				ins->opcode = OP_REGOFFSET;
				ins->inst_basereg = cfg->frame_reg;
				ins->inst_offset = offset;
				offset += 8;
				break;
			}

			/* The vtype address is in a register, will be copied to the stack in the prolog */
			MONO_INST_NEW (cfg, vtaddr, 0);
			vtaddr->opcode = OP_REGOFFSET;
			vtaddr->inst_basereg = cfg->frame_reg;
			vtaddr->inst_offset = offset;
			offset += 8;

			/* Need an indirection */
			ins->opcode = OP_VTARG_ADDR;
			ins->inst_left = vtaddr;
			break;
		}
		default:
			g_assert_not_reached ();
			break;
		}
	}

	/* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
	// FIXME: Allocate these to registers
	ins = cfg->arch.seq_point_info_var;
	if (ins) {
		size = 8;
		align = 8;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}
	ins = cfg->arch.ss_tramp_var;
	if (ins) {
		size = 8;
		align = 8;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}
	ins = cfg->arch.bp_tramp_var;
	if (ins) {
		size = 8;
		align = 8;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}

	/* Locals */
	offsets = mono_allocate_stack_slots (cfg, FALSE, &locals_stack_size, &locals_stack_align);
	if (locals_stack_align)
		offset = ALIGN_TO (offset, locals_stack_align);

	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		if (offsets [i] != -1) {
			ins = cfg->varinfo [i];
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			ins->inst_offset = offset + offsets [i];
			//printf ("allocated local %d to ", i); mono_print_tree_nl (ins);
		}
	}

	offset += locals_stack_size;

	offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);

	cfg->stack_offset = offset;
}

#ifdef ENABLE_LLVM
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
	int i, n;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	LLVMCallInfo *linfo;

	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg->mempool, sig);

	linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));

	switch (cinfo->ret.storage) {
	case ArgInIReg:
	case ArgInFReg:
	case ArgInFRegR4:
		linfo->ret.storage = LLVMArgNormal;
		break;
	case ArgNone:
		linfo->ret.storage = LLVMArgNone;
		break;
	case ArgVtypeByRef:
		linfo->ret.storage = LLVMArgVtypeByRef;
		break;
	//
	// FIXME: This doesn't work yet since the llvm backend represents these types as an i8
	// array which is returned in int regs
	//
	case ArgHFA:
		linfo->ret.storage = LLVMArgFpStruct;
		linfo->ret.nslots = cinfo->ret.nregs;
		linfo->ret.esize = cinfo->ret.esize;
		break;
	case ArgVtypeInIRegs:
		/* LLVM models this by returning an int */
		linfo->ret.storage = LLVMArgVtypeAsScalar;
		linfo->ret.nslots = cinfo->ret.nregs;
		linfo->ret.esize = cinfo->ret.esize;
		break;
	default:
		g_assert_not_reached ();
		break;
	}

	for (i = 0; i < n; ++i) {
		LLVMArgInfo *lainfo = &linfo->args [i];

		ainfo = cinfo->args + i;

		lainfo->storage = LLVMArgNone;

		switch (ainfo->storage) {
		case ArgInIReg:
		case ArgInFReg:
		case ArgInFRegR4:
		case ArgOnStack:
		case ArgOnStackR4:
		case ArgOnStackR8:
			lainfo->storage = LLVMArgNormal;
			break;
		case ArgVtypeByRef:
		case ArgVtypeByRefOnStack:
			lainfo->storage = LLVMArgVtypeByRef;
			break;
		case ArgHFA: {
			int j;

			lainfo->storage = LLVMArgAsFpArgs;
			lainfo->nslots = ainfo->nregs;
			lainfo->esize = ainfo->esize;
			for (j = 0; j < ainfo->nregs; ++j)
				lainfo->pair_storage [j] = LLVMArgInFPReg;
			break;
		}
		case ArgVtypeInIRegs:
			lainfo->storage = LLVMArgAsIArgs;
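			/* (Note) The vtype is passed in consecutive integer registers;
			   LLVM sees it as an array of host-word-sized integers. */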
lainfo->nslots = ainfo->nregs; break; case ArgVtypeOnStack: if (ainfo->hfa) { int j; /* Same as above */ lainfo->storage = LLVMArgAsFpArgs; lainfo->nslots = ainfo->nregs; lainfo->esize = ainfo->esize; lainfo->ndummy_fpargs = ainfo->nfregs_to_skip; for (j = 0; j < ainfo->nregs; ++j) lainfo->pair_storage [j] = LLVMArgInFPReg; } else { lainfo->storage = LLVMArgAsIArgs; lainfo->nslots = ainfo->size / 8; } break; default: g_assert_not_reached (); break; } } return linfo; } #endif static void add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *arg) { MonoInst *ins; switch (storage) { case ArgInIReg: MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg_copy (cfg, arg->dreg); ins->sreg1 = arg->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE); break; case ArgInFReg: MONO_INST_NEW (cfg, ins, OP_FMOVE); ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = arg->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE); break; case ArgInFRegR4: if (COMPILE_LLVM (cfg)) MONO_INST_NEW (cfg, ins, OP_FMOVE); else if (cfg->r4fp) MONO_INST_NEW (cfg, ins, OP_RMOVE); else MONO_INST_NEW (cfg, ins, OP_ARM_SETFREG_R4); ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = arg->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE); break; default: g_assert_not_reached (); break; } } static void emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo) { MonoMethodSignature *tmp_sig; int sig_reg; if (MONO_IS_TAILCALL_OPCODE (call)) NOT_IMPLEMENTED; g_assert (cinfo->sig_cookie.storage == ArgOnStack); /* * mono_ArgIterator_Setup assumes the signature cookie is * passed first and all the arguments which were before it are * passed on the stack after the signature. So compensate by * passing a different signature. */ tmp_sig = mono_metadata_signature_dup (call->signature); tmp_sig->param_count -= call->signature->sentinelpos; tmp_sig->sentinelpos = 0; memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*)); sig_reg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg); } void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call) { MonoMethodSignature *sig; MonoInst *arg, *vtarg; CallInfo *cinfo; ArgInfo *ainfo; int i; sig = call->signature; cinfo = get_call_info (cfg->mempool, sig); switch (cinfo->ret.storage) { case ArgVtypeInIRegs: case ArgHFA: if (MONO_IS_TAILCALL_OPCODE (call)) break; /* * The vtype is returned in registers, save the return area address in a local, and save the vtype into * the location pointed to by it after call in emit_move_return_value (). 
*/ if (!cfg->arch.vret_addr_loc) { cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); /* Prevent it from being register allocated or optimized away */ cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE; } MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg); break; case ArgVtypeByRef: /* Pass the vtype return address in R8 */ g_assert (!MONO_IS_TAILCALL_OPCODE (call) || call->vret_var == cfg->vret_addr); MONO_INST_NEW (cfg, vtarg, OP_MOVE); vtarg->sreg1 = call->vret_var->dreg; vtarg->dreg = mono_alloc_preg (cfg); MONO_ADD_INS (cfg->cbb, vtarg); mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE); break; default: break; } for (i = 0; i < cinfo->nargs; ++i) { ainfo = cinfo->args + i; arg = call->args [i]; if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Emit the signature cookie just before the implicit arguments */ emit_sig_cookie (cfg, call, cinfo); } switch (ainfo->storage) { case ArgInIReg: case ArgInFReg: case ArgInFRegR4: add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, arg); break; case ArgOnStack: switch (ainfo->slot_size) { case 8: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg); break; case 4: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg); break; case 2: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg); break; case 1: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg); break; default: g_assert_not_reached (); break; } break; case ArgOnStackR8: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg); break; case ArgOnStackR4: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg); break; case ArgVtypeInIRegs: case ArgVtypeByRef: case ArgVtypeByRefOnStack: case ArgVtypeOnStack: case ArgHFA: { MonoInst *ins; guint32 align; guint32 size; size = mono_class_value_size (arg->klass, &align); MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->sreg1 = arg->dreg; ins->klass = arg->klass; ins->backend.size = size; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); break; } default: g_assert_not_reached (); break; } } /* Handle the case where there are no implicit arguments */ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (cinfo->nargs == sig->sentinelpos)) emit_sig_cookie (cfg, call, cinfo); call->call_info = cinfo; call->stack_usage = cinfo->stack_usage; } void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src) { MonoCallInst *call = (MonoCallInst*)ins->inst_p0; ArgInfo *ainfo = (ArgInfo*)ins->inst_p1; MonoInst *load; int i; if (ins->backend.size == 0 && !ainfo->gsharedvt) return; switch (ainfo->storage) { case ArgVtypeInIRegs: for (i = 0; i < ainfo->nregs; ++i) { // FIXME: Smaller sizes MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE); load->dreg = mono_alloc_ireg (cfg); load->inst_basereg = src->dreg; load->inst_offset = i * sizeof (target_mgreg_t); MONO_ADD_INS (cfg->cbb, load); add_outarg_reg (cfg, call, ArgInIReg, ainfo->reg + i, load); } break; case ArgHFA: for (i = 0; i < ainfo->nregs; ++i) { if (ainfo->esize == 4) MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE); else MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE); load->dreg = mono_alloc_freg (cfg); 
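			/* (Note) HFA members are read one by one at their offsets inside
			   the vtype (ainfo->foffsets) and moved into consecutive fp
			   argument registers by add_outarg_reg () below. */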
			load->inst_basereg = src->dreg;
			load->inst_offset = ainfo->foffsets [i];
			MONO_ADD_INS (cfg->cbb, load);
			add_outarg_reg (cfg, call, ainfo->esize == 4 ? ArgInFRegR4 : ArgInFReg, ainfo->reg + i, load);
		}
		break;
	case ArgVtypeByRef:
	case ArgVtypeByRefOnStack: {
		MonoInst *vtaddr, *load, *arg;

		/* Pass the vtype address in a reg/on the stack */
		if (ainfo->gsharedvt) {
			load = src;
		} else {
			/* Make a copy of the argument */
			vtaddr = mono_compile_create_var (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL);

			MONO_INST_NEW (cfg, load, OP_LDADDR);
			load->inst_p0 = vtaddr;
			vtaddr->flags |= MONO_INST_INDIRECT;
			load->type = STACK_MP;
			load->klass = vtaddr->klass;
			load->dreg = mono_alloc_ireg (cfg);
			MONO_ADD_INS (cfg->cbb, load);
			mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, ainfo->size, 8);
		}

		if (ainfo->storage == ArgVtypeByRef) {
			MONO_INST_NEW (cfg, arg, OP_MOVE);
			arg->dreg = mono_alloc_preg (cfg);
			arg->sreg1 = load->dreg;
			MONO_ADD_INS (cfg->cbb, arg);
			add_outarg_reg (cfg, call, ArgInIReg, ainfo->reg, arg);
		} else {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, load->dreg);
		}
		break;
	}
	case ArgVtypeOnStack:
		for (i = 0; i < ainfo->size / 8; ++i) {
			MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
			load->dreg = mono_alloc_ireg (cfg);
			load->inst_basereg = src->dreg;
			load->inst_offset = i * 8;
			MONO_ADD_INS (cfg->cbb, load);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset + (i * 8), load->dreg);
		}
		break;
	default:
		g_assert_not_reached ();
		break;
	}
}

void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
	MonoMethodSignature *sig;
	CallInfo *cinfo;

	sig = mono_method_signature_internal (cfg->method);
	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	switch (cinfo->ret.storage) {
	case ArgNone:
		break;
	case ArgInIReg:
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
		break;
	case ArgInFReg:
		MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
		break;
	case ArgInFRegR4:
		if (COMPILE_LLVM (cfg))
			MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
		else if (cfg->r4fp)
			MONO_EMIT_NEW_UNALU (cfg, OP_RMOVE, cfg->ret->dreg, val->dreg);
		else
			MONO_EMIT_NEW_UNALU (cfg, OP_ARM_SETFREG_R4, cfg->ret->dreg, val->dreg);
		break;
	default:
		g_assert_not_reached ();
		break;
	}
}
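/*
 * (Illustrative note) A tailcall is only accepted below when the callee's
 * argument area fits inside the caller's, the return value is handled the
 * same way on both sides, and no valuetype parameter is passed by reference,
 * since such a parameter is the address of a local in the very frame that
 * the tailcall replaces.
 */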
#ifndef DISABLE_JIT

gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
	g_assert (caller_sig);
	g_assert (callee_sig);

	CallInfo *caller_info = get_call_info (NULL, caller_sig);
	CallInfo *callee_info = get_call_info (NULL, callee_sig);

	gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage)
		&& IS_SUPPORTED_TAILCALL (caller_info->ret.storage == callee_info->ret.storage);

	// FIXME Limit stack_usage to 1G. emit_ldrx / strx has 32bit limits.
	res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (1 << 30));
	res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (1 << 30));

	// valuetype parameters are the address of a local
	const ArgInfo *ainfo;
	ainfo = callee_info->args + callee_sig->hasthis;
	for (int i = 0; res && i < callee_sig->param_count; ++i) {
		res = IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgVtypeByRef)
			&& IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgVtypeByRefOnStack);
	}

	g_free (caller_info);
	g_free (callee_info);

	return res;
}

#endif

gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
	return (imm >= -((gint64)1<<31) && imm <= (((gint64)1<<31)-1));
}

void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	//NOT_IMPLEMENTED;
}

void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	//NOT_IMPLEMENTED;
}

#define ADD_NEW_INS(cfg,dest,op) do { \
		MONO_INST_NEW ((cfg), (dest), (op)); \
		mono_bblock_insert_before_ins (bb, ins, (dest)); \
	} while (0)

void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *temp, *last_ins = NULL;

	MONO_BB_FOR_EACH_INS (bb, ins) {
		switch (ins->opcode) {
		case OP_SBB:
		case OP_ISBB:
		case OP_SUBCC:
		case OP_ISUBCC:
			if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
				/* ARM sets the C flag to 1 if there was _no_ overflow */
				ins->next->opcode = OP_COND_EXC_NC;
			break;
		case OP_IDIV_IMM:
		case OP_IREM_IMM:
		case OP_IDIV_UN_IMM:
		case OP_IREM_UN_IMM:
		case OP_LREM_IMM:
			mono_decompose_op_imm (cfg, bb, ins);
			break;
		case OP_LOCALLOC_IMM:
			if (ins->inst_imm > 32) {
				ADD_NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = temp->dreg;
				ins->opcode = mono_op_imm_to_op (ins->opcode);
			}
			break;
		case OP_ICOMPARE_IMM:
			if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_IBEQ) {
				ins->next->opcode = OP_ARM64_CBZW;
				ins->next->sreg1 = ins->sreg1;
				NULLIFY_INS (ins);
			} else if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_IBNE_UN) {
				ins->next->opcode = OP_ARM64_CBNZW;
				ins->next->sreg1 = ins->sreg1;
				NULLIFY_INS (ins);
			}
			break;
		case OP_LCOMPARE_IMM:
		case OP_COMPARE_IMM:
			if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_LBEQ) {
				ins->next->opcode = OP_ARM64_CBZX;
				ins->next->sreg1 = ins->sreg1;
				NULLIFY_INS (ins);
			} else if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_LBNE_UN) {
				ins->next->opcode = OP_ARM64_CBNZX;
				ins->next->sreg1 = ins->sreg1;
				NULLIFY_INS (ins);
			}
			break;
		case OP_FCOMPARE:
		case OP_RCOMPARE: {
			gboolean swap = FALSE;
			int reg;

			if (!ins->next) {
				/* Optimized away */
				NULLIFY_INS (ins);
				break;
			}

			/*
			 * FP compares with unordered operands set the flags
			 * to NZCV=0011, which matches some non-unordered compares
			 * as well, like LE, so have to swap the operands.
*/ switch (ins->next->opcode) { case OP_FBLT: ins->next->opcode = OP_FBGT; swap = TRUE; break; case OP_FBLE: ins->next->opcode = OP_FBGE; swap = TRUE; break; case OP_RBLT: ins->next->opcode = OP_RBGT; swap = TRUE; break; case OP_RBLE: ins->next->opcode = OP_RBGE; swap = TRUE; break; default: break; } if (swap) { reg = ins->sreg1; ins->sreg1 = ins->sreg2; ins->sreg2 = reg; } break; } default: break; } last_ins = ins; } bb->last_ins = last_ins; bb->max_vreg = cfg->next_vreg; } void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins) { } static int opcode_to_armcond (int opcode) { switch (opcode) { case OP_IBEQ: case OP_LBEQ: case OP_FBEQ: case OP_CEQ: case OP_ICEQ: case OP_LCEQ: case OP_FCEQ: case OP_RCEQ: case OP_COND_EXC_IEQ: case OP_COND_EXC_EQ: return ARMCOND_EQ; case OP_IBGE: case OP_LBGE: case OP_FBGE: case OP_ICGE: case OP_FCGE: case OP_RCGE: return ARMCOND_GE; case OP_IBGT: case OP_LBGT: case OP_FBGT: case OP_CGT: case OP_ICGT: case OP_LCGT: case OP_FCGT: case OP_RCGT: case OP_COND_EXC_IGT: case OP_COND_EXC_GT: return ARMCOND_GT; case OP_IBLE: case OP_LBLE: case OP_FBLE: case OP_ICLE: case OP_FCLE: case OP_RCLE: return ARMCOND_LE; case OP_IBLT: case OP_LBLT: case OP_FBLT: case OP_CLT: case OP_ICLT: case OP_LCLT: case OP_COND_EXC_ILT: case OP_COND_EXC_LT: return ARMCOND_LT; case OP_IBNE_UN: case OP_LBNE_UN: case OP_FBNE_UN: case OP_ICNEQ: case OP_FCNEQ: case OP_RCNEQ: case OP_COND_EXC_INE_UN: case OP_COND_EXC_NE_UN: return ARMCOND_NE; case OP_IBGE_UN: case OP_LBGE_UN: case OP_FBGE_UN: case OP_ICGE_UN: case OP_COND_EXC_IGE_UN: case OP_COND_EXC_GE_UN: return ARMCOND_HS; case OP_IBGT_UN: case OP_LBGT_UN: case OP_FBGT_UN: case OP_CGT_UN: case OP_ICGT_UN: case OP_LCGT_UN: case OP_FCGT_UN: case OP_RCGT_UN: case OP_COND_EXC_IGT_UN: case OP_COND_EXC_GT_UN: return ARMCOND_HI; case OP_IBLE_UN: case OP_LBLE_UN: case OP_FBLE_UN: case OP_ICLE_UN: case OP_COND_EXC_ILE_UN: case OP_COND_EXC_LE_UN: return ARMCOND_LS; case OP_IBLT_UN: case OP_LBLT_UN: case OP_FBLT_UN: case OP_CLT_UN: case OP_ICLT_UN: case OP_LCLT_UN: case OP_COND_EXC_ILT_UN: case OP_COND_EXC_LT_UN: return ARMCOND_LO; /* * FCMP sets the NZCV condition bits as follows: * eq = 0110 * < = 1000 * > = 0010 * unordered = 0011 * ARMCOND_LT is N!=V, so it matches unordered too, so * fclt and fclt_un need to be special cased. 
*/ case OP_FCLT: case OP_RCLT: /* N==1 */ return ARMCOND_MI; case OP_FCLT_UN: case OP_RCLT_UN: return ARMCOND_LT; case OP_COND_EXC_C: case OP_COND_EXC_IC: return ARMCOND_CS; case OP_COND_EXC_OV: case OP_COND_EXC_IOV: return ARMCOND_VS; case OP_COND_EXC_NC: case OP_COND_EXC_INC: return ARMCOND_CC; case OP_COND_EXC_NO: case OP_COND_EXC_INO: return ARMCOND_VC; default: printf ("%s\n", mono_inst_name (opcode)); g_assert_not_reached (); return -1; } } /* This clobbers LR */ static WARN_UNUSED_RESULT guint8* emit_cond_exc (MonoCompile *cfg, guint8 *code, int opcode, const char *exc_name) { int cond; cond = opcode_to_armcond (opcode); /* Capture PC */ arm_adrx (code, ARMREG_IP1, code); mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, exc_name, MONO_R_ARM64_BCC); arm_bcc (code, cond, 0); return code; } static guint8* emit_move_return_value (MonoCompile *cfg, guint8 * code, MonoInst *ins) { CallInfo *cinfo; MonoCallInst *call; call = (MonoCallInst*)ins; cinfo = call->call_info; g_assert (cinfo); switch (cinfo->ret.storage) { case ArgNone: break; case ArgInIReg: /* LLVM compiled code might only set the bottom bits */ if (call->signature && mini_get_underlying_type (call->signature->ret)->type == MONO_TYPE_I4) arm_sxtwx (code, call->inst.dreg, cinfo->ret.reg); else if (call->inst.dreg != cinfo->ret.reg) arm_movx (code, call->inst.dreg, cinfo->ret.reg); break; case ArgInFReg: if (call->inst.dreg != cinfo->ret.reg) arm_fmovd (code, call->inst.dreg, cinfo->ret.reg); break; case ArgInFRegR4: if (cfg->r4fp) arm_fmovs (code, call->inst.dreg, cinfo->ret.reg); else arm_fcvt_sd (code, call->inst.dreg, cinfo->ret.reg); break; case ArgVtypeInIRegs: { MonoInst *loc = cfg->arch.vret_addr_loc; int i; /* Load the destination address */ g_assert (loc && loc->opcode == OP_REGOFFSET); code = emit_ldrx (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset); for (i = 0; i < cinfo->ret.nregs; ++i) arm_strx (code, cinfo->ret.reg + i, ARMREG_LR, i * 8); break; } case ArgHFA: { MonoInst *loc = cfg->arch.vret_addr_loc; int i; /* Load the destination address */ g_assert (loc && loc->opcode == OP_REGOFFSET); code = emit_ldrx (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset); for (i = 0; i < cinfo->ret.nregs; ++i) { if (cinfo->ret.esize == 4) arm_strfpw (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.foffsets [i]); else arm_strfpx (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.foffsets [i]); } break; } case ArgVtypeByRef: break; default: g_assert_not_reached (); break; } return code; } /* * emit_branch_island: * * Emit a branch island for the conditional branches from cfg->native_code + start_offset to code. 
*/ static guint8* emit_branch_island (MonoCompile *cfg, guint8 *code, int start_offset) { MonoJumpInfo *ji; /* Iterate over the patch infos added so far by this bb */ int island_size = 0; for (ji = cfg->patch_info; ji; ji = ji->next) { if (ji->ip.i < start_offset) /* The patch infos are in reverse order, so this means the end */ break; if (ji->relocation == MONO_R_ARM64_BCC || ji->relocation == MONO_R_ARM64_CBZ) island_size += 4; } if (island_size) { code = realloc_code (cfg, island_size); /* Branch over the island */ arm_b (code, code + 4 + island_size); for (ji = cfg->patch_info; ji; ji = ji->next) { if (ji->ip.i < start_offset) break; if (ji->relocation == MONO_R_ARM64_BCC || ji->relocation == MONO_R_ARM64_CBZ) { /* Rewrite the cond branch so it branches to an unconditional branch in the branch island */ arm_patch_rel (cfg->native_code + ji->ip.i, code, ji->relocation); /* Rewrite the patch so it points to the unconditional branch */ ji->ip.i = code - cfg->native_code; ji->relocation = MONO_R_ARM64_B; arm_b (code, code); } } set_code_cursor (cfg, code); } return code; } void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins; MonoCallInst *call; guint8 *code = cfg->native_code + cfg->code_len; int start_offset, max_len, dreg, sreg1, sreg2; target_mgreg_t imm; if (cfg->verbose_level > 2) g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset); start_offset = code - cfg->native_code; g_assert (start_offset <= cfg->code_size); MONO_BB_FOR_EACH_INS (bb, ins) { guint offset = code - cfg->native_code; set_code_cursor (cfg, code); max_len = ins_get_size (ins->opcode); code = realloc_code (cfg, max_len); if (G_UNLIKELY (cfg->arch.cond_branch_islands && offset - start_offset > 4 * 0x1ffff)) { /* Emit a branch island for large basic blocks */ code = emit_branch_island (cfg, code, start_offset); offset = code - cfg->native_code; start_offset = offset; } mono_debug_record_line_number (cfg, ins, offset); dreg = ins->dreg; sreg1 = ins->sreg1; sreg2 = ins->sreg2; imm = ins->inst_imm; switch (ins->opcode) { case OP_ICONST: code = emit_imm (code, dreg, ins->inst_c0); break; case OP_I8CONST: code = emit_imm64 (code, dreg, ins->inst_c0); break; case OP_MOVE: if (dreg != sreg1) arm_movx (code, dreg, sreg1); break; case OP_NOP: case OP_RELAXED_NOP: break; case OP_JUMP_TABLE: mono_add_patch_info_rel (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0, MONO_R_ARM64_IMM); code = emit_imm64_template (code, dreg); break; case OP_BREAK: /* * gdb does not like encountering the hw breakpoint ins in the debugged code. * So instead of emitting a trap, we emit a call a C function and place a * breakpoint there. 
		code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
		break;
	case OP_LOCALLOC: {
		guint8 *buf [16];

		arm_addx_imm (code, ARMREG_IP0, sreg1, (MONO_ARCH_FRAME_ALIGNMENT - 1));
		// FIXME: andx_imm doesn't work yet
		code = emit_imm (code, ARMREG_IP1, -MONO_ARCH_FRAME_ALIGNMENT);
		arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1);
		//arm_andx_imm (code, ARMREG_IP0, sreg1, - MONO_ARCH_FRAME_ALIGNMENT);
		arm_movspx (code, ARMREG_IP1, ARMREG_SP);
		arm_subx (code, ARMREG_IP1, ARMREG_IP1, ARMREG_IP0);
		arm_movspx (code, ARMREG_SP, ARMREG_IP1);

		/* Init */
		/* ip1 = pointer, ip0 = end */
		arm_addx (code, ARMREG_IP0, ARMREG_IP1, ARMREG_IP0);
		buf [0] = code;
		arm_cmpx (code, ARMREG_IP1, ARMREG_IP0);
		buf [1] = code;
		arm_bcc (code, ARMCOND_EQ, 0);
		arm_stpx (code, ARMREG_RZR, ARMREG_RZR, ARMREG_IP1, 0);
		arm_addx_imm (code, ARMREG_IP1, ARMREG_IP1, 16);
		arm_b (code, buf [0]);
		arm_patch_rel (buf [1], code, MONO_R_ARM64_BCC);

		arm_movspx (code, dreg, ARMREG_SP);
		if (cfg->param_area)
			code = emit_subx_sp_imm (code, cfg->param_area);
		break;
	}
	case OP_LOCALLOC_IMM: {
		int imm, offset;

		imm = ALIGN_TO (ins->inst_imm, MONO_ARCH_FRAME_ALIGNMENT);
		g_assert (arm_is_arith_imm (imm));
		arm_subx_imm (code, ARMREG_SP, ARMREG_SP, imm);

		/* Init */
		g_assert (MONO_ARCH_FRAME_ALIGNMENT == 16);
		offset = 0;
		while (offset < imm) {
			arm_stpx (code, ARMREG_RZR, ARMREG_RZR, ARMREG_SP, offset);
			offset += 16;
		}
		arm_movspx (code, dreg, ARMREG_SP);
		if (cfg->param_area)
			code = emit_subx_sp_imm (code, cfg->param_area);
		break;
	}
	case OP_AOTCONST:
		code = emit_aotconst (cfg, code, dreg, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0);
		break;
	case OP_OBJC_GET_SELECTOR:
		mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
		/* See arch_emit_objc_selector_ref () in aot-compiler.c */
		arm_ldrx_lit (code, ins->dreg, 0);
		arm_nop (code);
		arm_nop (code);
		break;
	case OP_SEQ_POINT: {
		MonoInst *info_var = cfg->arch.seq_point_info_var;

		/*
		 * For AOT, we use one got slot per method, which will point to a
		 * SeqPointInfo structure, containing all the information required
		 * by the code below.
*/ if (cfg->compile_aot) { g_assert (info_var); g_assert (info_var->opcode == OP_REGOFFSET); } if (ins->flags & MONO_INST_SINGLE_STEP_LOC) { MonoInst *var = cfg->arch.ss_tramp_var; g_assert (var); g_assert (var->opcode == OP_REGOFFSET); /* Load ss_tramp_var */ /* This is equal to &ss_trampoline */ arm_ldrx (code, ARMREG_IP1, var->inst_basereg, var->inst_offset); /* Load the trampoline address */ arm_ldrx (code, ARMREG_IP1, ARMREG_IP1, 0); /* Call it if it is non-null */ arm_cbzx (code, ARMREG_IP1, code + 8); code = mono_arm_emit_blrx (code, ARMREG_IP1); } mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); if (cfg->compile_aot) { const guint32 offset = code - cfg->native_code; guint32 val; arm_ldrx (code, ARMREG_IP1, info_var->inst_basereg, info_var->inst_offset); /* Add the offset */ val = ((offset / 4) * sizeof (target_mgreg_t)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs); /* Load the info->bp_addrs [offset], which is either 0 or the address of the bp trampoline */ code = emit_ldrx (code, ARMREG_IP1, ARMREG_IP1, val); /* Skip the load if its 0 */ arm_cbzx (code, ARMREG_IP1, code + 8); /* Call the breakpoint trampoline */ code = mono_arm_emit_blrx (code, ARMREG_IP1); } else { MonoInst *var = cfg->arch.bp_tramp_var; g_assert (var); g_assert (var->opcode == OP_REGOFFSET); /* Load the address of the bp trampoline into IP0 */ arm_ldrx (code, ARMREG_IP0, var->inst_basereg, var->inst_offset); /* * A placeholder for a possible breakpoint inserted by * mono_arch_set_breakpoint (). */ arm_nop (code); } break; } /* BRANCH */ case OP_BR: mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb, MONO_R_ARM64_B); arm_b (code, code); break; case OP_BR_REG: arm_brx (code, sreg1); break; case OP_IBEQ: case OP_IBGE: case OP_IBGT: case OP_IBLE: case OP_IBLT: case OP_IBNE_UN: case OP_IBGE_UN: case OP_IBGT_UN: case OP_IBLE_UN: case OP_IBLT_UN: case OP_LBEQ: case OP_LBGE: case OP_LBGT: case OP_LBLE: case OP_LBLT: case OP_LBNE_UN: case OP_LBGE_UN: case OP_LBGT_UN: case OP_LBLE_UN: case OP_LBLT_UN: case OP_FBEQ: case OP_FBNE_UN: case OP_FBLT: case OP_FBGT: case OP_FBGT_UN: case OP_FBLE: case OP_FBGE: case OP_FBGE_UN: { int cond; mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC); cond = opcode_to_armcond (ins->opcode); arm_bcc (code, cond, 0); break; } case OP_FBLT_UN: mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC); /* For fp compares, ARMCOND_LT is lt or unordered */ arm_bcc (code, ARMCOND_LT, 0); break; case OP_FBLE_UN: mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC); arm_bcc (code, ARMCOND_EQ, 0); mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC); /* For fp compares, ARMCOND_LT is lt or unordered */ arm_bcc (code, ARMCOND_LT, 0); break; case OP_ARM64_CBZW: mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ); arm_cbzw (code, sreg1, 0); break; case OP_ARM64_CBZX: mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ); arm_cbzx (code, sreg1, 0); break; case OP_ARM64_CBNZW: mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ); arm_cbnzw (code, sreg1, 0); break; case OP_ARM64_CBNZX: mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ); arm_cbnzx (code, sreg1, 0); break; /* ALU */ case OP_IADD: arm_addw (code, dreg, sreg1, 
sreg2); break; case OP_LADD: arm_addx (code, dreg, sreg1, sreg2); break; case OP_ISUB: arm_subw (code, dreg, sreg1, sreg2); break; case OP_LSUB: arm_subx (code, dreg, sreg1, sreg2); break; case OP_IAND: arm_andw (code, dreg, sreg1, sreg2); break; case OP_LAND: arm_andx (code, dreg, sreg1, sreg2); break; case OP_IOR: arm_orrw (code, dreg, sreg1, sreg2); break; case OP_LOR: arm_orrx (code, dreg, sreg1, sreg2); break; case OP_IXOR: arm_eorw (code, dreg, sreg1, sreg2); break; case OP_LXOR: arm_eorx (code, dreg, sreg1, sreg2); break; case OP_INEG: arm_negw (code, dreg, sreg1); break; case OP_LNEG: arm_negx (code, dreg, sreg1); break; case OP_INOT: arm_mvnw (code, dreg, sreg1); break; case OP_LNOT: arm_mvnx (code, dreg, sreg1); break; case OP_IADDCC: arm_addsw (code, dreg, sreg1, sreg2); break; case OP_ADDCC: case OP_LADDCC: arm_addsx (code, dreg, sreg1, sreg2); break; case OP_ISUBCC: arm_subsw (code, dreg, sreg1, sreg2); break; case OP_LSUBCC: case OP_SUBCC: arm_subsx (code, dreg, sreg1, sreg2); break; case OP_ICOMPARE: arm_cmpw (code, sreg1, sreg2); break; case OP_COMPARE: case OP_LCOMPARE: arm_cmpx (code, sreg1, sreg2); break; case OP_IADD_IMM: code = emit_addw_imm (code, dreg, sreg1, imm); break; case OP_LADD_IMM: case OP_ADD_IMM: code = emit_addx_imm (code, dreg, sreg1, imm); break; case OP_ISUB_IMM: code = emit_subw_imm (code, dreg, sreg1, imm); break; case OP_LSUB_IMM: code = emit_subx_imm (code, dreg, sreg1, imm); break; case OP_IAND_IMM: code = emit_andw_imm (code, dreg, sreg1, imm); break; case OP_LAND_IMM: case OP_AND_IMM: code = emit_andx_imm (code, dreg, sreg1, imm); break; case OP_IOR_IMM: code = emit_orrw_imm (code, dreg, sreg1, imm); break; case OP_LOR_IMM: code = emit_orrx_imm (code, dreg, sreg1, imm); break; case OP_IXOR_IMM: code = emit_eorw_imm (code, dreg, sreg1, imm); break; case OP_LXOR_IMM: code = emit_eorx_imm (code, dreg, sreg1, imm); break; case OP_ICOMPARE_IMM: code = emit_cmpw_imm (code, sreg1, imm); break; case OP_LCOMPARE_IMM: case OP_COMPARE_IMM: if (imm == 0) { arm_cmpx (code, sreg1, ARMREG_RZR); } else { // FIXME: 32 vs 64 bit issues for 0xffffffff code = emit_imm64 (code, ARMREG_LR, imm); arm_cmpx (code, sreg1, ARMREG_LR); } break; case OP_ISHL: arm_lslvw (code, dreg, sreg1, sreg2); break; case OP_LSHL: arm_lslvx (code, dreg, sreg1, sreg2); break; case OP_ISHR: arm_asrvw (code, dreg, sreg1, sreg2); break; case OP_LSHR: arm_asrvx (code, dreg, sreg1, sreg2); break; case OP_ISHR_UN: arm_lsrvw (code, dreg, sreg1, sreg2); break; case OP_LSHR_UN: arm_lsrvx (code, dreg, sreg1, sreg2); break; case OP_ISHL_IMM: if (imm == 0) arm_movx (code, dreg, sreg1); else arm_lslw (code, dreg, sreg1, imm); break; case OP_SHL_IMM: case OP_LSHL_IMM: if (imm == 0) arm_movx (code, dreg, sreg1); else arm_lslx (code, dreg, sreg1, imm); break; case OP_ISHR_IMM: if (imm == 0) arm_movx (code, dreg, sreg1); else arm_asrw (code, dreg, sreg1, imm); break; case OP_LSHR_IMM: case OP_SHR_IMM: if (imm == 0) arm_movx (code, dreg, sreg1); else arm_asrx (code, dreg, sreg1, imm); break; case OP_ISHR_UN_IMM: if (imm == 0) arm_movx (code, dreg, sreg1); else arm_lsrw (code, dreg, sreg1, imm); break; case OP_SHR_UN_IMM: case OP_LSHR_UN_IMM: if (imm == 0) arm_movx (code, dreg, sreg1); else arm_lsrx (code, dreg, sreg1, imm); break; /* 64BIT ALU */ case OP_SEXT_I4: arm_sxtwx (code, dreg, sreg1); break; case OP_ZEXT_I4: /* Clean out the upper word */ arm_movw (code, dreg, sreg1); break; /* MULTIPLY/DIVISION */ case OP_IDIV: case OP_IREM: // FIXME: Optimize this /* Check for zero */ arm_cmpx_imm (code, sreg2, 
0); code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException"); /* Check for INT_MIN/-1 */ code = emit_imm (code, ARMREG_IP0, 0x80000000); arm_cmpx (code, sreg1, ARMREG_IP0); arm_cset (code, ARMCOND_EQ, ARMREG_IP1); code = emit_imm (code, ARMREG_IP0, 0xffffffff); arm_cmpx (code, sreg2, ARMREG_IP0); arm_cset (code, ARMCOND_EQ, ARMREG_IP0); arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1); arm_cmpx_imm (code, ARMREG_IP0, 1); code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "OverflowException"); if (ins->opcode == OP_IREM) { arm_sdivw (code, ARMREG_LR, sreg1, sreg2); arm_msubw (code, dreg, ARMREG_LR, sreg2, sreg1); } else { arm_sdivw (code, dreg, sreg1, sreg2); } break; case OP_IDIV_UN: arm_cmpx_imm (code, sreg2, 0); code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException"); arm_udivw (code, dreg, sreg1, sreg2); break; case OP_IREM_UN: arm_cmpx_imm (code, sreg2, 0); code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException"); arm_udivw (code, ARMREG_LR, sreg1, sreg2); arm_msubw (code, dreg, ARMREG_LR, sreg2, sreg1); break; case OP_LDIV: case OP_LREM: // FIXME: Optimize this /* Check for zero */ arm_cmpx_imm (code, sreg2, 0); code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException"); /* Check for INT64_MIN/-1 */ code = emit_imm64 (code, ARMREG_IP0, 0x8000000000000000); arm_cmpx (code, sreg1, ARMREG_IP0); arm_cset (code, ARMCOND_EQ, ARMREG_IP1); code = emit_imm64 (code, ARMREG_IP0, 0xffffffffffffffff); arm_cmpx (code, sreg2, ARMREG_IP0); arm_cset (code, ARMCOND_EQ, ARMREG_IP0); arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1); arm_cmpx_imm (code, ARMREG_IP0, 1); /* 64 bit uses OverflowException */ code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "OverflowException"); if (ins->opcode == OP_LREM) { arm_sdivx (code, ARMREG_LR, sreg1, sreg2); arm_msubx (code, dreg, ARMREG_LR, sreg2, sreg1); } else { arm_sdivx (code, dreg, sreg1, sreg2); } break; case OP_LDIV_UN: arm_cmpx_imm (code, sreg2, 0); code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException"); arm_udivx (code, dreg, sreg1, sreg2); break; case OP_LREM_UN: arm_cmpx_imm (code, sreg2, 0); code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException"); arm_udivx (code, ARMREG_LR, sreg1, sreg2); arm_msubx (code, dreg, ARMREG_LR, sreg2, sreg1); break; case OP_IMUL: arm_mulw (code, dreg, sreg1, sreg2); break; case OP_LMUL: arm_mulx (code, dreg, sreg1, sreg2); break; case OP_IMUL_IMM: code = emit_imm (code, ARMREG_LR, imm); arm_mulw (code, dreg, sreg1, ARMREG_LR); break; case OP_MUL_IMM: case OP_LMUL_IMM: code = emit_imm (code, ARMREG_LR, imm); arm_mulx (code, dreg, sreg1, ARMREG_LR); break; /* CONVERSIONS */ case OP_ICONV_TO_I1: case OP_LCONV_TO_I1: arm_sxtbx (code, dreg, sreg1); break; case OP_ICONV_TO_I2: case OP_LCONV_TO_I2: arm_sxthx (code, dreg, sreg1); break; case OP_ICONV_TO_U1: case OP_LCONV_TO_U1: arm_uxtbw (code, dreg, sreg1); break; case OP_ICONV_TO_U2: case OP_LCONV_TO_U2: arm_uxthw (code, dreg, sreg1); break; /* CSET */ case OP_CEQ: case OP_ICEQ: case OP_LCEQ: case OP_CLT: case OP_ICLT: case OP_LCLT: case OP_CGT: case OP_ICGT: case OP_LCGT: case OP_CLT_UN: case OP_ICLT_UN: case OP_LCLT_UN: case OP_CGT_UN: case OP_ICGT_UN: case OP_LCGT_UN: case OP_ICNEQ: case OP_ICGE: case OP_ICLE: case OP_ICGE_UN: case OP_ICLE_UN: { int cond; cond = opcode_to_armcond (ins->opcode); arm_cset (code, cond, dreg); break; } case OP_FCEQ: case OP_FCLT: case OP_FCLT_UN: case OP_FCGT: case OP_FCGT_UN: case OP_FCNEQ: case OP_FCLE: case OP_FCGE: { int cond; cond 
= opcode_to_armcond (ins->opcode); arm_fcmpd (code, sreg1, sreg2); arm_cset (code, cond, dreg); break; } /* MEMORY */ case OP_LOADI1_MEMBASE: code = emit_ldrsbx (code, dreg, ins->inst_basereg, ins->inst_offset); break; case OP_LOADU1_MEMBASE: code = emit_ldrb (code, dreg, ins->inst_basereg, ins->inst_offset); break; case OP_LOADI2_MEMBASE: code = emit_ldrshx (code, dreg, ins->inst_basereg, ins->inst_offset); break; case OP_LOADU2_MEMBASE: code = emit_ldrh (code, dreg, ins->inst_basereg, ins->inst_offset); break; case OP_LOADI4_MEMBASE: code = emit_ldrswx (code, dreg, ins->inst_basereg, ins->inst_offset); break; case OP_LOADU4_MEMBASE: code = emit_ldrw (code, dreg, ins->inst_basereg, ins->inst_offset); break; case OP_LOAD_MEMBASE: case OP_LOADI8_MEMBASE: code = emit_ldrx (code, dreg, ins->inst_basereg, ins->inst_offset); break; case OP_STOREI1_MEMBASE_IMM: case OP_STOREI2_MEMBASE_IMM: case OP_STOREI4_MEMBASE_IMM: case OP_STORE_MEMBASE_IMM: case OP_STOREI8_MEMBASE_IMM: { int immreg; if (imm != 0) { code = emit_imm (code, ARMREG_LR, imm); immreg = ARMREG_LR; } else { immreg = ARMREG_RZR; } switch (ins->opcode) { case OP_STOREI1_MEMBASE_IMM: code = emit_strb (code, immreg, ins->inst_destbasereg, ins->inst_offset); break; case OP_STOREI2_MEMBASE_IMM: code = emit_strh (code, immreg, ins->inst_destbasereg, ins->inst_offset); break; case OP_STOREI4_MEMBASE_IMM: code = emit_strw (code, immreg, ins->inst_destbasereg, ins->inst_offset); break; case OP_STORE_MEMBASE_IMM: case OP_STOREI8_MEMBASE_IMM: code = emit_strx (code, immreg, ins->inst_destbasereg, ins->inst_offset); break; default: g_assert_not_reached (); break; } break; } case OP_STOREI1_MEMBASE_REG: code = emit_strb (code, sreg1, ins->inst_destbasereg, ins->inst_offset); break; case OP_STOREI2_MEMBASE_REG: code = emit_strh (code, sreg1, ins->inst_destbasereg, ins->inst_offset); break; case OP_STOREI4_MEMBASE_REG: code = emit_strw (code, sreg1, ins->inst_destbasereg, ins->inst_offset); break; case OP_STORE_MEMBASE_REG: case OP_STOREI8_MEMBASE_REG: code = emit_strx (code, sreg1, ins->inst_destbasereg, ins->inst_offset); break; case OP_TLS_GET: code = emit_tls_get (code, dreg, ins->inst_offset); break; case OP_TLS_SET: code = emit_tls_set (code, sreg1, ins->inst_offset); break; /* Atomic */ case OP_MEMORY_BARRIER: arm_dmb (code, ARM_DMB_ISH); break; case OP_ATOMIC_ADD_I4: { guint8 *buf [16]; buf [0] = code; arm_ldxrw (code, ARMREG_IP0, sreg1); arm_addx (code, ARMREG_IP0, ARMREG_IP0, sreg2); arm_stlxrw (code, ARMREG_IP1, ARMREG_IP0, sreg1); arm_cbnzw (code, ARMREG_IP1, buf [0]); arm_dmb (code, ARM_DMB_ISH); arm_movx (code, dreg, ARMREG_IP0); break; } case OP_ATOMIC_ADD_I8: { guint8 *buf [16]; buf [0] = code; arm_ldxrx (code, ARMREG_IP0, sreg1); arm_addx (code, ARMREG_IP0, ARMREG_IP0, sreg2); arm_stlxrx (code, ARMREG_IP1, ARMREG_IP0, sreg1); arm_cbnzx (code, ARMREG_IP1, buf [0]); arm_dmb (code, ARM_DMB_ISH); arm_movx (code, dreg, ARMREG_IP0); break; } case OP_ATOMIC_EXCHANGE_I4: { guint8 *buf [16]; buf [0] = code; arm_ldxrw (code, ARMREG_IP0, sreg1); arm_stlxrw (code, ARMREG_IP1, sreg2, sreg1); arm_cbnzw (code, ARMREG_IP1, buf [0]); arm_dmb (code, ARM_DMB_ISH); arm_movx (code, dreg, ARMREG_IP0); break; } case OP_ATOMIC_EXCHANGE_I8: { guint8 *buf [16]; buf [0] = code; arm_ldxrx (code, ARMREG_IP0, sreg1); arm_stlxrx (code, ARMREG_IP1, sreg2, sreg1); arm_cbnzw (code, ARMREG_IP1, buf [0]); arm_dmb (code, ARM_DMB_ISH); arm_movx (code, dreg, ARMREG_IP0); break; } case OP_ATOMIC_CAS_I4: { guint8 *buf [16]; /* sreg2 is the value, sreg3 is the comparand */ 
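			/*
			 * (Illustrative note) This is the usual load-exclusive /
			 * store-exclusive compare-and-swap loop. In C-like pseudocode:
			 *
			 *     retry:
			 *         old = __ldxr (addr);          // marks addr exclusive
			 *         if (old != comparand)
			 *             goto done;                // the bcc NE below
			 *         if (__stlxr (value, addr))    // nonzero on failure
			 *             goto retry;               // the cbnz below
			 *     done:
			 *         dmb ish;                      // full barrier
			 *
			 * __ldxr/__stlxr are just shorthand here for the ldxrw/stlxrw
			 * instructions being emitted, not actual helpers in this file.
			 */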
buf [0] = code; arm_ldxrw (code, ARMREG_IP0, sreg1); arm_cmpw (code, ARMREG_IP0, ins->sreg3); buf [1] = code; arm_bcc (code, ARMCOND_NE, 0); arm_stlxrw (code, ARMREG_IP1, sreg2, sreg1); arm_cbnzw (code, ARMREG_IP1, buf [0]); arm_patch_rel (buf [1], code, MONO_R_ARM64_BCC); arm_dmb (code, ARM_DMB_ISH); arm_movx (code, dreg, ARMREG_IP0); break; } case OP_ATOMIC_CAS_I8: { guint8 *buf [16]; buf [0] = code; arm_ldxrx (code, ARMREG_IP0, sreg1); arm_cmpx (code, ARMREG_IP0, ins->sreg3); buf [1] = code; arm_bcc (code, ARMCOND_NE, 0); arm_stlxrx (code, ARMREG_IP1, sreg2, sreg1); arm_cbnzw (code, ARMREG_IP1, buf [0]); arm_patch_rel (buf [1], code, MONO_R_ARM64_BCC); arm_dmb (code, ARM_DMB_ISH); arm_movx (code, dreg, ARMREG_IP0); break; } case OP_ATOMIC_LOAD_I1: { code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) arm_dmb (code, ARM_DMB_ISH); arm_ldarb (code, ins->dreg, ARMREG_LR); arm_sxtbx (code, ins->dreg, ins->dreg); break; } case OP_ATOMIC_LOAD_U1: { code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) arm_dmb (code, ARM_DMB_ISH); arm_ldarb (code, ins->dreg, ARMREG_LR); arm_uxtbx (code, ins->dreg, ins->dreg); break; } case OP_ATOMIC_LOAD_I2: { code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) arm_dmb (code, ARM_DMB_ISH); arm_ldarh (code, ins->dreg, ARMREG_LR); arm_sxthx (code, ins->dreg, ins->dreg); break; } case OP_ATOMIC_LOAD_U2: { code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) arm_dmb (code, ARM_DMB_ISH); arm_ldarh (code, ins->dreg, ARMREG_LR); arm_uxthx (code, ins->dreg, ins->dreg); break; } case OP_ATOMIC_LOAD_I4: { code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) arm_dmb (code, ARM_DMB_ISH); arm_ldarw (code, ins->dreg, ARMREG_LR); arm_sxtwx (code, ins->dreg, ins->dreg); break; } case OP_ATOMIC_LOAD_U4: { code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) arm_dmb (code, ARM_DMB_ISH); arm_ldarw (code, ins->dreg, ARMREG_LR); arm_movw (code, ins->dreg, ins->dreg); /* Clear upper half of the register. 
*/ break; } case OP_ATOMIC_LOAD_I8: case OP_ATOMIC_LOAD_U8: { code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) arm_dmb (code, ARM_DMB_ISH); arm_ldarx (code, ins->dreg, ARMREG_LR); break; } case OP_ATOMIC_LOAD_R4: { code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) arm_dmb (code, ARM_DMB_ISH); if (cfg->r4fp) { arm_ldarw (code, ARMREG_LR, ARMREG_LR); arm_fmov_rx_to_double (code, ins->dreg, ARMREG_LR); } else { arm_ldarw (code, ARMREG_LR, ARMREG_LR); arm_fmov_rx_to_double (code, FP_TEMP_REG, ARMREG_LR); arm_fcvt_sd (code, ins->dreg, FP_TEMP_REG); } break; } case OP_ATOMIC_LOAD_R8: { code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) arm_dmb (code, ARM_DMB_ISH); arm_ldarx (code, ARMREG_LR, ARMREG_LR); arm_fmov_rx_to_double (code, ins->dreg, ARMREG_LR); break; } case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_U1: { code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset); arm_stlrb (code, ARMREG_LR, ins->sreg1); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) arm_dmb (code, ARM_DMB_ISH); break; } case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_U2: { code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset); arm_stlrh (code, ARMREG_LR, ins->sreg1); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) arm_dmb (code, ARM_DMB_ISH); break; } case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U4: { code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset); arm_stlrw (code, ARMREG_LR, ins->sreg1); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) arm_dmb (code, ARM_DMB_ISH); break; } case OP_ATOMIC_STORE_I8: case OP_ATOMIC_STORE_U8: { code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset); arm_stlrx (code, ARMREG_LR, ins->sreg1); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) arm_dmb (code, ARM_DMB_ISH); break; } case OP_ATOMIC_STORE_R4: { code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset); if (cfg->r4fp) { arm_fmov_double_to_rx (code, ARMREG_IP0, ins->sreg1); arm_stlrw (code, ARMREG_LR, ARMREG_IP0); } else { arm_fcvt_ds (code, FP_TEMP_REG, ins->sreg1); arm_fmov_double_to_rx (code, ARMREG_IP0, FP_TEMP_REG); arm_stlrw (code, ARMREG_LR, ARMREG_IP0); } if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) arm_dmb (code, ARM_DMB_ISH); break; } case OP_ATOMIC_STORE_R8: { code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset); arm_fmov_double_to_rx (code, ARMREG_IP0, ins->sreg1); arm_stlrx (code, ARMREG_LR, ARMREG_IP0); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) arm_dmb (code, ARM_DMB_ISH); break; } /* FP */ case OP_R8CONST: { guint64 imm = *(guint64*)ins->inst_p0; if (imm == 0) { arm_fmov_rx_to_double (code, dreg, ARMREG_RZR); } else { code = emit_imm64 (code, ARMREG_LR, imm); arm_fmov_rx_to_double (code, ins->dreg, ARMREG_LR); } break; } case OP_R4CONST: { guint64 imm = *(guint32*)ins->inst_p0; code = emit_imm64 (code, ARMREG_LR, imm); if (cfg->r4fp) { arm_fmov_rx_to_double (code, dreg, ARMREG_LR); } else { arm_fmov_rx_to_double (code, FP_TEMP_REG, ARMREG_LR); arm_fcvt_sd (code, dreg, FP_TEMP_REG); } break; } case OP_LOADR8_MEMBASE: code = emit_ldrfpx (code, dreg, ins->inst_basereg, ins->inst_offset); 
break; case OP_LOADR4_MEMBASE: if (cfg->r4fp) { code = emit_ldrfpw (code, dreg, ins->inst_basereg, ins->inst_offset); } else { code = emit_ldrfpw (code, FP_TEMP_REG, ins->inst_basereg, ins->inst_offset); arm_fcvt_sd (code, dreg, FP_TEMP_REG); } break; case OP_STORER8_MEMBASE_REG: code = emit_strfpx (code, sreg1, ins->inst_destbasereg, ins->inst_offset); break; case OP_STORER4_MEMBASE_REG: if (cfg->r4fp) { code = emit_strfpw (code, sreg1, ins->inst_destbasereg, ins->inst_offset); } else { arm_fcvt_ds (code, FP_TEMP_REG, sreg1); code = emit_strfpw (code, FP_TEMP_REG, ins->inst_destbasereg, ins->inst_offset); } break; case OP_FMOVE: if (dreg != sreg1) arm_fmovd (code, dreg, sreg1); break; case OP_RMOVE: if (dreg != sreg1) arm_fmovs (code, dreg, sreg1); break; case OP_MOVE_F_TO_I4: if (cfg->r4fp) { arm_fmov_double_to_rx (code, ins->dreg, ins->sreg1); } else { arm_fcvt_ds (code, ins->dreg, ins->sreg1); arm_fmov_double_to_rx (code, ins->dreg, ins->dreg); } break; case OP_MOVE_I4_TO_F: if (cfg->r4fp) { arm_fmov_rx_to_double (code, ins->dreg, ins->sreg1); } else { arm_fmov_rx_to_double (code, ins->dreg, ins->sreg1); arm_fcvt_sd (code, ins->dreg, ins->dreg); } break; case OP_MOVE_F_TO_I8: arm_fmov_double_to_rx (code, ins->dreg, ins->sreg1); break; case OP_MOVE_I8_TO_F: arm_fmov_rx_to_double (code, ins->dreg, ins->sreg1); break; case OP_FCOMPARE: arm_fcmpd (code, sreg1, sreg2); break; case OP_RCOMPARE: arm_fcmps (code, sreg1, sreg2); break; case OP_FCONV_TO_I1: arm_fcvtzs_dx (code, dreg, sreg1); arm_sxtbx (code, dreg, dreg); break; case OP_FCONV_TO_U1: arm_fcvtzu_dx (code, dreg, sreg1); arm_uxtbw (code, dreg, dreg); break; case OP_FCONV_TO_I2: arm_fcvtzs_dx (code, dreg, sreg1); arm_sxthx (code, dreg, dreg); break; case OP_FCONV_TO_U2: arm_fcvtzu_dx (code, dreg, sreg1); arm_uxthw (code, dreg, dreg); break; case OP_FCONV_TO_I4: arm_fcvtzs_dx (code, dreg, sreg1); arm_sxtwx (code, dreg, dreg); break; case OP_FCONV_TO_U4: arm_fcvtzu_dx (code, dreg, sreg1); break; case OP_FCONV_TO_I8: case OP_FCONV_TO_I: arm_fcvtzs_dx (code, dreg, sreg1); break; case OP_FCONV_TO_U8: arm_fcvtzu_dx (code, dreg, sreg1); break; case OP_FCONV_TO_R4: if (cfg->r4fp) { arm_fcvt_ds (code, dreg, sreg1); } else { arm_fcvt_ds (code, FP_TEMP_REG, sreg1); arm_fcvt_sd (code, dreg, FP_TEMP_REG); } break; case OP_ICONV_TO_R4: if (cfg->r4fp) { arm_scvtf_rw_to_s (code, dreg, sreg1); } else { arm_scvtf_rw_to_s (code, FP_TEMP_REG, sreg1); arm_fcvt_sd (code, dreg, FP_TEMP_REG); } break; case OP_LCONV_TO_R4: if (cfg->r4fp) { arm_scvtf_rx_to_s (code, dreg, sreg1); } else { arm_scvtf_rx_to_s (code, FP_TEMP_REG, sreg1); arm_fcvt_sd (code, dreg, FP_TEMP_REG); } break; case OP_ICONV_TO_R8: arm_scvtf_rw_to_d (code, dreg, sreg1); break; case OP_LCONV_TO_R8: arm_scvtf_rx_to_d (code, dreg, sreg1); break; case OP_ICONV_TO_R_UN: arm_ucvtf_rw_to_d (code, dreg, sreg1); break; case OP_LCONV_TO_R_UN: arm_ucvtf_rx_to_d (code, dreg, sreg1); break; case OP_FADD: arm_fadd_d (code, dreg, sreg1, sreg2); break; case OP_FSUB: arm_fsub_d (code, dreg, sreg1, sreg2); break; case OP_FMUL: arm_fmul_d (code, dreg, sreg1, sreg2); break; case OP_FDIV: arm_fdiv_d (code, dreg, sreg1, sreg2); break; case OP_FREM: /* Emulated */ g_assert_not_reached (); break; case OP_FNEG: arm_fneg_d (code, dreg, sreg1); break; case OP_ARM_SETFREG_R4: arm_fcvt_ds (code, dreg, sreg1); break; case OP_CKFINITE: /* Check for infinity */ code = emit_imm64 (code, ARMREG_LR, 0x7fefffffffffffffLL); arm_fmov_rx_to_double (code, FP_TEMP_REG, ARMREG_LR); arm_fabs_d (code, FP_TEMP_REG2, sreg1); arm_fcmpd 
(code, FP_TEMP_REG2, FP_TEMP_REG); code = emit_cond_exc (cfg, code, OP_COND_EXC_GT, "ArithmeticException"); /* Check for nans */ arm_fcmpd (code, FP_TEMP_REG2, FP_TEMP_REG2); code = emit_cond_exc (cfg, code, OP_COND_EXC_OV, "ArithmeticException"); arm_fmovd (code, dreg, sreg1); break; /* R4 */ case OP_RADD: arm_fadd_s (code, dreg, sreg1, sreg2); break; case OP_RSUB: arm_fsub_s (code, dreg, sreg1, sreg2); break; case OP_RMUL: arm_fmul_s (code, dreg, sreg1, sreg2); break; case OP_RDIV: arm_fdiv_s (code, dreg, sreg1, sreg2); break; case OP_RNEG: arm_fneg_s (code, dreg, sreg1); break; case OP_RCONV_TO_I1: arm_fcvtzs_sx (code, dreg, sreg1); arm_sxtbx (code, dreg, dreg); break; case OP_RCONV_TO_U1: arm_fcvtzu_sx (code, dreg, sreg1); arm_uxtbw (code, dreg, dreg); break; case OP_RCONV_TO_I2: arm_fcvtzs_sx (code, dreg, sreg1); arm_sxthx (code, dreg, dreg); break; case OP_RCONV_TO_U2: arm_fcvtzu_sx (code, dreg, sreg1); arm_uxthw (code, dreg, dreg); break; case OP_RCONV_TO_I4: arm_fcvtzs_sx (code, dreg, sreg1); arm_sxtwx (code, dreg, dreg); break; case OP_RCONV_TO_U4: arm_fcvtzu_sx (code, dreg, sreg1); break; case OP_RCONV_TO_I8: case OP_RCONV_TO_I: arm_fcvtzs_sx (code, dreg, sreg1); break; case OP_RCONV_TO_U8: arm_fcvtzu_sx (code, dreg, sreg1); break; case OP_RCONV_TO_R8: arm_fcvt_sd (code, dreg, sreg1); break; case OP_RCONV_TO_R4: if (dreg != sreg1) arm_fmovs (code, dreg, sreg1); break; case OP_RCEQ: case OP_RCLT: case OP_RCLT_UN: case OP_RCGT: case OP_RCGT_UN: case OP_RCNEQ: case OP_RCLE: case OP_RCGE: { int cond; cond = opcode_to_armcond (ins->opcode); arm_fcmps (code, sreg1, sreg2); arm_cset (code, cond, dreg); break; } /* CALLS */ case OP_VOIDCALL: case OP_CALL: case OP_LCALL: case OP_FCALL: case OP_RCALL: case OP_VCALL2: { call = (MonoCallInst*)ins; const MonoJumpInfoTarget patch = mono_call_to_patch (call); code = emit_call (cfg, code, patch.type, patch.target); code = emit_move_return_value (cfg, code, ins); break; } case OP_VOIDCALL_REG: case OP_CALL_REG: case OP_LCALL_REG: case OP_FCALL_REG: case OP_RCALL_REG: case OP_VCALL2_REG: code = mono_arm_emit_blrx (code, sreg1); code = emit_move_return_value (cfg, code, ins); break; case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_FCALL_MEMBASE: case OP_RCALL_MEMBASE: case OP_VCALL2_MEMBASE: code = emit_ldrx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset); code = mono_arm_emit_blrx (code, ARMREG_IP0); code = emit_move_return_value (cfg, code, ins); break; case OP_TAILCALL_PARAMETER: // This opcode helps compute sizes, i.e. // of the subsequent OP_TAILCALL, but contributes no code. 
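// Its ins_get_size () value is what the OP_TAILCALL case below uses to
// grow max_len for the stack-argument copy loop.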
g_assert (ins->next); break; case OP_TAILCALL: case OP_TAILCALL_MEMBASE: case OP_TAILCALL_REG: { int branch_reg = ARMREG_IP0; guint64 free_reg = 1 << ARMREG_IP1; call = (MonoCallInst*)ins; g_assert (!cfg->method->save_lmf); max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER); while (G_UNLIKELY (offset + max_len > cfg->code_size)) { cfg->code_size *= 2; cfg->native_code = (unsigned char *)mono_realloc_native_code (cfg); code = cfg->native_code + offset; cfg->stat_code_reallocs++; } switch (ins->opcode) { case OP_TAILCALL: free_reg = (1 << ARMREG_IP0) | (1 << ARMREG_IP1); break; case OP_TAILCALL_REG: g_assert (sreg1 != -1); g_assert (sreg1 != ARMREG_IP0); g_assert (sreg1 != ARMREG_IP1); g_assert (sreg1 != ARMREG_LR); g_assert (sreg1 != ARMREG_SP); g_assert (sreg1 != ARMREG_R28); if ((sreg1 << 1) & MONO_ARCH_CALLEE_SAVED_REGS) { arm_movx (code, branch_reg, sreg1); } else { free_reg = (1 << ARMREG_IP0) | (1 << ARMREG_IP1); branch_reg = sreg1; } break; case OP_TAILCALL_MEMBASE: g_assert (ins->inst_basereg != -1); g_assert (ins->inst_basereg != ARMREG_IP0); g_assert (ins->inst_basereg != ARMREG_IP1); g_assert (ins->inst_basereg != ARMREG_LR); g_assert (ins->inst_basereg != ARMREG_SP); g_assert (ins->inst_basereg != ARMREG_R28); code = emit_ldrx (code, branch_reg, ins->inst_basereg, ins->inst_offset); break; default: g_assert_not_reached (); } // Copy stack arguments. // FIXME a fixed size memcpy is desirable here, // at least for larger values of stack_usage. for (int i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) { code = emit_ldrx (code, ARMREG_LR, ARMREG_SP, i); code = emit_strx (code, ARMREG_LR, ARMREG_R28, i); } /* Restore registers */ code = emit_load_regset (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset); /* Destroy frame */ code = mono_arm_emit_destroy_frame (code, cfg->stack_offset, free_reg); if (enable_ptrauth) /* There is no retab to authenticate lr */ arm_autibsp (code); switch (ins->opcode) { case OP_TAILCALL: if (cfg->compile_aot) { /* This is not a PLT patch */ code = emit_aotconst (cfg, code, branch_reg, MONO_PATCH_INFO_METHOD_JUMP, call->method); } else { mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method, MONO_R_ARM64_B); arm_b (code, code); cfg->thunk_area += THUNK_SIZE; break; } // fallthrough case OP_TAILCALL_MEMBASE: case OP_TAILCALL_REG: code = mono_arm_emit_brx (code, branch_reg); break; default: g_assert_not_reached (); } ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; break; } case OP_ARGLIST: g_assert (cfg->arch.cinfo); code = emit_addx_imm (code, ARMREG_IP0, cfg->arch.args_reg, cfg->arch.cinfo->sig_cookie.offset); arm_strx (code, ARMREG_IP0, sreg1, 0); break; case OP_DYN_CALL: { MonoInst *var = cfg->dyn_call_var; guint8 *labels [16]; int i; /* * sreg1 points to a DynCallArgs structure initialized by mono_arch_start_dyn_call (). * sreg2 is the function to call. 
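 * The code below saves the args buffer, loads the fp argument registers
 * (d0-d7) when n_fpargs is non-zero, carves out a callee area on the stack,
 * copies the stack arguments, loads r0-r8 from DynCallArgs->regs, makes the
 * call and stores the integer and fp results back into the structure.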
*/ g_assert (var->opcode == OP_REGOFFSET); arm_movx (code, ARMREG_LR, sreg1); arm_movx (code, ARMREG_IP1, sreg2); /* Save args buffer */ code = emit_strx (code, ARMREG_LR, var->inst_basereg, var->inst_offset); /* Set fp argument regs */ code = emit_ldrw (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_fpargs)); arm_cmpw (code, ARMREG_R0, ARMREG_RZR); labels [0] = code; arm_bcc (code, ARMCOND_EQ, 0); for (i = 0; i < 8; ++i) code = emit_ldrfpx (code, ARMREG_D0 + i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * 8)); arm_patch_rel (labels [0], code, MONO_R_ARM64_BCC); /* Allocate callee area */ code = emit_ldrx (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs)); arm_lslw (code, ARMREG_R0, ARMREG_R0, 3); arm_movspx (code, ARMREG_R1, ARMREG_SP); arm_subx (code, ARMREG_R1, ARMREG_R1, ARMREG_R0); arm_movspx (code, ARMREG_SP, ARMREG_R1); /* Set stack args */ /* R1 = limit */ code = emit_ldrx (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs)); /* R2 = pointer into 'regs' */ code = emit_imm (code, ARMREG_R2, MONO_STRUCT_OFFSET (DynCallArgs, regs) + ((PARAM_REGS + 1) * sizeof (target_mgreg_t))); arm_addx (code, ARMREG_R2, ARMREG_LR, ARMREG_R2); /* R3 = pointer to stack */ arm_movspx (code, ARMREG_R3, ARMREG_SP); labels [0] = code; arm_b (code, code); labels [1] = code; code = emit_ldrx (code, ARMREG_R5, ARMREG_R2, 0); code = emit_strx (code, ARMREG_R5, ARMREG_R3, 0); code = emit_addx_imm (code, ARMREG_R2, ARMREG_R2, sizeof (target_mgreg_t)); code = emit_addx_imm (code, ARMREG_R3, ARMREG_R3, sizeof (target_mgreg_t)); code = emit_subx_imm (code, ARMREG_R1, ARMREG_R1, 1); arm_patch_rel (labels [0], code, MONO_R_ARM64_B); arm_cmpw (code, ARMREG_R1, ARMREG_RZR); arm_bcc (code, ARMCOND_GT, labels [1]); /* Set argument registers + r8 */ code = mono_arm_emit_load_regarray (code, 0x1ff, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs)); /* Make the call */ code = mono_arm_emit_blrx (code, ARMREG_IP1); /* Save result */ code = emit_ldrx (code, ARMREG_LR, var->inst_basereg, var->inst_offset); arm_strx (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, res)); arm_strx (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, res2)); /* Save fp result */ code = emit_ldrw (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_fpret)); arm_cmpw (code, ARMREG_R0, ARMREG_RZR); labels [1] = code; arm_bcc (code, ARMCOND_EQ, 0); for (i = 0; i < 8; ++i) code = emit_strfpx (code, ARMREG_D0 + i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * 8)); arm_patch_rel (labels [1], code, MONO_R_ARM64_BCC); break; } case OP_GENERIC_CLASS_INIT: { int byte_offset; guint8 *jump; byte_offset = MONO_STRUCT_OFFSET (MonoVTable, initialized); /* Load vtable->initialized */ arm_ldrsbx (code, ARMREG_IP0, sreg1, byte_offset); jump = code; arm_cbnzx (code, ARMREG_IP0, 0); /* Slowpath */ g_assert (sreg1 == ARMREG_R0); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init)); mono_arm_patch (jump, code, MONO_R_ARM64_CBZ); break; } case OP_CHECK_THIS: arm_ldrb (code, ARMREG_LR, sreg1, 0); break; case OP_NOT_NULL: case OP_NOT_REACHED: case OP_DUMMY_USE: case OP_DUMMY_ICONST: case OP_DUMMY_I8CONST: case OP_DUMMY_R8CONST: case OP_DUMMY_R4CONST: break; case OP_IL_SEQ_POINT: mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); break; /* EH */ case OP_COND_EXC_C: case OP_COND_EXC_IC: case OP_COND_EXC_OV: case OP_COND_EXC_IOV: case OP_COND_EXC_NC: case OP_COND_EXC_INC: case 
OP_COND_EXC_NO: case OP_COND_EXC_INO: case OP_COND_EXC_EQ: case OP_COND_EXC_IEQ: case OP_COND_EXC_NE_UN: case OP_COND_EXC_INE_UN: case OP_COND_EXC_ILT: case OP_COND_EXC_LT: case OP_COND_EXC_ILT_UN: case OP_COND_EXC_LT_UN: case OP_COND_EXC_IGT: case OP_COND_EXC_GT: case OP_COND_EXC_IGT_UN: case OP_COND_EXC_GT_UN: case OP_COND_EXC_IGE: case OP_COND_EXC_GE: case OP_COND_EXC_IGE_UN: case OP_COND_EXC_GE_UN: case OP_COND_EXC_ILE: case OP_COND_EXC_LE: case OP_COND_EXC_ILE_UN: case OP_COND_EXC_LE_UN: code = emit_cond_exc (cfg, code, ins->opcode, (const char*)ins->inst_p1); break; case OP_THROW: if (sreg1 != ARMREG_R0) arm_movx (code, ARMREG_R0, sreg1); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception)); break; case OP_RETHROW: if (sreg1 != ARMREG_R0) arm_movx (code, ARMREG_R0, sreg1); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception)); break; case OP_CALL_HANDLER: mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb, MONO_R_ARM64_BL); arm_bl (code, 0); cfg->thunk_area += THUNK_SIZE; for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev) mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb); break; case OP_START_HANDLER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); /* Save caller address */ code = emit_strx (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset); /* * Reserve a param area, see test_0_finally_param_area (). * This is needed because the param area is not set up when * we are called from EH code. */ if (cfg->param_area) code = emit_subx_sp_imm (code, cfg->param_area); break; } case OP_ENDFINALLY: case OP_ENDFILTER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); if (cfg->param_area) code = emit_addx_sp_imm (code, cfg->param_area); if (ins->opcode == OP_ENDFILTER && sreg1 != ARMREG_R0) arm_movx (code, ARMREG_R0, sreg1); /* Return to either after the branch in OP_CALL_HANDLER, or to the EH code */ code = emit_ldrx (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset); arm_brx (code, ARMREG_LR); break; } case OP_GET_EX_OBJ: if (ins->dreg != ARMREG_R0) arm_movx (code, ins->dreg, ARMREG_R0); break; case OP_LIVERANGE_START: { if (cfg->verbose_level > 1) printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code; break; } case OP_LIVERANGE_END: { if (cfg->verbose_level > 1) printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code; break; } case OP_GC_SAFE_POINT: { guint8 *buf [1]; arm_ldrx (code, ARMREG_IP1, ins->sreg1, 0); /* Call it if it is non-null */ buf [0] = code; arm_cbzx (code, ARMREG_IP1, 0); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll)); mono_arm_patch (buf [0], code, MONO_R_ARM64_CBZ); break; } case OP_FILL_PROF_CALL_CTX: for (int i = 0; i < MONO_MAX_IREGS; i++) if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) || i == ARMREG_SP || i == ARMREG_FP) arm_strx (code, i, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, regs) + i * sizeof (target_mgreg_t)); break; default: g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__); g_assert_not_reached (); } if ((cfg->opt & MONO_OPT_BRANCH) && ((code - 
cfg->native_code - offset) > max_len)) { g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)", mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset); g_assert_not_reached (); } } set_code_cursor (cfg, code); /* * If the compiled code size is larger than the bcc displacement (19 bits signed), * insert branch islands between/inside basic blocks. */ if (cfg->arch.cond_branch_islands) code = emit_branch_island (cfg, code, start_offset); } static guint8* emit_move_args (MonoCompile *cfg, guint8 *code) { MonoInst *ins; CallInfo *cinfo; ArgInfo *ainfo; int i, part; MonoMethodSignature *sig = mono_method_signature_internal (cfg->method); cinfo = cfg->arch.cinfo; g_assert (cinfo); for (i = 0; i < cinfo->nargs; ++i) { ainfo = cinfo->args + i; ins = cfg->args [i]; if (ins->opcode == OP_REGVAR) { switch (ainfo->storage) { case ArgInIReg: arm_movx (code, ins->dreg, ainfo->reg); if (i == 0 && sig->hasthis) { mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code); mono_add_var_location (cfg, ins, TRUE, ins->dreg, 0, code - cfg->native_code, 0); } break; case ArgOnStack: switch (ainfo->slot_size) { case 1: if (ainfo->sign) code = emit_ldrsbx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset); else code = emit_ldrb (code, ins->dreg, cfg->arch.args_reg, ainfo->offset); break; case 2: if (ainfo->sign) code = emit_ldrshx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset); else code = emit_ldrh (code, ins->dreg, cfg->arch.args_reg, ainfo->offset); break; case 4: if (ainfo->sign) code = emit_ldrswx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset); else code = emit_ldrw (code, ins->dreg, cfg->arch.args_reg, ainfo->offset); break; default: code = emit_ldrx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset); break; } break; default: g_assert_not_reached (); break; } } else { if (ainfo->storage != ArgVtypeByRef && ainfo->storage != ArgVtypeByRefOnStack) g_assert (ins->opcode == OP_REGOFFSET); switch (ainfo->storage) { case ArgInIReg: /* Stack slots for arguments have size 8 */ code = emit_strx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset); if (i == 0 && sig->hasthis) { mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code); mono_add_var_location (cfg, ins, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0); } break; case ArgInFReg: code = emit_strfpx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset); break; case ArgInFRegR4: code = emit_strfpw (code, ainfo->reg, ins->inst_basereg, ins->inst_offset); break; case ArgOnStack: case ArgOnStackR4: case ArgOnStackR8: case ArgVtypeByRefOnStack: case ArgVtypeOnStack: break; case ArgVtypeByRef: { MonoInst *addr_arg = ins->inst_left; if (ainfo->gsharedvt) { g_assert (ins->opcode == OP_GSHAREDVT_ARG_REGOFFSET); arm_strx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset); } else { g_assert (ins->opcode == OP_VTARG_ADDR); g_assert (addr_arg->opcode == OP_REGOFFSET); arm_strx (code, ainfo->reg, addr_arg->inst_basereg, addr_arg->inst_offset); } break; } case ArgVtypeInIRegs: for (part = 0; part < ainfo->nregs; part ++) { code = emit_strx (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + (part * 8)); } break; case ArgHFA: for (part = 0; part < ainfo->nregs; part ++) { if (ainfo->esize == 4) code = emit_strfpw (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + ainfo->foffsets [part]); else code = emit_strfpx (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + ainfo->foffsets [part]); } 
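/* In the ArgHFA case above, each member arrives in its own fp register; foffsets [] gives the member's offset inside the argument's stack slot and esize selects 4 or 8 byte stores. */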
break; default: g_assert_not_reached (); break; } } } return code; } /* * emit_store_regarray: * * Emit code to store the registers in REGS into the appropriate elements of * the register array at BASEREG+OFFSET. */ static WARN_UNUSED_RESULT guint8* emit_store_regarray (guint8 *code, guint64 regs, int basereg, int offset) { int i; for (i = 0; i < 32; ++i) { if (regs & (1 << i)) { if (i + 1 < 32 && (regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) { arm_stpx (code, i, i + 1, basereg, offset + (i * 8)); i++; } else if (i == ARMREG_SP) { arm_movspx (code, ARMREG_IP1, ARMREG_SP); arm_strx (code, ARMREG_IP1, basereg, offset + (i * 8)); } else { arm_strx (code, i, basereg, offset + (i * 8)); } } } return code; } /* * emit_load_regarray: * * Emit code to load the registers in REGS from the appropriate elements of * the register array at BASEREG+OFFSET. */ static WARN_UNUSED_RESULT guint8* emit_load_regarray (guint8 *code, guint64 regs, int basereg, int offset) { int i; for (i = 0; i < 32; ++i) { if (regs & (1 << i)) { if ((regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) { if (offset + (i * 8) < 500) arm_ldpx (code, i, i + 1, basereg, offset + (i * 8)); else { code = emit_ldrx (code, i, basereg, offset + (i * 8)); code = emit_ldrx (code, i + 1, basereg, offset + ((i + 1) * 8)); } i++; } else if (i == ARMREG_SP) { g_assert_not_reached (); } else { code = emit_ldrx (code, i, basereg, offset + (i * 8)); } } } return code; } /* * emit_store_regset: * * Emit code to store the registers in REGS into consecutive memory locations starting * at BASEREG+OFFSET. */ static WARN_UNUSED_RESULT guint8* emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset) { int i, pos; pos = 0; for (i = 0; i < 32; ++i) { if (regs & (1 << i)) { if ((regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) { arm_stpx (code, i, i + 1, basereg, offset + (pos * 8)); i++; pos++; } else if (i == ARMREG_SP) { arm_movspx (code, ARMREG_IP1, ARMREG_SP); arm_strx (code, ARMREG_IP1, basereg, offset + (pos * 8)); } else { arm_strx (code, i, basereg, offset + (pos * 8)); } pos++; } } return code; } /* * emit_load_regset: * * Emit code to load the registers in REGS from consecutive memory locations starting * at BASEREG+OFFSET. 
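 * Consecutive registers are loaded pairwise with a single ldp; SP must not
 * appear in REGS here (it would hit the g_assert_not_reached () below).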
*/ static WARN_UNUSED_RESULT guint8* emit_load_regset (guint8 *code, guint64 regs, int basereg, int offset) { int i, pos; pos = 0; for (i = 0; i < 32; ++i) { if (regs & (1 << i)) { if ((regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) { arm_ldpx (code, i, i + 1, basereg, offset + (pos * 8)); i++; pos++; } else if (i == ARMREG_SP) { g_assert_not_reached (); } else { arm_ldrx (code, i, basereg, offset + (pos * 8)); } pos++; } } return code; } WARN_UNUSED_RESULT guint8* mono_arm_emit_load_regarray (guint8 *code, guint64 regs, int basereg, int offset) { return emit_load_regarray (code, regs, basereg, offset); } WARN_UNUSED_RESULT guint8* mono_arm_emit_store_regarray (guint8 *code, guint64 regs, int basereg, int offset) { return emit_store_regarray (code, regs, basereg, offset); } WARN_UNUSED_RESULT guint8* mono_arm_emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset) { return emit_store_regset (code, regs, basereg, offset); } /* Same as emit_store_regset, but emit unwind info too */ /* CFA_OFFSET is the offset between the CFA and basereg */ static WARN_UNUSED_RESULT guint8* emit_store_regset_cfa (MonoCompile *cfg, guint8 *code, guint64 regs, int basereg, int offset, int cfa_offset, guint64 no_cfa_regset) { int i, j, pos, nregs; guint32 cfa_regset = regs & ~no_cfa_regset; pos = 0; for (i = 0; i < 32; ++i) { nregs = 1; if (regs & (1 << i)) { if ((regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) { if (offset < 256) { arm_stpx (code, i, i + 1, basereg, offset + (pos * 8)); } else { code = emit_strx (code, i, basereg, offset + (pos * 8)); code = emit_strx (code, i + 1, basereg, offset + (pos * 8) + 8); } nregs = 2; } else if (i == ARMREG_SP) { arm_movspx (code, ARMREG_IP1, ARMREG_SP); code = emit_strx (code, ARMREG_IP1, basereg, offset + (pos * 8)); } else { code = emit_strx (code, i, basereg, offset + (pos * 8)); } for (j = 0; j < nregs; ++j) { if (cfa_regset & (1 << (i + j))) mono_emit_unwind_op_offset (cfg, code, i + j, (- cfa_offset) + offset + ((pos + j) * 8)); } i += nregs - 1; pos += nregs; } } return code; } /* * emit_setup_lmf: * * Emit code to initialize an LMF structure at LMF_OFFSET. * Clobbers ip0/ip1. */ static guint8* emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset) { /* * The LMF should contain all the state required to be able to reconstruct the machine state * at the current point of execution. Since the LMF is only read during EH, only callee * saved etc. registers need to be saved. * FIXME: Save callee saved fp regs, JITted code doesn't use them, but native code does, and they * need to be restored during EH. 
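 * The code below stores the current pc and the callee saved gregs plus fp/sp
 * into the LMF; no unwind info is emitted for fp/sp since the prolog has
 * already described them.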
*/ /* pc */ arm_adrx (code, ARMREG_LR, code); code = emit_strx (code, ARMREG_LR, ARMREG_FP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, pc)); /* gregs + fp + sp */ /* Don't emit unwind info for sp/fp, they are already handled in the prolog */ code = emit_store_regset_cfa (cfg, code, MONO_ARCH_LMF_REGS, ARMREG_FP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, gregs), cfa_offset, (1 << ARMREG_FP) | (1 << ARMREG_SP)); return code; } guint8 * mono_arch_emit_prolog (MonoCompile *cfg) { MonoMethod *method = cfg->method; MonoMethodSignature *sig; MonoBasicBlock *bb; guint8 *code; int cfa_offset, max_offset; sig = mono_method_signature_internal (method); cfg->code_size = 256 + sig->param_count * 64; code = cfg->native_code = g_malloc (cfg->code_size); /* This can be unaligned */ cfg->stack_offset = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT); /* * - Setup frame */ cfa_offset = 0; mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0); if (enable_ptrauth) arm_pacibsp (code); /* Setup frame */ if (arm_is_ldpx_imm (-cfg->stack_offset)) { arm_stpx_pre (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, -cfg->stack_offset); } else { /* sp -= cfg->stack_offset */ /* This clobbers ip0/ip1 */ code = emit_subx_sp_imm (code, cfg->stack_offset); arm_stpx (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, 0); } cfa_offset += cfg->stack_offset; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); mono_emit_unwind_op_offset (cfg, code, ARMREG_FP, (- cfa_offset) + 0); mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, (- cfa_offset) + 8); arm_movspx (code, ARMREG_FP, ARMREG_SP); mono_emit_unwind_op_def_cfa_reg (cfg, code, ARMREG_FP); if (cfg->param_area) { /* The param area is below the frame pointer */ code = emit_subx_sp_imm (code, cfg->param_area); } if (cfg->method->save_lmf) { code = emit_setup_lmf (cfg, code, cfg->lmf_var->inst_offset, cfa_offset); } else { /* Save gregs */ code = emit_store_regset_cfa (cfg, code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset, cfa_offset, 0); } /* Setup args reg */ if (cfg->arch.args_reg) { /* The register was already saved above */ code = emit_addx_imm (code, cfg->arch.args_reg, ARMREG_FP, cfg->stack_offset); } /* Save return area addr received in R8 */ if (cfg->vret_addr) { MonoInst *ins = cfg->vret_addr; g_assert (ins->opcode == OP_REGOFFSET); code = emit_strx (code, ARMREG_R8, ins->inst_basereg, ins->inst_offset); } /* Save mrgctx received in MONO_ARCH_RGCTX_REG */ if (cfg->rgctx_var) { MonoInst *ins = cfg->rgctx_var; g_assert (ins->opcode == OP_REGOFFSET); code = emit_strx (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset); mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code); mono_add_var_location (cfg, cfg->rgctx_var, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0); } /* * Move arguments to their registers/stack locations. 
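 * emit_move_args () either spills register arguments into their stack slots
 * or moves them into their home registers, depending on each argument's
 * storage class.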
*/ code = emit_move_args (cfg, code); /* Initialize seq_point_info_var */ if (cfg->arch.seq_point_info_var) { MonoInst *ins = cfg->arch.seq_point_info_var; /* Initialize the variable from a GOT slot */ code = emit_aotconst (cfg, code, ARMREG_IP0, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method); g_assert (ins->opcode == OP_REGOFFSET); code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset); /* Initialize ss_tramp_var */ ins = cfg->arch.ss_tramp_var; g_assert (ins->opcode == OP_REGOFFSET); code = emit_ldrx (code, ARMREG_IP1, ARMREG_IP0, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr)); code = emit_strx (code, ARMREG_IP1, ins->inst_basereg, ins->inst_offset); } else { MonoInst *ins; if (cfg->arch.ss_tramp_var) { /* Initialize ss_tramp_var */ ins = cfg->arch.ss_tramp_var; g_assert (ins->opcode == OP_REGOFFSET); code = emit_imm64 (code, ARMREG_IP0, (guint64)&ss_trampoline); code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset); } if (cfg->arch.bp_tramp_var) { /* Initialize bp_tramp_var */ ins = cfg->arch.bp_tramp_var; g_assert (ins->opcode == OP_REGOFFSET); code = emit_imm64 (code, ARMREG_IP0, (guint64)bp_trampoline); code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset); } } max_offset = 0; if (cfg->opt & MONO_OPT_BRANCH) { for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins; bb->max_offset = max_offset; MONO_BB_FOR_EACH_INS (bb, ins) { max_offset += ins_get_size (ins->opcode); } } } if (max_offset > 0x3ffff * 4) cfg->arch.cond_branch_islands = TRUE; return code; } void mono_arch_emit_epilog (MonoCompile *cfg) { CallInfo *cinfo; int max_epilog_size; guint8 *code; int i; max_epilog_size = 16 + 20*4; code = realloc_code (cfg, max_epilog_size); if (cfg->method->save_lmf) { code = mono_arm_emit_load_regarray (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->lmf_var->inst_offset + MONO_STRUCT_OFFSET (MonoLMF, gregs) - (MONO_ARCH_FIRST_LMF_REG * 8)); } else { /* Restore gregs */ code = emit_load_regset (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset); } /* Load returned vtypes into registers if needed */ cinfo = cfg->arch.cinfo; switch (cinfo->ret.storage) { case ArgVtypeInIRegs: { MonoInst *ins = cfg->ret; for (i = 0; i < cinfo->ret.nregs; ++i) code = emit_ldrx (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * 8)); break; } case ArgHFA: { MonoInst *ins = cfg->ret; for (i = 0; i < cinfo->ret.nregs; ++i) { if (cinfo->ret.esize == 4) code = emit_ldrfpw (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + cinfo->ret.foffsets [i]); else code = emit_ldrfpx (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + cinfo->ret.foffsets [i]); } break; } default: break; } /* Destroy frame */ code = mono_arm_emit_destroy_frame (code, cfg->stack_offset, (1 << ARMREG_IP0) | (1 << ARMREG_IP1)); if (enable_ptrauth) arm_retab (code); else arm_retx (code, ARMREG_LR); g_assert (code - (cfg->native_code + cfg->code_len) < max_epilog_size); set_code_cursor (cfg, code); } void mono_arch_emit_exceptions (MonoCompile *cfg) { MonoJumpInfo *ji; MonoClass *exc_class; guint8 *code, *ip; guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM]; guint8 exc_throw_found [MONO_EXC_INTRINS_NUM]; int i, id, size = 0; for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) { exc_throw_pos [i] = NULL; exc_throw_found [i] = 0; } for (ji = cfg->patch_info; ji; ji = ji->next) { if (ji->type == MONO_PATCH_INFO_EXC) { i = mini_exception_id_by_name ((const char*)ji->data.target); if 
(!exc_throw_found [i]) { size += 32; exc_throw_found [i] = TRUE; } } } code = realloc_code (cfg, size); /* Emit code to raise corlib exceptions */ for (ji = cfg->patch_info; ji; ji = ji->next) { if (ji->type != MONO_PATCH_INFO_EXC) continue; ip = cfg->native_code + ji->ip.i; id = mini_exception_id_by_name ((const char*)ji->data.target); if (exc_throw_pos [id]) { /* ip points to the bcc () in OP_COND_EXC_... */ arm_patch_rel (ip, exc_throw_pos [id], ji->relocation); ji->type = MONO_PATCH_INFO_NONE; continue; } exc_throw_pos [id] = code; arm_patch_rel (ip, code, ji->relocation); /* We are being branched to from the code generated by emit_cond_exc (), the pc is in ip1 */ /* r0 = type token */ exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", ji->data.name); code = emit_imm (code, ARMREG_R0, m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF); /* r1 = throw ip */ arm_movx (code, ARMREG_R1, ARMREG_IP1); /* Branch to the corlib exception throwing trampoline */ ji->ip.i = code - cfg->native_code; ji->type = MONO_PATCH_INFO_JIT_ICALL_ID; ji->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception; ji->relocation = MONO_R_ARM64_BL; arm_bl (code, 0); cfg->thunk_area += THUNK_SIZE; set_code_cursor (cfg, code); } set_code_cursor (cfg, code); } MonoInst* mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { return NULL; } guint32 mono_arch_get_patch_offset (guint8 *code) { return 0; } gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp) { int i, buf_len, imt_reg; guint8 *buf, *code; #if DEBUG_IMT printf ("building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p\n", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count, size, start, ((guint8*)start) + size, vtable); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size); } #endif buf_len = 0; for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->is_equals) { gboolean fail_case = !item->check_target_idx && fail_tramp; if (item->check_target_idx || fail_case) { if (!item->compare_done || fail_case) { buf_len += 4 * 4 + 4; } buf_len += 4; if (item->has_target_code) { buf_len += 5 * 4; } else { buf_len += 6 * 4; } if (fail_case) { buf_len += 5 * 4; } } else { buf_len += 6 * 4; } } else { buf_len += 6 * 4; } } if (fail_tramp) { buf = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, buf_len); } else { MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass); buf = mono_mem_manager_code_reserve (mem_manager, buf_len); } code = buf; MINI_BEGIN_CODEGEN (); /* * We are called by JITted code, which passes in the IMT argument in * MONO_ARCH_RGCTX_REG (r27). We need to preserve all caller saved regs * except ip0/ip1. */ imt_reg = MONO_ARCH_RGCTX_REG; for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; item->code_target = code; if (item->is_equals) { /* * Check the imt argument against item->key, if equals, jump to either * item->value.target_code or to vtable [item->value.vtable_slot]. * If fail_tramp is set, jump to it if not-equals. 
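 * Entries which are not is_equals implement the binary-search step: compare
 * against the key and branch to the upper half on higher-or-same (ARMCOND_HS).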
*/ gboolean fail_case = !item->check_target_idx && fail_tramp; if (item->check_target_idx || fail_case) { /* Compare imt_reg with item->key */ if (!item->compare_done || fail_case) { // FIXME: Optimize this code = emit_imm64 (code, ARMREG_IP0, (guint64)item->key); arm_cmpx (code, imt_reg, ARMREG_IP0); } item->jmp_code = code; arm_bcc (code, ARMCOND_NE, 0); /* Jump to target if equals */ if (item->has_target_code) { code = emit_imm64 (code, ARMREG_IP0, (guint64)item->value.target_code); code = mono_arm_emit_brx (code, ARMREG_IP0); } else { guint64 imm = (guint64)&(vtable->vtable [item->value.vtable_slot]); code = emit_imm64 (code, ARMREG_IP0, imm); arm_ldrx (code, ARMREG_IP0, ARMREG_IP0, 0); code = mono_arm_emit_brx (code, ARMREG_IP0); } if (fail_case) { arm_patch_rel (item->jmp_code, code, MONO_R_ARM64_BCC); item->jmp_code = NULL; code = emit_imm64 (code, ARMREG_IP0, (guint64)fail_tramp); code = mono_arm_emit_brx (code, ARMREG_IP0); } } else { guint64 imm = (guint64)&(vtable->vtable [item->value.vtable_slot]); code = emit_imm64 (code, ARMREG_IP0, imm); arm_ldrx (code, ARMREG_IP0, ARMREG_IP0, 0); code = mono_arm_emit_brx (code, ARMREG_IP0); } } else { code = emit_imm64 (code, ARMREG_IP0, (guint64)item->key); arm_cmpx (code, imt_reg, ARMREG_IP0); item->jmp_code = code; arm_bcc (code, ARMCOND_HS, 0); } } /* Patch the branches */ for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->jmp_code && item->check_target_idx) arm_patch_rel (item->jmp_code, imt_entries [item->check_target_idx]->code_target, MONO_R_ARM64_BCC); } g_assert ((code - buf) <= buf_len); MINI_END_CODEGEN (buf, code - buf, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL); return MINI_ADDR_TO_FTNPTR (buf); } GSList * mono_arch_get_trampolines (gboolean aot) { return mono_arm_get_exception_trampolines (aot); } #else /* DISABLE_JIT */ gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp) { g_assert_not_reached (); return NULL; } #endif /* !DISABLE_JIT */ #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED void mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = MINI_FTNPTR_TO_ADDR (ip); guint32 native_offset = ip - (guint8*)ji->code_start; if (ji->from_aot) { SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start); if (enable_ptrauth) NOT_IMPLEMENTED; g_assert (native_offset % 4 == 0); g_assert (info->bp_addrs [native_offset / 4] == 0); info->bp_addrs [native_offset / 4] = (guint8*)mini_get_breakpoint_trampoline (); } else { /* ip points to an ldrx */ code += 4; mono_codeman_enable_write (); code = mono_arm_emit_blrx (code, ARMREG_IP0); mono_codeman_disable_write (); mono_arch_flush_icache (ip, code - ip); } } void mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = MINI_FTNPTR_TO_ADDR (ip); if (ji->from_aot) { guint32 native_offset = ip - (guint8*)ji->code_start; SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start); if (enable_ptrauth) NOT_IMPLEMENTED; g_assert (native_offset % 4 == 0); info->bp_addrs [native_offset / 4] = NULL; } else { /* ip points to an ldrx */ code += 4; mono_codeman_enable_write (); arm_nop (code); mono_codeman_disable_write (); mono_arch_flush_icache (ip, code - ip); } } void mono_arch_start_single_stepping (void) { ss_trampoline = mini_get_single_step_trampoline (); } void mono_arch_stop_single_stepping (void) { ss_trampoline = NULL; } gboolean mono_arch_is_single_step_event (void *info, void *sigctx) { /* We use soft breakpoints on arm64 
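(the patched code calls a trampoline directly, see mono_arch_set_breakpoint above, rather than raising a signal)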
*/ return FALSE; } gboolean mono_arch_is_breakpoint_event (void *info, void *sigctx) { /* We use soft breakpoints on arm64 */ return FALSE; } void mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji) { g_assert_not_reached (); } void mono_arch_skip_single_step (MonoContext *ctx) { g_assert_not_reached (); } SeqPointInfo* mono_arch_get_seq_point_info (guint8 *code) { SeqPointInfo *info; MonoJitInfo *ji; MonoJitMemoryManager *jit_mm; jit_mm = get_default_jit_mm (); // FIXME: Add a free function jit_mm_lock (jit_mm); info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code); jit_mm_unlock (jit_mm); if (!info) { ji = mini_jit_info_table_find (code); g_assert (ji); info = g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size / 4) * sizeof(guint8*)); info->ss_tramp_addr = &ss_trampoline; jit_mm_lock (jit_mm); g_hash_table_insert (jit_mm->arch_seq_points, code, info); jit_mm_unlock (jit_mm); } return info; } #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */ gboolean mono_arch_opcode_supported (int opcode) { switch (opcode) { case OP_ATOMIC_ADD_I4: case OP_ATOMIC_ADD_I8: case OP_ATOMIC_EXCHANGE_I4: case OP_ATOMIC_EXCHANGE_I8: case OP_ATOMIC_CAS_I4: case OP_ATOMIC_CAS_I8: case OP_ATOMIC_LOAD_I1: case OP_ATOMIC_LOAD_I2: case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_LOAD_I8: case OP_ATOMIC_LOAD_U1: case OP_ATOMIC_LOAD_U2: case OP_ATOMIC_LOAD_U4: case OP_ATOMIC_LOAD_U8: case OP_ATOMIC_LOAD_R4: case OP_ATOMIC_LOAD_R8: case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_I8: case OP_ATOMIC_STORE_U1: case OP_ATOMIC_STORE_U2: case OP_ATOMIC_STORE_U4: case OP_ATOMIC_STORE_U8: case OP_ATOMIC_STORE_R4: case OP_ATOMIC_STORE_R8: return TRUE; default: return FALSE; } } CallInfo* mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { return get_call_info (mp, sig); } gpointer mono_arch_load_function (MonoJitICallId jit_icall_id) { gpointer target = NULL; switch (jit_icall_id) { #undef MONO_AOT_ICALL #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break; MONO_AOT_ICALL (mono_arm_resume_unwind) MONO_AOT_ICALL (mono_arm_start_gsharedvt_call) MONO_AOT_ICALL (mono_arm_throw_exception) } return target; } static guint8* emit_blrx (guint8 *code, int reg) { if (enable_ptrauth) arm_blraaz (code, reg); else arm_blrx (code, reg); return code; } static guint8* emit_brx (guint8 *code, int reg) { if (enable_ptrauth) arm_braaz (code, reg); else arm_brx (code, reg); return code; } guint8* mono_arm_emit_blrx (guint8 *code, int reg) { return emit_blrx (code, reg); } guint8* mono_arm_emit_brx (guint8 *code, int reg) { return emit_brx (code, reg); }
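/*
 * When pointer authentication is enabled (enable_ptrauth), the blraaz/braaz
 * forms above authenticate the branch target with the A key and a zero
 * modifier before transferring control. A minimal call-site sketch: a caller
 * holding the target in ip0 does
 *   code = mono_arm_emit_blrx (code, ARMREG_IP0);
 * which emits blraaz ip0 under ptrauth and a plain blr ip0 otherwise.
 */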
/** * \file * ARM64 backend for the Mono code generator * * Copyright 2013 Xamarin, Inc (http://www.xamarin.com) * * Based on mini-arm.c: * * Authors: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * * (C) 2003 Ximian, Inc. * Copyright 2003-2011 Novell, Inc (http://www.novell.com) * Copyright 2011 Xamarin, Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include "mini.h" #include "cpu-arm64.h" #include "ir-emit.h" #include "aot-runtime.h" #include "mini-runtime.h" #include <mono/arch/arm64/arm64-codegen.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-memory-model.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/tokentype.h> #include "interp/interp.h" /* * Documentation: * * - ARM(R) Architecture Reference Manual, ARMv8, for ARMv8-A architecture profile (DDI0487A_a_armv8_arm.pdf) * - Procedure Call Standard for the ARM 64-bit Architecture (AArch64) (IHI0055B_aapcs64.pdf) * - ELF for the ARM 64-bit Architecture (IHI0056B_aaelf64.pdf) * * Register usage: * - ip0/ip1/lr are used as temporary registers * - r27 is used as the rgctx/imt register * - r28 is used to access arguments passed on the stack * - d15/d16 are used as fp temporary registers */ #define FP_TEMP_REG ARMREG_D16 #define FP_TEMP_REG2 ARMREG_D17 #define THUNK_SIZE (4 * 4) /* The single step trampoline */ static gpointer ss_trampoline; /* The breakpoint trampoline */ static gpointer bp_trampoline; static gboolean ios_abi; static gboolean enable_ptrauth; #if defined(HOST_WIN32) #define WARN_UNUSED_RESULT _Check_return_ #else #define WARN_UNUSED_RESULT __attribute__ ((__warn_unused_result__)) #endif static WARN_UNUSED_RESULT guint8* emit_load_regset (guint8 *code, guint64 regs, int basereg, int offset); static guint8* emit_brx (guint8 *code, int reg); static guint8* emit_blrx (guint8 *code, int reg); const char* mono_arch_regname (int reg) { static const char * rnames[] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "fp", "lr", "sp" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } const char* mono_arch_fregname (int reg) { static const char * rnames[] = { "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown fp"; } const char * mono_arch_xregname (int reg) { static const char * rnames[] = { "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) { NOT_IMPLEMENTED; return 0; } #define MAX_ARCH_DELEGATE_PARAMS 7 static gpointer get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size) { guint8 *code, *start; MINI_BEGIN_CODEGEN (); if (has_target) { start = code = mono_global_codeman_reserve (12); /* Replace the this argument with the target */ arm_ldrx (code, ARMREG_IP0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); arm_ldrx (code, 
ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target)); code = mono_arm_emit_brx (code, ARMREG_IP0); g_assert ((code - start) <= 12); } else { int size, i; size = 8 + param_count * 4; start = code = mono_global_codeman_reserve (size); arm_ldrx (code, ARMREG_IP0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); /* slide down the arguments */ for (i = 0; i < param_count; ++i) arm_movx (code, i, i + 1); code = mono_arm_emit_brx (code, ARMREG_IP0); g_assert ((code - start) <= size); } MINI_END_CODEGEN (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL); if (code_size) *code_size = code - start; return MINI_ADDR_TO_FTNPTR (start); } /* * mono_arch_get_delegate_invoke_impls: * * Return a list of MonoAotTrampInfo structures for the delegate invoke impl * trampolines. */ GSList* mono_arch_get_delegate_invoke_impls (void) { GSList *res = NULL; guint8 *code; guint32 code_len; int i; char *tramp_name; code = (guint8*)get_delegate_invoke_impl (TRUE, 0, &code_len); res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL)); for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) { code = (guint8*)get_delegate_invoke_impl (FALSE, i, &code_len); tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i); res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL)); g_free (tramp_name); } return res; } gpointer mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target) { guint8 *code, *start; /* * vtypes are returned in registers, or using the dedicated r8 register, so * they can be supported by delegate invokes. */ if (has_target) { static guint8* cached = NULL; if (cached) return cached; if (mono_ee_features.use_aot_trampolines) start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target"); else start = (guint8*)get_delegate_invoke_impl (TRUE, 0, NULL); mono_memory_barrier (); cached = start; return cached; } else { static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL}; int i; if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS) return NULL; for (i = 0; i < sig->param_count; ++i) if (!mono_is_regsize_var (sig->params [i])) return NULL; code = cache [sig->param_count]; if (code) return code; if (mono_ee_features.use_aot_trampolines) { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count); start = (guint8*)mono_aot_get_trampoline (name); g_free (name); } else { start = (guint8*)get_delegate_invoke_impl (FALSE, sig->param_count, NULL); } mono_memory_barrier (); cache [sig->param_count] = start; return start; } return NULL; } gpointer mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg) { return NULL; } gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code) { return (gpointer)regs [ARMREG_R0]; } void mono_arch_cpu_init (void) { } void mono_arch_init (void) { #if defined(TARGET_IOS) || defined(TARGET_WATCHOS) || defined(TARGET_OSX) ios_abi = TRUE; #endif #ifdef MONO_ARCH_ENABLE_PTRAUTH enable_ptrauth = TRUE; #endif if (!mono_aot_only) bp_trampoline = mini_get_breakpoint_trampoline (); mono_arm_gsharedvt_init (); } void mono_arch_cleanup (void) { } guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask) { *exclude_mask = 0; return 0; } void mono_arch_register_lowlevel_calls (void) { } void mono_arch_finish_init (void) { } /* The maximum length is 2 instructions */ static guint8* emit_imm (guint8 *code, int dreg, int 
imm) { // FIXME: Optimize this if (imm < 0) { gint64 limm = imm; arm_movnx (code, dreg, (~limm) & 0xffff, 0); arm_movkx (code, dreg, (limm >> 16) & 0xffff, 16); } else { arm_movzx (code, dreg, imm & 0xffff, 0); if (imm >> 16) arm_movkx (code, dreg, (imm >> 16) & 0xffff, 16); } return code; } /* The maximum length is 4 instructions */ static guint8* emit_imm64 (guint8 *code, int dreg, guint64 imm) { // FIXME: Optimize this arm_movzx (code, dreg, imm & 0xffff, 0); if ((imm >> 16) & 0xffff) arm_movkx (code, dreg, (imm >> 16) & 0xffff, 16); if ((imm >> 32) & 0xffff) arm_movkx (code, dreg, (imm >> 32) & 0xffff, 32); if ((imm >> 48) & 0xffff) arm_movkx (code, dreg, (imm >> 48) & 0xffff, 48); return code; } guint8* mono_arm_emit_imm64 (guint8 *code, int dreg, gint64 imm) { return emit_imm64 (code, dreg, imm); } /* * emit_imm64_template: * * Emit a patchable code sequence for constructing a 64 bit immediate. */ static guint8* emit_imm64_template (guint8 *code, int dreg) { arm_movzx (code, dreg, 0, 0); arm_movkx (code, dreg, 0, 16); arm_movkx (code, dreg, 0, 32); arm_movkx (code, dreg, 0, 48); return code; } static WARN_UNUSED_RESULT guint8* emit_addw_imm (guint8 *code, int dreg, int sreg, int imm) { if (!arm_is_arith_imm (imm)) { code = emit_imm (code, ARMREG_LR, imm); arm_addw (code, dreg, sreg, ARMREG_LR); } else { arm_addw_imm (code, dreg, sreg, imm); } return code; } static WARN_UNUSED_RESULT guint8* emit_addx_imm (guint8 *code, int dreg, int sreg, int imm) { if (!arm_is_arith_imm (imm)) { code = emit_imm (code, ARMREG_LR, imm); arm_addx (code, dreg, sreg, ARMREG_LR); } else { arm_addx_imm (code, dreg, sreg, imm); } return code; } static WARN_UNUSED_RESULT guint8* emit_subw_imm (guint8 *code, int dreg, int sreg, int imm) { if (!arm_is_arith_imm (imm)) { code = emit_imm (code, ARMREG_LR, imm); arm_subw (code, dreg, sreg, ARMREG_LR); } else { arm_subw_imm (code, dreg, sreg, imm); } return code; } static WARN_UNUSED_RESULT guint8* emit_subx_imm (guint8 *code, int dreg, int sreg, int imm) { if (!arm_is_arith_imm (imm)) { code = emit_imm (code, ARMREG_LR, imm); arm_subx (code, dreg, sreg, ARMREG_LR); } else { arm_subx_imm (code, dreg, sreg, imm); } return code; } /* Emit sp+=imm. Clobbers ip0/ip1 */ static WARN_UNUSED_RESULT guint8* emit_addx_sp_imm (guint8 *code, int imm) { code = emit_imm (code, ARMREG_IP0, imm); arm_movspx (code, ARMREG_IP1, ARMREG_SP); arm_addx (code, ARMREG_IP1, ARMREG_IP1, ARMREG_IP0); arm_movspx (code, ARMREG_SP, ARMREG_IP1); return code; } /* Emit sp-=imm. 
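SP cannot be used as an operand of the register-register sub, so the value is staged through ip1 while the immediate is materialized in ip0. 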
Clobbers ip0/ip1 */ static WARN_UNUSED_RESULT guint8* emit_subx_sp_imm (guint8 *code, int imm) { code = emit_imm (code, ARMREG_IP0, imm); arm_movspx (code, ARMREG_IP1, ARMREG_SP); arm_subx (code, ARMREG_IP1, ARMREG_IP1, ARMREG_IP0); arm_movspx (code, ARMREG_SP, ARMREG_IP1); return code; } static WARN_UNUSED_RESULT guint8* emit_andw_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_andw (code, dreg, sreg, ARMREG_LR); return code; } static WARN_UNUSED_RESULT guint8* emit_andx_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_andx (code, dreg, sreg, ARMREG_LR); return code; } static WARN_UNUSED_RESULT guint8* emit_orrw_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_orrw (code, dreg, sreg, ARMREG_LR); return code; } static WARN_UNUSED_RESULT guint8* emit_orrx_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_orrx (code, dreg, sreg, ARMREG_LR); return code; } static WARN_UNUSED_RESULT guint8* emit_eorw_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_eorw (code, dreg, sreg, ARMREG_LR); return code; } static WARN_UNUSED_RESULT guint8* emit_eorx_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_eorx (code, dreg, sreg, ARMREG_LR); return code; } static WARN_UNUSED_RESULT guint8* emit_cmpw_imm (guint8 *code, int sreg, int imm) { if (imm == 0) { arm_cmpw (code, sreg, ARMREG_RZR); } else { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_cmpw (code, sreg, ARMREG_LR); } return code; } static WARN_UNUSED_RESULT guint8* emit_cmpx_imm (guint8 *code, int sreg, int imm) { if (imm == 0) { arm_cmpx (code, sreg, ARMREG_RZR); } else { // FIXME: code = emit_imm (code, ARMREG_LR, imm); arm_cmpx (code, sreg, ARMREG_LR); } return code; } static WARN_UNUSED_RESULT guint8* emit_strb (guint8 *code, int rt, int rn, int imm) { if (arm_is_strb_imm (imm)) { arm_strb (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_strb_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_strh (guint8 *code, int rt, int rn, int imm) { if (arm_is_strh_imm (imm)) { arm_strh (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_strh_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_strw (guint8 *code, int rt, int rn, int imm) { if (arm_is_strw_imm (imm)) { arm_strw (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_strw_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_strfpw (guint8 *code, int rt, int rn, int imm) { if (arm_is_strw_imm (imm)) { arm_strfpw (code, rt, rn, imm); } else { g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0); arm_strfpw (code, rt, ARMREG_IP0, 0); } return code; } static WARN_UNUSED_RESULT guint8* emit_strfpx (guint8 *code, int rt, int rn, int imm) { if (arm_is_strx_imm (imm)) { arm_strfpx (code, rt, rn, imm); } else { g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0); arm_strfpx (code, rt, ARMREG_IP0, 0); } return 
code; } static WARN_UNUSED_RESULT guint8* emit_strx (guint8 *code, int rt, int rn, int imm) { if (arm_is_strx_imm (imm)) { arm_strx (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_strx_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrb (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 1)) { arm_ldrb (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_ldrb_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrsbx (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 1)) { arm_ldrsbx (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_ldrsbx_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrh (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 2)) { arm_ldrh (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_ldrh_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrshx (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 2)) { arm_ldrshx (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_ldrshx_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrswx (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 4)) { arm_ldrswx (code, rt, rn, imm); } else { g_assert (rt != ARMREG_IP0); g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_ldrswx_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrw (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 4)) { arm_ldrw (code, rt, rn, imm); } else { g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_ldrw_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrx (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 8)) { arm_ldrx (code, rt, rn, imm); } else { g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_ldrx_reg (code, rt, rn, ARMREG_IP0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrfpw (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 4)) { arm_ldrfpw (code, rt, rn, imm); } else { g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0); arm_ldrfpw (code, rt, ARMREG_IP0, 0); } return code; } static WARN_UNUSED_RESULT guint8* emit_ldrfpx (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 8)) { arm_ldrfpx (code, rt, rn, imm); } else { g_assert (rn != ARMREG_IP0); code = emit_imm (code, ARMREG_IP0, imm); arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0); arm_ldrfpx (code, rt, ARMREG_IP0, 0); } return code; } guint8* mono_arm_emit_ldrx (guint8 *code, int rt, int rn, int imm) { return emit_ldrx (code, rt, rn, imm); } static guint8* emit_call (MonoCompile *cfg, guint8* code, MonoJumpInfoType patch_type, gconstpointer data) { /* mono_add_patch_info_rel (cfg, code - cfg->native_code, patch_type, data, MONO_R_ARM64_IMM); code = emit_imm64_template (code, ARMREG_LR); arm_blrx 
(code, ARMREG_LR); */ mono_add_patch_info_rel (cfg, code - cfg->native_code, patch_type, data, MONO_R_ARM64_BL); arm_bl (code, code); cfg->thunk_area += THUNK_SIZE; return code; } static guint8* emit_aotconst_full (MonoCompile *cfg, MonoJumpInfo **ji, guint8 *code, guint8 *start, int dreg, guint32 patch_type, gconstpointer data) { if (cfg) mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data); else *ji = mono_patch_info_list_prepend (*ji, code - start, (MonoJumpInfoType)patch_type, data); /* See arch_emit_got_access () in aot-compiler.c */ arm_ldrx_lit (code, dreg, 0); arm_nop (code); arm_nop (code); return code; } static guint8* emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, guint32 patch_type, gconstpointer data) { return emit_aotconst_full (cfg, NULL, code, NULL, dreg, patch_type, data); } /* * mono_arm_emit_aotconst: * * Emit code to load an AOT constant into DREG. Usable from trampolines. */ guint8* mono_arm_emit_aotconst (gpointer ji, guint8 *code, guint8 *code_start, int dreg, guint32 patch_type, gconstpointer data) { return emit_aotconst_full (NULL, (MonoJumpInfo**)ji, code, code_start, dreg, patch_type, data); } gboolean mono_arch_have_fast_tls (void) { #ifdef TARGET_IOS return FALSE; #else return TRUE; #endif } static guint8* emit_tls_get (guint8 *code, int dreg, int tls_offset) { arm_mrs (code, dreg, ARM_MRS_REG_TPIDR_EL0); if (tls_offset < 256) { arm_ldrx (code, dreg, dreg, tls_offset); } else { code = emit_addx_imm (code, dreg, dreg, tls_offset); arm_ldrx (code, dreg, dreg, 0); } return code; } static guint8* emit_tls_set (guint8 *code, int sreg, int tls_offset) { int tmpreg = ARMREG_IP0; g_assert (sreg != tmpreg); arm_mrs (code, tmpreg, ARM_MRS_REG_TPIDR_EL0); if (tls_offset < 256) { arm_strx (code, sreg, tmpreg, tls_offset); } else { code = emit_addx_imm (code, tmpreg, tmpreg, tls_offset); arm_strx (code, sreg, tmpreg, 0); } return code; } /* * Emits * - mov sp, fp * - ldp [fp, lr], [sp], !stack_offset * Clobbers TEMP_REGS. */ WARN_UNUSED_RESULT guint8* mono_arm_emit_destroy_frame (guint8 *code, int stack_offset, guint64 temp_regs) { // At least one of these registers must be available; both may be. gboolean const temp0 = (temp_regs & (1 << ARMREG_IP0)) != 0; gboolean const temp1 = (temp_regs & (1 << ARMREG_IP1)) != 0; g_assert (temp0 || temp1); int const temp = temp0 ? 
ARMREG_IP0 : ARMREG_IP1; arm_movspx (code, ARMREG_SP, ARMREG_FP); if (arm_is_ldpx_imm (stack_offset)) { arm_ldpx_post (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, stack_offset); } else { arm_ldpx (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, 0); /* sp += stack_offset */ if (temp0 && temp1) { code = emit_addx_sp_imm (code, stack_offset); } else { int imm = stack_offset; /* Can't use addx_sp_imm () since we can't clobber both ip0/ip1 */ arm_addx_imm (code, temp, ARMREG_SP, 0); while (imm > 256) { arm_addx_imm (code, temp, temp, 256); imm -= 256; } arm_addx_imm (code, ARMREG_SP, temp, imm); } } return code; } #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431) static guint8* emit_thunk (guint8 *code, gconstpointer target) { guint8 *p = code; arm_ldrx_lit (code, ARMREG_IP0, code + 8); arm_brx (code, ARMREG_IP0); *(guint64*)code = (guint64)target; code += sizeof (guint64); mono_arch_flush_icache (p, code - p); return code; } static gpointer create_thunk (MonoCompile *cfg, guchar *code, const guchar *target) { MonoJitInfo *ji; MonoThunkJitInfo *info; guint8 *thunks, *p; int thunks_size; guint8 *orig_target; guint8 *target_thunk; MonoJitMemoryManager* jit_mm; if (cfg) { /* * This can be called multiple times during JITting, * save the current position in cfg->arch to avoid * doing a O(n^2) search. */ if (!cfg->arch.thunks) { cfg->arch.thunks = cfg->thunks; cfg->arch.thunks_size = cfg->thunk_area; } thunks = cfg->arch.thunks; thunks_size = cfg->arch.thunks_size; if (!thunks_size) { g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE)); g_assert_not_reached (); } g_assert (*(guint32*)thunks == 0); emit_thunk (thunks, target); cfg->arch.thunks += THUNK_SIZE; cfg->arch.thunks_size -= THUNK_SIZE; return thunks; } else { ji = mini_jit_info_table_find (code); g_assert (ji); info = mono_jit_info_get_thunk_info (ji); g_assert (info); thunks = (guint8*)ji->code_start + info->thunks_offset; thunks_size = info->thunks_size; orig_target = mono_arch_get_call_target (code + 4); /* Arbitrary lock */ jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); target_thunk = NULL; if (orig_target >= thunks && orig_target < thunks + thunks_size) { /* The call already points to a thunk, because of trampolines etc. */ target_thunk = orig_target; } else { for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) { if (((guint32*)p) [0] == 0) { /* Free entry */ target_thunk = p; break; } else if (((guint64*)p) [1] == (guint64)target) { /* Thunk already points to target */ target_thunk = p; break; } } } //printf ("THUNK: %p %p %p\n", code, target, target_thunk); if (!target_thunk) { jit_mm_unlock (jit_mm); g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? 
mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE)); g_assert_not_reached (); } emit_thunk (target_thunk, target); jit_mm_unlock (jit_mm); return target_thunk; } } static void arm_patch_full (MonoCompile *cfg, guint8 *code, guint8 *target, int relocation) { switch (relocation) { case MONO_R_ARM64_B: target = MINI_FTNPTR_TO_ADDR (target); if (arm_is_bl_disp (code, target)) { arm_b (code, target); } else { gpointer thunk; thunk = create_thunk (cfg, code, target); g_assert (arm_is_bl_disp (code, thunk)); arm_b (code, thunk); } break; case MONO_R_ARM64_BCC: { int cond; cond = arm_get_bcc_cond (code); arm_bcc (code, cond, target); break; } case MONO_R_ARM64_CBZ: arm_set_cbz_target (code, target); break; case MONO_R_ARM64_IMM: { guint64 imm = (guint64)target; int dreg; /* emit_imm64_template () */ dreg = arm_get_movzx_rd (code); arm_movzx (code, dreg, imm & 0xffff, 0); arm_movkx (code, dreg, (imm >> 16) & 0xffff, 16); arm_movkx (code, dreg, (imm >> 32) & 0xffff, 32); arm_movkx (code, dreg, (imm >> 48) & 0xffff, 48); break; } case MONO_R_ARM64_BL: target = MINI_FTNPTR_TO_ADDR (target); if (arm_is_bl_disp (code, target)) { arm_bl (code, target); } else { gpointer thunk; thunk = create_thunk (cfg, code, target); g_assert (arm_is_bl_disp (code, thunk)); arm_bl (code, thunk); } break; default: g_assert_not_reached (); } } static void arm_patch_rel (guint8 *code, guint8 *target, int relocation) { arm_patch_full (NULL, code, target, relocation); } void mono_arm_patch (guint8 *code, guint8 *target, int relocation) { arm_patch_rel (code, target, relocation); } void mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target) { guint8 *ip; ip = ji->ip.i + code; switch (ji->type) { case MONO_PATCH_INFO_METHOD_JUMP: /* ji->relocation is not set by the caller */ arm_patch_full (cfg, ip, (guint8*)target, MONO_R_ARM64_B); mono_arch_flush_icache (ip, 8); break; default: arm_patch_full (cfg, ip, (guint8*)target, ji->relocation); break; case MONO_PATCH_INFO_NONE: break; } } void mono_arch_flush_register_windows (void) { } MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code) { return (MonoMethod*)regs [MONO_ARCH_RGCTX_REG]; } MonoVTable* mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code) { return (MonoVTable*)regs [MONO_ARCH_RGCTX_REG]; } GSList* mono_arch_get_cie_program (void) { GSList *l = NULL; mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ARMREG_SP, 0); return l; } host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg) { return ctx->regs [reg]; } host_mgreg_t* mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg) { return &ctx->regs [reg]; } void mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val) { ctx->regs [reg] = val; } /* * mono_arch_set_target: * * Set the target architecture the JIT backend should generate code for, in the form * of a GNU target triplet. Only used in AOT mode. */ void mono_arch_set_target (char *mtriple) { if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) { ios_abi = TRUE; } } static void add_general (CallInfo *cinfo, ArgInfo *ainfo, int size, gboolean sign) { if (cinfo->gr >= PARAM_REGS) { ainfo->storage = ArgOnStack; /* * FIXME: The vararg argument handling code in ves_icall_System_ArgIterator_IntGetNextArg * assumes every argument is allocated to a separate full size stack slot. 
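 * For example, under the non-iOS path below, a 2 byte argument passed on the
 * stack still consumes a full 8 byte, 8-aligned slot (size is forced to 8).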
*/ if (ios_abi && !cinfo->vararg) { /* Assume size == align */ } else { /* Put arguments into 8 byte aligned stack slots */ size = 8; sign = FALSE; } cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, size); ainfo->offset = cinfo->stack_usage; ainfo->slot_size = size; ainfo->sign = sign; cinfo->stack_usage += size; } else { ainfo->storage = ArgInIReg; ainfo->reg = cinfo->gr; cinfo->gr ++; } } static void add_fp (CallInfo *cinfo, ArgInfo *ainfo, gboolean single) { int size = single ? 4 : 8; if (cinfo->fr >= FP_PARAM_REGS) { ainfo->storage = single ? ArgOnStackR4 : ArgOnStackR8; if (ios_abi) { cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, size); ainfo->offset = cinfo->stack_usage; ainfo->slot_size = size; cinfo->stack_usage += size; } else { ainfo->offset = cinfo->stack_usage; ainfo->slot_size = 8; /* Put arguments into 8 byte aligned stack slots */ cinfo->stack_usage += 8; } } else { if (single) ainfo->storage = ArgInFRegR4; else ainfo->storage = ArgInFReg; ainfo->reg = cinfo->fr; cinfo->fr ++; } } static gboolean is_hfa (MonoType *t, int *out_nfields, int *out_esize, int *field_offsets) { MonoClass *klass; gpointer iter; MonoClassField *field; MonoType *ftype, *prev_ftype = NULL; int i, nfields = 0; klass = mono_class_from_mono_type_internal (t); iter = NULL; while ((field = mono_class_get_fields_internal (klass, &iter))) { if (field->type->attrs & FIELD_ATTRIBUTE_STATIC) continue; ftype = mono_field_get_type_internal (field); ftype = mini_get_underlying_type (ftype); if (MONO_TYPE_ISSTRUCT (ftype)) { int nested_nfields, nested_esize; int nested_field_offsets [16]; if (!is_hfa (ftype, &nested_nfields, &nested_esize, nested_field_offsets)) return FALSE; if (nested_esize == 4) ftype = m_class_get_byval_arg (mono_defaults.single_class); else ftype = m_class_get_byval_arg (mono_defaults.double_class); if (prev_ftype && prev_ftype->type != ftype->type) return FALSE; prev_ftype = ftype; for (i = 0; i < nested_nfields; ++i) { if (nfields + i < 4) field_offsets [nfields + i] = field->offset - MONO_ABI_SIZEOF (MonoObject) + nested_field_offsets [i]; } nfields += nested_nfields; } else { if (!(!m_type_is_byref (ftype) && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8))) return FALSE; if (prev_ftype && prev_ftype->type != ftype->type) return FALSE; prev_ftype = ftype; if (nfields < 4) field_offsets [nfields] = field->offset - MONO_ABI_SIZEOF (MonoObject); nfields ++; } } if (nfields == 0 || nfields > 4) return FALSE; *out_nfields = nfields; *out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8; return TRUE; } static void add_valuetype (CallInfo *cinfo, ArgInfo *ainfo, MonoType *t) { int i, size, align_size, nregs, nfields, esize; int field_offsets [16]; guint32 align; size = mini_type_stack_size_full (t, &align, cinfo->pinvoke); align_size = ALIGN_TO (size, 8); nregs = align_size / 8; if (is_hfa (t, &nfields, &esize, field_offsets)) { /* * The struct might include nested float structs aligned at 8, * so need to keep track of the offsets of the individual fields. */ if (cinfo->fr + nfields <= FP_PARAM_REGS) { ainfo->storage = ArgHFA; ainfo->reg = cinfo->fr; ainfo->nregs = nfields; ainfo->size = size; ainfo->esize = esize; for (i = 0; i < nfields; ++i) ainfo->foffsets [i] = field_offsets [i]; cinfo->fr += ainfo->nregs; } else { ainfo->nfregs_to_skip = FP_PARAM_REGS > cinfo->fr ? 
FP_PARAM_REGS - cinfo->fr : 0; cinfo->fr = FP_PARAM_REGS; size = ALIGN_TO (size, 8); ainfo->storage = ArgVtypeOnStack; cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align); ainfo->offset = cinfo->stack_usage; ainfo->size = size; ainfo->hfa = TRUE; ainfo->nregs = nfields; ainfo->esize = esize; cinfo->stack_usage += size; } return; } if (align_size > 16) { ainfo->storage = ArgVtypeByRef; ainfo->size = size; return; } if (cinfo->gr + nregs > PARAM_REGS) { size = ALIGN_TO (size, 8); ainfo->storage = ArgVtypeOnStack; cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align); ainfo->offset = cinfo->stack_usage; ainfo->size = size; cinfo->stack_usage += size; cinfo->gr = PARAM_REGS; } else { ainfo->storage = ArgVtypeInIRegs; ainfo->reg = cinfo->gr; ainfo->nregs = nregs; ainfo->size = size; cinfo->gr += nregs; } } static void add_param (CallInfo *cinfo, ArgInfo *ainfo, MonoType *t) { MonoType *ptype; ptype = mini_get_underlying_type (t); switch (ptype->type) { case MONO_TYPE_I1: add_general (cinfo, ainfo, 1, TRUE); break; case MONO_TYPE_U1: add_general (cinfo, ainfo, 1, FALSE); break; case MONO_TYPE_I2: add_general (cinfo, ainfo, 2, TRUE); break; case MONO_TYPE_U2: add_general (cinfo, ainfo, 2, FALSE); break; #ifdef MONO_ARCH_ILP32 case MONO_TYPE_I: #endif case MONO_TYPE_I4: add_general (cinfo, ainfo, 4, TRUE); break; #ifdef MONO_ARCH_ILP32 case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: #endif case MONO_TYPE_U4: add_general (cinfo, ainfo, 4, FALSE); break; #ifndef MONO_ARCH_ILP32 case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: #endif case MONO_TYPE_U8: case MONO_TYPE_I8: add_general (cinfo, ainfo, 8, FALSE); break; case MONO_TYPE_R8: add_fp (cinfo, ainfo, FALSE); break; case MONO_TYPE_R4: add_fp (cinfo, ainfo, TRUE); break; case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: add_valuetype (cinfo, ainfo, ptype); break; case MONO_TYPE_VOID: ainfo->storage = ArgNone; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ptype)) { add_general (cinfo, ainfo, 8, FALSE); } else if (mini_is_gsharedvt_variable_type (ptype)) { /* * Treat gsharedvt arguments as large vtypes */ ainfo->storage = ArgVtypeByRef; ainfo->gsharedvt = TRUE; } else { add_valuetype (cinfo, ainfo, ptype); } break; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: g_assert (mini_is_gsharedvt_type (ptype)); ainfo->storage = ArgVtypeByRef; ainfo->gsharedvt = TRUE; break; default: g_assert_not_reached (); break; } } /* * get_call_info: * * Obtain information about a call according to the calling convention. 
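 * Roughly, as implemented below: the first PARAM_REGS integer arguments are
 * assigned to general purpose registers and the first FP_PARAM_REGS floating
 * point arguments to FP registers; HFAs are spread across FP registers, vtypes
 * larger than 16 bytes are passed by reference, and everything else overflows
 * onto the stack.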
*/ static CallInfo* get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { CallInfo *cinfo; ArgInfo *ainfo; int n, pstart, pindex; n = sig->hasthis + sig->param_count; if (mp) cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n)); else cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n)); cinfo->nargs = n; cinfo->pinvoke = sig->pinvoke; // Constrain this to OSX only for now #ifdef TARGET_OSX cinfo->vararg = sig->call_convention == MONO_CALL_VARARG; #endif /* Return value */ add_param (cinfo, &cinfo->ret, sig->ret); if (cinfo->ret.storage == ArgVtypeByRef) cinfo->ret.reg = ARMREG_R8; /* Reset state */ cinfo->gr = 0; cinfo->fr = 0; cinfo->stack_usage = 0; /* Parameters */ if (sig->hasthis) add_general (cinfo, cinfo->args + 0, 8, FALSE); pstart = 0; for (pindex = pstart; pindex < sig->param_count; ++pindex) { ainfo = cinfo->args + sig->hasthis + pindex; if ((sig->call_convention == MONO_CALL_VARARG) && (pindex == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ cinfo->gr = PARAM_REGS; cinfo->fr = FP_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_param (cinfo, &cinfo->sig_cookie, mono_get_int_type ()); } add_param (cinfo, ainfo, sig->params [pindex]); if (ainfo->storage == ArgVtypeByRef) { /* Pass the argument address in the next register */ if (cinfo->gr >= PARAM_REGS) { ainfo->storage = ArgVtypeByRefOnStack; cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, 8); ainfo->offset = cinfo->stack_usage; cinfo->stack_usage += 8; } else { ainfo->reg = cinfo->gr; cinfo->gr ++; } } } /* Handle the case where there are no implicit arguments */ if ((sig->call_convention == MONO_CALL_VARARG) && (pindex == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ cinfo->gr = PARAM_REGS; cinfo->fr = FP_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_param (cinfo, &cinfo->sig_cookie, mono_get_int_type ()); } cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT); return cinfo; } static int arg_need_temp (ArgInfo *ainfo) { if (ainfo->storage == ArgHFA && ainfo->esize == 4) return ainfo->size; return 0; } static gpointer arg_get_storage (CallContext *ccontext, ArgInfo *ainfo) { switch (ainfo->storage) { case ArgVtypeInIRegs: case ArgInIReg: return &ccontext->gregs [ainfo->reg]; case ArgInFReg: case ArgInFRegR4: case ArgHFA: return &ccontext->fregs [ainfo->reg]; case ArgOnStack: case ArgOnStackR4: case ArgOnStackR8: case ArgVtypeOnStack: return ccontext->stack + ainfo->offset; case ArgVtypeByRef: return (gpointer) ccontext->gregs [ainfo->reg]; default: g_error ("Arg storage type not yet supported"); } } static void arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest) { g_assert (arg_need_temp (ainfo)); float *dest_float = (float*)dest; for (int k = 0; k < ainfo->nregs; k++) { *dest_float = *(float*)&ccontext->fregs [ainfo->reg + k]; dest_float++; } } static void arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src) { g_assert (arg_need_temp (ainfo)); float *src_float = (float*)src; for (int k = 0; k < ainfo->nregs; k++) { *(float*)&ccontext->fregs [ainfo->reg + k] = *src_float; src_float++; } } /* Set arguments in the ccontext (for i2n entry) */ void mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { const MonoEECallbacks *interp_cb = mini_get_interp_callbacks (); CallInfo *cinfo = get_call_info (NULL, sig); 
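/* Note: arg_need_temp () only requests a temp for single precision HFAs, whose
   float fields have to be repacked from the 8 byte fregs slots; see arg_get_val ()
   and arg_set_val () above. */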
gpointer storage; ArgInfo *ainfo; memset (ccontext, 0, sizeof (CallContext)); ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT); if (ccontext->stack_size) ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size); if (sig->ret->type != MONO_TYPE_VOID) { ainfo = &cinfo->ret; if (ainfo->storage == ArgVtypeByRef) { storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1); ccontext->gregs [cinfo->ret.reg] = (gsize)storage; } } g_assert (!sig->hasthis); for (int i = 0; i < sig->param_count; i++) { ainfo = &cinfo->args [i]; if (ainfo->storage == ArgVtypeByRef) { ccontext->gregs [ainfo->reg] = (host_mgreg_t)interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, i); continue; } int temp_size = arg_need_temp (ainfo); if (temp_size) storage = alloca (temp_size); // FIXME? alloca in a loop else storage = arg_get_storage (ccontext, ainfo); interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage); if (temp_size) arg_set_val (ccontext, ainfo, storage); } g_free (cinfo); } /* Set return value in the ccontext (for n2i return) */ void mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp) { const MonoEECallbacks *interp_cb; CallInfo *cinfo; gpointer storage; ArgInfo *ainfo; if (sig->ret->type == MONO_TYPE_VOID) return; interp_cb = mini_get_interp_callbacks (); cinfo = get_call_info (NULL, sig); ainfo = &cinfo->ret; if (retp) { g_assert (ainfo->storage == ArgVtypeByRef); interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, retp); } else { g_assert (ainfo->storage != ArgVtypeByRef); int temp_size = arg_need_temp (ainfo); if (temp_size) storage = alloca (temp_size); else storage = arg_get_storage (ccontext, ainfo); memset (ccontext, 0, sizeof (CallContext)); // FIXME interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage); if (temp_size) arg_set_val (ccontext, ainfo, storage); } g_free (cinfo); } /* Gets the arguments from ccontext (for n2i entry) */ gpointer mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { const MonoEECallbacks *interp_cb = mini_get_interp_callbacks (); CallInfo *cinfo = get_call_info (NULL, sig); gpointer storage; ArgInfo *ainfo; for (int i = 0; i < sig->param_count + sig->hasthis; i++) { ainfo = &cinfo->args [i]; int temp_size = arg_need_temp (ainfo); if (temp_size) { storage = alloca (temp_size); // FIXME? 
alloca in a loop arg_get_val (ccontext, ainfo, storage); } else { storage = arg_get_storage (ccontext, ainfo); } interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage); } storage = NULL; if (sig->ret->type != MONO_TYPE_VOID) { ainfo = &cinfo->ret; if (ainfo->storage == ArgVtypeByRef) storage = (gpointer) ccontext->gregs [cinfo->ret.reg]; } g_free (cinfo); return storage; } /* Gets the return value from ccontext (for i2n exit) */ void mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { const MonoEECallbacks *interp_cb; CallInfo *cinfo; ArgInfo *ainfo; gpointer storage; if (sig->ret->type == MONO_TYPE_VOID) return; interp_cb = mini_get_interp_callbacks (); cinfo = get_call_info (NULL, sig); ainfo = &cinfo->ret; if (ainfo->storage != ArgVtypeByRef) { int temp_size = arg_need_temp (ainfo); if (temp_size) { storage = alloca (temp_size); arg_get_val (ccontext, ainfo, storage); } else { storage = arg_get_storage (ccontext, ainfo); } interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage); } g_free (cinfo); } typedef struct { MonoMethodSignature *sig; CallInfo *cinfo; MonoType *rtype; MonoType **param_types; int n_fpargs, n_fpret, nullable_area; } ArchDynCallInfo; static gboolean dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig) { int i; // FIXME: Add more cases switch (cinfo->ret.storage) { case ArgNone: case ArgInIReg: case ArgInFReg: case ArgInFRegR4: case ArgVtypeByRef: break; case ArgVtypeInIRegs: if (cinfo->ret.nregs > 2) return FALSE; break; case ArgHFA: break; default: return FALSE; } for (i = 0; i < cinfo->nargs; ++i) { ArgInfo *ainfo = &cinfo->args [i]; switch (ainfo->storage) { case ArgInIReg: case ArgVtypeInIRegs: case ArgInFReg: case ArgInFRegR4: case ArgHFA: case ArgVtypeByRef: case ArgVtypeByRefOnStack: case ArgOnStack: case ArgVtypeOnStack: break; default: return FALSE; } } return TRUE; } MonoDynCallInfo* mono_arch_dyn_call_prepare (MonoMethodSignature *sig) { ArchDynCallInfo *info; CallInfo *cinfo; int i, aindex; cinfo = get_call_info (NULL, sig); if (!dyn_call_supported (cinfo, sig)) { g_free (cinfo); return NULL; } info = g_new0 (ArchDynCallInfo, 1); // FIXME: Preprocess the info to speed up start_dyn_call () info->sig = sig; info->cinfo = cinfo; info->rtype = mini_get_underlying_type (sig->ret); info->param_types = g_new0 (MonoType*, sig->param_count); for (i = 0; i < sig->param_count; ++i) info->param_types [i] = mini_get_underlying_type (sig->params [i]); switch (cinfo->ret.storage) { case ArgInFReg: case ArgInFRegR4: info->n_fpret = 1; break; case ArgHFA: info->n_fpret = cinfo->ret.nregs; break; default: break; } for (aindex = 0; aindex < sig->param_count; aindex++) { MonoType *t = info->param_types [aindex]; if (m_type_is_byref (t)) continue; switch (t->type) { case MONO_TYPE_GENERICINST: if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) { MonoClass *klass = mono_class_from_mono_type_internal (t); int size; /* Nullables need a temporary buffer, it's stored at the end of DynCallArgs.regs after the stack args */ size = mono_class_value_size (klass, NULL); info->nullable_area += size; } break; default: break; } } return (MonoDynCallInfo*)info; } void mono_arch_dyn_call_free (MonoDynCallInfo *info) { ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info; g_free (ainfo->cinfo); g_free (ainfo->param_types); g_free (ainfo); } int mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info) { ArchDynCallInfo *ainfo = 
(ArchDynCallInfo*)info; g_assert (ainfo->cinfo->stack_usage % MONO_ARCH_FRAME_ALIGNMENT == 0); return sizeof (DynCallArgs) + ainfo->cinfo->stack_usage + ainfo->nullable_area; } static double bitcast_r4_to_r8 (float f) { float *p = &f; return *(double*)p; } static float bitcast_r8_to_r4 (double f) { double *p = &f; return *(float*)p; } void mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf) { ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info; DynCallArgs *p = (DynCallArgs*)buf; int aindex, arg_index, greg, i, pindex; MonoMethodSignature *sig = dinfo->sig; CallInfo *cinfo = dinfo->cinfo; int buffer_offset = 0; guint8 *nullable_buffer; p->res = 0; p->ret = ret; p->n_fpargs = dinfo->n_fpargs; p->n_fpret = dinfo->n_fpret; p->n_stackargs = cinfo->stack_usage / sizeof (host_mgreg_t); arg_index = 0; greg = 0; pindex = 0; /* Stored after the stack arguments */ nullable_buffer = (guint8*)&(p->regs [PARAM_REGS + 1 + (cinfo->stack_usage / sizeof (host_mgreg_t))]); if (sig->hasthis) p->regs [greg ++] = (host_mgreg_t)*(args [arg_index ++]); if (cinfo->ret.storage == ArgVtypeByRef) p->regs [ARMREG_R8] = (host_mgreg_t)ret; for (aindex = pindex; aindex < sig->param_count; aindex++) { MonoType *t = dinfo->param_types [aindex]; gpointer *arg = args [arg_index ++]; ArgInfo *ainfo = &cinfo->args [aindex + sig->hasthis]; int slot = -1; if (ainfo->storage == ArgOnStack || ainfo->storage == ArgVtypeOnStack || ainfo->storage == ArgVtypeByRefOnStack) { slot = PARAM_REGS + 1 + (ainfo->offset / sizeof (host_mgreg_t)); } else { slot = ainfo->reg; } if (m_type_is_byref (t)) { p->regs [slot] = (host_mgreg_t)*arg; continue; } if (ios_abi && ainfo->storage == ArgOnStack) { guint8 *stack_arg = (guint8*)&(p->regs [PARAM_REGS + 1]) + ainfo->offset; gboolean handled = TRUE; /* Special case arguments smaller than 1 machine word */ switch (t->type) { case MONO_TYPE_U1: *(guint8*)stack_arg = *(guint8*)arg; break; case MONO_TYPE_I1: *(gint8*)stack_arg = *(gint8*)arg; break; case MONO_TYPE_U2: *(guint16*)stack_arg = *(guint16*)arg; break; case MONO_TYPE_I2: *(gint16*)stack_arg = *(gint16*)arg; break; case MONO_TYPE_I4: *(gint32*)stack_arg = *(gint32*)arg; break; case MONO_TYPE_U4: *(guint32*)stack_arg = *(guint32*)arg; break; default: handled = FALSE; break; } if (handled) continue; } switch (t->type) { case MONO_TYPE_OBJECT: case MONO_TYPE_PTR: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_I8: case MONO_TYPE_U8: p->regs [slot] = (host_mgreg_t)*arg; break; case MONO_TYPE_U1: p->regs [slot] = *(guint8*)arg; break; case MONO_TYPE_I1: p->regs [slot] = *(gint8*)arg; break; case MONO_TYPE_I2: p->regs [slot] = *(gint16*)arg; break; case MONO_TYPE_U2: p->regs [slot] = *(guint16*)arg; break; case MONO_TYPE_I4: p->regs [slot] = *(gint32*)arg; break; case MONO_TYPE_U4: p->regs [slot] = *(guint32*)arg; break; case MONO_TYPE_R4: p->fpregs [ainfo->reg] = bitcast_r4_to_r8 (*(float*)arg); p->n_fpargs ++; break; case MONO_TYPE_R8: p->fpregs [ainfo->reg] = *(double*)arg; p->n_fpargs ++; break; case MONO_TYPE_GENERICINST: if (MONO_TYPE_IS_REFERENCE (t)) { p->regs [slot] = (host_mgreg_t)*arg; break; } else { if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) { MonoClass *klass = mono_class_from_mono_type_internal (t); guint8 *nullable_buf; int size; /* * Use p->buffer as a temporary buffer since the data needs to be available after this call * if the nullable param is passed by ref. 
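 * (The buffer lives at the end of DynCallArgs.regs, past the stack arguments;
 * see the nullable_buffer computation above.)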
*/ size = mono_class_value_size (klass, NULL); nullable_buf = nullable_buffer + buffer_offset; buffer_offset += size; g_assert (buffer_offset <= dinfo->nullable_area); /* The argument pointed to by arg is either a boxed vtype or null */ mono_nullable_init (nullable_buf, (MonoObject*)arg, klass); arg = (gpointer*)nullable_buf; /* Fall through */ } else { /* Fall through */ } } case MONO_TYPE_VALUETYPE: switch (ainfo->storage) { case ArgVtypeInIRegs: for (i = 0; i < ainfo->nregs; ++i) p->regs [slot ++] = ((host_mgreg_t*)arg) [i]; break; case ArgHFA: if (ainfo->esize == 4) { for (i = 0; i < ainfo->nregs; ++i) p->fpregs [ainfo->reg + i] = bitcast_r4_to_r8 (((float*)arg) [ainfo->foffsets [i] / 4]); } else { for (i = 0; i < ainfo->nregs; ++i) p->fpregs [ainfo->reg + i] = ((double*)arg) [ainfo->foffsets [i] / 8]; } p->n_fpargs += ainfo->nregs; break; case ArgVtypeByRef: case ArgVtypeByRefOnStack: p->regs [slot] = (host_mgreg_t)arg; break; case ArgVtypeOnStack: for (i = 0; i < ainfo->size / 8; ++i) p->regs [slot ++] = ((host_mgreg_t*)arg) [i]; break; default: g_assert_not_reached (); break; } break; default: g_assert_not_reached (); } } } void mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf) { ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info; CallInfo *cinfo = ainfo->cinfo; DynCallArgs *args = (DynCallArgs*)buf; MonoType *ptype = ainfo->rtype; guint8 *ret = args->ret; host_mgreg_t res = args->res; host_mgreg_t res2 = args->res2; int i; if (cinfo->ret.storage == ArgVtypeByRef) return; switch (ptype->type) { case MONO_TYPE_VOID: *(gpointer*)ret = NULL; break; case MONO_TYPE_OBJECT: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: *(gpointer*)ret = (gpointer)res; break; case MONO_TYPE_I1: *(gint8*)ret = res; break; case MONO_TYPE_U1: *(guint8*)ret = res; break; case MONO_TYPE_I2: *(gint16*)ret = res; break; case MONO_TYPE_U2: *(guint16*)ret = res; break; case MONO_TYPE_I4: *(gint32*)ret = res; break; case MONO_TYPE_U4: *(guint32*)ret = res; break; case MONO_TYPE_I8: case MONO_TYPE_U8: *(guint64*)ret = res; break; case MONO_TYPE_R4: *(float*)ret = bitcast_r8_to_r4 (args->fpregs [0]); break; case MONO_TYPE_R8: *(double*)ret = args->fpregs [0]; break; case MONO_TYPE_GENERICINST: if (MONO_TYPE_IS_REFERENCE (ptype)) { *(gpointer*)ret = (gpointer)res; break; } else { /* Fall through */ } case MONO_TYPE_VALUETYPE: switch (ainfo->cinfo->ret.storage) { case ArgVtypeInIRegs: *(host_mgreg_t*)ret = res; if (ainfo->cinfo->ret.nregs > 1) ((host_mgreg_t*)ret) [1] = res2; break; case ArgHFA: /* Use the same area for returning fp values */ if (cinfo->ret.esize == 4) { for (i = 0; i < cinfo->ret.nregs; ++i) ((float*)ret) [cinfo->ret.foffsets [i] / 4] = bitcast_r8_to_r4 (args->fpregs [i]); } else { for (i = 0; i < cinfo->ret.nregs; ++i) ((double*)ret) [cinfo->ret.foffsets [i] / 8] = args->fpregs [i]; } break; default: g_assert_not_reached (); break; } break; default: g_assert_not_reached (); } } #if __APPLE__ G_BEGIN_DECLS void sys_icache_invalidate (void *start, size_t len); G_END_DECLS #endif void mono_arch_flush_icache (guint8 *code, gint size) { #ifndef MONO_CROSS_COMPILE #if __APPLE__ sys_icache_invalidate (code, size); #else /* Don't rely on GCC's __clear_cache implementation, as it caches * icache/dcache cache line sizes, which can vary between cores on * big.LITTLE architectures. */ guint64 end = (guint64) (code + size); guint64 addr; /* always go with cacheline size of 4 bytes as this code isn't perf critical * anyway. 
Reading the cache line size from a machine register can be racy * on a big.LITTLE architecture if the cores don't have the same cache line * sizes. */ const size_t icache_line_size = 4; const size_t dcache_line_size = 4; addr = (guint64) code & ~(guint64) (dcache_line_size - 1); for (; addr < end; addr += dcache_line_size) asm volatile("dc civac, %0" : : "r" (addr) : "memory"); asm volatile("dsb ish" : : : "memory"); addr = (guint64) code & ~(guint64) (icache_line_size - 1); for (; addr < end; addr += icache_line_size) asm volatile("ic ivau, %0" : : "r" (addr) : "memory"); asm volatile ("dsb ish" : : : "memory"); asm volatile ("isb" : : : "memory"); #endif #endif } #ifndef DISABLE_JIT gboolean mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode) { NOT_IMPLEMENTED; return FALSE; } GList * mono_arch_get_allocatable_int_vars (MonoCompile *cfg) { GList *vars = NULL; int i; for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); /* unused vars */ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos) continue; if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG)) continue; if (mono_is_regsize_var (ins->inst_vtype)) { g_assert (MONO_VARINFO (cfg, i)->reg == -1); g_assert (i == vmv->idx); vars = g_list_prepend (vars, vmv); } } vars = mono_varlist_sort (cfg, vars, 0); return vars; } GList * mono_arch_get_global_int_regs (MonoCompile *cfg) { GList *regs = NULL; int i; /* r28 is reserved for cfg->arch.args_reg */ /* r27 is reserved for the imt argument */ for (i = ARMREG_R19; i <= ARMREG_R26; ++i) regs = g_list_prepend (regs, GUINT_TO_POINTER (i)); return regs; } guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv) { MonoInst *ins = cfg->varinfo [vmv->idx]; if (ins->opcode == OP_ARG) return 1; else return 2; } void mono_arch_create_vars (MonoCompile *cfg) { MonoMethodSignature *sig; CallInfo *cinfo; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; if (cinfo->ret.storage == ArgVtypeByRef) { cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); cfg->vret_addr->flags |= MONO_INST_VOLATILE; } if (cfg->gen_sdb_seq_points) { MonoInst *ins; if (cfg->compile_aot) { ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.seq_point_info_var = ins; } ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.ss_tramp_var = ins; ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.bp_tramp_var = ins; } if (cfg->method->save_lmf) { cfg->create_lmf_var = TRUE; cfg->lmf_ir = TRUE; } } void mono_arch_allocate_vars (MonoCompile *cfg) { MonoMethodSignature *sig; MonoInst *ins; CallInfo *cinfo; ArgInfo *ainfo; int i, offset, size, align; guint32 locals_stack_size, locals_stack_align; gint32 *offsets; /* * Allocate arguments and locals to either register (OP_REGVAR) or to a stack slot (OP_REGOFFSET). * Compute cfg->stack_offset and update cfg->used_int_regs. */ sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; /* * The ARM64 ABI always uses a frame pointer. 
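 * (There is no frameless path in this backend: cfg->frame_reg is always ARMREG_FP.)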
* The instruction set prefers positive offsets, so fp points to the bottom of the * frame, and stack slots are at positive offsets. * If some arguments are received on the stack, their offsets relative to fp cannot * be computed right now because the stack frame might grow due to spilling * done by the local register allocator. To solve this, we reserve a register * which points to them. * The stack frame looks like this: * args_reg -> <bottom of parent frame> * <locals etc> * fp -> <saved fp+lr> * sp -> <localloc/params area> */ cfg->frame_reg = ARMREG_FP; cfg->flags |= MONO_CFG_HAS_SPILLUP; offset = 0; /* Saved fp+lr */ offset += 16; if (cinfo->stack_usage) { g_assert (!(cfg->used_int_regs & (1 << ARMREG_R28))); cfg->arch.args_reg = ARMREG_R28; cfg->used_int_regs |= 1 << ARMREG_R28; } if (cfg->method->save_lmf) { /* The LMF var is allocated normally */ } else { /* Callee saved regs */ cfg->arch.saved_gregs_offset = offset; for (i = 0; i < 32; ++i) if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) && (cfg->used_int_regs & (1 << i))) offset += 8; } /* Return value */ switch (cinfo->ret.storage) { case ArgNone: break; case ArgInIReg: case ArgInFReg: case ArgInFRegR4: cfg->ret->opcode = OP_REGVAR; cfg->ret->dreg = cinfo->ret.reg; break; case ArgVtypeInIRegs: case ArgHFA: /* Allocate a local to hold the result, the epilog will copy it to the correct place */ cfg->ret->opcode = OP_REGOFFSET; cfg->ret->inst_basereg = cfg->frame_reg; cfg->ret->inst_offset = offset; if (cinfo->ret.storage == ArgHFA) // FIXME: offset += 64; else offset += 16; break; case ArgVtypeByRef: /* This variable will be initialized in the prolog from R8 */ cfg->vret_addr->opcode = OP_REGOFFSET; cfg->vret_addr->inst_basereg = cfg->frame_reg; cfg->vret_addr->inst_offset = offset; offset += 8; if (G_UNLIKELY (cfg->verbose_level > 1)) { printf ("vret_addr ="); mono_print_ins (cfg->vret_addr); } break; default: g_assert_not_reached (); break; } /* Arguments */ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ainfo = cinfo->args + i; ins = cfg->args [i]; if (ins->opcode == OP_REGVAR) continue; ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; switch (ainfo->storage) { case ArgInIReg: case ArgInFReg: case ArgInFRegR4: // FIXME: Use nregs/size /* These will be copied to the stack in the prolog */ ins->inst_offset = offset; offset += 8; break; case ArgOnStack: case ArgOnStackR4: case ArgOnStackR8: case ArgVtypeOnStack: /* These are in the parent frame */ g_assert (cfg->arch.args_reg); ins->inst_basereg = cfg->arch.args_reg; ins->inst_offset = ainfo->offset; break; case ArgVtypeInIRegs: case ArgHFA: ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; /* These arguments are saved to the stack in the prolog */ ins->inst_offset = offset; if (cfg->verbose_level >= 2) printf ("arg %d allocated to %s+0x%0x.\n", i, mono_arch_regname (ins->inst_basereg), (int)ins->inst_offset); if (ainfo->storage == ArgHFA) // FIXME: offset += 64; else offset += 16; break; case ArgVtypeByRefOnStack: { MonoInst *vtaddr; if (ainfo->gsharedvt) { ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->arch.args_reg; ins->inst_offset = ainfo->offset; break; } /* The vtype address is in the parent frame */ g_assert (cfg->arch.args_reg); MONO_INST_NEW (cfg, vtaddr, 0); vtaddr->opcode = OP_REGOFFSET; vtaddr->inst_basereg = cfg->arch.args_reg; vtaddr->inst_offset = ainfo->offset; /* Need an indirection */ ins->opcode = OP_VTARG_ADDR; ins->inst_left = vtaddr; break; } case ArgVtypeByRef: { MonoInst *vtaddr; if (ainfo->gsharedvt) { ins->opcode 
= OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; ins->inst_offset = offset; offset += 8; break; } /* The vtype address is in a register, will be copied to the stack in the prolog */ MONO_INST_NEW (cfg, vtaddr, 0); vtaddr->opcode = OP_REGOFFSET; vtaddr->inst_basereg = cfg->frame_reg; vtaddr->inst_offset = offset; offset += 8; /* Need an indirection */ ins->opcode = OP_VTARG_ADDR; ins->inst_left = vtaddr; break; } default: g_assert_not_reached (); break; } } /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */ // FIXME: Allocate these to registers ins = cfg->arch.seq_point_info_var; if (ins) { size = 8; align = 8; offset += align - 1; offset &= ~(align - 1); ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; ins->inst_offset = offset; offset += size; } ins = cfg->arch.ss_tramp_var; if (ins) { size = 8; align = 8; offset += align - 1; offset &= ~(align - 1); ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; ins->inst_offset = offset; offset += size; } ins = cfg->arch.bp_tramp_var; if (ins) { size = 8; align = 8; offset += align - 1; offset &= ~(align - 1); ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; ins->inst_offset = offset; offset += size; } /* Locals */ offsets = mono_allocate_stack_slots (cfg, FALSE, &locals_stack_size, &locals_stack_align); if (locals_stack_align) offset = ALIGN_TO (offset, locals_stack_align); for (i = cfg->locals_start; i < cfg->num_varinfo; i++) { if (offsets [i] != -1) { ins = cfg->varinfo [i]; ins->opcode = OP_REGOFFSET; ins->inst_basereg = cfg->frame_reg; ins->inst_offset = offset + offsets [i]; //printf ("allocated local %d to ", i); mono_print_tree_nl (ins); } } offset += locals_stack_size; offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT); cfg->stack_offset = offset; } #ifdef ENABLE_LLVM LLVMCallInfo* mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig) { int i, n; CallInfo *cinfo; ArgInfo *ainfo; LLVMCallInfo *linfo; n = sig->param_count + sig->hasthis; cinfo = get_call_info (cfg->mempool, sig); linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n)); switch (cinfo->ret.storage) { case ArgInIReg: case ArgInFReg: case ArgInFRegR4: linfo->ret.storage = LLVMArgNormal; break; case ArgNone: linfo->ret.storage = LLVMArgNone; break; case ArgVtypeByRef: linfo->ret.storage = LLVMArgVtypeByRef; break; // // FIXME: This doesn't work yet since the llvm backend represents these types as an i8 // array which is returned in int regs // case ArgHFA: linfo->ret.storage = LLVMArgFpStruct; linfo->ret.nslots = cinfo->ret.nregs; linfo->ret.esize = cinfo->ret.esize; break; case ArgVtypeInIRegs: /* LLVM models this by returning an int */ linfo->ret.storage = LLVMArgVtypeAsScalar; linfo->ret.nslots = cinfo->ret.nregs; linfo->ret.esize = cinfo->ret.esize; break; default: g_assert_not_reached (); break; } for (i = 0; i < n; ++i) { LLVMArgInfo *lainfo = &linfo->args [i]; ainfo = cinfo->args + i; lainfo->storage = LLVMArgNone; switch (ainfo->storage) { case ArgInIReg: case ArgInFReg: case ArgInFRegR4: case ArgOnStack: case ArgOnStackR4: case ArgOnStackR8: lainfo->storage = LLVMArgNormal; break; case ArgVtypeByRef: case ArgVtypeByRefOnStack: lainfo->storage = LLVMArgVtypeByRef; break; case ArgHFA: { int j; lainfo->storage = LLVMArgAsFpArgs; lainfo->nslots = ainfo->nregs; lainfo->esize = ainfo->esize; for (j = 0; j < ainfo->nregs; ++j) lainfo->pair_storage [j] = LLVMArgInFPReg; break; } case ArgVtypeInIRegs: lainfo->storage = LLVMArgAsIArgs; 
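/* Modeled on the LLVM side as an array of nslots integer sized values */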
lainfo->nslots = ainfo->nregs; break; case ArgVtypeOnStack: if (ainfo->hfa) { int j; /* Same as above */ lainfo->storage = LLVMArgAsFpArgs; lainfo->nslots = ainfo->nregs; lainfo->esize = ainfo->esize; lainfo->ndummy_fpargs = ainfo->nfregs_to_skip; for (j = 0; j < ainfo->nregs; ++j) lainfo->pair_storage [j] = LLVMArgInFPReg; } else { lainfo->storage = LLVMArgAsIArgs; lainfo->nslots = ainfo->size / 8; } break; default: g_assert_not_reached (); break; } } return linfo; } #endif static void add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *arg) { MonoInst *ins; switch (storage) { case ArgInIReg: MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg_copy (cfg, arg->dreg); ins->sreg1 = arg->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE); break; case ArgInFReg: MONO_INST_NEW (cfg, ins, OP_FMOVE); ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = arg->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE); break; case ArgInFRegR4: if (COMPILE_LLVM (cfg)) MONO_INST_NEW (cfg, ins, OP_FMOVE); else if (cfg->r4fp) MONO_INST_NEW (cfg, ins, OP_RMOVE); else MONO_INST_NEW (cfg, ins, OP_ARM_SETFREG_R4); ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = arg->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE); break; default: g_assert_not_reached (); break; } } static void emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo) { MonoMethodSignature *tmp_sig; int sig_reg; if (MONO_IS_TAILCALL_OPCODE (call)) NOT_IMPLEMENTED; g_assert (cinfo->sig_cookie.storage == ArgOnStack); /* * mono_ArgIterator_Setup assumes the signature cookie is * passed first and all the arguments which were before it are * passed on the stack after the signature. So compensate by * passing a different signature. */ tmp_sig = mono_metadata_signature_dup (call->signature); tmp_sig->param_count -= call->signature->sentinelpos; tmp_sig->sentinelpos = 0; memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*)); sig_reg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg); } void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call) { MonoMethodSignature *sig; MonoInst *arg, *vtarg; CallInfo *cinfo; ArgInfo *ainfo; int i; sig = call->signature; cinfo = get_call_info (cfg->mempool, sig); switch (cinfo->ret.storage) { case ArgVtypeInIRegs: case ArgHFA: if (MONO_IS_TAILCALL_OPCODE (call)) break; /* * The vtype is returned in registers, save the return area address in a local, and save the vtype into * the location pointed to by it after call in emit_move_return_value (). 
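 * (vret_addr_loc is marked MONO_INST_VOLATILE below so it cannot be register
 * allocated or optimized away between the call and the copy back.)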
*/ if (!cfg->arch.vret_addr_loc) { cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); /* Prevent it from being register allocated or optimized away */ cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE; } MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg); break; case ArgVtypeByRef: /* Pass the vtype return address in R8 */ g_assert (!MONO_IS_TAILCALL_OPCODE (call) || call->vret_var == cfg->vret_addr); MONO_INST_NEW (cfg, vtarg, OP_MOVE); vtarg->sreg1 = call->vret_var->dreg; vtarg->dreg = mono_alloc_preg (cfg); MONO_ADD_INS (cfg->cbb, vtarg); mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE); break; default: break; } for (i = 0; i < cinfo->nargs; ++i) { ainfo = cinfo->args + i; arg = call->args [i]; if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Emit the signature cookie just before the implicit arguments */ emit_sig_cookie (cfg, call, cinfo); } switch (ainfo->storage) { case ArgInIReg: case ArgInFReg: case ArgInFRegR4: add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, arg); break; case ArgOnStack: switch (ainfo->slot_size) { case 8: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg); break; case 4: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg); break; case 2: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg); break; case 1: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg); break; default: g_assert_not_reached (); break; } break; case ArgOnStackR8: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg); break; case ArgOnStackR4: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg); break; case ArgVtypeInIRegs: case ArgVtypeByRef: case ArgVtypeByRefOnStack: case ArgVtypeOnStack: case ArgHFA: { MonoInst *ins; guint32 align; guint32 size; size = mono_class_value_size (arg->klass, &align); MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->sreg1 = arg->dreg; ins->klass = arg->klass; ins->backend.size = size; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); break; } default: g_assert_not_reached (); break; } } /* Handle the case where there are no implicit arguments */ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (cinfo->nargs == sig->sentinelpos)) emit_sig_cookie (cfg, call, cinfo); call->call_info = cinfo; call->stack_usage = cinfo->stack_usage; } void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src) { MonoCallInst *call = (MonoCallInst*)ins->inst_p0; ArgInfo *ainfo = (ArgInfo*)ins->inst_p1; MonoInst *load; int i; if (ins->backend.size == 0 && !ainfo->gsharedvt) return; switch (ainfo->storage) { case ArgVtypeInIRegs: for (i = 0; i < ainfo->nregs; ++i) { // FIXME: Smaller sizes MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE); load->dreg = mono_alloc_ireg (cfg); load->inst_basereg = src->dreg; load->inst_offset = i * sizeof (target_mgreg_t); MONO_ADD_INS (cfg->cbb, load); add_outarg_reg (cfg, call, ArgInIReg, ainfo->reg + i, load); } break; case ArgHFA: for (i = 0; i < ainfo->nregs; ++i) { if (ainfo->esize == 4) MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE); else MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE); load->dreg = mono_alloc_freg (cfg); 
load->inst_basereg = src->dreg; load->inst_offset = ainfo->foffsets [i]; MONO_ADD_INS (cfg->cbb, load); add_outarg_reg (cfg, call, ainfo->esize == 4 ? ArgInFRegR4 : ArgInFReg, ainfo->reg + i, load); } break; case ArgVtypeByRef: case ArgVtypeByRefOnStack: { MonoInst *vtaddr, *load, *arg; /* Pass the vtype address in a reg/on the stack */ if (ainfo->gsharedvt) { load = src; } else { /* Make a copy of the argument */ vtaddr = mono_compile_create_var (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL); MONO_INST_NEW (cfg, load, OP_LDADDR); load->inst_p0 = vtaddr; vtaddr->flags |= MONO_INST_INDIRECT; load->type = STACK_MP; load->klass = vtaddr->klass; load->dreg = mono_alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, load); mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, ainfo->size, 8); } if (ainfo->storage == ArgVtypeByRef) { MONO_INST_NEW (cfg, arg, OP_MOVE); arg->dreg = mono_alloc_preg (cfg); arg->sreg1 = load->dreg; MONO_ADD_INS (cfg->cbb, arg); add_outarg_reg (cfg, call, ArgInIReg, ainfo->reg, arg); } else { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, load->dreg); } break; } case ArgVtypeOnStack: for (i = 0; i < ainfo->size / 8; ++i) { MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE); load->dreg = mono_alloc_ireg (cfg); load->inst_basereg = src->dreg; load->inst_offset = i * 8; MONO_ADD_INS (cfg->cbb, load); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset + (i * 8), load->dreg); } break; default: g_assert_not_reached (); break; } } void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val) { MonoMethodSignature *sig; CallInfo *cinfo; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; switch (cinfo->ret.storage) { case ArgNone: break; case ArgInIReg: MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg); break; case ArgInFReg: MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg); break; case ArgInFRegR4: if (COMPILE_LLVM (cfg)) MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg); else if (cfg->r4fp) MONO_EMIT_NEW_UNALU (cfg, OP_RMOVE, cfg->ret->dreg, val->dreg); else MONO_EMIT_NEW_UNALU (cfg, OP_ARM_SETFREG_R4, cfg->ret->dreg, val->dreg); break; default: g_assert_not_reached (); break; } } #ifndef DISABLE_JIT gboolean mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_) { g_assert (caller_sig); g_assert (callee_sig); CallInfo *caller_info = get_call_info (NULL, caller_sig); CallInfo *callee_info = get_call_info (NULL, callee_sig); gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage) && IS_SUPPORTED_TAILCALL (caller_info->ret.storage == callee_info->ret.storage); // FIXME Limit stack_usage to 1G. emit_ldrx / strx has 32bit limits. 
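// The two checks below conservatively reject frames of 1 GB or more on either side.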
res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (1 << 30)); res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (1 << 30)); // valuetype parameters are the address of a local const ArgInfo *ainfo; ainfo = callee_info->args + callee_sig->hasthis; for (int i = 0; res && i < callee_sig->param_count; ++i) { res = IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgVtypeByRef) && IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgVtypeByRefOnStack); } g_free (caller_info); g_free (callee_info); return res; } #endif gboolean mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm) { return (imm >= -((gint64)1<<31) && imm <= (((gint64)1<<31)-1)); } void mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb) { //NOT_IMPLEMENTED; } void mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb) { //NOT_IMPLEMENTED; } #define ADD_NEW_INS(cfg,dest,op) do { \ MONO_INST_NEW ((cfg), (dest), (op)); \ mono_bblock_insert_before_ins (bb, ins, (dest)); \ } while (0) void mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *temp, *last_ins = NULL; MONO_BB_FOR_EACH_INS (bb, ins) { switch (ins->opcode) { case OP_SBB: case OP_ISBB: case OP_SUBCC: case OP_ISUBCC: if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC)) /* ARM sets the C flag to 1 if there was _no_ overflow */ ins->next->opcode = OP_COND_EXC_NC; break; case OP_IDIV_IMM: case OP_IREM_IMM: case OP_IDIV_UN_IMM: case OP_IREM_UN_IMM: case OP_LREM_IMM: mono_decompose_op_imm (cfg, bb, ins); break; case OP_LOCALLOC_IMM: if (ins->inst_imm > 32) { ADD_NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg1 = temp->dreg; ins->opcode = mono_op_imm_to_op (ins->opcode); } break; case OP_ICOMPARE_IMM: if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_IBEQ) { ins->next->opcode = OP_ARM64_CBZW; ins->next->sreg1 = ins->sreg1; NULLIFY_INS (ins); } else if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_IBNE_UN) { ins->next->opcode = OP_ARM64_CBNZW; ins->next->sreg1 = ins->sreg1; NULLIFY_INS (ins); } break; case OP_LCOMPARE_IMM: case OP_COMPARE_IMM: if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_LBEQ) { ins->next->opcode = OP_ARM64_CBZX; ins->next->sreg1 = ins->sreg1; NULLIFY_INS (ins); } else if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_LBNE_UN) { ins->next->opcode = OP_ARM64_CBNZX; ins->next->sreg1 = ins->sreg1; NULLIFY_INS (ins); } break; case OP_FCOMPARE: case OP_RCOMPARE: { gboolean swap = FALSE; int reg; if (!ins->next) { /* Optimized away */ NULLIFY_INS (ins); break; } /* * FP compares with unordered operands set the flags * to NZCV=0011, which matches some non-unordered compares * as well, like LE, so have to swap the operands. 
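 * For example, 'a < b' (OP_FBLT) is rewritten as 'b > a' (OP_FBGT with the
 * source registers swapped): unordered operands yield NZCV=0011, which
 * satisfies LT (N != V) but not GT (Z == 0 && N == V), so the unordered
 * case correctly fails after the swap.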
*/ switch (ins->next->opcode) { case OP_FBLT: ins->next->opcode = OP_FBGT; swap = TRUE; break; case OP_FBLE: ins->next->opcode = OP_FBGE; swap = TRUE; break; case OP_RBLT: ins->next->opcode = OP_RBGT; swap = TRUE; break; case OP_RBLE: ins->next->opcode = OP_RBGE; swap = TRUE; break; default: break; } if (swap) { reg = ins->sreg1; ins->sreg1 = ins->sreg2; ins->sreg2 = reg; } break; } default: break; } last_ins = ins; } bb->last_ins = last_ins; bb->max_vreg = cfg->next_vreg; } void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins) { } static int opcode_to_armcond (int opcode) { switch (opcode) { case OP_IBEQ: case OP_LBEQ: case OP_FBEQ: case OP_CEQ: case OP_ICEQ: case OP_LCEQ: case OP_FCEQ: case OP_RCEQ: case OP_COND_EXC_IEQ: case OP_COND_EXC_EQ: return ARMCOND_EQ; case OP_IBGE: case OP_LBGE: case OP_FBGE: case OP_ICGE: case OP_FCGE: case OP_RCGE: return ARMCOND_GE; case OP_IBGT: case OP_LBGT: case OP_FBGT: case OP_CGT: case OP_ICGT: case OP_LCGT: case OP_FCGT: case OP_RCGT: case OP_COND_EXC_IGT: case OP_COND_EXC_GT: return ARMCOND_GT; case OP_IBLE: case OP_LBLE: case OP_FBLE: case OP_ICLE: case OP_FCLE: case OP_RCLE: return ARMCOND_LE; case OP_IBLT: case OP_LBLT: case OP_FBLT: case OP_CLT: case OP_ICLT: case OP_LCLT: case OP_COND_EXC_ILT: case OP_COND_EXC_LT: return ARMCOND_LT; case OP_IBNE_UN: case OP_LBNE_UN: case OP_FBNE_UN: case OP_ICNEQ: case OP_FCNEQ: case OP_RCNEQ: case OP_COND_EXC_INE_UN: case OP_COND_EXC_NE_UN: return ARMCOND_NE; case OP_IBGE_UN: case OP_LBGE_UN: case OP_FBGE_UN: case OP_ICGE_UN: case OP_COND_EXC_IGE_UN: case OP_COND_EXC_GE_UN: return ARMCOND_HS; case OP_IBGT_UN: case OP_LBGT_UN: case OP_FBGT_UN: case OP_CGT_UN: case OP_ICGT_UN: case OP_LCGT_UN: case OP_FCGT_UN: case OP_RCGT_UN: case OP_COND_EXC_IGT_UN: case OP_COND_EXC_GT_UN: return ARMCOND_HI; case OP_IBLE_UN: case OP_LBLE_UN: case OP_FBLE_UN: case OP_ICLE_UN: case OP_COND_EXC_ILE_UN: case OP_COND_EXC_LE_UN: return ARMCOND_LS; case OP_IBLT_UN: case OP_LBLT_UN: case OP_FBLT_UN: case OP_CLT_UN: case OP_ICLT_UN: case OP_LCLT_UN: case OP_COND_EXC_ILT_UN: case OP_COND_EXC_LT_UN: return ARMCOND_LO; /* * FCMP sets the NZCV condition bits as follows: * eq = 0110 * < = 1000 * > = 0010 * unordered = 0011 * ARMCOND_LT is N!=V, so it matches unordered too, so * fclt and fclt_un need to be special cased. 
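 * Concretely, unordered gives N=0, V=1, so N != V and ARMCOND_LT would wrongly
 * report 'less than'; ARMCOND_MI (N == 1) matches only the ordered '<' result
 * (1000), which is what fclt needs.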
*/ case OP_FCLT: case OP_RCLT: /* N==1 */ return ARMCOND_MI; case OP_FCLT_UN: case OP_RCLT_UN: return ARMCOND_LT; case OP_COND_EXC_C: case OP_COND_EXC_IC: return ARMCOND_CS; case OP_COND_EXC_OV: case OP_COND_EXC_IOV: return ARMCOND_VS; case OP_COND_EXC_NC: case OP_COND_EXC_INC: return ARMCOND_CC; case OP_COND_EXC_NO: case OP_COND_EXC_INO: return ARMCOND_VC; default: printf ("%s\n", mono_inst_name (opcode)); g_assert_not_reached (); return -1; } } /* This clobbers LR */ static WARN_UNUSED_RESULT guint8* emit_cond_exc (MonoCompile *cfg, guint8 *code, int opcode, const char *exc_name) { int cond; cond = opcode_to_armcond (opcode); /* Capture PC */ arm_adrx (code, ARMREG_IP1, code); mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, exc_name, MONO_R_ARM64_BCC); arm_bcc (code, cond, 0); return code; } static guint8* emit_move_return_value (MonoCompile *cfg, guint8 * code, MonoInst *ins) { CallInfo *cinfo; MonoCallInst *call; call = (MonoCallInst*)ins; cinfo = call->call_info; g_assert (cinfo); switch (cinfo->ret.storage) { case ArgNone: break; case ArgInIReg: /* LLVM compiled code might only set the bottom bits */ if (call->signature && mini_get_underlying_type (call->signature->ret)->type == MONO_TYPE_I4) arm_sxtwx (code, call->inst.dreg, cinfo->ret.reg); else if (call->inst.dreg != cinfo->ret.reg) arm_movx (code, call->inst.dreg, cinfo->ret.reg); break; case ArgInFReg: if (call->inst.dreg != cinfo->ret.reg) arm_fmovd (code, call->inst.dreg, cinfo->ret.reg); break; case ArgInFRegR4: if (cfg->r4fp) arm_fmovs (code, call->inst.dreg, cinfo->ret.reg); else arm_fcvt_sd (code, call->inst.dreg, cinfo->ret.reg); break; case ArgVtypeInIRegs: { MonoInst *loc = cfg->arch.vret_addr_loc; int i; /* Load the destination address */ g_assert (loc && loc->opcode == OP_REGOFFSET); code = emit_ldrx (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset); for (i = 0; i < cinfo->ret.nregs; ++i) arm_strx (code, cinfo->ret.reg + i, ARMREG_LR, i * 8); break; } case ArgHFA: { MonoInst *loc = cfg->arch.vret_addr_loc; int i; /* Load the destination address */ g_assert (loc && loc->opcode == OP_REGOFFSET); code = emit_ldrx (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset); for (i = 0; i < cinfo->ret.nregs; ++i) { if (cinfo->ret.esize == 4) arm_strfpw (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.foffsets [i]); else arm_strfpx (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.foffsets [i]); } break; } case ArgVtypeByRef: break; default: g_assert_not_reached (); break; } return code; } /* * emit_branch_island: * * Emit a branch island for the conditional branches from cfg->native_code + start_offset to code. 
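 * B.cond and CBZ/CBNZ have only a +/-1MB displacement range on arm64, so in very
 * large basic blocks each conditional branch is redirected to a nearby
 * unconditional branch (the island), whose reach is far larger.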
*/ static guint8* emit_branch_island (MonoCompile *cfg, guint8 *code, int start_offset) { MonoJumpInfo *ji; /* Iterate over the patch infos added so far by this bb */ int island_size = 0; for (ji = cfg->patch_info; ji; ji = ji->next) { if (ji->ip.i < start_offset) /* The patch infos are in reverse order, so this means the end */ break; if (ji->relocation == MONO_R_ARM64_BCC || ji->relocation == MONO_R_ARM64_CBZ) island_size += 4; } if (island_size) { code = realloc_code (cfg, island_size); /* Branch over the island */ arm_b (code, code + 4 + island_size); for (ji = cfg->patch_info; ji; ji = ji->next) { if (ji->ip.i < start_offset) break; if (ji->relocation == MONO_R_ARM64_BCC || ji->relocation == MONO_R_ARM64_CBZ) { /* Rewrite the cond branch so it branches to an unconditional branch in the branch island */ arm_patch_rel (cfg->native_code + ji->ip.i, code, ji->relocation); /* Rewrite the patch so it points to the unconditional branch */ ji->ip.i = code - cfg->native_code; ji->relocation = MONO_R_ARM64_B; arm_b (code, code); } } set_code_cursor (cfg, code); } return code; } void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins; MonoCallInst *call; guint8 *code = cfg->native_code + cfg->code_len; int start_offset, max_len, dreg, sreg1, sreg2; target_mgreg_t imm; if (cfg->verbose_level > 2) g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset); start_offset = code - cfg->native_code; g_assert (start_offset <= cfg->code_size); MONO_BB_FOR_EACH_INS (bb, ins) { guint offset = code - cfg->native_code; set_code_cursor (cfg, code); max_len = ins_get_size (ins->opcode); code = realloc_code (cfg, max_len); if (G_UNLIKELY (cfg->arch.cond_branch_islands && offset - start_offset > 4 * 0x1ffff)) { /* Emit a branch island for large basic blocks */ code = emit_branch_island (cfg, code, start_offset); offset = code - cfg->native_code; start_offset = offset; } mono_debug_record_line_number (cfg, ins, offset); dreg = ins->dreg; sreg1 = ins->sreg1; sreg2 = ins->sreg2; imm = ins->inst_imm; switch (ins->opcode) { case OP_ICONST: code = emit_imm (code, dreg, ins->inst_c0); break; case OP_I8CONST: code = emit_imm64 (code, dreg, ins->inst_c0); break; case OP_MOVE: if (dreg != sreg1) arm_movx (code, dreg, sreg1); break; case OP_NOP: case OP_RELAXED_NOP: break; case OP_JUMP_TABLE: mono_add_patch_info_rel (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0, MONO_R_ARM64_IMM); code = emit_imm64_template (code, dreg); break; case OP_BREAK: /* * gdb does not like encountering the hw breakpoint ins in the debugged code. * So instead of emitting a trap, we emit a call a C function and place a * breakpoint there. 
*/ code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break)); break; case OP_LOCALLOC: { guint8 *buf [16]; arm_addx_imm (code, ARMREG_IP0, sreg1, (MONO_ARCH_FRAME_ALIGNMENT - 1)); // FIXME: andx_imm doesn't work yet code = emit_imm (code, ARMREG_IP1, -MONO_ARCH_FRAME_ALIGNMENT); arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1); //arm_andx_imm (code, ARMREG_IP0, sreg1, - MONO_ARCH_FRAME_ALIGNMENT); arm_movspx (code, ARMREG_IP1, ARMREG_SP); arm_subx (code, ARMREG_IP1, ARMREG_IP1, ARMREG_IP0); arm_movspx (code, ARMREG_SP, ARMREG_IP1); /* Init */ /* ip1 = pointer, ip0 = end */ arm_addx (code, ARMREG_IP0, ARMREG_IP1, ARMREG_IP0); buf [0] = code; arm_cmpx (code, ARMREG_IP1, ARMREG_IP0); buf [1] = code; arm_bcc (code, ARMCOND_EQ, 0); arm_stpx (code, ARMREG_RZR, ARMREG_RZR, ARMREG_IP1, 0); arm_addx_imm (code, ARMREG_IP1, ARMREG_IP1, 16); arm_b (code, buf [0]); arm_patch_rel (buf [1], code, MONO_R_ARM64_BCC); arm_movspx (code, dreg, ARMREG_SP); if (cfg->param_area) code = emit_subx_sp_imm (code, cfg->param_area); break; } case OP_LOCALLOC_IMM: { int imm, offset; imm = ALIGN_TO (ins->inst_imm, MONO_ARCH_FRAME_ALIGNMENT); g_assert (arm_is_arith_imm (imm)); arm_subx_imm (code, ARMREG_SP, ARMREG_SP, imm); /* Init */ g_assert (MONO_ARCH_FRAME_ALIGNMENT == 16); offset = 0; while (offset < imm) { arm_stpx (code, ARMREG_RZR, ARMREG_RZR, ARMREG_SP, offset); offset += 16; } arm_movspx (code, dreg, ARMREG_SP); if (cfg->param_area) code = emit_subx_sp_imm (code, cfg->param_area); break; } case OP_AOTCONST: code = emit_aotconst (cfg, code, dreg, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0); break; case OP_OBJC_GET_SELECTOR: mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0); /* See arch_emit_objc_selector_ref () in aot-compiler.c */ arm_ldrx_lit (code, ins->dreg, 0); arm_nop (code); arm_nop (code); break; case OP_SEQ_POINT: { MonoInst *info_var = cfg->arch.seq_point_info_var; /* * For AOT, we use one got slot per method, which will point to a * SeqPointInfo structure, containing all the information required * by the code below. 
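			 *
			 * Rough shape of the lookup performed below (editor's sketch):
			 *
			 *   info  = seq_point_info_var;           // filled from the GOT slot
			 *   tramp = info->bp_addrs [offset / 4];  // one slot per 4-byte insn
			 *   if (tramp)
			 *       tramp ();                         // breakpoint trampoline
			 *
			 * so enabling a breakpoint at a native offset only requires writing
			 * the trampoline address into the matching bp_addrs slot.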
			 */
			if (cfg->compile_aot) {
				g_assert (info_var);
				g_assert (info_var->opcode == OP_REGOFFSET);
			}

			if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
				MonoInst *var = cfg->arch.ss_tramp_var;

				g_assert (var);
				g_assert (var->opcode == OP_REGOFFSET);
				/* Load ss_tramp_var */
				/* This is equal to &ss_trampoline */
				arm_ldrx (code, ARMREG_IP1, var->inst_basereg, var->inst_offset);
				/* Load the trampoline address */
				arm_ldrx (code, ARMREG_IP1, ARMREG_IP1, 0);
				/* Call it if it is non-null */
				arm_cbzx (code, ARMREG_IP1, code + 8);
				code = mono_arm_emit_blrx (code, ARMREG_IP1);
			}

			mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);

			if (cfg->compile_aot) {
				const guint32 offset = code - cfg->native_code;
				guint32 val;

				arm_ldrx (code, ARMREG_IP1, info_var->inst_basereg, info_var->inst_offset);
				/* Add the offset */
				val = ((offset / 4) * sizeof (target_mgreg_t)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
				/* Load the info->bp_addrs [offset], which is either 0 or the address of the bp trampoline */
				code = emit_ldrx (code, ARMREG_IP1, ARMREG_IP1, val);
				/* Skip the load if it's 0 */
				arm_cbzx (code, ARMREG_IP1, code + 8);
				/* Call the breakpoint trampoline */
				code = mono_arm_emit_blrx (code, ARMREG_IP1);
			} else {
				MonoInst *var = cfg->arch.bp_tramp_var;

				g_assert (var);
				g_assert (var->opcode == OP_REGOFFSET);
				/* Load the address of the bp trampoline into IP0 */
				arm_ldrx (code, ARMREG_IP0, var->inst_basereg, var->inst_offset);
				/*
				 * A placeholder for a possible breakpoint inserted by
				 * mono_arch_set_breakpoint ().
				 */
				arm_nop (code);
			}
			break;
		}

			/* BRANCH */
		case OP_BR:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb, MONO_R_ARM64_B);
			arm_b (code, code);
			break;
		case OP_BR_REG:
			arm_brx (code, sreg1);
			break;
		case OP_IBEQ:
		case OP_IBGE:
		case OP_IBGT:
		case OP_IBLE:
		case OP_IBLT:
		case OP_IBNE_UN:
		case OP_IBGE_UN:
		case OP_IBGT_UN:
		case OP_IBLE_UN:
		case OP_IBLT_UN:
		case OP_LBEQ:
		case OP_LBGE:
		case OP_LBGT:
		case OP_LBLE:
		case OP_LBLT:
		case OP_LBNE_UN:
		case OP_LBGE_UN:
		case OP_LBGT_UN:
		case OP_LBLE_UN:
		case OP_LBLT_UN:
		case OP_FBEQ:
		case OP_FBNE_UN:
		case OP_FBLT:
		case OP_FBGT:
		case OP_FBGT_UN:
		case OP_FBLE:
		case OP_FBGE:
		case OP_FBGE_UN: {
			int cond;

			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
			cond = opcode_to_armcond (ins->opcode);
			arm_bcc (code, cond, 0);
			break;
		}
		case OP_FBLT_UN:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
			/* For fp compares, ARMCOND_LT is lt or unordered */
			arm_bcc (code, ARMCOND_LT, 0);
			break;
		case OP_FBLE_UN:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
			arm_bcc (code, ARMCOND_EQ, 0);
			mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
			/* For fp compares, ARMCOND_LT is lt or unordered */
			arm_bcc (code, ARMCOND_LT, 0);
			break;
		case OP_ARM64_CBZW:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
			arm_cbzw (code, sreg1, 0);
			break;
		case OP_ARM64_CBZX:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
			arm_cbzx (code, sreg1, 0);
			break;
		case OP_ARM64_CBNZW:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
			arm_cbnzw (code, sreg1, 0);
			break;
		case OP_ARM64_CBNZX:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
			arm_cbnzx (code, sreg1, 0);
			break;

			/* ALU */
		case OP_IADD:
			arm_addw (code, dreg, sreg1, sreg2);
			break;
		case OP_LADD:
			arm_addx (code, dreg, sreg1, sreg2);
			break;
		case OP_ISUB:
			arm_subw (code, dreg, sreg1, sreg2);
			break;
		case OP_LSUB:
			arm_subx (code, dreg, sreg1, sreg2);
			break;
		case OP_IAND:
			arm_andw (code, dreg, sreg1, sreg2);
			break;
		case OP_LAND:
			arm_andx (code, dreg, sreg1, sreg2);
			break;
		case OP_IOR:
			arm_orrw (code, dreg, sreg1, sreg2);
			break;
		case OP_LOR:
			arm_orrx (code, dreg, sreg1, sreg2);
			break;
		case OP_IXOR:
			arm_eorw (code, dreg, sreg1, sreg2);
			break;
		case OP_LXOR:
			arm_eorx (code, dreg, sreg1, sreg2);
			break;
		case OP_INEG:
			arm_negw (code, dreg, sreg1);
			break;
		case OP_LNEG:
			arm_negx (code, dreg, sreg1);
			break;
		case OP_INOT:
			arm_mvnw (code, dreg, sreg1);
			break;
		case OP_LNOT:
			arm_mvnx (code, dreg, sreg1);
			break;
		case OP_IADDCC:
			arm_addsw (code, dreg, sreg1, sreg2);
			break;
		case OP_ADDCC:
		case OP_LADDCC:
			arm_addsx (code, dreg, sreg1, sreg2);
			break;
		case OP_ISUBCC:
			arm_subsw (code, dreg, sreg1, sreg2);
			break;
		case OP_LSUBCC:
		case OP_SUBCC:
			arm_subsx (code, dreg, sreg1, sreg2);
			break;
		case OP_ICOMPARE:
			arm_cmpw (code, sreg1, sreg2);
			break;
		case OP_COMPARE:
		case OP_LCOMPARE:
			arm_cmpx (code, sreg1, sreg2);
			break;
		case OP_IADD_IMM:
			code = emit_addw_imm (code, dreg, sreg1, imm);
			break;
		case OP_LADD_IMM:
		case OP_ADD_IMM:
			code = emit_addx_imm (code, dreg, sreg1, imm);
			break;
		case OP_ISUB_IMM:
			code = emit_subw_imm (code, dreg, sreg1, imm);
			break;
		case OP_LSUB_IMM:
			code = emit_subx_imm (code, dreg, sreg1, imm);
			break;
		case OP_IAND_IMM:
			code = emit_andw_imm (code, dreg, sreg1, imm);
			break;
		case OP_LAND_IMM:
		case OP_AND_IMM:
			code = emit_andx_imm (code, dreg, sreg1, imm);
			break;
		case OP_IOR_IMM:
			code = emit_orrw_imm (code, dreg, sreg1, imm);
			break;
		case OP_LOR_IMM:
			code = emit_orrx_imm (code, dreg, sreg1, imm);
			break;
		case OP_IXOR_IMM:
			code = emit_eorw_imm (code, dreg, sreg1, imm);
			break;
		case OP_LXOR_IMM:
			code = emit_eorx_imm (code, dreg, sreg1, imm);
			break;
		case OP_ICOMPARE_IMM:
			code = emit_cmpw_imm (code, sreg1, imm);
			break;
		case OP_LCOMPARE_IMM:
		case OP_COMPARE_IMM:
			if (imm == 0) {
				arm_cmpx (code, sreg1, ARMREG_RZR);
			} else {
				// FIXME: 32 vs 64 bit issues for 0xffffffff
				code = emit_imm64 (code, ARMREG_LR, imm);
				arm_cmpx (code, sreg1, ARMREG_LR);
			}
			break;
		case OP_ISHL:
			arm_lslvw (code, dreg, sreg1, sreg2);
			break;
		case OP_LSHL:
			arm_lslvx (code, dreg, sreg1, sreg2);
			break;
		case OP_ISHR:
			arm_asrvw (code, dreg, sreg1, sreg2);
			break;
		case OP_LSHR:
			arm_asrvx (code, dreg, sreg1, sreg2);
			break;
		case OP_ISHR_UN:
			arm_lsrvw (code, dreg, sreg1, sreg2);
			break;
		case OP_LSHR_UN:
			arm_lsrvx (code, dreg, sreg1, sreg2);
			break;
		case OP_ISHL_IMM:
			if (imm == 0)
				arm_movx (code, dreg, sreg1);
			else
				arm_lslw (code, dreg, sreg1, imm);
			break;
		case OP_SHL_IMM:
		case OP_LSHL_IMM:
			if (imm == 0)
				arm_movx (code, dreg, sreg1);
			else
				arm_lslx (code, dreg, sreg1, imm);
			break;
		case OP_ISHR_IMM:
			if (imm == 0)
				arm_movx (code, dreg, sreg1);
			else
				arm_asrw (code, dreg, sreg1, imm);
			break;
		case OP_LSHR_IMM:
		case OP_SHR_IMM:
			if (imm == 0)
				arm_movx (code, dreg, sreg1);
			else
				arm_asrx (code, dreg, sreg1, imm);
			break;
		case OP_ISHR_UN_IMM:
			if (imm == 0)
				arm_movx (code, dreg, sreg1);
			else
				arm_lsrw (code, dreg, sreg1, imm);
			break;
		case OP_SHR_UN_IMM:
		case OP_LSHR_UN_IMM:
			if (imm == 0)
				arm_movx (code, dreg, sreg1);
			else
				arm_lsrx (code, dreg, sreg1, imm);
			break;

			/* 64BIT ALU */
		case OP_SEXT_I4:
			arm_sxtwx (code, dreg, sreg1);
			break;
		case OP_ZEXT_I4:
			/* Clean out the upper word */
			arm_movw (code, dreg, sreg1);
			break;

			/* MULTIPLY/DIVISION */
		case OP_IDIV:
		case OP_IREM:
			// FIXME: Optimize this
			/* Check for zero */
			arm_cmpx_imm (code, sreg2, 0);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
			/* Check for INT_MIN/-1 */
			code = emit_imm (code, ARMREG_IP0, 0x80000000);
			arm_cmpx (code, sreg1, ARMREG_IP0);
			arm_cset (code, ARMCOND_EQ, ARMREG_IP1);
			code = emit_imm (code, ARMREG_IP0, 0xffffffff);
			arm_cmpx (code, sreg2, ARMREG_IP0);
			arm_cset (code, ARMCOND_EQ, ARMREG_IP0);
			arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1);
			arm_cmpx_imm (code, ARMREG_IP0, 1);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "OverflowException");
			if (ins->opcode == OP_IREM) {
				arm_sdivw (code, ARMREG_LR, sreg1, sreg2);
				arm_msubw (code, dreg, ARMREG_LR, sreg2, sreg1);
			} else {
				arm_sdivw (code, dreg, sreg1, sreg2);
			}
			break;
		case OP_IDIV_UN:
			arm_cmpx_imm (code, sreg2, 0);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
			arm_udivw (code, dreg, sreg1, sreg2);
			break;
		case OP_IREM_UN:
			arm_cmpx_imm (code, sreg2, 0);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
			arm_udivw (code, ARMREG_LR, sreg1, sreg2);
			arm_msubw (code, dreg, ARMREG_LR, sreg2, sreg1);
			break;
		case OP_LDIV:
		case OP_LREM:
			// FIXME: Optimize this
			/* Check for zero */
			arm_cmpx_imm (code, sreg2, 0);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
			/* Check for INT64_MIN/-1 */
			code = emit_imm64 (code, ARMREG_IP0, 0x8000000000000000);
			arm_cmpx (code, sreg1, ARMREG_IP0);
			arm_cset (code, ARMCOND_EQ, ARMREG_IP1);
			code = emit_imm64 (code, ARMREG_IP0, 0xffffffffffffffff);
			arm_cmpx (code, sreg2, ARMREG_IP0);
			arm_cset (code, ARMCOND_EQ, ARMREG_IP0);
			arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1);
			arm_cmpx_imm (code, ARMREG_IP0, 1);
			/* 64 bit uses OverflowException */
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "OverflowException");
			if (ins->opcode == OP_LREM) {
				arm_sdivx (code, ARMREG_LR, sreg1, sreg2);
				arm_msubx (code, dreg, ARMREG_LR, sreg2, sreg1);
			} else {
				arm_sdivx (code, dreg, sreg1, sreg2);
			}
			break;
		case OP_LDIV_UN:
			arm_cmpx_imm (code, sreg2, 0);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
			arm_udivx (code, dreg, sreg1, sreg2);
			break;
		case OP_LREM_UN:
			arm_cmpx_imm (code, sreg2, 0);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
			arm_udivx (code, ARMREG_LR, sreg1, sreg2);
			arm_msubx (code, dreg, ARMREG_LR, sreg2, sreg1);
			break;
		case OP_IMUL:
			arm_mulw (code, dreg, sreg1, sreg2);
			break;
		case OP_LMUL:
			arm_mulx (code, dreg, sreg1, sreg2);
			break;
		case OP_IMUL_IMM:
			code = emit_imm (code, ARMREG_LR, imm);
			arm_mulw (code, dreg, sreg1, ARMREG_LR);
			break;
		case OP_MUL_IMM:
		case OP_LMUL_IMM:
			code = emit_imm (code, ARMREG_LR, imm);
			arm_mulx (code, dreg, sreg1, ARMREG_LR);
			break;

			/* CONVERSIONS */
		case OP_ICONV_TO_I1:
		case OP_LCONV_TO_I1:
			arm_sxtbx (code, dreg, sreg1);
			break;
		case OP_ICONV_TO_I2:
		case OP_LCONV_TO_I2:
			arm_sxthx (code, dreg, sreg1);
			break;
		case OP_ICONV_TO_U1:
		case OP_LCONV_TO_U1:
			arm_uxtbw (code, dreg, sreg1);
			break;
		case OP_ICONV_TO_U2:
		case OP_LCONV_TO_U2:
			arm_uxthw (code, dreg, sreg1);
			break;

			/* CSET */
		case OP_CEQ:
		case OP_ICEQ:
		case OP_LCEQ:
		case OP_CLT:
		case OP_ICLT:
		case OP_LCLT:
		case OP_CGT:
		case OP_ICGT:
		case OP_LCGT:
		case OP_CLT_UN:
		case OP_ICLT_UN:
		case OP_LCLT_UN:
		case OP_CGT_UN:
		case OP_ICGT_UN:
		case OP_LCGT_UN:
		case OP_ICNEQ:
		case OP_ICGE:
		case OP_ICLE:
		case OP_ICGE_UN:
		case OP_ICLE_UN: {
			int cond;

			cond = opcode_to_armcond (ins->opcode);
			arm_cset (code, cond, dreg);
			break;
		}
		case OP_FCEQ:
		case OP_FCLT:
		case OP_FCLT_UN:
		case OP_FCGT:
		case OP_FCGT_UN:
		case OP_FCNEQ:
		case OP_FCLE:
		case OP_FCGE: {
			int cond;

			cond = opcode_to_armcond (ins->opcode);
			arm_fcmpd (code, sreg1, sreg2);
			arm_cset (code, cond, dreg);
			break;
		}

			/* MEMORY */
		case OP_LOADI1_MEMBASE:
			code = emit_ldrsbx (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_LOADU1_MEMBASE:
			code = emit_ldrb (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_LOADI2_MEMBASE:
			code = emit_ldrshx (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_LOADU2_MEMBASE:
			code = emit_ldrh (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_LOADI4_MEMBASE:
			code = emit_ldrswx (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_LOADU4_MEMBASE:
			code = emit_ldrw (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_LOAD_MEMBASE:
		case OP_LOADI8_MEMBASE:
			code = emit_ldrx (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_STOREI1_MEMBASE_IMM:
		case OP_STOREI2_MEMBASE_IMM:
		case OP_STOREI4_MEMBASE_IMM:
		case OP_STORE_MEMBASE_IMM:
		case OP_STOREI8_MEMBASE_IMM: {
			int immreg;

			if (imm != 0) {
				code = emit_imm (code, ARMREG_LR, imm);
				immreg = ARMREG_LR;
			} else {
				immreg = ARMREG_RZR;
			}

			switch (ins->opcode) {
			case OP_STOREI1_MEMBASE_IMM:
				code = emit_strb (code, immreg, ins->inst_destbasereg, ins->inst_offset);
				break;
			case OP_STOREI2_MEMBASE_IMM:
				code = emit_strh (code, immreg, ins->inst_destbasereg, ins->inst_offset);
				break;
			case OP_STOREI4_MEMBASE_IMM:
				code = emit_strw (code, immreg, ins->inst_destbasereg, ins->inst_offset);
				break;
			case OP_STORE_MEMBASE_IMM:
			case OP_STOREI8_MEMBASE_IMM:
				code = emit_strx (code, immreg, ins->inst_destbasereg, ins->inst_offset);
				break;
			default:
				g_assert_not_reached ();
				break;
			}
			break;
		}
		case OP_STOREI1_MEMBASE_REG:
			code = emit_strb (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
			break;
		case OP_STOREI2_MEMBASE_REG:
			code = emit_strh (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
			break;
		case OP_STOREI4_MEMBASE_REG:
			code = emit_strw (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
			break;
		case OP_STORE_MEMBASE_REG:
		case OP_STOREI8_MEMBASE_REG:
			code = emit_strx (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
			break;
		case OP_TLS_GET:
			code = emit_tls_get (code, dreg, ins->inst_offset);
			break;
		case OP_TLS_SET:
			code = emit_tls_set (code, sreg1, ins->inst_offset);
			break;

			/* Atomic */
		case OP_MEMORY_BARRIER:
			arm_dmb (code, ARM_DMB_ISH);
			break;
		case OP_ATOMIC_ADD_I4: {
			guint8 *buf [16];

			buf [0] = code;
			arm_ldxrw (code, ARMREG_IP0, sreg1);
			arm_addx (code, ARMREG_IP0, ARMREG_IP0, sreg2);
			arm_stlxrw (code, ARMREG_IP1, ARMREG_IP0, sreg1);
			arm_cbnzw (code, ARMREG_IP1, buf [0]);

			arm_dmb (code, ARM_DMB_ISH);
			arm_movx (code, dreg, ARMREG_IP0);
			break;
		}
		case OP_ATOMIC_ADD_I8: {
			guint8 *buf [16];

			buf [0] = code;
			arm_ldxrx (code, ARMREG_IP0, sreg1);
			arm_addx (code, ARMREG_IP0, ARMREG_IP0, sreg2);
			arm_stlxrx (code, ARMREG_IP1, ARMREG_IP0, sreg1);
			arm_cbnzx (code, ARMREG_IP1, buf [0]);

			arm_dmb (code, ARM_DMB_ISH);
			arm_movx (code, dreg, ARMREG_IP0);
			break;
		}
		case OP_ATOMIC_EXCHANGE_I4: {
			guint8 *buf [16];

			buf [0] = code;
			arm_ldxrw (code, ARMREG_IP0, sreg1);
			arm_stlxrw (code, ARMREG_IP1, sreg2, sreg1);
			arm_cbnzw (code, ARMREG_IP1, buf [0]);

			arm_dmb (code, ARM_DMB_ISH);
			arm_movx (code, dreg, ARMREG_IP0);
			break;
		}
		case OP_ATOMIC_EXCHANGE_I8: {
			guint8 *buf [16];

			buf [0] = code;
			arm_ldxrx (code, ARMREG_IP0, sreg1);
			arm_stlxrx (code, ARMREG_IP1, sreg2, sreg1);
			arm_cbnzw (code, ARMREG_IP1, buf [0]);

			arm_dmb (code, ARM_DMB_ISH);
			arm_movx (code, dreg, ARMREG_IP0);
			break;
		}
		case OP_ATOMIC_CAS_I4: {
			guint8 *buf [16];

			/* sreg2 is the value, sreg3 is the comparand */
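			/*
			 * Illustrative shape of the load-linked/store-conditional loop
			 * emitted below (editor's sketch, not the authoritative encoding):
			 *
			 *   retry:  ldxr   w_ip0, [sreg1]          ; load-exclusive old value
			 *           cmp    w_ip0, w_sreg3          ; compare with comparand
			 *           b.ne   done                    ; mismatch -> give up
			 *           stlxr  w_ip1, w_sreg2, [sreg1] ; try to store new value
			 *           cbnz   w_ip1, retry            ; nonzero status -> lost reservation
			 *   done:   dmb    ish
			 */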
			buf [0] = code;
			arm_ldxrw (code, ARMREG_IP0, sreg1);
			arm_cmpw (code, ARMREG_IP0, ins->sreg3);
			buf [1] = code;
			arm_bcc (code, ARMCOND_NE, 0);
			arm_stlxrw (code, ARMREG_IP1, sreg2, sreg1);
			arm_cbnzw (code, ARMREG_IP1, buf [0]);
			arm_patch_rel (buf [1], code, MONO_R_ARM64_BCC);

			arm_dmb (code, ARM_DMB_ISH);
			arm_movx (code, dreg, ARMREG_IP0);
			break;
		}
		case OP_ATOMIC_CAS_I8: {
			guint8 *buf [16];

			buf [0] = code;
			arm_ldxrx (code, ARMREG_IP0, sreg1);
			arm_cmpx (code, ARMREG_IP0, ins->sreg3);
			buf [1] = code;
			arm_bcc (code, ARMCOND_NE, 0);
			arm_stlxrx (code, ARMREG_IP1, sreg2, sreg1);
			arm_cbnzw (code, ARMREG_IP1, buf [0]);
			arm_patch_rel (buf [1], code, MONO_R_ARM64_BCC);

			arm_dmb (code, ARM_DMB_ISH);
			arm_movx (code, dreg, ARMREG_IP0);
			break;
		}
		case OP_ATOMIC_LOAD_I1: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarb (code, ins->dreg, ARMREG_LR);
			arm_sxtbx (code, ins->dreg, ins->dreg);
			break;
		}
		case OP_ATOMIC_LOAD_U1: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarb (code, ins->dreg, ARMREG_LR);
			arm_uxtbx (code, ins->dreg, ins->dreg);
			break;
		}
		case OP_ATOMIC_LOAD_I2: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarh (code, ins->dreg, ARMREG_LR);
			arm_sxthx (code, ins->dreg, ins->dreg);
			break;
		}
		case OP_ATOMIC_LOAD_U2: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarh (code, ins->dreg, ARMREG_LR);
			arm_uxthx (code, ins->dreg, ins->dreg);
			break;
		}
		case OP_ATOMIC_LOAD_I4: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarw (code, ins->dreg, ARMREG_LR);
			arm_sxtwx (code, ins->dreg, ins->dreg);
			break;
		}
		case OP_ATOMIC_LOAD_U4: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarw (code, ins->dreg, ARMREG_LR);
			arm_movw (code, ins->dreg, ins->dreg); /* Clear upper half of the register. */
			break;
		}
		case OP_ATOMIC_LOAD_I8:
		case OP_ATOMIC_LOAD_U8: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarx (code, ins->dreg, ARMREG_LR);
			break;
		}
		case OP_ATOMIC_LOAD_R4: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			if (cfg->r4fp) {
				arm_ldarw (code, ARMREG_LR, ARMREG_LR);
				arm_fmov_rx_to_double (code, ins->dreg, ARMREG_LR);
			} else {
				arm_ldarw (code, ARMREG_LR, ARMREG_LR);
				arm_fmov_rx_to_double (code, FP_TEMP_REG, ARMREG_LR);
				arm_fcvt_sd (code, ins->dreg, FP_TEMP_REG);
			}
			break;
		}
		case OP_ATOMIC_LOAD_R8: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarx (code, ARMREG_LR, ARMREG_LR);
			arm_fmov_rx_to_double (code, ins->dreg, ARMREG_LR);
			break;
		}
		case OP_ATOMIC_STORE_I1:
		case OP_ATOMIC_STORE_U1: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
			arm_stlrb (code, ARMREG_LR, ins->sreg1);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			break;
		}
		case OP_ATOMIC_STORE_I2:
		case OP_ATOMIC_STORE_U2: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
			arm_stlrh (code, ARMREG_LR, ins->sreg1);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			break;
		}
		case OP_ATOMIC_STORE_I4:
		case OP_ATOMIC_STORE_U4: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
			arm_stlrw (code, ARMREG_LR, ins->sreg1);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			break;
		}
		case OP_ATOMIC_STORE_I8:
		case OP_ATOMIC_STORE_U8: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
			arm_stlrx (code, ARMREG_LR, ins->sreg1);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			break;
		}
		case OP_ATOMIC_STORE_R4: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
			if (cfg->r4fp) {
				arm_fmov_double_to_rx (code, ARMREG_IP0, ins->sreg1);
				arm_stlrw (code, ARMREG_LR, ARMREG_IP0);
			} else {
				arm_fcvt_ds (code, FP_TEMP_REG, ins->sreg1);
				arm_fmov_double_to_rx (code, ARMREG_IP0, FP_TEMP_REG);
				arm_stlrw (code, ARMREG_LR, ARMREG_IP0);
			}
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			break;
		}
		case OP_ATOMIC_STORE_R8: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
			arm_fmov_double_to_rx (code, ARMREG_IP0, ins->sreg1);
			arm_stlrx (code, ARMREG_LR, ARMREG_IP0);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			break;
		}

			/* FP */
		case OP_R8CONST: {
			guint64 imm = *(guint64*)ins->inst_p0;

			if (imm == 0) {
				arm_fmov_rx_to_double (code, dreg, ARMREG_RZR);
			} else {
				code = emit_imm64 (code, ARMREG_LR, imm);
				arm_fmov_rx_to_double (code, ins->dreg, ARMREG_LR);
			}
			break;
		}
		case OP_R4CONST: {
			guint64 imm = *(guint32*)ins->inst_p0;

			code = emit_imm64 (code, ARMREG_LR, imm);
			if (cfg->r4fp) {
				arm_fmov_rx_to_double (code, dreg, ARMREG_LR);
			} else {
				arm_fmov_rx_to_double (code, FP_TEMP_REG, ARMREG_LR);
				arm_fcvt_sd (code, dreg, FP_TEMP_REG);
			}
			break;
		}
		case OP_LOADR8_MEMBASE:
			code = emit_ldrfpx (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_LOADR4_MEMBASE:
			if (cfg->r4fp) {
				code = emit_ldrfpw (code, dreg, ins->inst_basereg, ins->inst_offset);
			} else {
				code = emit_ldrfpw (code, FP_TEMP_REG, ins->inst_basereg, ins->inst_offset);
				arm_fcvt_sd (code, dreg, FP_TEMP_REG);
			}
			break;
		case OP_STORER8_MEMBASE_REG:
			code = emit_strfpx (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
			break;
		case OP_STORER4_MEMBASE_REG:
			if (cfg->r4fp) {
				code = emit_strfpw (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
			} else {
				arm_fcvt_ds (code, FP_TEMP_REG, sreg1);
				code = emit_strfpw (code, FP_TEMP_REG, ins->inst_destbasereg, ins->inst_offset);
			}
			break;
		case OP_FMOVE:
			if (dreg != sreg1)
				arm_fmovd (code, dreg, sreg1);
			break;
		case OP_RMOVE:
			if (dreg != sreg1)
				arm_fmovs (code, dreg, sreg1);
			break;
		case OP_MOVE_F_TO_I4:
			if (cfg->r4fp) {
				arm_fmov_double_to_rx (code, ins->dreg, ins->sreg1);
			} else {
				arm_fcvt_ds (code, ins->dreg, ins->sreg1);
				arm_fmov_double_to_rx (code, ins->dreg, ins->dreg);
			}
			break;
		case OP_MOVE_I4_TO_F:
			if (cfg->r4fp) {
				arm_fmov_rx_to_double (code, ins->dreg, ins->sreg1);
			} else {
				arm_fmov_rx_to_double (code, ins->dreg, ins->sreg1);
				arm_fcvt_sd (code, ins->dreg, ins->dreg);
			}
			break;
		case OP_MOVE_F_TO_I8:
			arm_fmov_double_to_rx (code, ins->dreg, ins->sreg1);
			break;
		case OP_MOVE_I8_TO_F:
			arm_fmov_rx_to_double (code, ins->dreg, ins->sreg1);
			break;
		case OP_FCOMPARE:
			arm_fcmpd (code, sreg1, sreg2);
			break;
		case OP_RCOMPARE:
			arm_fcmps (code, sreg1, sreg2);
			break;
		case OP_FCONV_TO_I1:
			arm_fcvtzs_dx (code, dreg, sreg1);
			arm_sxtbx (code, dreg, dreg);
			break;
		case OP_FCONV_TO_U1:
			arm_fcvtzu_dx (code, dreg, sreg1);
			arm_uxtbw (code, dreg, dreg);
			break;
		case OP_FCONV_TO_I2:
			arm_fcvtzs_dx (code, dreg, sreg1);
			arm_sxthx (code, dreg, dreg);
			break;
		case OP_FCONV_TO_U2:
			arm_fcvtzu_dx (code, dreg, sreg1);
			arm_uxthw (code, dreg, dreg);
			break;
		case OP_FCONV_TO_I4:
			arm_fcvtzs_dx (code, dreg, sreg1);
			arm_sxtwx (code, dreg, dreg);
			break;
		case OP_FCONV_TO_U4:
			arm_fcvtzu_dx (code, dreg, sreg1);
			break;
		case OP_FCONV_TO_I8:
			arm_fcvtzs_dx (code, dreg, sreg1);
			break;
		case OP_FCONV_TO_U8:
			arm_fcvtzu_dx (code, dreg, sreg1);
			break;
		case OP_FCONV_TO_R4:
			if (cfg->r4fp) {
				arm_fcvt_ds (code, dreg, sreg1);
			} else {
				arm_fcvt_ds (code, FP_TEMP_REG, sreg1);
				arm_fcvt_sd (code, dreg, FP_TEMP_REG);
			}
			break;
		case OP_ICONV_TO_R4:
			if (cfg->r4fp) {
				arm_scvtf_rw_to_s (code, dreg, sreg1);
			} else {
				arm_scvtf_rw_to_s (code, FP_TEMP_REG, sreg1);
				arm_fcvt_sd (code, dreg, FP_TEMP_REG);
			}
			break;
		case OP_LCONV_TO_R4:
			if (cfg->r4fp) {
				arm_scvtf_rx_to_s (code, dreg, sreg1);
			} else {
				arm_scvtf_rx_to_s (code, FP_TEMP_REG, sreg1);
				arm_fcvt_sd (code, dreg, FP_TEMP_REG);
			}
			break;
		case OP_ICONV_TO_R8:
			arm_scvtf_rw_to_d (code, dreg, sreg1);
			break;
		case OP_LCONV_TO_R8:
			arm_scvtf_rx_to_d (code, dreg, sreg1);
			break;
		case OP_ICONV_TO_R_UN:
			arm_ucvtf_rw_to_d (code, dreg, sreg1);
			break;
		case OP_LCONV_TO_R_UN:
			arm_ucvtf_rx_to_d (code, dreg, sreg1);
			break;
		case OP_FADD:
			arm_fadd_d (code, dreg, sreg1, sreg2);
			break;
		case OP_FSUB:
			arm_fsub_d (code, dreg, sreg1, sreg2);
			break;
		case OP_FMUL:
			arm_fmul_d (code, dreg, sreg1, sreg2);
			break;
		case OP_FDIV:
			arm_fdiv_d (code, dreg, sreg1, sreg2);
			break;
		case OP_FREM:
			/* Emulated */
			g_assert_not_reached ();
			break;
		case OP_FNEG:
			arm_fneg_d (code, dreg, sreg1);
			break;
		case OP_ARM_SETFREG_R4:
			arm_fcvt_ds (code, dreg, sreg1);
			break;
		case OP_CKFINITE:
			/* Check for infinity */
			code = emit_imm64 (code, ARMREG_LR, 0x7fefffffffffffffLL);
			arm_fmov_rx_to_double (code, FP_TEMP_REG, ARMREG_LR);
			arm_fabs_d (code, FP_TEMP_REG2, sreg1);
			arm_fcmpd (code, FP_TEMP_REG2, FP_TEMP_REG);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_GT, "ArithmeticException");
			/* Check for NaNs */
			arm_fcmpd (code, FP_TEMP_REG2, FP_TEMP_REG2);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_OV, "ArithmeticException");
			arm_fmovd (code, dreg, sreg1);
			break;

			/* R4 */
		case OP_RADD:
			arm_fadd_s (code, dreg, sreg1, sreg2);
			break;
		case OP_RSUB:
			arm_fsub_s (code, dreg, sreg1, sreg2);
			break;
		case OP_RMUL:
			arm_fmul_s (code, dreg, sreg1, sreg2);
			break;
		case OP_RDIV:
			arm_fdiv_s (code, dreg, sreg1, sreg2);
			break;
		case OP_RNEG:
			arm_fneg_s (code, dreg, sreg1);
			break;
		case OP_RCONV_TO_I1:
			arm_fcvtzs_sx (code, dreg, sreg1);
			arm_sxtbx (code, dreg, dreg);
			break;
		case OP_RCONV_TO_U1:
			arm_fcvtzu_sx (code, dreg, sreg1);
			arm_uxtbw (code, dreg, dreg);
			break;
		case OP_RCONV_TO_I2:
			arm_fcvtzs_sx (code, dreg, sreg1);
			arm_sxthx (code, dreg, dreg);
			break;
		case OP_RCONV_TO_U2:
			arm_fcvtzu_sx (code, dreg, sreg1);
			arm_uxthw (code, dreg, dreg);
			break;
		case OP_RCONV_TO_I4:
			arm_fcvtzs_sx (code, dreg, sreg1);
			arm_sxtwx (code, dreg, dreg);
			break;
		case OP_RCONV_TO_U4:
			arm_fcvtzu_sx (code, dreg, sreg1);
			break;
		case OP_RCONV_TO_I8:
			arm_fcvtzs_sx (code, dreg, sreg1);
			break;
		case OP_RCONV_TO_U8:
			arm_fcvtzu_sx (code, dreg, sreg1);
			break;
		case OP_RCONV_TO_R8:
			arm_fcvt_sd (code, dreg, sreg1);
			break;
		case OP_RCONV_TO_R4:
			if (dreg != sreg1)
				arm_fmovs (code, dreg, sreg1);
			break;
		case OP_RCEQ:
		case OP_RCLT:
		case OP_RCLT_UN:
		case OP_RCGT:
		case OP_RCGT_UN:
		case OP_RCNEQ:
		case OP_RCLE:
		case OP_RCGE: {
			int cond;

			cond = opcode_to_armcond (ins->opcode);
			arm_fcmps (code, sreg1, sreg2);
			arm_cset (code, cond, dreg);
			break;
		}

			/* CALLS */
		case OP_VOIDCALL:
		case OP_CALL:
		case OP_LCALL:
		case OP_FCALL:
		case OP_RCALL:
		case OP_VCALL2: {
			call = (MonoCallInst*)ins;
			const MonoJumpInfoTarget patch = mono_call_to_patch (call);
			code = emit_call (cfg, code, patch.type, patch.target);
			code = emit_move_return_value (cfg, code, ins);
			break;
		}
		case OP_VOIDCALL_REG:
		case OP_CALL_REG:
		case OP_LCALL_REG:
		case OP_FCALL_REG:
		case OP_RCALL_REG:
		case OP_VCALL2_REG:
			code = mono_arm_emit_blrx (code, sreg1);
			code = emit_move_return_value (cfg, code, ins);
			break;
		case OP_VOIDCALL_MEMBASE:
		case OP_CALL_MEMBASE:
		case OP_LCALL_MEMBASE:
		case OP_FCALL_MEMBASE:
		case OP_RCALL_MEMBASE:
		case OP_VCALL2_MEMBASE:
			code = emit_ldrx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);
			code = mono_arm_emit_blrx (code, ARMREG_IP0);
			code = emit_move_return_value (cfg, code, ins);
			break;
		case OP_TAILCALL_PARAMETER:
			// This opcode helps compute sizes, i.e.
			// of the subsequent OP_TAILCALL, but contributes no code.
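			// (Editor's note on the accounting, taken from the sizing code in
			// OP_TAILCALL below: one OP_TAILCALL_PARAMETER worth of buffer
			// space is reserved per copied stack slot, i.e.
			//   max_len += call->stack_usage / sizeof (target_mgreg_t)
			//              * ins_get_size (OP_TAILCALL_PARAMETER);
			// so this opcode only reserves space and emits nothing.)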
			g_assert (ins->next);
			break;
		case OP_TAILCALL:
		case OP_TAILCALL_MEMBASE:
		case OP_TAILCALL_REG: {
			int branch_reg = ARMREG_IP0;
			guint64 free_reg = 1 << ARMREG_IP1;
			call = (MonoCallInst*)ins;

			g_assert (!cfg->method->save_lmf);

			max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER);
			while (G_UNLIKELY (offset + max_len > cfg->code_size)) {
				cfg->code_size *= 2;
				cfg->native_code = (unsigned char *)mono_realloc_native_code (cfg);
				code = cfg->native_code + offset;
				cfg->stat_code_reallocs++;
			}

			switch (ins->opcode) {
			case OP_TAILCALL:
				free_reg = (1 << ARMREG_IP0) | (1 << ARMREG_IP1);
				break;
			case OP_TAILCALL_REG:
				g_assert (sreg1 != -1);
				g_assert (sreg1 != ARMREG_IP0);
				g_assert (sreg1 != ARMREG_IP1);
				g_assert (sreg1 != ARMREG_LR);
				g_assert (sreg1 != ARMREG_SP);
				g_assert (sreg1 != ARMREG_R28);
				if ((sreg1 << 1) & MONO_ARCH_CALLEE_SAVED_REGS) {
					arm_movx (code, branch_reg, sreg1);
				} else {
					free_reg = (1 << ARMREG_IP0) | (1 << ARMREG_IP1);
					branch_reg = sreg1;
				}
				break;
			case OP_TAILCALL_MEMBASE:
				g_assert (ins->inst_basereg != -1);
				g_assert (ins->inst_basereg != ARMREG_IP0);
				g_assert (ins->inst_basereg != ARMREG_IP1);
				g_assert (ins->inst_basereg != ARMREG_LR);
				g_assert (ins->inst_basereg != ARMREG_SP);
				g_assert (ins->inst_basereg != ARMREG_R28);
				code = emit_ldrx (code, branch_reg, ins->inst_basereg, ins->inst_offset);
				break;
			default:
				g_assert_not_reached ();
			}

			// Copy stack arguments.
			// FIXME a fixed size memcpy is desirable here,
			// at least for larger values of stack_usage.
			for (int i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) {
				code = emit_ldrx (code, ARMREG_LR, ARMREG_SP, i);
				code = emit_strx (code, ARMREG_LR, ARMREG_R28, i);
			}

			/* Restore registers */
			code = emit_load_regset (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset);

			/* Destroy frame */
			code = mono_arm_emit_destroy_frame (code, cfg->stack_offset, free_reg);

			if (enable_ptrauth)
				/* There is no retab to authenticate lr */
				arm_autibsp (code);

			switch (ins->opcode) {
			case OP_TAILCALL:
				if (cfg->compile_aot) {
					/* This is not a PLT patch */
					code = emit_aotconst (cfg, code, branch_reg, MONO_PATCH_INFO_METHOD_JUMP, call->method);
				} else {
					mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method, MONO_R_ARM64_B);
					arm_b (code, code);
					cfg->thunk_area += THUNK_SIZE;
					break;
				}
				// fallthrough
			case OP_TAILCALL_MEMBASE:
			case OP_TAILCALL_REG:
				code = mono_arm_emit_brx (code, branch_reg);
				break;
			default:
				g_assert_not_reached ();
			}

			ins->flags |= MONO_INST_GC_CALLSITE;
			ins->backend.pc_offset = code - cfg->native_code;
			break;
		}
		case OP_ARGLIST:
			g_assert (cfg->arch.cinfo);
			code = emit_addx_imm (code, ARMREG_IP0, cfg->arch.args_reg, cfg->arch.cinfo->sig_cookie.offset);
			arm_strx (code, ARMREG_IP0, sreg1, 0);
			break;
		case OP_DYN_CALL: {
			MonoInst *var = cfg->dyn_call_var;
			guint8 *labels [16];
			int i;

			/*
			 * sreg1 points to a DynCallArgs structure initialized by mono_arch_start_dyn_call ().
			 * sreg2 is the function to call.
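			 *
			 * Rough call sequence driven by that structure (editor's sketch;
			 * the field names follow the MONO_STRUCT_OFFSET uses below):
			 *   1. load n_fpargs and, if nonzero, fill d0-d7 from fpregs;
			 *   2. carve n_stackargs * 8 bytes off SP and copy the overflow
			 *      arguments out of regs onto the stack;
			 *   3. load r0-r7 + r8 from regs and blr to sreg2;
			 *   4. store r0/r1 into res/res2 and, if n_fpret is nonzero,
			 *      spill d0-d7 back into fpregs.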
			 */
			g_assert (var->opcode == OP_REGOFFSET);

			arm_movx (code, ARMREG_LR, sreg1);
			arm_movx (code, ARMREG_IP1, sreg2);

			/* Save args buffer */
			code = emit_strx (code, ARMREG_LR, var->inst_basereg, var->inst_offset);

			/* Set fp argument regs */
			code = emit_ldrw (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_fpargs));
			arm_cmpw (code, ARMREG_R0, ARMREG_RZR);
			labels [0] = code;
			arm_bcc (code, ARMCOND_EQ, 0);
			for (i = 0; i < 8; ++i)
				code = emit_ldrfpx (code, ARMREG_D0 + i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * 8));
			arm_patch_rel (labels [0], code, MONO_R_ARM64_BCC);

			/* Allocate callee area */
			code = emit_ldrx (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
			arm_lslw (code, ARMREG_R0, ARMREG_R0, 3);
			arm_movspx (code, ARMREG_R1, ARMREG_SP);
			arm_subx (code, ARMREG_R1, ARMREG_R1, ARMREG_R0);
			arm_movspx (code, ARMREG_SP, ARMREG_R1);

			/* Set stack args */
			/* R1 = limit */
			code = emit_ldrx (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
			/* R2 = pointer into 'regs' */
			code = emit_imm (code, ARMREG_R2, MONO_STRUCT_OFFSET (DynCallArgs, regs) + ((PARAM_REGS + 1) * sizeof (target_mgreg_t)));
			arm_addx (code, ARMREG_R2, ARMREG_LR, ARMREG_R2);
			/* R3 = pointer to stack */
			arm_movspx (code, ARMREG_R3, ARMREG_SP);
			labels [0] = code;
			arm_b (code, code);
			labels [1] = code;
			code = emit_ldrx (code, ARMREG_R5, ARMREG_R2, 0);
			code = emit_strx (code, ARMREG_R5, ARMREG_R3, 0);
			code = emit_addx_imm (code, ARMREG_R2, ARMREG_R2, sizeof (target_mgreg_t));
			code = emit_addx_imm (code, ARMREG_R3, ARMREG_R3, sizeof (target_mgreg_t));
			code = emit_subx_imm (code, ARMREG_R1, ARMREG_R1, 1);
			arm_patch_rel (labels [0], code, MONO_R_ARM64_B);
			arm_cmpw (code, ARMREG_R1, ARMREG_RZR);
			arm_bcc (code, ARMCOND_GT, labels [1]);

			/* Set argument registers + r8 */
			code = mono_arm_emit_load_regarray (code, 0x1ff, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs));

			/* Make the call */
			code = mono_arm_emit_blrx (code, ARMREG_IP1);

			/* Save result */
			code = emit_ldrx (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
			arm_strx (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, res));
			arm_strx (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, res2));
			/* Save fp result */
			code = emit_ldrw (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_fpret));
			arm_cmpw (code, ARMREG_R0, ARMREG_RZR);
			labels [1] = code;
			arm_bcc (code, ARMCOND_EQ, 0);
			for (i = 0; i < 8; ++i)
				code = emit_strfpx (code, ARMREG_D0 + i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * 8));
			arm_patch_rel (labels [1], code, MONO_R_ARM64_BCC);
			break;
		}
		case OP_GENERIC_CLASS_INIT: {
			int byte_offset;
			guint8 *jump;

			byte_offset = MONO_STRUCT_OFFSET (MonoVTable, initialized);

			/* Load vtable->initialized */
			arm_ldrsbx (code, ARMREG_IP0, sreg1, byte_offset);
			jump = code;
			arm_cbnzx (code, ARMREG_IP0, 0);

			/* Slowpath */
			g_assert (sreg1 == ARMREG_R0);
			code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init));

			mono_arm_patch (jump, code, MONO_R_ARM64_CBZ);
			break;
		}
		case OP_CHECK_THIS:
			arm_ldrb (code, ARMREG_LR, sreg1, 0);
			break;
		case OP_NOT_NULL:
		case OP_NOT_REACHED:
		case OP_DUMMY_USE:
		case OP_DUMMY_ICONST:
		case OP_DUMMY_I8CONST:
		case OP_DUMMY_R8CONST:
		case OP_DUMMY_R4CONST:
			break;
		case OP_IL_SEQ_POINT:
			mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
			break;

			/* EH */
		case OP_COND_EXC_C:
		case OP_COND_EXC_IC:
		case OP_COND_EXC_OV:
		case OP_COND_EXC_IOV:
		case OP_COND_EXC_NC:
		case OP_COND_EXC_INC:
		case OP_COND_EXC_NO:
		case OP_COND_EXC_INO:
		case OP_COND_EXC_EQ:
		case OP_COND_EXC_IEQ:
		case OP_COND_EXC_NE_UN:
		case OP_COND_EXC_INE_UN:
		case OP_COND_EXC_ILT:
		case OP_COND_EXC_LT:
		case OP_COND_EXC_ILT_UN:
		case OP_COND_EXC_LT_UN:
		case OP_COND_EXC_IGT:
		case OP_COND_EXC_GT:
		case OP_COND_EXC_IGT_UN:
		case OP_COND_EXC_GT_UN:
		case OP_COND_EXC_IGE:
		case OP_COND_EXC_GE:
		case OP_COND_EXC_IGE_UN:
		case OP_COND_EXC_GE_UN:
		case OP_COND_EXC_ILE:
		case OP_COND_EXC_LE:
		case OP_COND_EXC_ILE_UN:
		case OP_COND_EXC_LE_UN:
			code = emit_cond_exc (cfg, code, ins->opcode, (const char*)ins->inst_p1);
			break;
		case OP_THROW:
			if (sreg1 != ARMREG_R0)
				arm_movx (code, ARMREG_R0, sreg1);
			code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception));
			break;
		case OP_RETHROW:
			if (sreg1 != ARMREG_R0)
				arm_movx (code, ARMREG_R0, sreg1);
			code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception));
			break;
		case OP_CALL_HANDLER:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb, MONO_R_ARM64_BL);
			arm_bl (code, 0);
			cfg->thunk_area += THUNK_SIZE;
			for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev)
				mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb);
			break;
		case OP_START_HANDLER: {
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);

			/* Save caller address */
			code = emit_strx (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);

			/*
			 * Reserve a param area, see test_0_finally_param_area ().
			 * This is needed because the param area is not set up when
			 * we are called from EH code.
			 */
			if (cfg->param_area)
				code = emit_subx_sp_imm (code, cfg->param_area);
			break;
		}
		case OP_ENDFINALLY:
		case OP_ENDFILTER: {
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);

			if (cfg->param_area)
				code = emit_addx_sp_imm (code, cfg->param_area);

			if (ins->opcode == OP_ENDFILTER && sreg1 != ARMREG_R0)
				arm_movx (code, ARMREG_R0, sreg1);
			/* Return to either after the branch in OP_CALL_HANDLER, or to the EH code */
			code = emit_ldrx (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
			arm_brx (code, ARMREG_LR);
			break;
		}
		case OP_GET_EX_OBJ:
			if (ins->dreg != ARMREG_R0)
				arm_movx (code, ins->dreg, ARMREG_R0);
			break;
		case OP_LIVERANGE_START: {
			if (cfg->verbose_level > 1)
				printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
			MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
			break;
		}
		case OP_LIVERANGE_END: {
			if (cfg->verbose_level > 1)
				printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
			MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
			break;
		}
		case OP_GC_SAFE_POINT: {
			guint8 *buf [1];

			arm_ldrx (code, ARMREG_IP1, ins->sreg1, 0);
			/* Call it if it is non-null */
			buf [0] = code;
			arm_cbzx (code, ARMREG_IP1, 0);
			code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
			mono_arm_patch (buf [0], code, MONO_R_ARM64_CBZ);
			break;
		}
		case OP_FILL_PROF_CALL_CTX:
			for (int i = 0; i < MONO_MAX_IREGS; i++)
				if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) || i == ARMREG_SP || i == ARMREG_FP)
					arm_strx (code, i, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, regs) + i * sizeof (target_mgreg_t));
			break;
		default:
			g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
			g_assert_not_reached ();
		}

		if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
			g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
				   mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
			g_assert_not_reached ();
		}
	}
	set_code_cursor (cfg, code);

	/*
	 * If the compiled code size is larger than the bcc displacement (19 bits signed),
	 * insert branch islands between/inside basic blocks.
	 */
	if (cfg->arch.cond_branch_islands)
		code = emit_branch_island (cfg, code, start_offset);
}

static guint8*
emit_move_args (MonoCompile *cfg, guint8 *code)
{
	MonoInst *ins;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	int i, part;
	MonoMethodSignature *sig = mono_method_signature_internal (cfg->method);

	cinfo = cfg->arch.cinfo;
	g_assert (cinfo);
	for (i = 0; i < cinfo->nargs; ++i) {
		ainfo = cinfo->args + i;
		ins = cfg->args [i];

		if (ins->opcode == OP_REGVAR) {
			switch (ainfo->storage) {
			case ArgInIReg:
				arm_movx (code, ins->dreg, ainfo->reg);
				if (i == 0 && sig->hasthis) {
					mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
					mono_add_var_location (cfg, ins, TRUE, ins->dreg, 0, code - cfg->native_code, 0);
				}
				break;
			case ArgOnStack:
				switch (ainfo->slot_size) {
				case 1:
					if (ainfo->sign)
						code = emit_ldrsbx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					else
						code = emit_ldrb (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					break;
				case 2:
					if (ainfo->sign)
						code = emit_ldrshx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					else
						code = emit_ldrh (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					break;
				case 4:
					if (ainfo->sign)
						code = emit_ldrswx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					else
						code = emit_ldrw (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					break;
				default:
					code = emit_ldrx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					break;
				}
				break;
			default:
				g_assert_not_reached ();
				break;
			}
		} else {
			if (ainfo->storage != ArgVtypeByRef && ainfo->storage != ArgVtypeByRefOnStack)
				g_assert (ins->opcode == OP_REGOFFSET);

			switch (ainfo->storage) {
			case ArgInIReg:
				/* Stack slots for arguments have size 8 */
				code = emit_strx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
				if (i == 0 && sig->hasthis) {
					mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
					mono_add_var_location (cfg, ins, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
				}
				break;
			case ArgInFReg:
				code = emit_strfpx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
				break;
			case ArgInFRegR4:
				code = emit_strfpw (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
				break;
			case ArgOnStack:
			case ArgOnStackR4:
			case ArgOnStackR8:
			case ArgVtypeByRefOnStack:
			case ArgVtypeOnStack:
				break;
			case ArgVtypeByRef: {
				MonoInst *addr_arg = ins->inst_left;

				if (ainfo->gsharedvt) {
					g_assert (ins->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
					arm_strx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
				} else {
					g_assert (ins->opcode == OP_VTARG_ADDR);
					g_assert (addr_arg->opcode == OP_REGOFFSET);
					arm_strx (code, ainfo->reg, addr_arg->inst_basereg, addr_arg->inst_offset);
				}
				break;
			}
			case ArgVtypeInIRegs:
				for (part = 0; part < ainfo->nregs; part ++) {
					code = emit_strx (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + (part * 8));
				}
				break;
			case ArgHFA:
				for (part = 0; part < ainfo->nregs; part ++) {
					if (ainfo->esize == 4)
						code = emit_strfpw (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + ainfo->foffsets [part]);
					else
						code = emit_strfpx (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + ainfo->foffsets [part]);
				}
				break;
			default:
				g_assert_not_reached ();
				break;
			}
		}
	}
	return code;
}

/*
 * emit_store_regarray:
 *
 *   Emit code to store the registers in REGS into the appropriate elements of
 * the register array at BASEREG+OFFSET.
 */
static WARN_UNUSED_RESULT guint8*
emit_store_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
	int i;

	for (i = 0; i < 32; ++i) {
		if (regs & (1 << i)) {
			if (i + 1 < 32 && (regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
				arm_stpx (code, i, i + 1, basereg, offset + (i * 8));
				i++;
			} else if (i == ARMREG_SP) {
				arm_movspx (code, ARMREG_IP1, ARMREG_SP);
				arm_strx (code, ARMREG_IP1, basereg, offset + (i * 8));
			} else {
				arm_strx (code, i, basereg, offset + (i * 8));
			}
		}
	}
	return code;
}

/*
 * emit_load_regarray:
 *
 *   Emit code to load the registers in REGS from the appropriate elements of
 * the register array at BASEREG+OFFSET.
 */
static WARN_UNUSED_RESULT guint8*
emit_load_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
	int i;

	for (i = 0; i < 32; ++i) {
		if (regs & (1 << i)) {
			if ((regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
				if (offset + (i * 8) < 500)
					arm_ldpx (code, i, i + 1, basereg, offset + (i * 8));
				else {
					code = emit_ldrx (code, i, basereg, offset + (i * 8));
					code = emit_ldrx (code, i + 1, basereg, offset + ((i + 1) * 8));
				}
				i++;
			} else if (i == ARMREG_SP) {
				g_assert_not_reached ();
			} else {
				code = emit_ldrx (code, i, basereg, offset + (i * 8));
			}
		}
	}
	return code;
}

/*
 * emit_store_regset:
 *
 *   Emit code to store the registers in REGS into consecutive memory locations starting
 * at BASEREG+OFFSET.
 */
static WARN_UNUSED_RESULT guint8*
emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset)
{
	int i, pos;

	pos = 0;
	for (i = 0; i < 32; ++i) {
		if (regs & (1 << i)) {
			if ((regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
				arm_stpx (code, i, i + 1, basereg, offset + (pos * 8));
				i++;
				pos++;
			} else if (i == ARMREG_SP) {
				arm_movspx (code, ARMREG_IP1, ARMREG_SP);
				arm_strx (code, ARMREG_IP1, basereg, offset + (pos * 8));
			} else {
				arm_strx (code, i, basereg, offset + (pos * 8));
			}
			pos++;
		}
	}
	return code;
}

/*
 * emit_load_regset:
 *
 *   Emit code to load the registers in REGS from consecutive memory locations starting
 * at BASEREG+OFFSET.
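 *
 * (Editor's note on the difference from the *_regarray helpers above: a
 * "regset" packs the selected registers densely, e.g. regs = {x19, x22}
 * lands at offset+0 and offset+8, while a "regarray" stores each register
 * at its own index, i.e. x22 at offset + 22 * 8.)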
 */
static WARN_UNUSED_RESULT guint8*
emit_load_regset (guint8 *code, guint64 regs, int basereg, int offset)
{
	int i, pos;

	pos = 0;
	for (i = 0; i < 32; ++i) {
		if (regs & (1 << i)) {
			if ((regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
				arm_ldpx (code, i, i + 1, basereg, offset + (pos * 8));
				i++;
				pos++;
			} else if (i == ARMREG_SP) {
				g_assert_not_reached ();
			} else {
				arm_ldrx (code, i, basereg, offset + (pos * 8));
			}
			pos++;
		}
	}
	return code;
}

WARN_UNUSED_RESULT guint8*
mono_arm_emit_load_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
	return emit_load_regarray (code, regs, basereg, offset);
}

WARN_UNUSED_RESULT guint8*
mono_arm_emit_store_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
	return emit_store_regarray (code, regs, basereg, offset);
}

WARN_UNUSED_RESULT guint8*
mono_arm_emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset)
{
	return emit_store_regset (code, regs, basereg, offset);
}

/* Same as emit_store_regset, but emit unwind info too */
/* CFA_OFFSET is the offset between the CFA and basereg */
static WARN_UNUSED_RESULT guint8*
emit_store_regset_cfa (MonoCompile *cfg, guint8 *code, guint64 regs, int basereg, int offset, int cfa_offset, guint64 no_cfa_regset)
{
	int i, j, pos, nregs;
	guint32 cfa_regset = regs & ~no_cfa_regset;

	pos = 0;
	for (i = 0; i < 32; ++i) {
		nregs = 1;
		if (regs & (1 << i)) {
			if ((regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
				if (offset < 256) {
					arm_stpx (code, i, i + 1, basereg, offset + (pos * 8));
				} else {
					code = emit_strx (code, i, basereg, offset + (pos * 8));
					code = emit_strx (code, i + 1, basereg, offset + (pos * 8) + 8);
				}
				nregs = 2;
			} else if (i == ARMREG_SP) {
				arm_movspx (code, ARMREG_IP1, ARMREG_SP);
				code = emit_strx (code, ARMREG_IP1, basereg, offset + (pos * 8));
			} else {
				code = emit_strx (code, i, basereg, offset + (pos * 8));
			}

			for (j = 0; j < nregs; ++j) {
				if (cfa_regset & (1 << (i + j)))
					mono_emit_unwind_op_offset (cfg, code, i + j, (- cfa_offset) + offset + ((pos + j) * 8));
			}

			i += nregs - 1;
			pos += nregs;
		}
	}
	return code;
}

/*
 * emit_setup_lmf:
 *
 *   Emit code to initialize an LMF structure at LMF_OFFSET.
 * Clobbers ip0/ip1.
 */
static guint8*
emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset)
{
	/*
	 * The LMF should contain all the state required to be able to reconstruct the machine state
	 * at the current point of execution. Since the LMF is only read during EH, only callee
	 * saved etc. registers need to be saved.
	 * FIXME: Save callee saved fp regs, JITted code doesn't use them, but native code does, and they
	 * need to be restored during EH.
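	 *
	 * What the code below actually records (editor's sketch): the current pc
	 * via an adr into MonoLMF.pc, then the MONO_ARCH_LMF_REGS register set
	 * into MonoLMF.gregs; fp/sp are excluded from the unwind info here
	 * because the prolog already described them.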
	 */

	/* pc */
	arm_adrx (code, ARMREG_LR, code);
	code = emit_strx (code, ARMREG_LR, ARMREG_FP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, pc));
	/* gregs + fp + sp */
	/* Don't emit unwind info for sp/fp, they are already handled in the prolog */
	code = emit_store_regset_cfa (cfg, code, MONO_ARCH_LMF_REGS, ARMREG_FP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, gregs), cfa_offset, (1 << ARMREG_FP) | (1 << ARMREG_SP));

	return code;
}

guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig;
	MonoBasicBlock *bb;
	guint8 *code;
	int cfa_offset, max_offset;

	sig = mono_method_signature_internal (method);
	cfg->code_size = 256 + sig->param_count * 64;
	code = cfg->native_code = g_malloc (cfg->code_size);

	/* This can be unaligned */
	cfg->stack_offset = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);

	/*
	 * - Setup frame
	 */
	cfa_offset = 0;
	mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);

	if (enable_ptrauth)
		arm_pacibsp (code);

	/* Setup frame */
	if (arm_is_ldpx_imm (-cfg->stack_offset)) {
		arm_stpx_pre (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, -cfg->stack_offset);
	} else {
		/* sp -= cfg->stack_offset */
		/* This clobbers ip0/ip1 */
		code = emit_subx_sp_imm (code, cfg->stack_offset);
		arm_stpx (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, 0);
	}
	cfa_offset += cfg->stack_offset;
	mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
	mono_emit_unwind_op_offset (cfg, code, ARMREG_FP, (- cfa_offset) + 0);
	mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, (- cfa_offset) + 8);
	arm_movspx (code, ARMREG_FP, ARMREG_SP);
	mono_emit_unwind_op_def_cfa_reg (cfg, code, ARMREG_FP);
	if (cfg->param_area) {
		/* The param area is below the frame pointer */
		code = emit_subx_sp_imm (code, cfg->param_area);
	}

	if (cfg->method->save_lmf) {
		code = emit_setup_lmf (cfg, code, cfg->lmf_var->inst_offset, cfa_offset);
	} else {
		/* Save gregs */
		code = emit_store_regset_cfa (cfg, code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset, cfa_offset, 0);
	}

	/* Setup args reg */
	if (cfg->arch.args_reg) {
		/* The register was already saved above */
		code = emit_addx_imm (code, cfg->arch.args_reg, ARMREG_FP, cfg->stack_offset);
	}

	/* Save return area addr received in R8 */
	if (cfg->vret_addr) {
		MonoInst *ins = cfg->vret_addr;

		g_assert (ins->opcode == OP_REGOFFSET);
		code = emit_strx (code, ARMREG_R8, ins->inst_basereg, ins->inst_offset);
	}

	/* Save mrgctx received in MONO_ARCH_RGCTX_REG */
	if (cfg->rgctx_var) {
		MonoInst *ins = cfg->rgctx_var;

		g_assert (ins->opcode == OP_REGOFFSET);

		code = emit_strx (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);

		mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code);
		mono_add_var_location (cfg, cfg->rgctx_var, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
	}

	/*
	 * Move arguments to their registers/stack locations.
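	 * (emit_move_args below walks cfg->arch.cinfo: a register argument that
	 * was allocated to a stack variable is spilled with strx, while an
	 * ArgOnStack argument assigned to a register is reloaded through
	 * cfg->arch.args_reg, which was pointed at the incoming stack area
	 * above.)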
	 */
	code = emit_move_args (cfg, code);

	/* Initialize seq_point_info_var */
	if (cfg->arch.seq_point_info_var) {
		MonoInst *ins = cfg->arch.seq_point_info_var;

		/* Initialize the variable from a GOT slot */
		code = emit_aotconst (cfg, code, ARMREG_IP0, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
		g_assert (ins->opcode == OP_REGOFFSET);
		code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);

		/* Initialize ss_tramp_var */
		ins = cfg->arch.ss_tramp_var;
		g_assert (ins->opcode == OP_REGOFFSET);

		code = emit_ldrx (code, ARMREG_IP1, ARMREG_IP0, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr));
		code = emit_strx (code, ARMREG_IP1, ins->inst_basereg, ins->inst_offset);
	} else {
		MonoInst *ins;

		if (cfg->arch.ss_tramp_var) {
			/* Initialize ss_tramp_var */
			ins = cfg->arch.ss_tramp_var;
			g_assert (ins->opcode == OP_REGOFFSET);

			code = emit_imm64 (code, ARMREG_IP0, (guint64)&ss_trampoline);
			code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);
		}

		if (cfg->arch.bp_tramp_var) {
			/* Initialize bp_tramp_var */
			ins = cfg->arch.bp_tramp_var;
			g_assert (ins->opcode == OP_REGOFFSET);

			code = emit_imm64 (code, ARMREG_IP0, (guint64)bp_trampoline);
			code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);
		}
	}

	max_offset = 0;
	if (cfg->opt & MONO_OPT_BRANCH) {
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
			MonoInst *ins;

			bb->max_offset = max_offset;
			MONO_BB_FOR_EACH_INS (bb, ins) {
				max_offset += ins_get_size (ins->opcode);
			}
		}
	}
	if (max_offset > 0x3ffff * 4)
		cfg->arch.cond_branch_islands = TRUE;

	return code;
}

void
mono_arch_emit_epilog (MonoCompile *cfg)
{
	CallInfo *cinfo;
	int max_epilog_size;
	guint8 *code;
	int i;

	max_epilog_size = 16 + 20*4;
	code = realloc_code (cfg, max_epilog_size);

	if (cfg->method->save_lmf) {
		code = mono_arm_emit_load_regarray (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->lmf_var->inst_offset + MONO_STRUCT_OFFSET (MonoLMF, gregs) - (MONO_ARCH_FIRST_LMF_REG * 8));
	} else {
		/* Restore gregs */
		code = emit_load_regset (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset);
	}

	/* Load returned vtypes into registers if needed */
	cinfo = cfg->arch.cinfo;
	switch (cinfo->ret.storage) {
	case ArgVtypeInIRegs: {
		MonoInst *ins = cfg->ret;

		for (i = 0; i < cinfo->ret.nregs; ++i)
			code = emit_ldrx (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * 8));
		break;
	}
	case ArgHFA: {
		MonoInst *ins = cfg->ret;

		for (i = 0; i < cinfo->ret.nregs; ++i) {
			if (cinfo->ret.esize == 4)
				code = emit_ldrfpw (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + cinfo->ret.foffsets [i]);
			else
				code = emit_ldrfpx (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + cinfo->ret.foffsets [i]);
		}
		break;
	}
	default:
		break;
	}

	/* Destroy frame */
	code = mono_arm_emit_destroy_frame (code, cfg->stack_offset, (1 << ARMREG_IP0) | (1 << ARMREG_IP1));

	if (enable_ptrauth)
		arm_retab (code);
	else
		arm_retx (code, ARMREG_LR);

	g_assert (code - (cfg->native_code + cfg->code_len) < max_epilog_size);

	set_code_cursor (cfg, code);
}

void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
	MonoJumpInfo *ji;
	MonoClass *exc_class;
	guint8 *code, *ip;
	guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
	int i, id, size = 0;

	for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
		exc_throw_pos [i] = NULL;
		exc_throw_found [i] = 0;
	}

	for (ji = cfg->patch_info; ji; ji = ji->next) {
		if (ji->type == MONO_PATCH_INFO_EXC) {
			i = mini_exception_id_by_name ((const char*)ji->data.target);
			if (!exc_throw_found [i]) {
				size += 32;
				exc_throw_found [i] = TRUE;
			}
		}
	}

	code = realloc_code (cfg, size);

	/* Emit code to raise corlib exceptions */
	for (ji = cfg->patch_info; ji; ji = ji->next) {
		if (ji->type != MONO_PATCH_INFO_EXC)
			continue;

		ip = cfg->native_code + ji->ip.i;

		id = mini_exception_id_by_name ((const char*)ji->data.target);

		if (exc_throw_pos [id]) {
			/* ip points to the bcc () in OP_COND_EXC_... */
			arm_patch_rel (ip, exc_throw_pos [id], ji->relocation);
			ji->type = MONO_PATCH_INFO_NONE;
			continue;
		}

		exc_throw_pos [id] = code;
		arm_patch_rel (ip, code, ji->relocation);

		/* We are being branched to from the code generated by emit_cond_exc (), the pc is in ip1 */

		/* r0 = type token */
		exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", ji->data.name);
		code = emit_imm (code, ARMREG_R0, m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF);
		/* r1 = throw ip */
		arm_movx (code, ARMREG_R1, ARMREG_IP1);
		/* Branch to the corlib exception throwing trampoline */
		ji->ip.i = code - cfg->native_code;
		ji->type = MONO_PATCH_INFO_JIT_ICALL_ID;
		ji->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception;
		ji->relocation = MONO_R_ARM64_BL;
		arm_bl (code, 0);
		cfg->thunk_area += THUNK_SIZE;
		set_code_cursor (cfg, code);
	}

	set_code_cursor (cfg, code);
}

MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	return NULL;
}

guint32
mono_arch_get_patch_offset (guint8 *code)
{
	return 0;
}

gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp)
{
	int i, buf_len, imt_reg;
	guint8 *buf, *code;

#if DEBUG_IMT
	printf ("building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p\n", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count, size, start, ((guint8*)start) + size, vtable);
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
	}
#endif

	buf_len = 0;
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];

		if (item->is_equals) {
			gboolean fail_case = !item->check_target_idx && fail_tramp;

			if (item->check_target_idx || fail_case) {
				if (!item->compare_done || fail_case) {
					buf_len += 4 * 4 + 4;
				}
				buf_len += 4;
				if (item->has_target_code) {
					buf_len += 5 * 4;
				} else {
					buf_len += 6 * 4;
				}
				if (fail_case) {
					buf_len += 5 * 4;
				}
			} else {
				buf_len += 6 * 4;
			}
		} else {
			buf_len += 6 * 4;
		}
	}

	if (fail_tramp) {
		buf = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, buf_len);
	} else {
		MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass);
		buf = mono_mem_manager_code_reserve (mem_manager, buf_len);
	}

	code = buf;

	MINI_BEGIN_CODEGEN ();

	/*
	 * We are called by JITted code, which passes in the IMT argument in
	 * MONO_ARCH_RGCTX_REG (r27). We need to preserve all caller saved regs
	 * except ip0/ip1.
	 */
	imt_reg = MONO_ARCH_RGCTX_REG;
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];

		item->code_target = code;

		if (item->is_equals) {
			/*
			 * Check the imt argument against item->key, if equals, jump to either
			 * item->value.target_code or to vtable [item->value.vtable_slot].
			 * If fail_tramp is set, jump to it if not-equals.
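			 *
			 * Conceptually each entry expands to (editor's sketch):
			 *
			 *   movz/movk ip0, #item->key   ; via emit_imm64
			 *   cmp       imt_reg, ip0
			 *   b.ne      next_entry        ; patched below
			 *   br        <target>          ; target code or vtable slot load
			 *
			 * with the final not-equals branch going to fail_tramp when set.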
*/ gboolean fail_case = !item->check_target_idx && fail_tramp; if (item->check_target_idx || fail_case) { /* Compare imt_reg with item->key */ if (!item->compare_done || fail_case) { // FIXME: Optimize this code = emit_imm64 (code, ARMREG_IP0, (guint64)item->key); arm_cmpx (code, imt_reg, ARMREG_IP0); } item->jmp_code = code; arm_bcc (code, ARMCOND_NE, 0); /* Jump to target if equals */ if (item->has_target_code) { code = emit_imm64 (code, ARMREG_IP0, (guint64)item->value.target_code); code = mono_arm_emit_brx (code, ARMREG_IP0); } else { guint64 imm = (guint64)&(vtable->vtable [item->value.vtable_slot]); code = emit_imm64 (code, ARMREG_IP0, imm); arm_ldrx (code, ARMREG_IP0, ARMREG_IP0, 0); code = mono_arm_emit_brx (code, ARMREG_IP0); } if (fail_case) { arm_patch_rel (item->jmp_code, code, MONO_R_ARM64_BCC); item->jmp_code = NULL; code = emit_imm64 (code, ARMREG_IP0, (guint64)fail_tramp); code = mono_arm_emit_brx (code, ARMREG_IP0); } } else { guint64 imm = (guint64)&(vtable->vtable [item->value.vtable_slot]); code = emit_imm64 (code, ARMREG_IP0, imm); arm_ldrx (code, ARMREG_IP0, ARMREG_IP0, 0); code = mono_arm_emit_brx (code, ARMREG_IP0); } } else { code = emit_imm64 (code, ARMREG_IP0, (guint64)item->key); arm_cmpx (code, imt_reg, ARMREG_IP0); item->jmp_code = code; arm_bcc (code, ARMCOND_HS, 0); } } /* Patch the branches */ for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->jmp_code && item->check_target_idx) arm_patch_rel (item->jmp_code, imt_entries [item->check_target_idx]->code_target, MONO_R_ARM64_BCC); } g_assert ((code - buf) <= buf_len); MINI_END_CODEGEN (buf, code - buf, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL); return MINI_ADDR_TO_FTNPTR (buf); } GSList * mono_arch_get_trampolines (gboolean aot) { return mono_arm_get_exception_trampolines (aot); } #else /* DISABLE_JIT */ gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp) { g_assert_not_reached (); return NULL; } #endif /* !DISABLE_JIT */ #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED void mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = MINI_FTNPTR_TO_ADDR (ip); guint32 native_offset = ip - (guint8*)ji->code_start; if (ji->from_aot) { SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start); if (enable_ptrauth) NOT_IMPLEMENTED; g_assert (native_offset % 4 == 0); g_assert (info->bp_addrs [native_offset / 4] == 0); info->bp_addrs [native_offset / 4] = (guint8*)mini_get_breakpoint_trampoline (); } else { /* ip points to an ldrx */ code += 4; mono_codeman_enable_write (); code = mono_arm_emit_blrx (code, ARMREG_IP0); mono_codeman_disable_write (); mono_arch_flush_icache (ip, code - ip); } } void mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = MINI_FTNPTR_TO_ADDR (ip); if (ji->from_aot) { guint32 native_offset = ip - (guint8*)ji->code_start; SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start); if (enable_ptrauth) NOT_IMPLEMENTED; g_assert (native_offset % 4 == 0); info->bp_addrs [native_offset / 4] = NULL; } else { /* ip points to an ldrx */ code += 4; mono_codeman_enable_write (); arm_nop (code); mono_codeman_disable_write (); mono_arch_flush_icache (ip, code - ip); } } void mono_arch_start_single_stepping (void) { ss_trampoline = mini_get_single_step_trampoline (); } void mono_arch_stop_single_stepping (void) { ss_trampoline = NULL; } gboolean mono_arch_is_single_step_event (void *info, void *sigctx) { /* We use soft breakpoints on arm64 
*/ return FALSE; } gboolean mono_arch_is_breakpoint_event (void *info, void *sigctx) { /* We use soft breakpoints on arm64 */ return FALSE; } void mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji) { g_assert_not_reached (); } void mono_arch_skip_single_step (MonoContext *ctx) { g_assert_not_reached (); } SeqPointInfo* mono_arch_get_seq_point_info (guint8 *code) { SeqPointInfo *info; MonoJitInfo *ji; MonoJitMemoryManager *jit_mm; jit_mm = get_default_jit_mm (); // FIXME: Add a free function jit_mm_lock (jit_mm); info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code); jit_mm_unlock (jit_mm); if (!info) { ji = mini_jit_info_table_find (code); g_assert (ji); info = g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size / 4) * sizeof(guint8*)); info->ss_tramp_addr = &ss_trampoline; jit_mm_lock (jit_mm); g_hash_table_insert (jit_mm->arch_seq_points, code, info); jit_mm_unlock (jit_mm); } return info; } #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */ gboolean mono_arch_opcode_supported (int opcode) { switch (opcode) { case OP_ATOMIC_ADD_I4: case OP_ATOMIC_ADD_I8: case OP_ATOMIC_EXCHANGE_I4: case OP_ATOMIC_EXCHANGE_I8: case OP_ATOMIC_CAS_I4: case OP_ATOMIC_CAS_I8: case OP_ATOMIC_LOAD_I1: case OP_ATOMIC_LOAD_I2: case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_LOAD_I8: case OP_ATOMIC_LOAD_U1: case OP_ATOMIC_LOAD_U2: case OP_ATOMIC_LOAD_U4: case OP_ATOMIC_LOAD_U8: case OP_ATOMIC_LOAD_R4: case OP_ATOMIC_LOAD_R8: case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_I8: case OP_ATOMIC_STORE_U1: case OP_ATOMIC_STORE_U2: case OP_ATOMIC_STORE_U4: case OP_ATOMIC_STORE_U8: case OP_ATOMIC_STORE_R4: case OP_ATOMIC_STORE_R8: return TRUE; default: return FALSE; } } CallInfo* mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { return get_call_info (mp, sig); } gpointer mono_arch_load_function (MonoJitICallId jit_icall_id) { gpointer target = NULL; switch (jit_icall_id) { #undef MONO_AOT_ICALL #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break; MONO_AOT_ICALL (mono_arm_resume_unwind) MONO_AOT_ICALL (mono_arm_start_gsharedvt_call) MONO_AOT_ICALL (mono_arm_throw_exception) } return target; } static guint8* emit_blrx (guint8 *code, int reg) { if (enable_ptrauth) arm_blraaz (code, reg); else arm_blrx (code, reg); return code; } static guint8* emit_brx (guint8 *code, int reg) { if (enable_ptrauth) arm_braaz (code, reg); else arm_brx (code, reg); return code; } guint8* mono_arm_emit_blrx (guint8 *code, int reg) { return emit_blrx (code, reg); } guint8* mono_arm_emit_brx (guint8 *code, int reg) { return emit_brx (code, reg); }
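/* Illustrative usage sketch (not part of the original file): callers pair these
   wrappers with emit_imm64 so indirect branches pick the pointer-authenticated
   encodings (blraaz/braaz) automatically when enable_ptrauth is set; 'callee_addr'
   here is a hypothetical target address:

   code = emit_imm64 (code, ARMREG_IP0, (guint64)callee_addr);
   code = mono_arm_emit_blrx (code, ARMREG_IP0);
*/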
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
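A minimal sketch of the front-end lowering this describes, using opcode names from mini-ops.h; the exact decompose site and form are assumptions, not taken from the PR diff:

    switch (ins->opcode) {
    case OP_FCONV_TO_I:
    case OP_RCONV_TO_I:
    /* native int is 64-bit or 32-bit depending on the target pointer size */
    #if TARGET_SIZEOF_VOID_P == 8
        ins->opcode = (ins->opcode == OP_FCONV_TO_I) ? OP_FCONV_TO_I8 : OP_RCONV_TO_I8;
    #else
        ins->opcode = (ins->opcode == OP_FCONV_TO_I) ? OP_FCONV_TO_I4 : OP_RCONV_TO_I4;
    #endif
        break;
    }

With a rewrite like this, the back ends (including the LLVM backend below) only ever see the explicitly sized TO_I4/TO_I8 forms.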
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/mini-llvm.c
/** * \file * llvm "Backend" for the mono JIT * * Copyright 2009-2011 Novell Inc (http://www.novell.com) * Copyright 2011 Xamarin Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include "config.h" #include <mono/metadata/debug-helpers.h> #include <mono/metadata/debug-internals.h> #include <mono/metadata/mempool-internals.h> #include <mono/metadata/environment.h> #include <mono/metadata/object-internals.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/tokentype.h> #include <mono/utils/mono-tls.h> #include <mono/utils/mono-dl.h> #include <mono/utils/mono-time.h> #include <mono/utils/freebsd-dwarf.h> #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #ifndef __STDC_CONSTANT_MACROS #define __STDC_CONSTANT_MACROS #endif #include "llvm-c/BitWriter.h" #include "llvm-c/Analysis.h" #include "mini-llvm-cpp.h" #include "llvm-jit.h" #include "aot-compiler.h" #include "mini-llvm.h" #include "mini-runtime.h" #include <mono/utils/mono-math.h> #ifndef DISABLE_JIT #if defined(TARGET_AMD64) && defined(TARGET_WIN32) && defined(HOST_WIN32) && defined(_MSC_VER) #define TARGET_X86_64_WIN32_MSVC #endif #if defined(TARGET_X86_64_WIN32_MSVC) #define TARGET_WIN32_MSVC #endif #if LLVM_API_VERSION < 900 #error "The version of the mono llvm repository is too old." #endif /* * Information associated by mono with LLVM modules. */ typedef struct { LLVMModuleRef lmodule; LLVMValueRef throw_icall, rethrow, throw_corlib_exception; GHashTable *llvm_types; LLVMValueRef dummy_got_var; const char *get_method_symbol; const char *get_unbox_tramp_symbol; const char *init_aotconst_symbol; GHashTable *plt_entries; GHashTable *plt_entries_ji; GHashTable *method_to_lmethod; GHashTable *method_to_call_info; GHashTable *lvalue_to_lcalls; GHashTable *direct_callables; /* Maps got slot index -> LLVMValueRef */ GHashTable *aotconst_vars; char **bb_names; int bb_names_len; GPtrArray *used; LLVMTypeRef ptr_type; GPtrArray *subprogram_mds; MonoEERef *mono_ee; LLVMExecutionEngineRef ee; gboolean external_symbols; gboolean emit_dwarf; int max_got_offset; LLVMValueRef personality; gpointer gc_poll_cold_wrapper_compiled; /* For AOT */ MonoAssembly *assembly; char *global_prefix; MonoAotFileInfo aot_info; const char *eh_frame_symbol; LLVMValueRef get_method, get_unbox_tramp, init_aotconst_func; LLVMValueRef init_methods [AOT_INIT_METHOD_NUM]; LLVMValueRef code_start, code_end; LLVMValueRef inited_var; LLVMValueRef unbox_tramp_indexes; LLVMValueRef unbox_trampolines; LLVMValueRef gc_poll_cold_wrapper; LLVMValueRef info_var; LLVMTypeRef *info_var_eltypes; int max_inited_idx, max_method_idx; gboolean has_jitted_code; gboolean static_link; gboolean llvm_only; gboolean interp; GHashTable *idx_to_lmethod; GHashTable *idx_to_unbox_tramp; GPtrArray *callsite_list; LLVMContextRef context; LLVMValueRef sentinel_exception; LLVMValueRef gc_safe_point_flag_var; LLVMValueRef interrupt_flag_var; void *di_builder, *cu; GHashTable *objc_selector_to_var; GPtrArray *cfgs; int unbox_tramp_num, unbox_tramp_elemsize; GHashTable *got_idx_to_type; GHashTable *no_method_table_lmethods; } MonoLLVMModule; /* * Information associated by the backend with mono basic blocks. */ typedef struct { LLVMBasicBlockRef bblock, end_bblock; LLVMValueRef finally_ind; gboolean added, invoke_target; /* * If this bblock is the start of a finally clause, this is a list of bblocks it * needs to branch to in ENDFINALLY. 
*/ GSList *call_handler_return_bbs; /* * If this bblock is the start of a finally clause, this is the bblock that * CALL_HANDLER needs to branch to. */ LLVMBasicBlockRef call_handler_target_bb; /* The list of switch statements generated by ENDFINALLY instructions */ GSList *endfinally_switch_ins_list; GSList *phi_nodes; } BBInfo; /* * Structure containing emit state */ typedef struct { MonoMemPool *mempool; /* Maps method names to the corresponding LLVMValueRef */ GHashTable *emitted_method_decls; MonoCompile *cfg; LLVMValueRef lmethod; MonoLLVMModule *module; LLVMModuleRef lmodule; BBInfo *bblocks; int sindex, default_index, ex_index; LLVMBuilderRef builder; LLVMValueRef *values, *addresses; MonoType **vreg_cli_types; LLVMCallInfo *linfo; MonoMethodSignature *sig; GSList *builders; GHashTable *region_to_handler; GHashTable *clause_to_handler; LLVMBuilderRef alloca_builder; LLVMValueRef last_alloca; LLVMValueRef rgctx_arg; LLVMValueRef this_arg; LLVMTypeRef *vreg_types; gboolean *is_vphi; LLVMTypeRef method_type; LLVMBasicBlockRef init_bb, inited_bb; gboolean *is_dead; gboolean *unreachable; gboolean llvm_only; gboolean has_got_access; gboolean is_linkonce; gboolean emit_dummy_arg; gboolean has_safepoints; gboolean has_catch; int this_arg_pindex, rgctx_arg_pindex; LLVMValueRef imt_rgctx_loc; GHashTable *llvm_types; LLVMValueRef dbg_md; MonoDebugMethodInfo *minfo; /* For every clause, the clauses it is nested in */ GSList **nested_in; LLVMValueRef ex_var; GHashTable *exc_meta; GPtrArray *callsite_list; GPtrArray *phi_values; GPtrArray *bblock_list; char *method_name; GHashTable *jit_callees; LLVMValueRef long_bb_break_var; int *gc_var_indexes; LLVMValueRef gc_pin_area; LLVMValueRef il_state; LLVMValueRef il_state_ret; } EmitContext; typedef struct { MonoBasicBlock *bb; MonoInst *phi; MonoBasicBlock *in_bb; int sreg; } PhiNode; /* * Instruction metadata * This is the same as ins_info, but LREG != IREG. 
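* For example (derived from the macros below), LLVM_INS_INFO (opcode) returns the
* 4-char record dest/src1/src2/src3 for OPCODE, each char one of ' ' (none),
* 'i' (int reg), 'f' (fp reg), 'v' (vtype), 'x' (simd reg) or 'l' (long reg).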
*/ #ifdef MINI_OP #undef MINI_OP #endif #ifdef MINI_OP3 #undef MINI_OP3 #endif #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ', #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3, #define NONE ' ' #define IREG 'i' #define FREG 'f' #define VREG 'v' #define XREG 'x' #define LREG 'l' /* keep in sync with the enum in mini.h */ const char mini_llvm_ins_info[] = { #include "mini-ops.h" }; #undef MINI_OP #undef MINI_OP3 #if TARGET_SIZEOF_VOID_P == 4 #define GET_LONG_IMM(ins) ((ins)->inst_l) #else #define GET_LONG_IMM(ins) ((ins)->inst_imm) #endif #define LLVM_INS_INFO(opcode) (&mini_llvm_ins_info [((opcode) - OP_START - 1) * 4]) #if 0 #define TRACE_FAILURE(msg) do { printf ("%s\n", msg); } while (0) #else #define TRACE_FAILURE(msg) #endif #ifdef TARGET_X86 #define IS_TARGET_X86 1 #else #define IS_TARGET_X86 0 #endif #ifdef TARGET_AMD64 #define IS_TARGET_AMD64 1 #else #define IS_TARGET_AMD64 0 #endif #define ctx_ok(ctx) (!(ctx)->cfg->disable_llvm) enum { MAX_VECTOR_ELEMS = 32, // 2 vectors * 128 bits per vector / 8 bits per element ARM64_MAX_VECTOR_ELEMS = 16, }; const int mask_0_incr_1 [] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, }; static LLVMIntPredicate cond_to_llvm_cond [] = { LLVMIntEQ, LLVMIntNE, LLVMIntSLE, LLVMIntSGE, LLVMIntSLT, LLVMIntSGT, LLVMIntULE, LLVMIntUGE, LLVMIntULT, LLVMIntUGT, }; static LLVMRealPredicate fpcond_to_llvm_cond [] = { LLVMRealOEQ, LLVMRealUNE, LLVMRealOLE, LLVMRealOGE, LLVMRealOLT, LLVMRealOGT, LLVMRealULE, LLVMRealUGE, LLVMRealULT, LLVMRealUGT, LLVMRealORD, LLVMRealUNO }; /* See Table 3-1 ("Comparison Predicate for CMPPD and CMPPS Instructions") in * Vol. 2A of the Intel SDM. */ enum { SSE_eq_ord_nosignal = 0, SSE_lt_ord_signal = 1, SSE_le_ord_signal = 2, SSE_unord_nosignal = 3, SSE_neq_unord_nosignal = 4, SSE_nlt_unord_signal = 5, SSE_nle_unord_signal = 6, SSE_ord_nosignal = 7, }; static MonoLLVMModule aot_module; static GHashTable *intrins_id_to_intrins; static LLVMTypeRef i1_t, i2_t, i4_t, i8_t, r4_t, r8_t; static LLVMTypeRef sse_i1_t, sse_i2_t, sse_i4_t, sse_i8_t, sse_r4_t, sse_r8_t; static LLVMTypeRef v64_i1_t, v64_i2_t, v64_i4_t, v64_i8_t, v64_r4_t, v64_r8_t; static LLVMTypeRef v128_i1_t, v128_i2_t, v128_i4_t, v128_i8_t, v128_r4_t, v128_r8_t; static LLVMTypeRef void_func_t; static MonoLLVMModule *init_jit_module (void); static void emit_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder, const unsigned char *cil_code); static void emit_default_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder); static LLVMValueRef emit_dbg_subprogram (EmitContext *ctx, MonoCompile *cfg, LLVMValueRef method, const char *name); static void emit_dbg_info (MonoLLVMModule *module, const char *filename, const char *cu_name); static void emit_cond_system_exception (EmitContext *ctx, MonoBasicBlock *bb, const char *exc_type, LLVMValueRef cmp, gboolean force_explicit); static LLVMValueRef get_intrins (EmitContext *ctx, int id); static LLVMValueRef get_intrins_from_module (LLVMModuleRef lmodule, int id); static void llvm_jit_finalize_method (EmitContext *ctx); static void mono_llvm_nonnull_state_update (EmitContext *ctx, LLVMValueRef lcall, MonoMethod *call_method, LLVMValueRef *args, int num_params); static void mono_llvm_propagate_nonnull_final (GHashTable *all_specializable, MonoLLVMModule *module); static void create_aot_info_var (MonoLLVMModule *module); static void set_invariant_load_flag (LLVMValueRef v); static void set_nonnull_load_flag (LLVMValueRef v); enum { 
INTRIN_scalar = 1 << 0, INTRIN_vector64 = 1 << 1, INTRIN_vector128 = 1 << 2, INTRIN_vectorwidths = 3, INTRIN_vectormask = 0x7, INTRIN_int8 = 1 << 3, INTRIN_int16 = 1 << 4, INTRIN_int32 = 1 << 5, INTRIN_int64 = 1 << 6, INTRIN_float32 = 1 << 7, INTRIN_float64 = 1 << 8, INTRIN_elementwidths = 6, }; typedef uint16_t llvm_ovr_tag_t; static LLVMTypeRef intrin_types [INTRIN_vectorwidths][INTRIN_elementwidths]; static const llvm_ovr_tag_t intrin_arm64_ovr [] = { #define INTRINS(sym, ...) 0, #define INTRINS_OVR(sym, ...) 0, #define INTRINS_OVR_2_ARG(sym, ...) 0, #define INTRINS_OVR_3_ARG(sym, ...) 0, #define INTRINS_OVR_TAG(sym, _, arch, spec) spec, #define INTRINS_OVR_TAG_KIND(sym, _, kind, arch, spec) spec, #include "llvm-intrinsics.h" }; enum { INTRIN_kind_ftoi = 1, INTRIN_kind_widen, INTRIN_kind_widen_across, INTRIN_kind_across, INTRIN_kind_arm64_dot_prod, }; static const uint8_t intrin_kind [] = { #define INTRINS(sym, ...) 0, #define INTRINS_OVR(sym, ...) 0, #define INTRINS_OVR_2_ARG(sym, ...) 0, #define INTRINS_OVR_3_ARG(sym, ...) 0, #define INTRINS_OVR_TAG(sym, _, arch, spec) 0, #define INTRINS_OVR_TAG_KIND(sym, _, arch, kind, spec) kind, #include "llvm-intrinsics.h" }; static inline llvm_ovr_tag_t ovr_tag_force_scalar (llvm_ovr_tag_t tag) { return (tag & ~INTRIN_vectormask) | INTRIN_scalar; } static inline llvm_ovr_tag_t ovr_tag_smaller_vector (llvm_ovr_tag_t tag) { return (tag & ~INTRIN_vectormask) | ((tag & INTRIN_vectormask) >> 1); } static inline llvm_ovr_tag_t ovr_tag_smaller_elements (llvm_ovr_tag_t tag) { return ((tag & ~INTRIN_vectormask) >> 1) | (tag & INTRIN_vectormask); } static inline llvm_ovr_tag_t ovr_tag_corresponding_integer (llvm_ovr_tag_t tag) { return ((tag & ~INTRIN_vectormask) >> 2) | (tag & INTRIN_vectormask); } static LLVMTypeRef ovr_tag_to_llvm_type (llvm_ovr_tag_t tag) { int vw = 0; int ew = 0; if (tag & INTRIN_vector64) vw = 1; else if (tag & INTRIN_vector128) vw = 2; if (tag & INTRIN_int16) ew = 1; else if (tag & INTRIN_int32) ew = 2; else if (tag & INTRIN_int64) ew = 3; else if (tag & INTRIN_float32) ew = 4; else if (tag & INTRIN_float64) ew = 5; return intrin_types [vw][ew]; } static int key_from_id_and_tag (int id, llvm_ovr_tag_t ovr_tag) { return (((int) ovr_tag) << 23) | id; } static llvm_ovr_tag_t ovr_tag_from_mono_vector_class (MonoClass *klass) { int size = mono_class_value_size (klass, NULL); llvm_ovr_tag_t ret = 0; switch (size) { case 8: ret |= INTRIN_vector64; break; case 16: ret |= INTRIN_vector128; break; } MonoType *etype = mono_class_get_context (klass)->class_inst->type_argv [0]; switch (etype->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: ret |= INTRIN_int8; break; case MONO_TYPE_I2: case MONO_TYPE_U2: ret |= INTRIN_int16; break; case MONO_TYPE_I4: case MONO_TYPE_U4: ret |= INTRIN_int32; break; case MONO_TYPE_I8: case MONO_TYPE_U8: ret |= INTRIN_int64; break; case MONO_TYPE_R4: ret |= INTRIN_float32; break; case MONO_TYPE_R8: ret |= INTRIN_float64; break; } return ret; } static llvm_ovr_tag_t ovr_tag_from_llvm_type (LLVMTypeRef type) { llvm_ovr_tag_t ret = 0; LLVMTypeKind kind = LLVMGetTypeKind (type); LLVMTypeRef elem_t = NULL; switch (kind) { case LLVMVectorTypeKind: { elem_t = LLVMGetElementType (type); unsigned int bits = mono_llvm_get_prim_size_bits (type); switch (bits) { case 64: ret |= INTRIN_vector64; break; case 128: ret |= INTRIN_vector128; break; default: g_assert_not_reached (); } break; } default: g_assert_not_reached (); } if (elem_t == i1_t) ret |= INTRIN_int8; if (elem_t == i2_t) ret |= INTRIN_int16; if (elem_t == i4_t) ret |= 
INTRIN_int32; if (elem_t == i8_t) ret |= INTRIN_int64; if (elem_t == r4_t) ret |= INTRIN_float32; if (elem_t == r8_t) ret |= INTRIN_float64; return ret; } static inline void set_failure (EmitContext *ctx, const char *message) { TRACE_FAILURE (reason); ctx->cfg->exception_message = g_strdup (message); ctx->cfg->disable_llvm = TRUE; } static LLVMValueRef const_int1 (int v) { return LLVMConstInt (LLVMInt1Type (), v ? 1 : 0, FALSE); } static LLVMValueRef const_int8 (int v) { return LLVMConstInt (LLVMInt8Type (), v, FALSE); } static LLVMValueRef const_int32 (int v) { return LLVMConstInt (LLVMInt32Type (), v, FALSE); } static LLVMValueRef const_int64 (int64_t v) { return LLVMConstInt (LLVMInt64Type (), v, FALSE); } /* * IntPtrType: * * The LLVM type with width == TARGET_SIZEOF_VOID_P */ static LLVMTypeRef IntPtrType (void) { return TARGET_SIZEOF_VOID_P == 8 ? LLVMInt64Type () : LLVMInt32Type (); } static LLVMTypeRef ObjRefType (void) { return TARGET_SIZEOF_VOID_P == 8 ? LLVMPointerType (LLVMInt64Type (), 0) : LLVMPointerType (LLVMInt32Type (), 0); } static LLVMTypeRef ThisType (void) { return TARGET_SIZEOF_VOID_P == 8 ? LLVMPointerType (LLVMInt64Type (), 0) : LLVMPointerType (LLVMInt32Type (), 0); } typedef struct { int32_t size; uint32_t align; } MonoSizeAlign; /* * get_vtype_size: * * Return the size of the LLVM representation of the vtype T. */ static MonoSizeAlign get_vtype_size_align (MonoType *t) { uint32_t align = 0; int32_t size = mono_class_value_size (mono_class_from_mono_type_internal (t), &align); /* LLVMArgAsIArgs depends on this since it stores whole words */ while (size < 2 * TARGET_SIZEOF_VOID_P && mono_is_power_of_two (size) == -1) size ++; MonoSizeAlign ret = { size, align }; return ret; } /* * simd_class_to_llvm_type: * * Return the LLVM type corresponding to the Mono.SIMD class KLASS */ static LLVMTypeRef simd_class_to_llvm_type (EmitContext *ctx, MonoClass *klass) { const char *klass_name = m_class_get_name (klass); if (!strcmp (klass_name, "Vector2d")) { return LLVMVectorType (LLVMDoubleType (), 2); } else if (!strcmp (klass_name, "Vector2l")) { return LLVMVectorType (LLVMInt64Type (), 2); } else if (!strcmp (klass_name, "Vector2ul")) { return LLVMVectorType (LLVMInt64Type (), 2); } else if (!strcmp (klass_name, "Vector4i")) { return LLVMVectorType (LLVMInt32Type (), 4); } else if (!strcmp (klass_name, "Vector4ui")) { return LLVMVectorType (LLVMInt32Type (), 4); } else if (!strcmp (klass_name, "Vector4f")) { return LLVMVectorType (LLVMFloatType (), 4); } else if (!strcmp (klass_name, "Vector8s")) { return LLVMVectorType (LLVMInt16Type (), 8); } else if (!strcmp (klass_name, "Vector8us")) { return LLVMVectorType (LLVMInt16Type (), 8); } else if (!strcmp (klass_name, "Vector16sb")) { return LLVMVectorType (LLVMInt8Type (), 16); } else if (!strcmp (klass_name, "Vector16b")) { return LLVMVectorType (LLVMInt8Type (), 16); } else if (!strcmp (klass_name, "Vector2")) { /* System.Numerics */ return LLVMVectorType (LLVMFloatType (), 4); } else if (!strcmp (klass_name, "Vector3")) { return LLVMVectorType (LLVMFloatType (), 4); } else if (!strcmp (klass_name, "Vector4")) { return LLVMVectorType (LLVMFloatType (), 4); } else if (!strcmp (klass_name, "Vector`1") || !strcmp (klass_name, "Vector64`1") || !strcmp (klass_name, "Vector128`1") || !strcmp (klass_name, "Vector256`1")) { MonoType *etype = mono_class_get_generic_class (klass)->context.class_inst->type_argv [0]; int size = mono_class_value_size (klass, NULL); switch (etype->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return 
LLVMVectorType (LLVMInt8Type (), size); case MONO_TYPE_I2: case MONO_TYPE_U2: return LLVMVectorType (LLVMInt16Type (), size / 2); case MONO_TYPE_I4: case MONO_TYPE_U4: return LLVMVectorType (LLVMInt32Type (), size / 4); case MONO_TYPE_I8: case MONO_TYPE_U8: return LLVMVectorType (LLVMInt64Type (), size / 8); case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return LLVMVectorType (LLVMInt64Type (), size / 8); #else return LLVMVectorType (LLVMInt32Type (), size / 4); #endif case MONO_TYPE_R4: return LLVMVectorType (LLVMFloatType (), size / 4); case MONO_TYPE_R8: return LLVMVectorType (LLVMDoubleType (), size / 8); default: g_assert_not_reached (); return NULL; } } else { printf ("%s\n", klass_name); NOT_IMPLEMENTED; return NULL; } } static LLVMTypeRef simd_valuetuple_to_llvm_type (EmitContext *ctx, MonoClass *klass) { const char *klass_name = m_class_get_name (klass); if (!strcmp (klass_name, "ValueTuple`2")) { MonoType *etype = mono_class_get_generic_class (klass)->context.class_inst->type_argv [0]; if (etype->type != MONO_TYPE_GENERICINST) g_assert_not_reached (); MonoClass *eklass = etype->data.generic_class->cached_class; LLVMTypeRef ltype = simd_class_to_llvm_type (ctx, eklass); return LLVMArrayType (ltype, 2); } g_assert_not_reached (); } /* Return the 128 bit SIMD type corresponding to the mono type TYPE */ static inline G_GNUC_UNUSED LLVMTypeRef type_to_sse_type (int type) { switch (type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return LLVMVectorType (LLVMInt8Type (), 16); case MONO_TYPE_U2: case MONO_TYPE_I2: return LLVMVectorType (LLVMInt16Type (), 8); case MONO_TYPE_U4: case MONO_TYPE_I4: return LLVMVectorType (LLVMInt32Type (), 4); case MONO_TYPE_U8: case MONO_TYPE_I8: return LLVMVectorType (LLVMInt64Type (), 2); case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return LLVMVectorType (LLVMInt64Type (), 2); #else return LLVMVectorType (LLVMInt32Type (), 4); #endif case MONO_TYPE_R8: return LLVMVectorType (LLVMDoubleType (), 2); case MONO_TYPE_R4: return LLVMVectorType (LLVMFloatType (), 4); default: g_assert_not_reached (); return NULL; } } static LLVMTypeRef create_llvm_type_for_type (MonoLLVMModule *module, MonoClass *klass) { int i, size, nfields, esize; LLVMTypeRef *eltypes; char *name; MonoType *t; LLVMTypeRef ltype; t = m_class_get_byval_arg (klass); if (mini_type_is_hfa (t, &nfields, &esize)) { /* * This is needed on arm64 where HFAs are returned in * registers. */ /* SIMD types have size 16 in mono_class_value_size () */ if (m_class_is_simd_type (klass)) nfields = 16/ esize; size = nfields; eltypes = g_new (LLVMTypeRef, size); for (i = 0; i < size; ++i) eltypes [i] = esize == 4 ? LLVMFloatType () : LLVMDoubleType (); } else { MonoSizeAlign size_align = get_vtype_size_align (t); eltypes = g_new (LLVMTypeRef, size_align.size); size = 0; uint32_t bytes = 0; uint32_t chunk = size_align.align < TARGET_SIZEOF_VOID_P ? 
size_align.align : TARGET_SIZEOF_VOID_P; for (; chunk > 0; chunk = chunk >> 1) { for (; (bytes + chunk) <= size_align.size; bytes += chunk) { eltypes [size] = LLVMIntType (chunk * 8); ++size; } } } name = mono_type_full_name (m_class_get_byval_arg (klass)); ltype = LLVMStructCreateNamed (module->context, name); LLVMStructSetBody (ltype, eltypes, size, FALSE); g_free (eltypes); g_free (name); return ltype; } static LLVMTypeRef primitive_type_to_llvm_type (MonoTypeEnum type) { switch (type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return LLVMInt8Type (); case MONO_TYPE_I2: case MONO_TYPE_U2: return LLVMInt16Type (); case MONO_TYPE_I4: case MONO_TYPE_U4: return LLVMInt32Type (); case MONO_TYPE_I8: case MONO_TYPE_U8: return LLVMInt64Type (); case MONO_TYPE_R4: return LLVMFloatType (); case MONO_TYPE_R8: return LLVMDoubleType (); case MONO_TYPE_I: case MONO_TYPE_U: return IntPtrType (); default: return NULL; } } static MonoTypeEnum inst_c1_type (const MonoInst *ins) { return (MonoTypeEnum)ins->inst_c1; } /* * type_to_llvm_type: * * Return the LLVM type corresponding to T. */ static LLVMTypeRef type_to_llvm_type (EmitContext *ctx, MonoType *t) { if (m_type_is_byref (t)) return ThisType (); t = mini_get_underlying_type (t); LLVMTypeRef prim_llvm_type = primitive_type_to_llvm_type (t->type); if (prim_llvm_type != NULL) return prim_llvm_type; switch (t->type) { case MONO_TYPE_VOID: return LLVMVoidType (); case MONO_TYPE_OBJECT: return ObjRefType (); case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: { MonoClass *klass = mono_class_from_mono_type_internal (t); MonoClass *ptr_klass = m_class_get_element_class (klass); MonoType *ptr_type = m_class_get_byval_arg (ptr_klass); /* Handle primitive pointers */ switch (ptr_type->type) { case MONO_TYPE_I1: case MONO_TYPE_I2: case MONO_TYPE_I4: case MONO_TYPE_U1: case MONO_TYPE_U2: case MONO_TYPE_U4: return LLVMPointerType (type_to_llvm_type (ctx, ptr_type), 0); } return ObjRefType (); } case MONO_TYPE_VAR: case MONO_TYPE_MVAR: /* Because of generic sharing */ return ObjRefType (); case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (t)) return ObjRefType (); /* Fall through */ case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: { MonoClass *klass; LLVMTypeRef ltype; klass = mono_class_from_mono_type_internal (t); if (MONO_CLASS_IS_SIMD (ctx->cfg, klass)) return simd_class_to_llvm_type (ctx, klass); if (m_class_is_enumtype (klass)) return type_to_llvm_type (ctx, mono_class_enum_basetype_internal (klass)); ltype = (LLVMTypeRef)g_hash_table_lookup (ctx->module->llvm_types, klass); if (!ltype) { ltype = create_llvm_type_for_type (ctx->module, klass); g_hash_table_insert (ctx->module->llvm_types, klass, ltype); } return ltype; } default: printf ("X: %d\n", t->type); ctx->cfg->exception_message = g_strdup_printf ("type %s", mono_type_full_name (t)); ctx->cfg->disable_llvm = TRUE; return NULL; } } static gboolean primitive_type_is_unsigned (MonoTypeEnum t) { switch (t) { case MONO_TYPE_U1: case MONO_TYPE_U2: case MONO_TYPE_CHAR: case MONO_TYPE_U4: case MONO_TYPE_U8: case MONO_TYPE_U: return TRUE; default: return FALSE; } } /* * type_is_unsigned: * * Return whenever T is an unsigned int type. */ static gboolean type_is_unsigned (EmitContext *ctx, MonoType *t) { t = mini_get_underlying_type (t); if (m_type_is_byref (t)) return FALSE; return primitive_type_is_unsigned (t->type); } /* * type_to_llvm_arg_type: * * Same as type_to_llvm_type, but treat i8/i16 as i32. 
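* (E.g. a MONO_TYPE_I2 argument is widened to i32 except in llvm-only mode or on
* arm64, where the narrower type is kept — see the TARGET_ARM64 note in the body.)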
*/ static LLVMTypeRef type_to_llvm_arg_type (EmitContext *ctx, MonoType *t) { LLVMTypeRef ptype = type_to_llvm_type (ctx, t); if (ctx->cfg->llvm_only) return ptype; /* * This works on all abis except arm64/ios which passes multiple * arguments in one stack slot. */ #ifndef TARGET_ARM64 if (ptype == LLVMInt8Type () || ptype == LLVMInt16Type ()) { /* * LLVM generates code which only sets the lower bits, while JITted * code expects all the bits to be set. */ ptype = LLVMInt32Type (); } #endif return ptype; } /* * llvm_type_to_stack_type: * * Return the LLVM type which needs to be used when a value of type TYPE is pushed * on the IL stack. */ static G_GNUC_UNUSED LLVMTypeRef llvm_type_to_stack_type (MonoCompile *cfg, LLVMTypeRef type) { if (type == NULL) return NULL; if (type == LLVMInt8Type ()) return LLVMInt32Type (); else if (type == LLVMInt16Type ()) return LLVMInt32Type (); else if (!cfg->r4fp && type == LLVMFloatType ()) return LLVMDoubleType (); else return type; } /* * regtype_to_llvm_type: * * Return the LLVM type corresponding to the regtype C used in instruction * descriptions. */ static LLVMTypeRef regtype_to_llvm_type (char c) { switch (c) { case 'i': return LLVMInt32Type (); case 'l': return LLVMInt64Type (); case 'f': return LLVMDoubleType (); default: return NULL; } } /* * op_to_llvm_type: * * Return the LLVM type corresponding to the unary/binary opcode OPCODE. */ static LLVMTypeRef op_to_llvm_type (int opcode) { switch (opcode) { case OP_ICONV_TO_I1: case OP_LCONV_TO_I1: return LLVMInt8Type (); case OP_ICONV_TO_U1: case OP_LCONV_TO_U1: return LLVMInt8Type (); case OP_ICONV_TO_I2: case OP_LCONV_TO_I2: return LLVMInt16Type (); case OP_ICONV_TO_U2: case OP_LCONV_TO_U2: return LLVMInt16Type (); case OP_ICONV_TO_I4: case OP_LCONV_TO_I4: return LLVMInt32Type (); case OP_ICONV_TO_U4: case OP_LCONV_TO_U4: return LLVMInt32Type (); case OP_ICONV_TO_I8: return LLVMInt64Type (); case OP_ICONV_TO_R4: return LLVMFloatType (); case OP_ICONV_TO_R8: return LLVMDoubleType (); case OP_ICONV_TO_U8: return LLVMInt64Type (); case OP_FCONV_TO_I4: return LLVMInt32Type (); case OP_FCONV_TO_I8: return LLVMInt64Type (); case OP_FCONV_TO_I1: case OP_FCONV_TO_U1: case OP_RCONV_TO_I1: case OP_RCONV_TO_U1: return LLVMInt8Type (); case OP_FCONV_TO_I2: case OP_FCONV_TO_U2: case OP_RCONV_TO_I2: case OP_RCONV_TO_U2: return LLVMInt16Type (); case OP_FCONV_TO_U4: case OP_RCONV_TO_U4: return LLVMInt32Type (); case OP_FCONV_TO_U8: case OP_RCONV_TO_U8: return LLVMInt64Type (); case OP_FCONV_TO_I: case OP_RCONV_TO_I: return TARGET_SIZEOF_VOID_P == 8 ? LLVMInt64Type () : LLVMInt32Type (); case OP_IADD_OVF: case OP_IADD_OVF_UN: case OP_ISUB_OVF: case OP_ISUB_OVF_UN: case OP_IMUL_OVF: case OP_IMUL_OVF_UN: return LLVMInt32Type (); case OP_LADD_OVF: case OP_LADD_OVF_UN: case OP_LSUB_OVF: case OP_LSUB_OVF_UN: case OP_LMUL_OVF: case OP_LMUL_OVF_UN: return LLVMInt64Type (); default: printf ("%s\n", mono_inst_name (opcode)); g_assert_not_reached (); return NULL; } } #define CLAUSE_START(clause) ((clause)->try_offset) #define CLAUSE_END(clause) (((clause))->try_offset + ((clause))->try_len) /* * load_store_to_llvm_type: * * Return the size/sign/zero extension corresponding to the load/store opcode * OPCODE. 
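* Example (from the cases below): OP_LOADI1_MEMBASE yields *size = 1, *sext = TRUE
* and i8, while OP_LOADU2_MEMBASE yields *size = 2, *zext = TRUE and i16.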
*/ static LLVMTypeRef load_store_to_llvm_type (int opcode, int *size, gboolean *sext, gboolean *zext) { *sext = FALSE; *zext = FALSE; switch (opcode) { case OP_LOADI1_MEMBASE: case OP_STOREI1_MEMBASE_REG: case OP_STOREI1_MEMBASE_IMM: case OP_ATOMIC_LOAD_I1: case OP_ATOMIC_STORE_I1: *size = 1; *sext = TRUE; return LLVMInt8Type (); case OP_LOADU1_MEMBASE: case OP_LOADU1_MEM: case OP_ATOMIC_LOAD_U1: case OP_ATOMIC_STORE_U1: *size = 1; *zext = TRUE; return LLVMInt8Type (); case OP_LOADI2_MEMBASE: case OP_STOREI2_MEMBASE_REG: case OP_STOREI2_MEMBASE_IMM: case OP_ATOMIC_LOAD_I2: case OP_ATOMIC_STORE_I2: *size = 2; *sext = TRUE; return LLVMInt16Type (); case OP_LOADU2_MEMBASE: case OP_LOADU2_MEM: case OP_ATOMIC_LOAD_U2: case OP_ATOMIC_STORE_U2: *size = 2; *zext = TRUE; return LLVMInt16Type (); case OP_LOADI4_MEMBASE: case OP_LOADU4_MEMBASE: case OP_LOADI4_MEM: case OP_LOADU4_MEM: case OP_STOREI4_MEMBASE_REG: case OP_STOREI4_MEMBASE_IMM: case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_LOAD_U4: case OP_ATOMIC_STORE_U4: *size = 4; return LLVMInt32Type (); case OP_LOADI8_MEMBASE: case OP_LOADI8_MEM: case OP_STOREI8_MEMBASE_REG: case OP_STOREI8_MEMBASE_IMM: case OP_ATOMIC_LOAD_I8: case OP_ATOMIC_STORE_I8: case OP_ATOMIC_LOAD_U8: case OP_ATOMIC_STORE_U8: *size = 8; return LLVMInt64Type (); case OP_LOADR4_MEMBASE: case OP_STORER4_MEMBASE_REG: case OP_ATOMIC_LOAD_R4: case OP_ATOMIC_STORE_R4: *size = 4; return LLVMFloatType (); case OP_LOADR8_MEMBASE: case OP_STORER8_MEMBASE_REG: case OP_ATOMIC_LOAD_R8: case OP_ATOMIC_STORE_R8: *size = 8; return LLVMDoubleType (); case OP_LOAD_MEMBASE: case OP_LOAD_MEM: case OP_STORE_MEMBASE_REG: case OP_STORE_MEMBASE_IMM: *size = TARGET_SIZEOF_VOID_P; return IntPtrType (); default: g_assert_not_reached (); return NULL; } } /* * ovf_op_to_intrins: * * Return the LLVM intrinsics corresponding to the overflow opcode OPCODE. 
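* E.g. OP_IADD_OVF maps to INTRINS_SADD_OVF_I32, i.e. llvm.sadd.with.overflow.i32,
* whose second result element is the overflow flag tested by the caller.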
*/ static IntrinsicId ovf_op_to_intrins (int opcode) { switch (opcode) { case OP_IADD_OVF: return INTRINS_SADD_OVF_I32; case OP_IADD_OVF_UN: return INTRINS_UADD_OVF_I32; case OP_ISUB_OVF: return INTRINS_SSUB_OVF_I32; case OP_ISUB_OVF_UN: return INTRINS_USUB_OVF_I32; case OP_IMUL_OVF: return INTRINS_SMUL_OVF_I32; case OP_IMUL_OVF_UN: return INTRINS_UMUL_OVF_I32; case OP_LADD_OVF: return INTRINS_SADD_OVF_I64; case OP_LADD_OVF_UN: return INTRINS_UADD_OVF_I64; case OP_LSUB_OVF: return INTRINS_SSUB_OVF_I64; case OP_LSUB_OVF_UN: return INTRINS_USUB_OVF_I64; case OP_LMUL_OVF: return INTRINS_SMUL_OVF_I64; case OP_LMUL_OVF_UN: return INTRINS_UMUL_OVF_I64; default: g_assert_not_reached (); return (IntrinsicId)0; } } static IntrinsicId simd_ins_to_intrins (int opcode) { switch (opcode) { #if defined(TARGET_X86) || defined(TARGET_AMD64) case OP_CVTPD2DQ: return INTRINS_SSE_CVTPD2DQ; case OP_CVTPS2DQ: return INTRINS_SSE_CVTPS2DQ; case OP_CVTPD2PS: return INTRINS_SSE_CVTPD2PS; case OP_CVTTPD2DQ: return INTRINS_SSE_CVTTPD2DQ; case OP_CVTTPS2DQ: return INTRINS_SSE_CVTTPS2DQ; case OP_SSE_SQRTSS: return INTRINS_SSE_SQRT_SS; case OP_SSE2_SQRTSD: return INTRINS_SSE_SQRT_SD; #endif default: g_assert_not_reached (); return (IntrinsicId)0; } } static LLVMTypeRef simd_op_to_llvm_type (int opcode) { #if defined(TARGET_X86) || defined(TARGET_AMD64) switch (opcode) { case OP_EXTRACT_R8: case OP_EXPAND_R8: return sse_r8_t; case OP_EXTRACT_I8: case OP_EXPAND_I8: return sse_i8_t; case OP_EXTRACT_I4: case OP_EXPAND_I4: return sse_i4_t; case OP_EXTRACT_I2: case OP_EXTRACTX_U2: case OP_EXPAND_I2: return sse_i2_t; case OP_EXTRACT_I1: case OP_EXPAND_I1: return sse_i1_t; case OP_EXTRACT_R4: case OP_EXPAND_R4: return sse_r4_t; case OP_CVTPD2DQ: case OP_CVTPD2PS: case OP_CVTTPD2DQ: return sse_r8_t; case OP_CVTPS2DQ: case OP_CVTTPS2DQ: return sse_r4_t; case OP_SQRTPS: case OP_RSQRTPS: case OP_DUPPS_LOW: case OP_DUPPS_HIGH: return sse_r4_t; case OP_SQRTPD: case OP_DUPPD: return sse_r8_t; default: g_assert_not_reached (); return NULL; } #else return NULL; #endif } static void set_cold_cconv (LLVMValueRef func) { /* * xcode10 (watchOS) and ARM/ARM64 doesn't seem to support preserveall, it fails with: * fatal error: error in backend: Unsupported calling convention */ #if !defined(TARGET_WATCHOS) && !defined(TARGET_ARM) && !defined(TARGET_ARM64) LLVMSetFunctionCallConv (func, LLVMColdCallConv); #endif } static void set_call_cold_cconv (LLVMValueRef func) { #if !defined(TARGET_WATCHOS) && !defined(TARGET_ARM) && !defined(TARGET_ARM64) LLVMSetInstructionCallConv (func, LLVMColdCallConv); #endif } /* * get_bb: * * Return the LLVM basic block corresponding to BB. 
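* Blocks are created lazily and given stable names for debugging: bblock N becomes
* "BBN", and an exception-handler entry becomes e.g. "EH_CLAUSE0_BBN".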
*/ static LLVMBasicBlockRef get_bb (EmitContext *ctx, MonoBasicBlock *bb) { char bb_name_buf [128]; char *bb_name; if (ctx->bblocks [bb->block_num].bblock == NULL) { if (bb->flags & BB_EXCEPTION_HANDLER) { int clause_index = (mono_get_block_region_notry (ctx->cfg, bb->region) >> 8) - 1; sprintf (bb_name_buf, "EH_CLAUSE%d_BB%d", clause_index, bb->block_num); bb_name = bb_name_buf; } else if (bb->block_num < 256) { if (!ctx->module->bb_names) { ctx->module->bb_names_len = 256; ctx->module->bb_names = g_new0 (char*, ctx->module->bb_names_len); } if (!ctx->module->bb_names [bb->block_num]) { char *n; n = g_strdup_printf ("BB%d", bb->block_num); mono_memory_barrier (); ctx->module->bb_names [bb->block_num] = n; } bb_name = ctx->module->bb_names [bb->block_num]; } else { sprintf (bb_name_buf, "BB%d", bb->block_num); bb_name = bb_name_buf; } ctx->bblocks [bb->block_num].bblock = LLVMAppendBasicBlock (ctx->lmethod, bb_name); ctx->bblocks [bb->block_num].end_bblock = ctx->bblocks [bb->block_num].bblock; } return ctx->bblocks [bb->block_num].bblock; } /* * get_end_bb: * * Return the last LLVM bblock corresponding to BB. * This might not be equal to the bb returned by get_bb () since we need to generate * multiple LLVM bblocks for a mono bblock to handle throwing exceptions. */ static LLVMBasicBlockRef get_end_bb (EmitContext *ctx, MonoBasicBlock *bb) { get_bb (ctx, bb); return ctx->bblocks [bb->block_num].end_bblock; } static LLVMBasicBlockRef gen_bb (EmitContext *ctx, const char *prefix) { char bb_name [128]; sprintf (bb_name, "%s%d", prefix, ++ ctx->ex_index); return LLVMAppendBasicBlock (ctx->lmethod, bb_name); } /* * resolve_patch: * * Return the target of the patch identified by TYPE and TARGET. */ static gpointer resolve_patch (MonoCompile *cfg, MonoJumpInfoType type, gconstpointer target) { MonoJumpInfo ji; ERROR_DECL (error); gpointer res; memset (&ji, 0, sizeof (ji)); ji.type = type; ji.data.target = target; res = mono_resolve_patch_target (cfg->method, NULL, &ji, FALSE, error); mono_error_assert_ok (error); return res; } /* * convert_full: * * Emit code to convert the LLVM value V to DTYPE. */ static LLVMValueRef convert_full (EmitContext *ctx, LLVMValueRef v, LLVMTypeRef dtype, gboolean is_unsigned) { LLVMTypeRef stype = LLVMTypeOf (v); if (stype != dtype) { gboolean ext = FALSE; /* Extend */ if (dtype == LLVMInt64Type () && (stype == LLVMInt32Type () || stype == LLVMInt16Type () || stype == LLVMInt8Type ())) ext = TRUE; else if (dtype == LLVMInt32Type () && (stype == LLVMInt16Type () || stype == LLVMInt8Type ())) ext = TRUE; else if (dtype == LLVMInt16Type () && (stype == LLVMInt8Type ())) ext = TRUE; if (ext) return is_unsigned ? 
LLVMBuildZExt (ctx->builder, v, dtype, "") : LLVMBuildSExt (ctx->builder, v, dtype, ""); if (dtype == LLVMDoubleType () && stype == LLVMFloatType ()) return LLVMBuildFPExt (ctx->builder, v, dtype, ""); /* Trunc */ if (stype == LLVMInt64Type () && (dtype == LLVMInt32Type () || dtype == LLVMInt16Type () || dtype == LLVMInt8Type ())) return LLVMBuildTrunc (ctx->builder, v, dtype, ""); if (stype == LLVMInt32Type () && (dtype == LLVMInt16Type () || dtype == LLVMInt8Type ())) return LLVMBuildTrunc (ctx->builder, v, dtype, ""); if (stype == LLVMInt16Type () && dtype == LLVMInt8Type ()) return LLVMBuildTrunc (ctx->builder, v, dtype, ""); if (stype == LLVMDoubleType () && dtype == LLVMFloatType ()) return LLVMBuildFPTrunc (ctx->builder, v, dtype, ""); if (LLVMGetTypeKind (stype) == LLVMPointerTypeKind && LLVMGetTypeKind (dtype) == LLVMPointerTypeKind) return LLVMBuildBitCast (ctx->builder, v, dtype, ""); if (LLVMGetTypeKind (dtype) == LLVMPointerTypeKind) return LLVMBuildIntToPtr (ctx->builder, v, dtype, ""); if (LLVMGetTypeKind (stype) == LLVMPointerTypeKind) return LLVMBuildPtrToInt (ctx->builder, v, dtype, ""); if (mono_arch_is_soft_float ()) { if (stype == LLVMInt32Type () && dtype == LLVMFloatType ()) return LLVMBuildBitCast (ctx->builder, v, dtype, ""); if (stype == LLVMInt32Type () && dtype == LLVMDoubleType ()) return LLVMBuildBitCast (ctx->builder, LLVMBuildZExt (ctx->builder, v, LLVMInt64Type (), ""), dtype, ""); } if (LLVMGetTypeKind (stype) == LLVMVectorTypeKind && LLVMGetTypeKind (dtype) == LLVMVectorTypeKind) { if (mono_llvm_get_prim_size_bits (stype) == mono_llvm_get_prim_size_bits (dtype)) return LLVMBuildBitCast (ctx->builder, v, dtype, ""); } mono_llvm_dump_value (v); mono_llvm_dump_type (dtype); printf ("\n"); g_assert_not_reached (); return NULL; } else { return v; } } static LLVMValueRef convert (EmitContext *ctx, LLVMValueRef v, LLVMTypeRef dtype) { return convert_full (ctx, v, dtype, FALSE); } static void emit_memset (EmitContext *ctx, LLVMBuilderRef builder, LLVMValueRef v, LLVMValueRef size, int alignment) { LLVMValueRef args [5]; int aindex = 0; args [aindex ++] = v; args [aindex ++] = LLVMConstInt (LLVMInt8Type (), 0, FALSE); args [aindex ++] = size; args [aindex ++] = LLVMConstInt (LLVMInt1Type (), 0, FALSE); LLVMBuildCall (builder, get_intrins (ctx, INTRINS_MEMSET), args, aindex, ""); } /* * emit_volatile_load: * * If vreg is volatile, emit a load from its address. */ static LLVMValueRef emit_volatile_load (EmitContext *ctx, int vreg) { MonoType *t; LLVMValueRef v; // On arm64, we pass the rgctx in a callee saved // register on arm64 (x15), and llvm might keep the value in that register // even through the register is marked as 'reserved' inside llvm. v = mono_llvm_build_load (ctx->builder, ctx->addresses [vreg], "", TRUE); t = ctx->vreg_cli_types [vreg]; if (t && !m_type_is_byref (t)) { /* * Might have to zero extend since llvm doesn't have * unsigned types. */ if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_U2 || t->type == MONO_TYPE_CHAR || t->type == MONO_TYPE_BOOLEAN) v = LLVMBuildZExt (ctx->builder, v, LLVMInt32Type (), ""); else if (t->type == MONO_TYPE_I1 || t->type == MONO_TYPE_I2) v = LLVMBuildSExt (ctx->builder, v, LLVMInt32Type (), ""); else if (t->type == MONO_TYPE_U8) v = LLVMBuildZExt (ctx->builder, v, LLVMInt64Type (), ""); } return v; } /* * emit_volatile_store: * * If VREG is volatile, emit a store from its value to its address. 
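* On TARGET_WASM the store is emitted as a volatile LLVM store so the optimizer
* cannot move it; elsewhere a plain LLVMBuildStore to ctx->addresses [vreg] suffices.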
*/ static void emit_volatile_store (EmitContext *ctx, int vreg) { MonoInst *var = get_vreg_to_inst (ctx->cfg, vreg); if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) { g_assert (ctx->addresses [vreg]); #ifdef TARGET_WASM /* Need volatile stores otherwise the compiler might move them */ mono_llvm_build_store (ctx->builder, convert (ctx, ctx->values [vreg], type_to_llvm_type (ctx, var->inst_vtype)), ctx->addresses [vreg], TRUE, LLVM_BARRIER_NONE); #else LLVMBuildStore (ctx->builder, convert (ctx, ctx->values [vreg], type_to_llvm_type (ctx, var->inst_vtype)), ctx->addresses [vreg]); #endif } } static LLVMTypeRef sig_to_llvm_sig_no_cinfo (EmitContext *ctx, MonoMethodSignature *sig) { LLVMTypeRef ret_type; LLVMTypeRef *param_types = NULL; LLVMTypeRef res; int i, pindex; ret_type = type_to_llvm_type (ctx, sig->ret); if (!ctx_ok (ctx)) return NULL; param_types = g_new0 (LLVMTypeRef, (sig->param_count * 8) + 3); pindex = 0; if (sig->hasthis) param_types [pindex ++] = ThisType (); for (i = 0; i < sig->param_count; ++i) param_types [pindex ++] = type_to_llvm_arg_type (ctx, sig->params [i]); if (!ctx_ok (ctx)) { g_free (param_types); return NULL; } res = LLVMFunctionType (ret_type, param_types, pindex, FALSE); g_free (param_types); return res; } /* * sig_to_llvm_sig_full: * * Return the LLVM signature corresponding to the mono signature SIG using the * calling convention information in CINFO. Fill out the parameter mapping information in CINFO. */ static LLVMTypeRef sig_to_llvm_sig_full (EmitContext *ctx, MonoMethodSignature *sig, LLVMCallInfo *cinfo) { LLVMTypeRef ret_type; LLVMTypeRef *param_types = NULL; LLVMTypeRef res; int i, j, pindex, vret_arg_pindex = 0; gboolean vretaddr = FALSE; MonoType *rtype; if (!cinfo) return sig_to_llvm_sig_no_cinfo (ctx, sig); ret_type = type_to_llvm_type (ctx, sig->ret); if (!ctx_ok (ctx)) return NULL; rtype = mini_get_underlying_type (sig->ret); switch (cinfo->ret.storage) { case LLVMArgVtypeInReg: /* LLVM models this by returning an aggregate value */ if (cinfo->ret.pair_storage [0] == LLVMArgInIReg && cinfo->ret.pair_storage [1] == LLVMArgNone) { LLVMTypeRef members [2]; members [0] = IntPtrType (); ret_type = LLVMStructType (members, 1, FALSE); } else if (cinfo->ret.pair_storage [0] == LLVMArgNone && cinfo->ret.pair_storage [1] == LLVMArgNone) { /* Empty struct */ ret_type = LLVMVoidType (); } else if (cinfo->ret.pair_storage [0] == LLVMArgInIReg && cinfo->ret.pair_storage [1] == LLVMArgInIReg) { LLVMTypeRef members [2]; members [0] = IntPtrType (); members [1] = IntPtrType (); ret_type = LLVMStructType (members, 2, FALSE); } else { g_assert_not_reached (); } break; case LLVMArgVtypeByVal: /* Vtype returned normally by val */ break; case LLVMArgVtypeAsScalar: { int size = mono_class_value_size (mono_class_from_mono_type_internal (rtype), NULL); /* LLVM models this by returning an int */ if (size < TARGET_SIZEOF_VOID_P) { g_assert (cinfo->ret.nslots == 1); ret_type = LLVMIntType (size * 8); } else { g_assert (cinfo->ret.nslots == 1 || cinfo->ret.nslots == 2); ret_type = LLVMIntType (cinfo->ret.nslots * sizeof (target_mgreg_t) * 8); } break; } case LLVMArgAsIArgs: ret_type = LLVMArrayType (IntPtrType (), cinfo->ret.nslots); break; case LLVMArgFpStruct: { /* Vtype returned as a fp struct */ LLVMTypeRef members [16]; /* Have to create our own structure since we don't map fp structures to LLVM fp structures yet */ for (i = 0; i < cinfo->ret.nslots; ++i) members [i] = cinfo->ret.esize == 8 ? 
LLVMDoubleType () : LLVMFloatType (); ret_type = LLVMStructType (members, cinfo->ret.nslots, FALSE); break; } case LLVMArgVtypeByRef: /* Vtype returned using a hidden argument */ ret_type = LLVMVoidType (); break; case LLVMArgVtypeRetAddr: case LLVMArgGsharedvtFixed: case LLVMArgGsharedvtFixedVtype: case LLVMArgGsharedvtVariable: vretaddr = TRUE; ret_type = LLVMVoidType (); break; case LLVMArgWasmVtypeAsScalar: g_assert (cinfo->ret.esize); ret_type = LLVMIntType (cinfo->ret.esize * 8); break; default: break; } param_types = g_new0 (LLVMTypeRef, (sig->param_count * 8) + 3); pindex = 0; if (cinfo->ret.storage == LLVMArgVtypeByRef) { /* * Has to be the first argument because of the sret argument attribute * FIXME: This might conflict with passing 'this' as the first argument, but * this is only used on arm64 which has a dedicated struct return register. */ cinfo->vret_arg_pindex = pindex; param_types [pindex] = type_to_llvm_arg_type (ctx, sig->ret); if (!ctx_ok (ctx)) { g_free (param_types); return NULL; } param_types [pindex] = LLVMPointerType (param_types [pindex], 0); pindex ++; } if (!ctx->llvm_only && cinfo->rgctx_arg) { cinfo->rgctx_arg_pindex = pindex; param_types [pindex] = ctx->module->ptr_type; pindex ++; } if (cinfo->imt_arg) { cinfo->imt_arg_pindex = pindex; param_types [pindex] = ctx->module->ptr_type; pindex ++; } if (vretaddr) { /* Compute the index in the LLVM signature where the vret arg needs to be passed */ vret_arg_pindex = pindex; if (cinfo->vret_arg_index == 1) { /* Add the slots consumed by the first argument */ LLVMArgInfo *ainfo = &cinfo->args [0]; switch (ainfo->storage) { case LLVMArgVtypeInReg: for (j = 0; j < 2; ++j) { if (ainfo->pair_storage [j] == LLVMArgInIReg) vret_arg_pindex ++; } break; default: vret_arg_pindex ++; } } cinfo->vret_arg_pindex = vret_arg_pindex; } if (vretaddr && vret_arg_pindex == pindex) param_types [pindex ++] = IntPtrType (); if (sig->hasthis) { cinfo->this_arg_pindex = pindex; param_types [pindex ++] = ThisType (); cinfo->args [0].pindex = cinfo->this_arg_pindex; } if (vretaddr && vret_arg_pindex == pindex) param_types [pindex ++] = IntPtrType (); for (i = 0; i < sig->param_count; ++i) { LLVMArgInfo *ainfo = &cinfo->args [i + sig->hasthis]; if (vretaddr && vret_arg_pindex == pindex) param_types [pindex ++] = IntPtrType (); ainfo->pindex = pindex; switch (ainfo->storage) { case LLVMArgVtypeInReg: for (j = 0; j < 2; ++j) { switch (ainfo->pair_storage [j]) { case LLVMArgInIReg: param_types [pindex ++] = LLVMIntType (TARGET_SIZEOF_VOID_P * 8); break; case LLVMArgNone: break; default: g_assert_not_reached (); } } break; case LLVMArgVtypeByVal: param_types [pindex] = type_to_llvm_arg_type (ctx, ainfo->type); if (!ctx_ok (ctx)) break; param_types [pindex] = LLVMPointerType (param_types [pindex], 0); pindex ++; break; case LLVMArgAsIArgs: if (ainfo->esize == 8) param_types [pindex] = LLVMArrayType (LLVMInt64Type (), ainfo->nslots); else param_types [pindex] = LLVMArrayType (IntPtrType (), ainfo->nslots); pindex ++; break; case LLVMArgVtypeAddr: case LLVMArgVtypeByRef: param_types [pindex] = type_to_llvm_arg_type (ctx, ainfo->type); if (!ctx_ok (ctx)) break; param_types [pindex] = LLVMPointerType (param_types [pindex], 0); pindex ++; break; case LLVMArgAsFpArgs: { int j; /* Emit dummy fp arguments if needed so the rest is passed on the stack */ for (j = 0; j < ainfo->ndummy_fpargs; ++j) param_types [pindex ++] = LLVMDoubleType (); for (j = 0; j < ainfo->nslots; ++j) param_types [pindex ++] = ainfo->esize == 8 ? 
LLVMDoubleType () : LLVMFloatType (); break; } case LLVMArgVtypeAsScalar: g_assert_not_reached (); break; case LLVMArgWasmVtypeAsScalar: g_assert (ainfo->esize); param_types [pindex ++] = LLVMIntType (ainfo->esize * 8); break; case LLVMArgGsharedvtFixed: case LLVMArgGsharedvtFixedVtype: param_types [pindex ++] = LLVMPointerType (type_to_llvm_arg_type (ctx, ainfo->type), 0); break; case LLVMArgGsharedvtVariable: param_types [pindex ++] = LLVMPointerType (IntPtrType (), 0); break; default: param_types [pindex ++] = type_to_llvm_arg_type (ctx, ainfo->type); break; } } if (!ctx_ok (ctx)) { g_free (param_types); return NULL; } if (vretaddr && vret_arg_pindex == pindex) param_types [pindex ++] = IntPtrType (); if (ctx->llvm_only && cinfo->rgctx_arg) { /* Pass the rgctx as the last argument */ cinfo->rgctx_arg_pindex = pindex; param_types [pindex] = ctx->module->ptr_type; pindex ++; } else if (ctx->llvm_only && cinfo->dummy_arg) { /* Pass a dummy arg last */ cinfo->dummy_arg_pindex = pindex; param_types [pindex] = ctx->module->ptr_type; pindex ++; } res = LLVMFunctionType (ret_type, param_types, pindex, FALSE); g_free (param_types); return res; } static LLVMTypeRef sig_to_llvm_sig (EmitContext *ctx, MonoMethodSignature *sig) { return sig_to_llvm_sig_full (ctx, sig, NULL); } /* * LLVMFunctionType1: * * Create an LLVM function type from the arguments. */ static G_GNUC_UNUSED LLVMTypeRef LLVMFunctionType0 (LLVMTypeRef ReturnType, int IsVarArg) { return LLVMFunctionType (ReturnType, NULL, 0, IsVarArg); } /* * LLVMFunctionType1: * * Create an LLVM function type from the arguments. */ static G_GNUC_UNUSED LLVMTypeRef LLVMFunctionType1 (LLVMTypeRef ReturnType, LLVMTypeRef ParamType1, int IsVarArg) { LLVMTypeRef param_types [1]; param_types [0] = ParamType1; return LLVMFunctionType (ReturnType, param_types, 1, IsVarArg); } /* * LLVMFunctionType2: * * Create an LLVM function type from the arguments. */ static G_GNUC_UNUSED LLVMTypeRef LLVMFunctionType2 (LLVMTypeRef ReturnType, LLVMTypeRef ParamType1, LLVMTypeRef ParamType2, int IsVarArg) { LLVMTypeRef param_types [2]; param_types [0] = ParamType1; param_types [1] = ParamType2; return LLVMFunctionType (ReturnType, param_types, 2, IsVarArg); } /* * LLVMFunctionType3: * * Create an LLVM function type from the arguments. 
*/ static G_GNUC_UNUSED LLVMTypeRef LLVMFunctionType3 (LLVMTypeRef ReturnType, LLVMTypeRef ParamType1, LLVMTypeRef ParamType2, LLVMTypeRef ParamType3, int IsVarArg) { LLVMTypeRef param_types [3]; param_types [0] = ParamType1; param_types [1] = ParamType2; param_types [2] = ParamType3; return LLVMFunctionType (ReturnType, param_types, 3, IsVarArg); } static G_GNUC_UNUSED LLVMTypeRef LLVMFunctionType4 (LLVMTypeRef ReturnType, LLVMTypeRef ParamType1, LLVMTypeRef ParamType2, LLVMTypeRef ParamType3, LLVMTypeRef ParamType4, int IsVarArg) { LLVMTypeRef param_types [4]; param_types [0] = ParamType1; param_types [1] = ParamType2; param_types [2] = ParamType3; param_types [3] = ParamType4; return LLVMFunctionType (ReturnType, param_types, 4, IsVarArg); } static G_GNUC_UNUSED LLVMTypeRef LLVMFunctionType5 (LLVMTypeRef ReturnType, LLVMTypeRef ParamType1, LLVMTypeRef ParamType2, LLVMTypeRef ParamType3, LLVMTypeRef ParamType4, LLVMTypeRef ParamType5, int IsVarArg) { LLVMTypeRef param_types [5]; param_types [0] = ParamType1; param_types [1] = ParamType2; param_types [2] = ParamType3; param_types [3] = ParamType4; param_types [4] = ParamType5; return LLVMFunctionType (ReturnType, param_types, 5, IsVarArg); } /* * create_builder: * * Create an LLVM builder and remember it so it can be freed later. */ static LLVMBuilderRef create_builder (EmitContext *ctx) { LLVMBuilderRef builder = LLVMCreateBuilder (); if (mono_use_fast_math) mono_llvm_set_fast_math (builder); ctx->builders = g_slist_prepend_mempool (ctx->cfg->mempool, ctx->builders, builder); emit_default_dbg_loc (ctx, builder); return builder; } static char* get_aotconst_name (MonoJumpInfoType type, gconstpointer data, int got_offset) { char *name; int len; switch (type) { case MONO_PATCH_INFO_JIT_ICALL_ID: name = g_strdup_printf ("jit_icall_%s", mono_find_jit_icall_info ((MonoJitICallId)(gsize)data)->name); break; case MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL: name = g_strdup_printf ("jit_icall_addr_nocall_%s", mono_find_jit_icall_info ((MonoJitICallId)(gsize)data)->name); break; case MONO_PATCH_INFO_RGCTX_SLOT_INDEX: { MonoJumpInfoRgctxEntry *entry = (MonoJumpInfoRgctxEntry*)data; name = g_strdup_printf ("rgctx_slot_index_%s", mono_rgctx_info_type_to_str (entry->info_type)); break; } case MONO_PATCH_INFO_AOT_MODULE: case MONO_PATCH_INFO_GC_SAFE_POINT_FLAG: case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR: case MONO_PATCH_INFO_GC_NURSERY_START: case MONO_PATCH_INFO_GC_NURSERY_BITS: case MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG: name = g_strdup_printf ("%s", mono_ji_type_to_string (type)); len = strlen (name); for (int i = 0; i < len; ++i) name [i] = tolower (name [i]); break; default: name = g_strdup_printf ("%s_%d", mono_ji_type_to_string (type), got_offset); len = strlen (name); for (int i = 0; i < len; ++i) name [i] = tolower (name [i]); break; } return name; } static int compute_aot_got_offset (MonoLLVMModule *module, MonoJumpInfo *ji, LLVMTypeRef llvm_type) { guint32 got_offset = mono_aot_get_got_offset (ji); LLVMTypeRef lookup_type = (LLVMTypeRef) g_hash_table_lookup (module->got_idx_to_type, GINT_TO_POINTER (got_offset)); if (!lookup_type) { lookup_type = llvm_type; } else if (llvm_type != lookup_type) { lookup_type = module->ptr_type; } else { return got_offset; } g_hash_table_insert (module->got_idx_to_type, GINT_TO_POINTER (got_offset), lookup_type); return got_offset; } /* Allocate a GOT slot for TYPE/DATA, and emit IR to load it */ static LLVMValueRef get_aotconst_module (MonoLLVMModule *module, LLVMBuilderRef builder, MonoJumpInfoType type, 
gconstpointer data, LLVMTypeRef llvm_type, guint32 *out_got_offset, MonoJumpInfo **out_ji) { guint32 got_offset; LLVMValueRef load; MonoJumpInfo tmp_ji; tmp_ji.type = type; tmp_ji.data.target = data; MonoJumpInfo *ji = mono_aot_patch_info_dup (&tmp_ji); if (out_ji) *out_ji = ji; got_offset = compute_aot_got_offset (module, ji, llvm_type); module->max_got_offset = MAX (module->max_got_offset, got_offset); if (out_got_offset) *out_got_offset = got_offset; if (module->static_link && type == MONO_PATCH_INFO_GC_SAFE_POINT_FLAG) { if (!module->gc_safe_point_flag_var) { const char *symbol = "mono_polling_required"; module->gc_safe_point_flag_var = LLVMAddGlobal (module->lmodule, llvm_type, symbol); LLVMSetLinkage (module->gc_safe_point_flag_var, LLVMExternalLinkage); } return module->gc_safe_point_flag_var; } if (module->static_link && type == MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG) { if (!module->interrupt_flag_var) { const char *symbol = "mono_thread_interruption_request_flag"; module->interrupt_flag_var = LLVMAddGlobal (module->lmodule, llvm_type, symbol); LLVMSetLinkage (module->interrupt_flag_var, LLVMExternalLinkage); } return module->interrupt_flag_var; } LLVMValueRef const_var = g_hash_table_lookup (module->aotconst_vars, GINT_TO_POINTER (got_offset)); if (!const_var) { LLVMTypeRef type = llvm_type; // FIXME: char *name = get_aotconst_name (ji->type, ji->data.target, got_offset); char *symbol = g_strdup_printf ("aotconst_%s", name); g_free (name); LLVMValueRef v = LLVMAddGlobal (module->lmodule, type, symbol); LLVMSetVisibility (v, LLVMHiddenVisibility); LLVMSetLinkage (v, LLVMInternalLinkage); LLVMSetInitializer (v, LLVMConstNull (type)); // FIXME: LLVMSetAlignment (v, 8); g_hash_table_insert (module->aotconst_vars, GINT_TO_POINTER (got_offset), v); const_var = v; } load = LLVMBuildLoad (builder, const_var, ""); if (mono_aot_is_shared_got_offset (got_offset)) set_invariant_load_flag (load); if (type == MONO_PATCH_INFO_LDSTR) set_nonnull_load_flag (load); load = LLVMBuildBitCast (builder, load, llvm_type, ""); return load; } static LLVMValueRef get_aotconst (EmitContext *ctx, MonoJumpInfoType type, gconstpointer data, LLVMTypeRef llvm_type) { MonoCompile *cfg; guint32 got_offset; MonoJumpInfo *ji; LLVMValueRef load; cfg = ctx->cfg; load = get_aotconst_module (ctx->module, ctx->builder, type, data, llvm_type, &got_offset, &ji); ji->next = cfg->patch_info; cfg->patch_info = ji; /* * If the got slot is shared, it means its initialized when the aot image is loaded, so we don't need to * explicitly initialize it. 
*/ if (!mono_aot_is_shared_got_offset (got_offset)) { //mono_print_ji (ji); //printf ("\n"); ctx->cfg->got_access_count ++; } return load; } static LLVMValueRef get_dummy_aotconst (EmitContext *ctx, LLVMTypeRef llvm_type) { LLVMValueRef indexes [2]; LLVMValueRef got_entry_addr, load; LLVMBuilderRef builder = ctx->builder; indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); got_entry_addr = LLVMBuildGEP (builder, ctx->module->dummy_got_var, indexes, 2, ""); load = LLVMBuildLoad (builder, got_entry_addr, ""); load = convert (ctx, load, llvm_type); return load; } typedef struct { MonoJumpInfo *ji; MonoMethod *method; LLVMValueRef load; LLVMTypeRef type; LLVMValueRef lmethod; } CallSite; static LLVMValueRef get_callee_llvmonly (EmitContext *ctx, LLVMTypeRef llvm_sig, MonoJumpInfoType type, gconstpointer data) { LLVMValueRef callee; char *callee_name = NULL; if (ctx->module->static_link && ctx->module->assembly->image != mono_get_corlib ()) { if (type == MONO_PATCH_INFO_JIT_ICALL_ID) { MonoJitICallInfo * const info = mono_find_jit_icall_info ((MonoJitICallId)(gsize)data); g_assert (info); if (info->func != info->wrapper) { type = MONO_PATCH_INFO_METHOD; data = mono_icall_get_wrapper_method (info); callee_name = mono_aot_get_mangled_method_name ((MonoMethod*)data); } } else if (type == MONO_PATCH_INFO_METHOD) { MonoMethod *method = (MonoMethod*)data; if (m_class_get_image (method->klass) != ctx->module->assembly->image && mono_aot_is_externally_callable (method)) callee_name = mono_aot_get_mangled_method_name (method); } } if (!callee_name) callee_name = mono_aot_get_direct_call_symbol (type, data); if (callee_name) { /* Directly callable */ // FIXME: Locking callee = (LLVMValueRef)g_hash_table_lookup (ctx->module->direct_callables, callee_name); if (!callee) { callee = LLVMAddFunction (ctx->lmodule, callee_name, llvm_sig); LLVMSetVisibility (callee, LLVMHiddenVisibility); g_hash_table_insert (ctx->module->direct_callables, (char*)callee_name, callee); } else { /* LLVMTypeRef's are uniqued */ if (LLVMGetElementType (LLVMTypeOf (callee)) != llvm_sig) return LLVMConstBitCast (callee, LLVMPointerType (llvm_sig, 0)); g_free (callee_name); } return callee; } /* * Change references to icalls/pinvokes/jit icalls to their wrappers when in corlib, so * they can be called directly. */ if (ctx->module->assembly->image == mono_get_corlib () && type == MONO_PATCH_INFO_JIT_ICALL_ID) { MonoJitICallInfo * const info = mono_find_jit_icall_info ((MonoJitICallId)(gsize)data); if (info->func != info->wrapper) { type = MONO_PATCH_INFO_METHOD; data = mono_icall_get_wrapper_method (info); } } if (ctx->module->assembly->image == mono_get_corlib () && type == MONO_PATCH_INFO_METHOD) { MonoMethod *method = (MonoMethod*)data; if (m_method_is_icall (method) || m_method_is_pinvoke (method)) data = mono_marshal_get_native_wrapper (method, TRUE, TRUE); } /* * Instead of emitting an indirect call through a got slot, emit a placeholder, and * replace it with a direct call or an indirect call in mono_llvm_fixup_aot_module () * after all methods have been emitted. 
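	 * (The placeholder is the dummy load produced by get_dummy_aotconst () below;
	 * each call site is recorded in ctx->callsite_list so the fixup pass can
	 * find and rewrite it.)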
	 */
	if (type == MONO_PATCH_INFO_METHOD) {
		MonoMethod *method = (MonoMethod*)data;
		if (m_class_get_image (method->klass)->assembly == ctx->module->assembly) {
			MonoJumpInfo tmp_ji;
			tmp_ji.type = type;
			tmp_ji.data.target = method;
			MonoJumpInfo *ji = mono_aot_patch_info_dup (&tmp_ji);
			ji->next = ctx->cfg->patch_info;
			ctx->cfg->patch_info = ji;
			LLVMTypeRef llvm_type = LLVMPointerType (llvm_sig, 0);

			ctx->cfg->got_access_count ++;

			CallSite *info = g_new0 (CallSite, 1);
			info->method = method;
			info->ji = ji;
			info->type = llvm_type;

			/*
			 * Emit a dummy load to represent the callee, and either replace it with
			 * a reference to the llvm method for the callee, or with a load from the
			 * GOT.
			 */
			LLVMValueRef load = get_dummy_aotconst (ctx, llvm_type);
			info->load = load;
			info->lmethod = ctx->lmethod;

			g_ptr_array_add (ctx->callsite_list, info);

			return load;
		}
	}

	/*
	 * All other calls are made through the GOT.
	 */
	callee = get_aotconst (ctx, type, data, LLVMPointerType (llvm_sig, 0));

	return callee;
}

/*
 * get_callee:
 *
 *   Return an llvm value representing the callee given by the arguments.
 */
static LLVMValueRef
get_callee (EmitContext *ctx, LLVMTypeRef llvm_sig, MonoJumpInfoType type, gconstpointer data)
{
	LLVMValueRef callee;
	char *callee_name;
	MonoJumpInfo *ji = NULL;

	if (ctx->llvm_only)
		return get_callee_llvmonly (ctx, llvm_sig, type, data);

	callee_name = NULL;
	/* Cross-assembly direct calls */
	if (type == MONO_PATCH_INFO_METHOD) {
		MonoMethod *cmethod = (MonoMethod*)data;

		if (m_class_get_image (cmethod->klass) != ctx->module->assembly->image) {
			MonoJumpInfo tmp_ji;

			memset (&tmp_ji, 0, sizeof (MonoJumpInfo));
			tmp_ji.type = type;
			tmp_ji.data.target = data;
			if (mono_aot_is_direct_callable (&tmp_ji)) {
				/*
				 * This will add a reference to cmethod's image so it will
				 * be loaded when the current AOT image is loaded, so
				 * the GOT slots used by the init method code are initialized.
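				 * (Hence the patch registered below is MONO_PATCH_INFO_IMAGE rather
				 * than a patch for the callee method itself.)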
*/ tmp_ji.type = MONO_PATCH_INFO_IMAGE; tmp_ji.data.image = m_class_get_image (cmethod->klass); ji = mono_aot_patch_info_dup (&tmp_ji); mono_aot_get_got_offset (ji); callee_name = mono_aot_get_mangled_method_name (cmethod); callee = (LLVMValueRef)g_hash_table_lookup (ctx->module->direct_callables, callee_name); if (!callee) { callee = LLVMAddFunction (ctx->lmodule, callee_name, llvm_sig); LLVMSetLinkage (callee, LLVMExternalLinkage); g_hash_table_insert (ctx->module->direct_callables, callee_name, callee); } else { /* LLVMTypeRef's are uniqued */ if (LLVMGetElementType (LLVMTypeOf (callee)) != llvm_sig) callee = LLVMConstBitCast (callee, LLVMPointerType (llvm_sig, 0)); g_free (callee_name); } return callee; } } } callee_name = mono_aot_get_plt_symbol (type, data); if (!callee_name) return NULL; if (ctx->cfg->compile_aot) /* Add a patch so referenced wrappers can be compiled in full aot mode */ mono_add_patch_info (ctx->cfg, 0, type, data); // FIXME: Locking callee = (LLVMValueRef)g_hash_table_lookup (ctx->module->plt_entries, callee_name); if (!callee) { callee = LLVMAddFunction (ctx->lmodule, callee_name, llvm_sig); LLVMSetVisibility (callee, LLVMHiddenVisibility); g_hash_table_insert (ctx->module->plt_entries, (char*)callee_name, callee); } if (ctx->cfg->compile_aot) { ji = g_new0 (MonoJumpInfo, 1); ji->type = type; ji->data.target = data; g_hash_table_insert (ctx->module->plt_entries_ji, ji, callee); } return callee; } static LLVMValueRef get_jit_callee (EmitContext *ctx, const char *name, LLVMTypeRef llvm_sig, MonoJumpInfoType type, gconstpointer data) { gpointer target; // This won't be patched so compile the wrapper immediately if (type == MONO_PATCH_INFO_JIT_ICALL_ID) { MonoJitICallInfo * const info = mono_find_jit_icall_info ((MonoJitICallId)(gsize)data); target = (gpointer)mono_icall_get_wrapper_full (info, TRUE); } else { target = resolve_patch (ctx->cfg, type, data); } LLVMValueRef tramp_var = LLVMAddGlobal (ctx->lmodule, LLVMPointerType (llvm_sig, 0), name); LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (LLVMConstInt (LLVMInt64Type (), (guint64)(size_t)target, FALSE), LLVMPointerType (llvm_sig, 0))); LLVMSetLinkage (tramp_var, LLVMExternalLinkage); LLVMValueRef callee = LLVMBuildLoad (ctx->builder, tramp_var, ""); return callee; } static int get_handler_clause (MonoCompile *cfg, MonoBasicBlock *bb) { MonoMethodHeader *header = cfg->header; MonoExceptionClause *clause; int i; /* Directly */ if (bb->region != -1 && MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_TRY)) return (bb->region >> 8) - 1; /* Indirectly */ for (i = 0; i < header->num_clauses; ++i) { clause = &header->clauses [i]; if (MONO_OFFSET_IN_CLAUSE (clause, bb->real_offset) && clause->flags == MONO_EXCEPTION_CLAUSE_NONE) return i; } return -1; } static MonoExceptionClause * get_most_deep_clause (MonoCompile *cfg, EmitContext *ctx, MonoBasicBlock *bb) { if (bb == cfg->bb_init) return NULL; // Since they're sorted by nesting we just need // the first one that the bb is a member of for (int i = 0; i < cfg->header->num_clauses; i++) { MonoExceptionClause *curr = &cfg->header->clauses [i]; if (MONO_OFFSET_IN_CLAUSE (curr, bb->real_offset)) return curr; } return NULL; } static void set_metadata_flag (LLVMValueRef v, const char *flag_name) { LLVMValueRef md_arg; int md_kind; md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name)); md_arg = LLVMMDString ("mono", 4); LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1)); } static void set_nonnull_load_flag (LLVMValueRef v) { LLVMValueRef md_arg; int md_kind; const char 
*flag_name;

	flag_name = "nonnull";
	md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name));
	md_arg = LLVMMDString ("<index>", strlen ("<index>"));
	LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1));
}

static void
set_nontemporal_flag (LLVMValueRef v)
{
	LLVMValueRef md_arg;
	int md_kind;
	const char *flag_name;

	// FIXME: Cache this
	flag_name = "nontemporal";
	md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name));
	md_arg = const_int32 (1);
	LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1));
}

static void
set_invariant_load_flag (LLVMValueRef v)
{
	LLVMValueRef md_arg;
	int md_kind;
	const char *flag_name;

	// FIXME: Cache this
	flag_name = "invariant.load";
	md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name));
	md_arg = LLVMMDString ("<index>", strlen ("<index>"));
	LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1));
}

/*
 * emit_call:
 *
 *   Emit an LLVM call or invoke instruction depending on whether the call is inside
 * a try region.
 */
static LLVMValueRef
emit_call (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, LLVMValueRef callee, LLVMValueRef *args, int pindex)
{
	MonoCompile *cfg = ctx->cfg;
	LLVMValueRef lcall = NULL;
	LLVMBuilderRef builder = *builder_ref;
	MonoExceptionClause *clause;

	if (ctx->llvm_only) {
		clause = bb ? get_most_deep_clause (cfg, ctx, bb) : NULL;

		// FIXME: Use an invoke only for calls inside try-catch blocks
		if (clause && (!cfg->deopt || ctx->has_catch)) {
			/*
			 * Have to use an invoke instead of a call, branching to the
			 * handler bblock of the clause containing this bblock.
			 */
			intptr_t key = CLAUSE_END (clause);

			LLVMBasicBlockRef lpad_bb = (LLVMBasicBlockRef)g_hash_table_lookup (ctx->exc_meta, (gconstpointer)key);

			// FIXME: Find the one that has the lowest end bound for the right start address
			// FIXME: Finally + nesting

			if (lpad_bb) {
				LLVMBasicBlockRef noex_bb = gen_bb (ctx, "CALL_NOEX_BB");

				/* Use an invoke */
				lcall = LLVMBuildInvoke (builder, callee, args, pindex, noex_bb, lpad_bb, "");

				builder = ctx->builder = create_builder (ctx);
				LLVMPositionBuilderAtEnd (ctx->builder, noex_bb);

				ctx->bblocks [bb->block_num].end_bblock = noex_bb;
			}
		}
	} else {
		int clause_index = get_handler_clause (cfg, bb);

		if (clause_index != -1) {
			MonoMethodHeader *header = cfg->header;
			MonoExceptionClause *ec = &header->clauses [clause_index];
			MonoBasicBlock *tblock;
			LLVMBasicBlockRef ex_bb, noex_bb;

			/*
			 * Have to use an invoke instead of a call, branching to the
			 * handler bblock of the clause containing this bblock.
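			 * (Illustratively, this produces 'invoke ... to label %NOEX_BB unwind
			 * label %EX_BB' instead of a plain 'call', so an exception thrown by the
			 * callee can unwind into the handler block.)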
			 */
			g_assert (ec->flags == MONO_EXCEPTION_CLAUSE_NONE || ec->flags == MONO_EXCEPTION_CLAUSE_FINALLY || ec->flags == MONO_EXCEPTION_CLAUSE_FAULT);

			tblock = cfg->cil_offset_to_bb [ec->handler_offset];
			g_assert (tblock);

			ctx->bblocks [tblock->block_num].invoke_target = TRUE;

			ex_bb = get_bb (ctx, tblock);
			noex_bb = gen_bb (ctx, "NOEX_BB");

			/* Use an invoke */
			lcall = LLVMBuildInvoke (builder, callee, args, pindex, noex_bb, ex_bb, "");

			builder = ctx->builder = create_builder (ctx);
			LLVMPositionBuilderAtEnd (ctx->builder, noex_bb);

			ctx->bblocks [bb->block_num].end_bblock = noex_bb;
		}
	}

	if (!lcall) {
		lcall = LLVMBuildCall (builder, callee, args, pindex, "");
		ctx->builder = builder;
	}

	if (builder_ref)
		*builder_ref = ctx->builder;

	return lcall;
}

static LLVMValueRef
emit_load (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef addr, LLVMValueRef base, const char *name, gboolean is_faulting, gboolean is_volatile, BarrierKind barrier)
{
	LLVMValueRef res;

	/*
	 * We emit volatile loads for loads which can fault, because otherwise
	 * LLVM will generate invalid code when encountering a load from a
	 * NULL address.
	 */
	if (barrier != LLVM_BARRIER_NONE)
		res = mono_llvm_build_atomic_load (*builder_ref, addr, name, is_volatile, size, barrier);
	else
		res = mono_llvm_build_load (*builder_ref, addr, name, is_volatile);

	return res;
}

static void
emit_store_general (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef value, LLVMValueRef addr, LLVMValueRef base, gboolean is_faulting, gboolean is_volatile, BarrierKind barrier)
{
	if (barrier != LLVM_BARRIER_NONE)
		mono_llvm_build_aligned_store (*builder_ref, value, addr, barrier, size);
	else
		mono_llvm_build_store (*builder_ref, value, addr, is_volatile, barrier);
}

static void
emit_store (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef value, LLVMValueRef addr, LLVMValueRef base, gboolean is_faulting, gboolean is_volatile)
{
	emit_store_general (ctx, bb, builder_ref, size, value, addr, base, is_faulting, is_volatile, LLVM_BARRIER_NONE);
}

/*
 * emit_cond_system_exception:
 *
 *   Emit code to throw the exception EXC_TYPE if the condition CMP is true.
 * Might set the ctx exception.
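 * (The emitted shape is a conditional branch to a cold EX_BB that calls a
 * throw helper and ends in 'unreachable', with normal flow resuming in NOEX_BB.)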
 */
static void
emit_cond_system_exception (EmitContext *ctx, MonoBasicBlock *bb, const char *exc_type, LLVMValueRef cmp, gboolean force_explicit)
{
	LLVMBasicBlockRef ex_bb, ex2_bb = NULL, noex_bb;
	LLVMBuilderRef builder;
	MonoClass *exc_class;
	LLVMValueRef args [2];
	LLVMValueRef callee;
	gboolean no_pc = FALSE;
	static MonoClass *exc_classes [MONO_EXC_INTRINS_NUM];

	if (IS_TARGET_AMD64)
		/* Some platforms don't require the pc argument */
		no_pc = TRUE;

	int exc_id = mini_exception_id_by_name (exc_type);
	if (!exc_classes [exc_id])
		exc_classes [exc_id] = mono_class_load_from_name (mono_get_corlib (), "System", exc_type);
	exc_class = exc_classes [exc_id];

	ex_bb = gen_bb (ctx, "EX_BB");
	if (ctx->llvm_only)
		ex2_bb = gen_bb (ctx, "EX2_BB");
	noex_bb = gen_bb (ctx, "NOEX_BB");

	LLVMValueRef branch = LLVMBuildCondBr (ctx->builder, cmp, ex_bb, noex_bb);
	if (exc_id == MONO_EXC_NULL_REF && !ctx->cfg->disable_llvm_implicit_null_checks && !force_explicit) {
		mono_llvm_set_implicit_branch (ctx->builder, branch);
	}

	/* Emit exception throwing code */
	ctx->builder = builder = create_builder (ctx);
	LLVMPositionBuilderAtEnd (builder, ex_bb);

	if (ctx->cfg->llvm_only) {
		LLVMBuildBr (builder, ex2_bb);

		ctx->builder = builder = create_builder (ctx);
		LLVMPositionBuilderAtEnd (ctx->builder, ex2_bb);

		if (exc_id == MONO_EXC_NULL_REF) {
			static LLVMTypeRef sig;

			if (!sig)
				sig = LLVMFunctionType0 (LLVMVoidType (), FALSE);
			/* Can't cache this */
			callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_nullref_exception));
			emit_call (ctx, bb, &builder, callee, NULL, 0);
		} else {
			static LLVMTypeRef sig;

			if (!sig)
				sig = LLVMFunctionType1 (LLVMVoidType (), LLVMInt32Type (), FALSE);
			callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_corlib_exception));
			args [0] = LLVMConstInt (LLVMInt32Type (), m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF, FALSE);
			emit_call (ctx, bb, &builder, callee, args, 1);
		}

		LLVMBuildUnreachable (builder);

		ctx->builder = builder = create_builder (ctx);
		LLVMPositionBuilderAtEnd (ctx->builder, noex_bb);

		ctx->bblocks [bb->block_num].end_bblock = noex_bb;

		ctx->ex_index ++;
		return;
	}

	callee = ctx->module->throw_corlib_exception;
	if (!callee) {
		LLVMTypeRef sig;

		if (no_pc)
			sig = LLVMFunctionType1 (LLVMVoidType (), LLVMInt32Type (), FALSE);
		else
			sig = LLVMFunctionType2 (LLVMVoidType (), LLVMInt32Type (), LLVMPointerType (LLVMInt8Type (), 0), FALSE);

		const MonoJitICallId icall_id = MONO_JIT_ICALL_mono_llvm_throw_corlib_exception_abs_trampoline;

		if (ctx->cfg->compile_aot) {
			callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
		} else {
			/*
			 * Differences between the LLVM/non-LLVM throw corlib exception trampoline:
			 * - On x86, LLVM generated code doesn't push the arguments
			 * - The trampoline takes the throw address as an argument, not a pc offset.
			 */
			callee = get_jit_callee (ctx, "llvm_throw_corlib_exception_trampoline", sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));

			/*
			 * Make sure that ex_bb starts with the invoke, so the block address points to it, and not to the load
			 * added by get_jit_callee ().
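			 * (LLVMBlockAddress (ctx->lmethod, ex_bb) is what gets passed as the pc
			 * argument below, so the block must begin at the invoke itself.)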
*/ ex2_bb = gen_bb (ctx, "EX2_BB"); LLVMBuildBr (builder, ex2_bb); ex_bb = ex2_bb; ctx->builder = builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, ex2_bb); } } args [0] = LLVMConstInt (LLVMInt32Type (), m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF, FALSE); /* * The LLVM mono branch contains changes so a block address can be passed as an * argument to a call. */ if (no_pc) { emit_call (ctx, bb, &builder, callee, args, 1); } else { args [1] = LLVMBlockAddress (ctx->lmethod, ex_bb); emit_call (ctx, bb, &builder, callee, args, 2); } LLVMBuildUnreachable (builder); ctx->builder = builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, noex_bb); ctx->bblocks [bb->block_num].end_bblock = noex_bb; ctx->ex_index ++; return; } /* * emit_args_to_vtype: * * Emit code to store the vtype in the arguments args to the address ADDRESS. */ static void emit_args_to_vtype (EmitContext *ctx, LLVMBuilderRef builder, MonoType *t, LLVMValueRef address, LLVMArgInfo *ainfo, LLVMValueRef *args) { int j, size, nslots; MonoClass *klass; t = mini_get_underlying_type (t); klass = mono_class_from_mono_type_internal (t); size = mono_class_value_size (klass, NULL); if (MONO_CLASS_IS_SIMD (ctx->cfg, klass)) address = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (LLVMInt8Type (), 0), ""); if (ainfo->storage == LLVMArgAsFpArgs) nslots = ainfo->nslots; else nslots = 2; for (j = 0; j < nslots; ++j) { LLVMValueRef index [2], addr, daddr; int part_size = size > TARGET_SIZEOF_VOID_P ? TARGET_SIZEOF_VOID_P : size; LLVMTypeRef part_type; while (part_size != 1 && part_size != 2 && part_size != 4 && part_size < 8) part_size ++; if (ainfo->pair_storage [j] == LLVMArgNone) continue; switch (ainfo->pair_storage [j]) { case LLVMArgInIReg: { part_type = LLVMIntType (part_size * 8); if (MONO_CLASS_IS_SIMD (ctx->cfg, klass)) { index [0] = LLVMConstInt (LLVMInt32Type (), j * TARGET_SIZEOF_VOID_P, FALSE); addr = LLVMBuildGEP (builder, address, index, 1, ""); } else { daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (IntPtrType (), 0), ""); index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE); addr = LLVMBuildGEP (builder, daddr, index, 1, ""); } LLVMBuildStore (builder, convert (ctx, args [j], part_type), LLVMBuildBitCast (ctx->builder, addr, LLVMPointerType (part_type, 0), "")); break; } case LLVMArgInFPReg: { LLVMTypeRef arg_type; if (ainfo->esize == 8) arg_type = LLVMDoubleType (); else arg_type = LLVMFloatType (); index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE); daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (arg_type, 0), ""); addr = LLVMBuildGEP (builder, daddr, index, 1, ""); LLVMBuildStore (builder, args [j], addr); break; } case LLVMArgNone: break; default: g_assert_not_reached (); } size -= TARGET_SIZEOF_VOID_P; } } /* * emit_vtype_to_args: * * Emit code to load a vtype at address ADDRESS into scalar arguments. Store the arguments * into ARGS, and the number of arguments into NARGS. 
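 * (For example, a vtype passed in two integer slots becomes two IntPtr-sized
 * loads; ainfo->pair_storage [] selects int vs. fp handling for each slot.)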
*/ static void emit_vtype_to_args (EmitContext *ctx, LLVMBuilderRef builder, MonoType *t, LLVMValueRef address, LLVMArgInfo *ainfo, LLVMValueRef *args, guint32 *nargs) { int pindex = 0; int j, nslots; LLVMTypeRef arg_type; t = mini_get_underlying_type (t); int32_t size = get_vtype_size_align (t).size; if (MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (t))) address = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (LLVMInt8Type (), 0), ""); if (ainfo->storage == LLVMArgAsFpArgs) nslots = ainfo->nslots; else nslots = 2; for (j = 0; j < nslots; ++j) { LLVMValueRef index [2], addr, daddr; int partsize = size > TARGET_SIZEOF_VOID_P ? TARGET_SIZEOF_VOID_P : size; if (ainfo->pair_storage [j] == LLVMArgNone) continue; switch (ainfo->pair_storage [j]) { case LLVMArgInIReg: if (MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (t))) { index [0] = LLVMConstInt (LLVMInt32Type (), j * TARGET_SIZEOF_VOID_P, FALSE); addr = LLVMBuildGEP (builder, address, index, 1, ""); } else { daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (IntPtrType (), 0), ""); index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE); addr = LLVMBuildGEP (builder, daddr, index, 1, ""); } args [pindex ++] = convert (ctx, LLVMBuildLoad (builder, LLVMBuildBitCast (ctx->builder, addr, LLVMPointerType (LLVMIntType (partsize * 8), 0), ""), ""), IntPtrType ()); break; case LLVMArgInFPReg: if (ainfo->esize == 8) arg_type = LLVMDoubleType (); else arg_type = LLVMFloatType (); daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (arg_type, 0), ""); index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE); addr = LLVMBuildGEP (builder, daddr, index, 1, ""); args [pindex ++] = LLVMBuildLoad (builder, addr, ""); break; case LLVMArgNone: break; default: g_assert_not_reached (); } size -= TARGET_SIZEOF_VOID_P; } *nargs = pindex; } static LLVMValueRef build_alloca_llvm_type_name (EmitContext *ctx, LLVMTypeRef t, int align, const char *name) { /* * Have to place all alloca's at the end of the entry bb, since otherwise they would * get executed every time control reaches them. */ LLVMPositionBuilder (ctx->alloca_builder, get_bb (ctx, ctx->cfg->bb_entry), ctx->last_alloca); ctx->last_alloca = mono_llvm_build_alloca (ctx->alloca_builder, t, NULL, align, name); return ctx->last_alloca; } static LLVMValueRef build_alloca_llvm_type (EmitContext *ctx, LLVMTypeRef t, int align) { return build_alloca_llvm_type_name (ctx, t, align, ""); } static LLVMValueRef build_named_alloca (EmitContext *ctx, MonoType *t, char const *name) { MonoClass *k = mono_class_from_mono_type_internal (t); int align; g_assert (!mini_is_gsharedvt_variable_type (t)); if (MONO_CLASS_IS_SIMD (ctx->cfg, k)) align = mono_class_value_size (k, NULL); else align = mono_class_min_align (k); /* Sometimes align is not a power of 2 */ while (mono_is_power_of_two (align) == -1) align ++; return build_alloca_llvm_type_name (ctx, type_to_llvm_type (ctx, t), align, name); } static LLVMValueRef build_alloca (EmitContext *ctx, MonoType *t) { return build_named_alloca (ctx, t, ""); } static LLVMValueRef emit_gsharedvt_ldaddr (EmitContext *ctx, int vreg) { /* * gsharedvt local. * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx]. 
*/ MonoCompile *cfg = ctx->cfg; LLVMBuilderRef builder = ctx->builder; LLVMValueRef offset, offset_var; LLVMValueRef info_var = ctx->values [cfg->gsharedvt_info_var->dreg]; LLVMValueRef locals_var = ctx->values [cfg->gsharedvt_locals_var->dreg]; LLVMValueRef ptr; char *name; g_assert (info_var); g_assert (locals_var); int idx = cfg->gsharedvt_vreg_to_idx [vreg] - 1; offset = LLVMConstInt (LLVMInt32Type (), MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P), FALSE); ptr = LLVMBuildAdd (builder, convert (ctx, info_var, IntPtrType ()), convert (ctx, offset, IntPtrType ()), ""); name = g_strdup_printf ("gsharedvt_local_%d_offset", vreg); offset_var = LLVMBuildLoad (builder, convert (ctx, ptr, LLVMPointerType (LLVMInt32Type (), 0)), name); return LLVMBuildAdd (builder, convert (ctx, locals_var, IntPtrType ()), convert (ctx, offset_var, IntPtrType ()), ""); } /* * Put the global into the 'llvm.used' array to prevent it from being optimized away. */ static void mark_as_used (MonoLLVMModule *module, LLVMValueRef global) { if (!module->used) module->used = g_ptr_array_sized_new (16); g_ptr_array_add (module->used, global); } static void emit_llvm_used (MonoLLVMModule *module) { LLVMModuleRef lmodule = module->lmodule; LLVMTypeRef used_type; LLVMValueRef used, *used_elem; int i; if (!module->used) return; used_type = LLVMArrayType (LLVMPointerType (LLVMInt8Type (), 0), module->used->len); used = LLVMAddGlobal (lmodule, used_type, "llvm.used"); used_elem = g_new0 (LLVMValueRef, module->used->len); for (i = 0; i < module->used->len; ++i) used_elem [i] = LLVMConstBitCast ((LLVMValueRef)g_ptr_array_index (module->used, i), LLVMPointerType (LLVMInt8Type (), 0)); LLVMSetInitializer (used, LLVMConstArray (LLVMPointerType (LLVMInt8Type (), 0), used_elem, module->used->len)); LLVMSetLinkage (used, LLVMAppendingLinkage); LLVMSetSection (used, "llvm.metadata"); } /* * emit_get_method: * * Emit a function mapping method indexes to their code */ static void emit_get_method (MonoLLVMModule *module) { LLVMModuleRef lmodule = module->lmodule; LLVMValueRef func, switch_ins, m; LLVMBasicBlockRef entry_bb, fail_bb, bb, code_start_bb, code_end_bb, main_bb; LLVMBasicBlockRef *bbs = NULL; LLVMTypeRef rtype; LLVMBuilderRef builder = LLVMCreateBuilder (); LLVMValueRef table = NULL; char *name; int i; gboolean emit_table = FALSE; #ifdef TARGET_WASM /* * Emit a table of functions instead of a switch statement, * its very efficient on wasm. This might be usable on * other platforms too. */ emit_table = TRUE; #endif rtype = LLVMPointerType (LLVMInt8Type (), 0); int table_len = module->max_method_idx + 1; if (emit_table) { LLVMTypeRef table_type; LLVMValueRef *table_elems; char *table_name; table_type = LLVMArrayType (rtype, table_len); table_name = g_strdup_printf ("%s_method_table", module->global_prefix); table = LLVMAddGlobal (lmodule, table_type, table_name); table_elems = g_new0 (LLVMValueRef, table_len); for (i = 0; i < table_len; ++i) { m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_lmethod, GINT_TO_POINTER (i)); if (m && !g_hash_table_lookup (module->no_method_table_lmethods, m)) table_elems [i] = LLVMBuildBitCast (builder, m, rtype, ""); else table_elems [i] = LLVMConstNull (rtype); } LLVMSetInitializer (table, LLVMConstArray (LLVMPointerType (LLVMInt8Type (), 0), table_elems, table_len)); } /* * Emit a switch statement. Emitting a table of function addresses is smaller/faster, * but generating code seems safer. 
*/ func = LLVMAddFunction (lmodule, module->get_method_symbol, LLVMFunctionType1 (rtype, LLVMInt32Type (), FALSE)); LLVMSetLinkage (func, LLVMExternalLinkage); LLVMSetVisibility (func, LLVMHiddenVisibility); mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND); module->get_method = func; entry_bb = LLVMAppendBasicBlock (func, "ENTRY"); /* * Return llvm_code_start/llvm_code_end when called with -1/-2. * Hopefully, the toolchain doesn't reorder these functions. If it does, * then we will have to find another solution. */ name = g_strdup_printf ("BB_CODE_START"); code_start_bb = LLVMAppendBasicBlock (func, name); g_free (name); LLVMPositionBuilderAtEnd (builder, code_start_bb); LLVMBuildRet (builder, LLVMBuildBitCast (builder, module->code_start, rtype, "")); name = g_strdup_printf ("BB_CODE_END"); code_end_bb = LLVMAppendBasicBlock (func, name); g_free (name); LLVMPositionBuilderAtEnd (builder, code_end_bb); LLVMBuildRet (builder, LLVMBuildBitCast (builder, module->code_end, rtype, "")); if (emit_table) { /* * Because table_len is computed using the method indexes available for us, it * might not include methods which are not compiled because of AOT profiles. * So table_len can be smaller than info->nmethods. Add a bounds check because * of that. * switch (index) { * case -1: return code_start; * case -2: return code_end; * default: return index < table_len ? method_table [index] : 0; */ fail_bb = LLVMAppendBasicBlock (func, "FAIL"); LLVMPositionBuilderAtEnd (builder, fail_bb); LLVMBuildRet (builder, LLVMBuildIntToPtr (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), rtype, "")); main_bb = LLVMAppendBasicBlock (func, "MAIN"); LLVMPositionBuilderAtEnd (builder, main_bb); LLVMValueRef base = table; LLVMValueRef indexes [2]; indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); indexes [1] = LLVMGetParam (func, 0); LLVMValueRef addr = LLVMBuildGEP (builder, base, indexes, 2, ""); LLVMValueRef res = mono_llvm_build_load (builder, addr, "", FALSE); LLVMBuildRet (builder, res); LLVMBasicBlockRef default_bb = LLVMAppendBasicBlock (func, "DEFAULT"); LLVMPositionBuilderAtEnd (builder, default_bb); LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSGE, LLVMGetParam (func, 0), LLVMConstInt (LLVMInt32Type (), table_len, FALSE), ""); LLVMBuildCondBr (builder, cmp, fail_bb, main_bb); LLVMPositionBuilderAtEnd (builder, entry_bb); switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), default_bb, 0); LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), -1, FALSE), code_start_bb); LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), -2, FALSE), code_end_bb); } else { bbs = g_new0 (LLVMBasicBlockRef, module->max_method_idx + 1); for (i = 0; i < module->max_method_idx + 1; ++i) { name = g_strdup_printf ("BB_%d", i); bb = LLVMAppendBasicBlock (func, name); g_free (name); bbs [i] = bb; LLVMPositionBuilderAtEnd (builder, bb); m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_lmethod, GINT_TO_POINTER (i)); if (m && !g_hash_table_lookup (module->no_method_table_lmethods, m)) LLVMBuildRet (builder, LLVMBuildBitCast (builder, m, rtype, "")); else LLVMBuildRet (builder, LLVMConstNull (rtype)); } fail_bb = LLVMAppendBasicBlock (func, "FAIL"); LLVMPositionBuilderAtEnd (builder, fail_bb); LLVMBuildRet (builder, LLVMConstNull (rtype)); LLVMPositionBuilderAtEnd (builder, entry_bb); switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), fail_bb, 0); LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), -1, FALSE), code_start_bb); LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), 
-2, FALSE), code_end_bb); for (i = 0; i < module->max_method_idx + 1; ++i) { LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]); } } mark_as_used (module, func); LLVMDisposeBuilder (builder); } /* * emit_get_unbox_tramp: * * Emit a function mapping method indexes to their unbox trampoline */ static void emit_get_unbox_tramp (MonoLLVMModule *module) { LLVMModuleRef lmodule = module->lmodule; LLVMValueRef func, switch_ins, m; LLVMBasicBlockRef entry_bb, fail_bb, bb; LLVMBasicBlockRef *bbs; LLVMTypeRef rtype; LLVMBuilderRef builder = LLVMCreateBuilder (); char *name; int i; gboolean emit_table = FALSE; /* Similar to emit_get_method () */ #ifndef TARGET_WATCHOS emit_table = TRUE; #endif rtype = LLVMPointerType (LLVMInt8Type (), 0); if (emit_table) { // About 10% of methods have an unbox tramp, so emit a table of indexes for them // that the runtime can search using a binary search int len = 0; for (i = 0; i < module->max_method_idx + 1; ++i) { m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i)); if (m) len ++; } LLVMTypeRef table_type, elemtype; LLVMValueRef *table_elems; LLVMValueRef table; char *table_name; int table_len; int elemsize; table_len = len; elemsize = module->max_method_idx < 65000 ? 2 : 4; // The index table elemtype = elemsize == 2 ? LLVMInt16Type () : LLVMInt32Type (); table_type = LLVMArrayType (elemtype, table_len); table_name = g_strdup_printf ("%s_unbox_tramp_indexes", module->global_prefix); table = LLVMAddGlobal (lmodule, table_type, table_name); table_elems = g_new0 (LLVMValueRef, table_len); int idx = 0; for (i = 0; i < module->max_method_idx + 1; ++i) { m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i)); if (m) table_elems [idx ++] = LLVMConstInt (elemtype, i, FALSE); } LLVMSetInitializer (table, LLVMConstArray (elemtype, table_elems, table_len)); module->unbox_tramp_indexes = table; // The trampoline table elemtype = rtype; table_type = LLVMArrayType (elemtype, table_len); table_name = g_strdup_printf ("%s_unbox_trampolines", module->global_prefix); table = LLVMAddGlobal (lmodule, table_type, table_name); table_elems = g_new0 (LLVMValueRef, table_len); idx = 0; for (i = 0; i < module->max_method_idx + 1; ++i) { m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i)); if (m) table_elems [idx ++] = LLVMBuildBitCast (builder, m, rtype, ""); } LLVMSetInitializer (table, LLVMConstArray (elemtype, table_elems, table_len)); module->unbox_trampolines = table; module->unbox_tramp_num = table_len; module->unbox_tramp_elemsize = elemsize; return; } func = LLVMAddFunction (lmodule, module->get_unbox_tramp_symbol, LLVMFunctionType1 (rtype, LLVMInt32Type (), FALSE)); LLVMSetLinkage (func, LLVMExternalLinkage); LLVMSetVisibility (func, LLVMHiddenVisibility); mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND); module->get_unbox_tramp = func; entry_bb = LLVMAppendBasicBlock (func, "ENTRY"); bbs = g_new0 (LLVMBasicBlockRef, module->max_method_idx + 1); for (i = 0; i < module->max_method_idx + 1; ++i) { m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i)); if (!m) continue; name = g_strdup_printf ("BB_%d", i); bb = LLVMAppendBasicBlock (func, name); g_free (name); bbs [i] = bb; LLVMPositionBuilderAtEnd (builder, bb); LLVMBuildRet (builder, LLVMBuildBitCast (builder, m, rtype, "")); } fail_bb = LLVMAppendBasicBlock (func, "FAIL"); LLVMPositionBuilderAtEnd (builder, fail_bb); LLVMBuildRet (builder, LLVMConstNull (rtype)); 
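	/* Dispatch on the method index; indexes without an unbox tramp fall through to FAIL. */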
LLVMPositionBuilderAtEnd (builder, entry_bb); switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), fail_bb, 0); for (i = 0; i < module->max_method_idx + 1; ++i) { m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i)); if (!m) continue; LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]); } mark_as_used (module, func); LLVMDisposeBuilder (builder); } /* * emit_init_aotconst: * * Emit a function to initialize the aotconst_ variables. Called by the runtime. */ static void emit_init_aotconst (MonoLLVMModule *module) { LLVMModuleRef lmodule = module->lmodule; LLVMValueRef func; LLVMBasicBlockRef entry_bb; LLVMBuilderRef builder = LLVMCreateBuilder (); func = LLVMAddFunction (lmodule, module->init_aotconst_symbol, LLVMFunctionType2 (LLVMVoidType (), LLVMInt32Type (), IntPtrType (), FALSE)); LLVMSetLinkage (func, LLVMExternalLinkage); LLVMSetVisibility (func, LLVMHiddenVisibility); mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND); module->init_aotconst_func = func; entry_bb = LLVMAppendBasicBlock (func, "ENTRY"); LLVMPositionBuilderAtEnd (builder, entry_bb); #ifdef TARGET_WASM /* Emit a table of aotconst addresses instead of a switch statement to save space */ LLVMValueRef aotconsts; LLVMTypeRef aotconst_addr_type = LLVMPointerType (module->ptr_type, 0); int table_size = module->max_got_offset + 1; LLVMTypeRef aotconst_arr_type = LLVMArrayType (aotconst_addr_type, table_size); LLVMValueRef aotconst_dummy = LLVMAddGlobal (module->lmodule, module->ptr_type, "aotconst_dummy"); LLVMSetInitializer (aotconst_dummy, LLVMConstNull (module->ptr_type)); LLVMSetVisibility (aotconst_dummy, LLVMHiddenVisibility); LLVMSetLinkage (aotconst_dummy, LLVMInternalLinkage); aotconsts = LLVMAddGlobal (module->lmodule, aotconst_arr_type, "aotconsts"); LLVMValueRef *aotconst_init = g_new0 (LLVMValueRef, table_size); for (int i = 0; i < table_size; ++i) { LLVMValueRef aotconst = (LLVMValueRef)g_hash_table_lookup (module->aotconst_vars, GINT_TO_POINTER (i)); if (aotconst) aotconst_init [i] = LLVMConstBitCast (aotconst, aotconst_addr_type); else aotconst_init [i] = LLVMConstBitCast (aotconst_dummy, aotconst_addr_type); } LLVMSetInitializer (aotconsts, LLVMConstArray (aotconst_addr_type, aotconst_init, table_size)); LLVMSetVisibility (aotconsts, LLVMHiddenVisibility); LLVMSetLinkage (aotconsts, LLVMInternalLinkage); LLVMBasicBlockRef exit_bb = LLVMAppendBasicBlock (func, "EXIT_BB"); LLVMBasicBlockRef main_bb = LLVMAppendBasicBlock (func, "BB"); LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSGE, LLVMGetParam (func, 0), LLVMConstInt (LLVMInt32Type (), table_size, FALSE), ""); LLVMBuildCondBr (builder, cmp, exit_bb, main_bb); LLVMPositionBuilderAtEnd (builder, main_bb); LLVMValueRef indexes [2]; indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); indexes [1] = LLVMGetParam (func, 0); LLVMValueRef aotconst_addr = LLVMBuildLoad (builder, LLVMBuildGEP (builder, aotconsts, indexes, 2, ""), ""); LLVMBuildStore (builder, LLVMBuildIntToPtr (builder, LLVMGetParam (func, 1), module->ptr_type, ""), aotconst_addr); LLVMBuildBr (builder, exit_bb); LLVMPositionBuilderAtEnd (builder, exit_bb); LLVMBuildRetVoid (builder); #else LLVMValueRef switch_ins; LLVMBasicBlockRef fail_bb, bb; LLVMBasicBlockRef *bbs = NULL; char *name; bbs = g_new0 (LLVMBasicBlockRef, module->max_got_offset + 1); for (int i = 0; i < module->max_got_offset + 1; ++i) { name = g_strdup_printf ("BB_%d", i); bb = LLVMAppendBasicBlock (func, name); g_free (name); bbs [i] = bb; 
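		/* BB_i stores the passed-in value into the aotconst variable for GOT offset i, if one was emitted. */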
LLVMPositionBuilderAtEnd (builder, bb); LLVMValueRef var = g_hash_table_lookup (module->aotconst_vars, GINT_TO_POINTER (i)); if (var) { LLVMValueRef addr = LLVMBuildBitCast (builder, var, LLVMPointerType (IntPtrType (), 0), ""); LLVMBuildStore (builder, LLVMGetParam (func, 1), addr); } LLVMBuildRetVoid (builder); } fail_bb = LLVMAppendBasicBlock (func, "FAIL"); LLVMPositionBuilderAtEnd (builder, fail_bb); LLVMBuildRetVoid (builder); LLVMPositionBuilderAtEnd (builder, entry_bb); switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), fail_bb, 0); for (int i = 0; i < module->max_got_offset + 1; ++i) LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]); #endif LLVMDisposeBuilder (builder); } /* Add a function to mark the beginning of LLVM code */ static void emit_llvm_code_start (MonoLLVMModule *module) { LLVMModuleRef lmodule = module->lmodule; LLVMValueRef func; LLVMBasicBlockRef entry_bb; LLVMBuilderRef builder; func = LLVMAddFunction (lmodule, "llvm_code_start", LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE)); LLVMSetLinkage (func, LLVMInternalLinkage); mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND); module->code_start = func; entry_bb = LLVMAppendBasicBlock (func, "ENTRY"); builder = LLVMCreateBuilder (); LLVMPositionBuilderAtEnd (builder, entry_bb); LLVMBuildRetVoid (builder); LLVMDisposeBuilder (builder); } /* * emit_init_func: * * Emit functions to initialize LLVM methods. * These are wrappers around the mini_llvm_init_method () JIT icall. * The wrappers handle adding the 'amodule' argument, loading the vtable from different locations, and they have * a cold calling convention. */ static LLVMValueRef emit_init_func (MonoLLVMModule *module, MonoAotInitSubtype subtype) { LLVMModuleRef lmodule = module->lmodule; LLVMValueRef func, indexes [2], args [16], callee, info_var, index_var, inited_var, cmp; LLVMBasicBlockRef entry_bb, inited_bb, notinited_bb; LLVMBuilderRef builder; LLVMTypeRef icall_sig; const char *wrapper_name = mono_marshal_get_aot_init_wrapper_name (subtype); LLVMTypeRef func_type = NULL; LLVMTypeRef arg_type = module->ptr_type; char *name = g_strdup_printf ("%s_%s", module->global_prefix, wrapper_name); switch (subtype) { case AOT_INIT_METHOD: func_type = LLVMFunctionType1 (LLVMVoidType (), arg_type, FALSE); break; case AOT_INIT_METHOD_GSHARED_MRGCTX: case AOT_INIT_METHOD_GSHARED_VTABLE: func_type = LLVMFunctionType2 (LLVMVoidType (), arg_type, IntPtrType (), FALSE); break; case AOT_INIT_METHOD_GSHARED_THIS: func_type = LLVMFunctionType2 (LLVMVoidType (), arg_type, ObjRefType (), FALSE); break; default: g_assert_not_reached (); } func = LLVMAddFunction (lmodule, name, func_type); info_var = LLVMGetParam (func, 0); LLVMSetLinkage (func, LLVMInternalLinkage); mono_llvm_add_func_attr (func, LLVM_ATTR_NO_INLINE); set_cold_cconv (func); entry_bb = LLVMAppendBasicBlock (func, "ENTRY"); builder = LLVMCreateBuilder (); LLVMPositionBuilderAtEnd (builder, entry_bb); /* Load method_index which is emitted at the start of the method info */ indexes [0] = const_int32 (0); indexes [1] = const_int32 (0); // FIXME: Make sure its aligned index_var = LLVMBuildLoad (builder, LLVMBuildGEP (builder, LLVMBuildBitCast (builder, info_var, LLVMPointerType (LLVMInt32Type (), 0), ""), indexes, 1, ""), "method_index"); /* Check for is_inited here as well, since this can be called from JITted code which might not check it */ indexes [0] = const_int32 (0); indexes [1] = index_var; inited_var = LLVMBuildLoad (builder, LLVMBuildGEP (builder, module->inited_var, 
indexes, 2, ""), "is_inited");

	cmp = LLVMBuildICmp (builder, LLVMIntEQ, inited_var, LLVMConstInt (LLVMTypeOf (inited_var), 0, FALSE), "");

	inited_bb = LLVMAppendBasicBlock (func, "INITED");
	notinited_bb = LLVMAppendBasicBlock (func, "NOT_INITED");

	LLVMBuildCondBr (builder, cmp, notinited_bb, inited_bb);

	LLVMPositionBuilderAtEnd (builder, notinited_bb);

	LLVMValueRef amodule_var = get_aotconst_module (module, builder, MONO_PATCH_INFO_AOT_MODULE, NULL, LLVMPointerType (IntPtrType (), 0), NULL, NULL);

	args [0] = LLVMBuildPtrToInt (builder, module->info_var, IntPtrType (), "");
	args [1] = LLVMBuildPtrToInt (builder, amodule_var, IntPtrType (), "");
	args [2] = info_var;

	switch (subtype) {
	case AOT_INIT_METHOD:
		args [3] = LLVMConstNull (IntPtrType ());
		break;
	case AOT_INIT_METHOD_GSHARED_VTABLE:
		args [3] = LLVMGetParam (func, 1);
		break;
	case AOT_INIT_METHOD_GSHARED_THIS:
		/* Load this->vtable */
		args [3] = LLVMBuildBitCast (builder, LLVMGetParam (func, 1), LLVMPointerType (IntPtrType (), 0), "");
		indexes [0] = const_int32 (MONO_STRUCT_OFFSET (MonoObject, vtable) / SIZEOF_VOID_P);
		args [3] = LLVMBuildLoad (builder, LLVMBuildGEP (builder, args [3], indexes, 1, ""), "vtable");
		break;
	case AOT_INIT_METHOD_GSHARED_MRGCTX:
		/* Load mrgctx->vtable */
		args [3] = LLVMBuildIntToPtr (builder, LLVMGetParam (func, 1), LLVMPointerType (IntPtrType (), 0), "");
		indexes [0] = const_int32 (MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable) / SIZEOF_VOID_P);
		args [3] = LLVMBuildLoad (builder, LLVMBuildGEP (builder, args [3], indexes, 1, ""), "vtable");
		break;
	default:
		g_assert_not_reached ();
		break;
	}

	/* Call the mini_llvm_init_method JIT icall */
	icall_sig = LLVMFunctionType4 (LLVMVoidType (), IntPtrType (), IntPtrType (), arg_type, IntPtrType (), FALSE);
	callee = get_aotconst_module (module, builder, MONO_PATCH_INFO_JIT_ICALL_ID, GINT_TO_POINTER (MONO_JIT_ICALL_mini_llvm_init_method), LLVMPointerType (icall_sig, 0), NULL, NULL);
	LLVMBuildCall (builder, callee, args, LLVMCountParamTypes (icall_sig), "");

	/*
	 * Set the inited flag
	 * This is already done by the LLVM methods themselves, but it's needed by JITted methods.
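	 * (JITted callers invoke this wrapper directly and may not perform the
	 * is_inited check themselves.)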
*/ indexes [0] = const_int32 (0); indexes [1] = index_var; LLVMBuildStore (builder, LLVMConstInt (LLVMInt8Type (), 1, FALSE), LLVMBuildGEP (builder, module->inited_var, indexes, 2, "")); LLVMBuildBr (builder, inited_bb); LLVMPositionBuilderAtEnd (builder, inited_bb); LLVMBuildRetVoid (builder); LLVMVerifyFunction (func, LLVMAbortProcessAction); LLVMDisposeBuilder (builder); g_free (name); return func; } /* Emit a wrapper around the parameterless JIT icall ICALL_ID with a cold calling convention */ static LLVMValueRef emit_icall_cold_wrapper (MonoLLVMModule *module, LLVMModuleRef lmodule, MonoJitICallId icall_id, gboolean aot) { LLVMValueRef func, callee; LLVMBasicBlockRef entry_bb; LLVMBuilderRef builder; LLVMTypeRef sig; char *name; name = g_strdup_printf ("%s_icall_cold_wrapper_%d", module->global_prefix, icall_id); func = LLVMAddFunction (lmodule, name, LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE)); sig = LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE); LLVMSetLinkage (func, LLVMInternalLinkage); mono_llvm_add_func_attr (func, LLVM_ATTR_NO_INLINE); set_cold_cconv (func); entry_bb = LLVMAppendBasicBlock (func, "ENTRY"); builder = LLVMCreateBuilder (); LLVMPositionBuilderAtEnd (builder, entry_bb); if (aot) { callee = get_aotconst_module (module, builder, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id), LLVMPointerType (sig, 0), NULL, NULL); } else { MonoJitICallInfo * const info = mono_find_jit_icall_info (icall_id); gpointer target = (gpointer)mono_icall_get_wrapper_full (info, TRUE); LLVMValueRef tramp_var = LLVMAddGlobal (lmodule, LLVMPointerType (sig, 0), name); LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (LLVMConstInt (LLVMInt64Type (), (guint64)(size_t)target, FALSE), LLVMPointerType (sig, 0))); LLVMSetLinkage (tramp_var, LLVMExternalLinkage); callee = LLVMBuildLoad (builder, tramp_var, ""); } LLVMBuildCall (builder, callee, NULL, 0, ""); LLVMBuildRetVoid (builder); LLVMVerifyFunction(func, LLVMAbortProcessAction); LLVMDisposeBuilder (builder); return func; } /* * Emit wrappers around the C icalls used to initialize llvm methods, to * make the calling code smaller and to enable usage of the llvm * cold calling convention. */ static void emit_init_funcs (MonoLLVMModule *module) { for (int i = 0; i < AOT_INIT_METHOD_NUM; ++i) module->init_methods [i] = emit_init_func (module, i); } static LLVMValueRef get_init_func (MonoLLVMModule *module, MonoAotInitSubtype subtype) { return module->init_methods [subtype]; } static void emit_gc_safepoint_poll (MonoLLVMModule *module, LLVMModuleRef lmodule, MonoCompile *cfg) { gboolean is_aot = cfg == NULL || cfg->compile_aot; LLVMValueRef func = mono_llvm_get_or_insert_gc_safepoint_poll (lmodule); mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND); if (is_aot) { #if TARGET_WIN32 if (module->static_link) { LLVMSetLinkage (func, LLVMInternalLinkage); /* Prevent it from being optimized away, leading to asserts inside 'opt' */ mark_as_used (module, func); } else { LLVMSetLinkage (func, LLVMWeakODRLinkage); } #else LLVMSetLinkage (func, LLVMWeakODRLinkage); #endif } else { mono_llvm_add_func_attr (func, LLVM_ATTR_OPTIMIZE_NONE); // no need to waste time here, the function is already optimized and will be inlined. 
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_INLINE); // optnone attribute requires noinline (but it will be inlined anyway) if (!module->gc_poll_cold_wrapper_compiled) { ERROR_DECL (error); /* Compiling a method here is a bit ugly, but it works */ MonoMethod *wrapper = mono_marshal_get_llvm_func_wrapper (LLVM_FUNC_WRAPPER_GC_POLL); module->gc_poll_cold_wrapper_compiled = mono_jit_compile_method (wrapper, error); mono_error_assert_ok (error); } } LLVMBasicBlockRef entry_bb = LLVMAppendBasicBlock (func, "gc.safepoint_poll.entry"); LLVMBasicBlockRef poll_bb = LLVMAppendBasicBlock (func, "gc.safepoint_poll.poll"); LLVMBasicBlockRef exit_bb = LLVMAppendBasicBlock (func, "gc.safepoint_poll.exit"); LLVMTypeRef ptr_type = LLVMPointerType (IntPtrType (), 0); LLVMBuilderRef builder = LLVMCreateBuilder (); /* entry: */ LLVMPositionBuilderAtEnd (builder, entry_bb); LLVMValueRef poll_val_ptr; if (is_aot) { poll_val_ptr = get_aotconst_module (module, builder, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, NULL, ptr_type, NULL, NULL); } else { LLVMValueRef poll_val_int = LLVMConstInt (IntPtrType (), (guint64) &mono_polling_required, FALSE); poll_val_ptr = LLVMBuildIntToPtr (builder, poll_val_int, ptr_type, ""); } LLVMValueRef poll_val_ptr_load = LLVMBuildLoad (builder, poll_val_ptr, ""); // probably needs to be volatile LLVMValueRef poll_val = LLVMBuildPtrToInt (builder, poll_val_ptr_load, IntPtrType (), ""); LLVMValueRef poll_val_zero = LLVMConstNull (LLVMTypeOf (poll_val)); LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntEQ, poll_val, poll_val_zero, ""); mono_llvm_build_weighted_branch (builder, cmp, exit_bb, poll_bb, 1000 /* weight for exit_bb */, 1 /* weight for poll_bb */); /* poll: */ LLVMPositionBuilderAtEnd (builder, poll_bb); LLVMValueRef call; if (is_aot) { LLVMValueRef icall_wrapper = emit_icall_cold_wrapper (module, lmodule, MONO_JIT_ICALL_mono_threads_state_poll, TRUE); module->gc_poll_cold_wrapper = icall_wrapper; call = LLVMBuildCall (builder, icall_wrapper, NULL, 0, ""); } else { // in JIT mode we have to emit @gc.safepoint_poll function for each method (module) // this function calls gc_poll_cold_wrapper_compiled via a global variable. // @gc.safepoint_poll will be inlined and can be deleted after -place-safepoints pass. 
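		// (The call target is reached through the 'mono_threads_state_poll' global
		// below, which is pre-resolved to the compiled cold wrapper.)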
LLVMTypeRef poll_sig = LLVMFunctionType0 (LLVMVoidType (), FALSE); LLVMTypeRef poll_sig_ptr = LLVMPointerType (poll_sig, 0); gpointer target = resolve_patch (cfg, MONO_PATCH_INFO_ABS, module->gc_poll_cold_wrapper_compiled); LLVMValueRef tramp_var = LLVMAddGlobal (lmodule, poll_sig_ptr, "mono_threads_state_poll"); LLVMValueRef target_val = LLVMConstInt (LLVMInt64Type (), (guint64) target, FALSE); LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (target_val, poll_sig_ptr)); LLVMSetLinkage (tramp_var, LLVMExternalLinkage); LLVMValueRef callee = LLVMBuildLoad (builder, tramp_var, ""); call = LLVMBuildCall (builder, callee, NULL, 0, ""); } set_call_cold_cconv (call); LLVMBuildBr (builder, exit_bb); /* exit: */ LLVMPositionBuilderAtEnd (builder, exit_bb); LLVMBuildRetVoid (builder); LLVMDisposeBuilder (builder); } static void emit_llvm_code_end (MonoLLVMModule *module) { LLVMModuleRef lmodule = module->lmodule; LLVMValueRef func; LLVMBasicBlockRef entry_bb; LLVMBuilderRef builder; func = LLVMAddFunction (lmodule, "llvm_code_end", LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE)); LLVMSetLinkage (func, LLVMInternalLinkage); mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND); module->code_end = func; entry_bb = LLVMAppendBasicBlock (func, "ENTRY"); builder = LLVMCreateBuilder (); LLVMPositionBuilderAtEnd (builder, entry_bb); LLVMBuildRetVoid (builder); LLVMDisposeBuilder (builder); } static void emit_div_check (EmitContext *ctx, LLVMBuilderRef builder, MonoBasicBlock *bb, MonoInst *ins, LLVMValueRef lhs, LLVMValueRef rhs) { gboolean need_div_check = ctx->cfg->backend->need_div_check; if (bb->region) /* LLVM doesn't know that these can throw an exception since they are not called through an intrinsic */ need_div_check = TRUE; if (!need_div_check) return; switch (ins->opcode) { case OP_IDIV: case OP_LDIV: case OP_IREM: case OP_LREM: case OP_IDIV_UN: case OP_LDIV_UN: case OP_IREM_UN: case OP_LREM_UN: case OP_IDIV_IMM: case OP_LDIV_IMM: case OP_IREM_IMM: case OP_LREM_IMM: case OP_IDIV_UN_IMM: case OP_LDIV_UN_IMM: case OP_IREM_UN_IMM: case OP_LREM_UN_IMM: { LLVMValueRef cmp; gboolean is_signed = (ins->opcode == OP_IDIV || ins->opcode == OP_LDIV || ins->opcode == OP_IREM || ins->opcode == OP_LREM || ins->opcode == OP_IDIV_IMM || ins->opcode == OP_LDIV_IMM || ins->opcode == OP_IREM_IMM || ins->opcode == OP_LREM_IMM); cmp = LLVMBuildICmp (builder, LLVMIntEQ, rhs, LLVMConstInt (LLVMTypeOf (rhs), 0, FALSE), ""); emit_cond_system_exception (ctx, bb, "DivideByZeroException", cmp, FALSE); if (!ctx_ok (ctx)) break; builder = ctx->builder; /* b == -1 && a == 0x80000000 */ if (is_signed) { LLVMValueRef c = (LLVMTypeOf (lhs) == LLVMInt32Type ()) ? LLVMConstInt (LLVMTypeOf (lhs), 0x80000000, FALSE) : LLVMConstInt (LLVMTypeOf (lhs), 0x8000000000000000LL, FALSE); LLVMValueRef cond1 = LLVMBuildICmp (builder, LLVMIntEQ, rhs, LLVMConstInt (LLVMTypeOf (rhs), -1, FALSE), ""); LLVMValueRef cond2 = LLVMBuildICmp (builder, LLVMIntEQ, lhs, c, ""); cmp = LLVMBuildICmp (builder, LLVMIntEQ, LLVMBuildAnd (builder, cond1, cond2, ""), LLVMConstInt (LLVMInt1Type (), 1, FALSE), ""); emit_cond_system_exception (ctx, bb, "OverflowException", cmp, FALSE); if (!ctx_ok (ctx)) break; builder = ctx->builder; } break; } default: break; } } /* * emit_method_init: * * Emit code to initialize the GOT slots used by the method. 
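 * (The fast path below is a load of inited_var [method_index] plus an
 * llvm.expect hint; the slow path makes a cold call to the AOT init wrapper.)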
*/ static void emit_method_init (EmitContext *ctx) { LLVMValueRef indexes [16], args [16]; LLVMValueRef inited_var, cmp, call; LLVMBasicBlockRef inited_bb, notinited_bb; LLVMBuilderRef builder = ctx->builder; MonoCompile *cfg = ctx->cfg; MonoAotInitSubtype subtype; ctx->module->max_inited_idx = MAX (ctx->module->max_inited_idx, cfg->method_index); indexes [0] = const_int32 (0); indexes [1] = const_int32 (cfg->method_index); inited_var = LLVMBuildLoad (builder, LLVMBuildGEP (builder, ctx->module->inited_var, indexes, 2, ""), "is_inited"); args [0] = inited_var; args [1] = LLVMConstInt (LLVMInt8Type (), 1, FALSE); inited_var = LLVMBuildCall (ctx->builder, get_intrins (ctx, INTRINS_EXPECT_I8), args, 2, ""); cmp = LLVMBuildICmp (builder, LLVMIntEQ, inited_var, LLVMConstInt (LLVMTypeOf (inited_var), 0, FALSE), ""); inited_bb = ctx->inited_bb; notinited_bb = gen_bb (ctx, "NOTINITED_BB"); ctx->cfg->llvmonly_init_cond = LLVMBuildCondBr (ctx->builder, cmp, notinited_bb, inited_bb); builder = ctx->builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, notinited_bb); LLVMTypeRef type = LLVMArrayType (LLVMInt8Type (), 0); char *symbol = g_strdup_printf ("info_dummy_%s", cfg->llvm_method_name); LLVMValueRef info_var = LLVMAddGlobal (ctx->lmodule, type, symbol); g_free (symbol); cfg->llvm_dummy_info_var = info_var; int nargs = 0; args [nargs ++] = convert (ctx, info_var, ctx->module->ptr_type); switch (cfg->rgctx_access) { case MONO_RGCTX_ACCESS_MRGCTX: if (ctx->rgctx_arg) { args [nargs ++] = convert (ctx, ctx->rgctx_arg, IntPtrType ()); subtype = AOT_INIT_METHOD_GSHARED_MRGCTX; } else { g_assert (ctx->this_arg); args [nargs ++] = convert (ctx, ctx->this_arg, ObjRefType ()); subtype = AOT_INIT_METHOD_GSHARED_THIS; } break; case MONO_RGCTX_ACCESS_VTABLE: args [nargs ++] = convert (ctx, ctx->rgctx_arg, IntPtrType ()); subtype = AOT_INIT_METHOD_GSHARED_VTABLE; break; case MONO_RGCTX_ACCESS_THIS: args [nargs ++] = convert (ctx, ctx->this_arg, ObjRefType ()); subtype = AOT_INIT_METHOD_GSHARED_THIS; break; case MONO_RGCTX_ACCESS_NONE: subtype = AOT_INIT_METHOD; break; default: g_assert_not_reached (); } call = LLVMBuildCall (builder, ctx->module->init_methods [subtype], args, nargs, ""); /* * This enables llvm to keep arguments in their original registers/ * scratch registers, since the call will not clobber them. */ set_call_cold_cconv (call); // Set the inited flag indexes [0] = const_int32 (0); indexes [1] = const_int32 (cfg->method_index); LLVMBuildStore (builder, LLVMConstInt (LLVMInt8Type (), 1, FALSE), LLVMBuildGEP (builder, ctx->module->inited_var, indexes, 2, "")); LLVMBuildBr (builder, inited_bb); ctx->bblocks [cfg->bb_entry->block_num].end_bblock = inited_bb; builder = ctx->builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, inited_bb); } static void emit_unbox_tramp (EmitContext *ctx, const char *method_name, LLVMTypeRef method_type, LLVMValueRef method, int method_index) { /* * Emit unbox trampoline using a tailcall */ LLVMValueRef tramp, call, *args; LLVMBuilderRef builder; LLVMBasicBlockRef lbb; LLVMCallInfo *linfo; char *tramp_name; int i, nargs; tramp_name = g_strdup_printf ("ut_%s", method_name); tramp = LLVMAddFunction (ctx->module->lmodule, tramp_name, method_type); LLVMSetLinkage (tramp, LLVMInternalLinkage); mono_llvm_add_func_attr (tramp, LLVM_ATTR_OPTIMIZE_FOR_SIZE); //mono_llvm_add_func_attr (tramp, LLVM_ATTR_NO_UNWIND); linfo = ctx->linfo; // FIXME: Reduce code duplication with mono_llvm_compile_method () etc. 
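	// The body below biases 'this' by MONO_ABI_SIZEOF (MonoObject) and forwards the call.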
	if (!ctx->llvm_only && ctx->rgctx_arg_pindex != -1)
		mono_llvm_add_param_attr (LLVMGetParam (tramp, ctx->rgctx_arg_pindex), LLVM_ATTR_IN_REG);
	if (ctx->cfg->vret_addr) {
		LLVMSetValueName (LLVMGetParam (tramp, linfo->vret_arg_pindex), "vret");
		if (linfo->ret.storage == LLVMArgVtypeByRef) {
			mono_llvm_add_param_attr (LLVMGetParam (tramp, linfo->vret_arg_pindex), LLVM_ATTR_STRUCT_RET);
			mono_llvm_add_param_attr (LLVMGetParam (tramp, linfo->vret_arg_pindex), LLVM_ATTR_NO_ALIAS);
		}
	}

	lbb = LLVMAppendBasicBlock (tramp, "");
	builder = LLVMCreateBuilder ();
	LLVMPositionBuilderAtEnd (builder, lbb);

	nargs = LLVMCountParamTypes (method_type);
	args = g_new0 (LLVMValueRef, nargs);
	for (i = 0; i < nargs; ++i) {
		args [i] = LLVMGetParam (tramp, i);
		if (i == ctx->this_arg_pindex) {
			LLVMTypeRef arg_type = LLVMTypeOf (args [i]);

			args [i] = LLVMBuildPtrToInt (builder, args [i], IntPtrType (), "");
			args [i] = LLVMBuildAdd (builder, args [i], LLVMConstInt (IntPtrType (), MONO_ABI_SIZEOF (MonoObject), FALSE), "");
			args [i] = LLVMBuildIntToPtr (builder, args [i], arg_type, "");
		}
	}
	call = LLVMBuildCall (builder, method, args, nargs, "");
	if (!ctx->llvm_only && ctx->rgctx_arg_pindex != -1)
		mono_llvm_add_instr_attr (call, 1 + ctx->rgctx_arg_pindex, LLVM_ATTR_IN_REG);
	if (linfo->ret.storage == LLVMArgVtypeByRef)
		mono_llvm_add_instr_attr (call, 1 + linfo->vret_arg_pindex, LLVM_ATTR_STRUCT_RET);

	// FIXME: This causes assertions in clang
	//mono_llvm_set_must_tailcall (call);
	if (LLVMGetReturnType (method_type) == LLVMVoidType ())
		LLVMBuildRetVoid (builder);
	else
		LLVMBuildRet (builder, call);

	g_hash_table_insert (ctx->module->idx_to_unbox_tramp, GINT_TO_POINTER (method_index), tramp);

	LLVMDisposeBuilder (builder);
}

#ifdef TARGET_WASM
static void
emit_gc_pin (EmitContext *ctx, LLVMBuilderRef builder, int vreg)
{
	LLVMValueRef index0 = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
	LLVMValueRef index1 = LLVMConstInt (LLVMInt32Type (), ctx->gc_var_indexes [vreg] - 1, FALSE);
	LLVMValueRef indexes [] = { index0, index1 };
	LLVMValueRef addr = LLVMBuildGEP (builder, ctx->gc_pin_area, indexes, 2, "");
	mono_llvm_build_store (builder, convert (ctx, ctx->values [vreg], IntPtrType ()), addr, TRUE, LLVM_BARRIER_NONE);
}
#endif

/*
 * emit_entry_bb:
 *
 *   Emit code to load/convert arguments.
 */
static void
emit_entry_bb (EmitContext *ctx, LLVMBuilderRef builder)
{
	int i, j, pindex;
	MonoCompile *cfg = ctx->cfg;
	MonoMethodSignature *sig = ctx->sig;
	LLVMCallInfo *linfo = ctx->linfo;
	MonoBasicBlock *bb;
	char **names;

	LLVMBuilderRef old_builder = ctx->builder;
	ctx->builder = builder;

	ctx->alloca_builder = create_builder (ctx);

#ifdef TARGET_WASM
	/*
	 * For GC stack scanning to work, allocate an area on the stack and store
	 * every ref vreg into it after it's written. Because the stack is scanned
	 * conservatively, the objects will be pinned, so the vregs can directly
	 * reference the objects; there is no need to load them from the stack
	 * on every access.
	 */
	ctx->gc_var_indexes = g_new0 (int, cfg->next_vreg);
	int ngc_vars = 0;
	for (i = 0; i < cfg->next_vreg; ++i) {
		if (vreg_is_ref (cfg, i)) {
			ctx->gc_var_indexes [i] = ngc_vars + 1;
			ngc_vars ++;
		}
	}

	// FIXME: Count only live vregs
	ctx->gc_pin_area = build_alloca_llvm_type_name (ctx, LLVMArrayType (IntPtrType (), ngc_vars), 0, "gc_pin");
#endif

	/*
	 * Handle indirect/volatile variables by allocating memory for them
	 * using 'alloca', and storing their address in a temporary.
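	 * (The allocas themselves are emitted into the entry bblock via
	 * ctx->alloca_builder, so they execute exactly once; see
	 * build_alloca_llvm_type_name ().)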
*/ for (i = 0; i < cfg->num_varinfo; ++i) { MonoInst *var = cfg->varinfo [i]; if ((var->opcode == OP_GSHAREDVT_LOCAL || var->opcode == OP_GSHAREDVT_ARG_REGOFFSET)) continue; if (var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (mini_type_is_vtype (var->inst_vtype) && !MONO_CLASS_IS_SIMD (ctx->cfg, var->klass))) { if (!ctx_ok (ctx)) return; /* Could be already created by an OP_VPHI */ if (!ctx->addresses [var->dreg]) { if (var->flags & MONO_INST_LMF) { // FIXME: Allocate a smaller struct in the deopt case int size = cfg->deopt ? MONO_ABI_SIZEOF (MonoLMFExt) : MONO_ABI_SIZEOF (MonoLMF); ctx->addresses [var->dreg] = build_alloca_llvm_type_name (ctx, LLVMArrayType (LLVMInt8Type (), size), sizeof (target_mgreg_t), "lmf"); } else { char *name = g_strdup_printf ("vreg_loc_%d", var->dreg); ctx->addresses [var->dreg] = build_named_alloca (ctx, var->inst_vtype, name); g_free (name); } } ctx->vreg_cli_types [var->dreg] = var->inst_vtype; } } names = g_new (char *, sig->param_count); mono_method_get_param_names (cfg->method, (const char **) names); for (i = 0; i < sig->param_count; ++i) { LLVMArgInfo *ainfo = &linfo->args [i + sig->hasthis]; int reg = cfg->args [i + sig->hasthis]->dreg; char *name; pindex = ainfo->pindex; LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex); switch (ainfo->storage) { case LLVMArgVtypeInReg: case LLVMArgAsFpArgs: { LLVMValueRef args [8]; int j; pindex += ainfo->ndummy_fpargs; /* The argument is received as a set of int/fp arguments, store them into the real argument */ memset (args, 0, sizeof (args)); if (ainfo->storage == LLVMArgVtypeInReg) { args [0] = LLVMGetParam (ctx->lmethod, pindex); if (ainfo->pair_storage [1] != LLVMArgNone) args [1] = LLVMGetParam (ctx->lmethod, pindex + 1); } else { g_assert (ainfo->nslots <= 8); for (j = 0; j < ainfo->nslots; ++j) args [j] = LLVMGetParam (ctx->lmethod, pindex + j); } ctx->addresses [reg] = build_alloca (ctx, ainfo->type); emit_args_to_vtype (ctx, builder, ainfo->type, ctx->addresses [reg], ainfo, args); break; } case LLVMArgVtypeByVal: { ctx->addresses [reg] = LLVMGetParam (ctx->lmethod, pindex); break; } case LLVMArgVtypeAddr: case LLVMArgVtypeByRef: { /* The argument is passed by ref */ ctx->addresses [reg] = LLVMGetParam (ctx->lmethod, pindex); break; } case LLVMArgAsIArgs: { LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex); int size; MonoType *t = mini_get_underlying_type (ainfo->type); /* The argument is received as an array of ints, store it into the real argument */ ctx->addresses [reg] = build_alloca (ctx, t); size = mono_class_value_size (mono_class_from_mono_type_internal (t), NULL); if (size == 0) { } else if (size < TARGET_SIZEOF_VOID_P) { /* The upper bits of the registers might not be valid */ LLVMValueRef val = LLVMBuildExtractValue (builder, arg, 0, ""); LLVMValueRef dest = convert (ctx, ctx->addresses [reg], LLVMPointerType (LLVMIntType (size * 8), 0)); LLVMBuildStore (ctx->builder, LLVMBuildTrunc (builder, val, LLVMIntType (size * 8), ""), dest); } else { LLVMBuildStore (ctx->builder, arg, convert (ctx, ctx->addresses [reg], LLVMPointerType (LLVMTypeOf (arg), 0))); } break; } case LLVMArgVtypeAsScalar: g_assert_not_reached (); break; case LLVMArgWasmVtypeAsScalar: { MonoType *t = mini_get_underlying_type (ainfo->type); /* The argument is received as a scalar */ ctx->addresses [reg] = build_alloca (ctx, t); LLVMValueRef dest = convert (ctx, ctx->addresses [reg], LLVMPointerType (LLVMIntType (ainfo->esize * 8), 0)); LLVMBuildStore (ctx->builder, arg, dest); break; } case LLVMArgGsharedvtFixed: 
		{
			/* These are non-gsharedvt arguments passed by ref, the rest of the IR treats them as scalars */
			LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex);
			if (names [i])
				name = g_strdup_printf ("arg_%s", names [i]);
			else
				name = g_strdup_printf ("arg_%d", i);
			ctx->values [reg] = LLVMBuildLoad (builder, convert (ctx, arg, LLVMPointerType (type_to_llvm_type (ctx, ainfo->type), 0)), name);
			break;
		}
		case LLVMArgGsharedvtFixedVtype: {
			LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex);

			if (names [i])
				name = g_strdup_printf ("vtype_arg_%s", names [i]);
			else
				name = g_strdup_printf ("vtype_arg_%d", i);

			/* Non-gsharedvt vtype argument passed by ref, the rest of the IR treats it as a vtype */
			g_assert (ctx->addresses [reg]);
			LLVMSetValueName (ctx->addresses [reg], name);
			LLVMBuildStore (builder, LLVMBuildLoad (builder, convert (ctx, arg, LLVMPointerType (type_to_llvm_type (ctx, ainfo->type), 0)), ""), ctx->addresses [reg]);
			break;
		}
		case LLVMArgGsharedvtVariable:
			/* The IR treats these as variables with addresses */
			if (!ctx->addresses [reg])
				ctx->addresses [reg] = LLVMGetParam (ctx->lmethod, pindex);
			break;
		default: {
			LLVMTypeRef t;
			/* Needed to avoid phi argument mismatch errors since operations on pointers produce i32/i64 */
			if (m_type_is_byref (ainfo->type))
				t = IntPtrType ();
			else
				t = type_to_llvm_type (ctx, ainfo->type);
			ctx->values [reg] = convert_full (ctx, ctx->values [reg], llvm_type_to_stack_type (cfg, t), type_is_unsigned (ctx, ainfo->type));
			break;
		}
		}

		switch (ainfo->storage) {
		case LLVMArgVtypeInReg:
		case LLVMArgVtypeByVal:
		case LLVMArgAsIArgs:
		// FIXME: Enabling this fails on windows
		case LLVMArgVtypeAddr:
		case LLVMArgVtypeByRef:
		{
			if (MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (ainfo->type)))
				/* Treat these as normal values */
				ctx->values [reg] = LLVMBuildLoad (builder, ctx->addresses [reg], "simd_vtype");
			break;
		}
		default:
			break;
		}
	}
	g_free (names);

	if (sig->hasthis) {
		/* Handle the 'this' argument as an input to phi nodes */
		int reg = cfg->args [0]->dreg;
		if (ctx->vreg_types [reg])
			ctx->values [reg] = convert (ctx, ctx->values [reg], ctx->vreg_types [reg]);
	}

	if (cfg->vret_addr)
		emit_volatile_store (ctx, cfg->vret_addr->dreg);
	if (sig->hasthis)
		emit_volatile_store (ctx, cfg->args [0]->dreg);
	for (i = 0; i < sig->param_count; ++i)
		if (!mini_type_is_vtype (sig->params [i]))
			emit_volatile_store (ctx, cfg->args [i + sig->hasthis]->dreg);

	if (sig->hasthis && !cfg->rgctx_var && cfg->gshared && !cfg->llvm_only) {
		LLVMValueRef this_alloc;

		/*
		 * The exception handling code needs the location where the this argument was
		 * stored for gshared methods. We create a separate alloca to hold it, and mark it
		 * with the "mono.this" custom metadata to tell llvm that it needs to save its
		 * location into the LSDA.
		 */
		this_alloc = mono_llvm_build_alloca (builder, ThisType (), LLVMConstInt (LLVMInt32Type (), 1, FALSE), 0, "");
		/* This volatile store will keep the alloca alive */
		mono_llvm_build_store (builder, ctx->values [cfg->args [0]->dreg], this_alloc, TRUE, LLVM_BARRIER_NONE);

		set_metadata_flag (this_alloc, "mono.this");
	}

	if (cfg->rgctx_var) {
		if (!(cfg->rgctx_var->flags & MONO_INST_VOLATILE)) {
			/* FIXME: This could be volatile even in llvmonly mode if used inside a clause etc. */
			g_assert (!ctx->addresses [cfg->rgctx_var->dreg]);
			ctx->values [cfg->rgctx_var->dreg] = ctx->rgctx_arg;
		} else {
			LLVMValueRef rgctx_alloc, store;

			/*
			 * We handle the rgctx arg similarly to the this pointer.
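			 * That is, it is spilled to a dedicated alloca with a volatile store
			 * and tagged with the "mono.this" metadata, so the EH code can find
			 * its location the same way it finds the 'this' pointer above.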
*/ g_assert (ctx->addresses [cfg->rgctx_var->dreg]); rgctx_alloc = ctx->addresses [cfg->rgctx_var->dreg]; /* This volatile store will keep the alloca alive */ store = mono_llvm_build_store (builder, convert (ctx, ctx->rgctx_arg, IntPtrType ()), rgctx_alloc, TRUE, LLVM_BARRIER_NONE); (void)store; /* unused */ set_metadata_flag (rgctx_alloc, "mono.this"); } } #ifdef TARGET_WASM /* * Store ref arguments to the pin area. * FIXME: This might not be needed, since the caller already does it ? */ for (i = 0; i < cfg->num_varinfo; ++i) { MonoInst *var = cfg->varinfo [i]; if (var->opcode == OP_ARG && vreg_is_ref (cfg, var->dreg) && ctx->values [var->dreg]) emit_gc_pin (ctx, builder, var->dreg); } #endif if (cfg->deopt) { LLVMValueRef addr, index [2]; MonoMethodHeader *header = cfg->header; int nfields = (sig->ret->type != MONO_TYPE_VOID ? 1 : 0) + sig->hasthis + sig->param_count + header->num_locals + 2; LLVMTypeRef *types = g_alloca (nfields * sizeof (LLVMTypeRef)); int findex = 0; /* method */ types [findex ++] = IntPtrType (); /* il_offset */ types [findex ++] = LLVMInt32Type (); int data_start = findex; /* data */ if (sig->ret->type != MONO_TYPE_VOID) types [findex ++] = IntPtrType (); if (sig->hasthis) types [findex ++] = IntPtrType (); for (int i = 0; i < sig->param_count; ++i) types [findex ++] = LLVMPointerType (type_to_llvm_type (ctx, sig->params [i]), 0); for (int i = 0; i < header->num_locals; ++i) types [findex ++] = LLVMPointerType (type_to_llvm_type (ctx, header->locals [i]), 0); g_assert (findex == nfields); char *name = g_strdup_printf ("%s_il_state", ctx->method_name); LLVMTypeRef il_state_type = LLVMStructCreateNamed (ctx->module->context, name); LLVMStructSetBody (il_state_type, types, nfields, FALSE); g_free (name); ctx->il_state = build_alloca_llvm_type_name (ctx, il_state_type, 0, "il_state"); g_assert (cfg->il_state_var); ctx->addresses [cfg->il_state_var->dreg] = ctx->il_state; /* Set il_state->il_offset = -1 */ index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); index [1] = LLVMConstInt (LLVMInt32Type (), 1, FALSE); addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, ""); LLVMBuildStore (ctx->builder, LLVMConstInt (types [1], -1, FALSE), addr); /* * Set il_state->data [i] to either the address of the arg/local, or NULL. * Because of mono_liveness_handle_exception_clauses (), all locals used/reachable from * clauses are supposed to be volatile, so they have an address. 
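	 * Slots for variables which have no address are filled with NULL instead.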
*/ findex = data_start; if (sig->ret->type != MONO_TYPE_VOID) { LLVMTypeRef ret_type = type_to_llvm_type (ctx, sig->ret); ctx->il_state_ret = build_alloca_llvm_type_name (ctx, ret_type, 0, "il_state_ret"); index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); index [1] = LLVMConstInt (LLVMInt32Type (), findex, FALSE); addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, ""); LLVMBuildStore (ctx->builder, ctx->il_state_ret, convert (ctx, addr, LLVMPointerType (LLVMTypeOf (ctx->il_state_ret), 0))); findex ++; } for (int i = 0; i < sig->hasthis + sig->param_count; ++i) { LLVMValueRef var_addr = ctx->addresses [cfg->args [i]->dreg]; index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); index [1] = LLVMConstInt (LLVMInt32Type (), findex, FALSE); addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, ""); if (var_addr) LLVMBuildStore (ctx->builder, var_addr, convert (ctx, addr, LLVMPointerType (LLVMTypeOf (var_addr), 0))); else LLVMBuildStore (ctx->builder, LLVMConstNull (types [findex]), addr); findex ++; } for (int i = 0; i < header->num_locals; ++i) { LLVMValueRef var_addr = ctx->addresses [cfg->locals [i]->dreg]; index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); index [1] = LLVMConstInt (LLVMInt32Type (), findex, FALSE); addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, ""); if (var_addr) LLVMBuildStore (ctx->builder, LLVMBuildBitCast (builder, var_addr, types [findex], ""), addr); else LLVMBuildStore (ctx->builder, LLVMConstNull (types [findex]), addr); findex ++; } } /* Initialize the method if needed */ if (cfg->compile_aot) { /* Emit a location for the initialization code */ ctx->init_bb = gen_bb (ctx, "INIT_BB"); ctx->inited_bb = gen_bb (ctx, "INITED_BB"); LLVMBuildBr (ctx->builder, ctx->init_bb); builder = ctx->builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, ctx->inited_bb); ctx->bblocks [cfg->bb_entry->block_num].end_bblock = ctx->inited_bb; } /* Compute nesting between clauses */ ctx->nested_in = (GSList**)mono_mempool_alloc0 (cfg->mempool, sizeof (GSList*) * cfg->header->num_clauses); for (i = 0; i < cfg->header->num_clauses; ++i) { for (j = 0; j < cfg->header->num_clauses; ++j) { MonoExceptionClause *clause1 = &cfg->header->clauses [i]; MonoExceptionClause *clause2 = &cfg->header->clauses [j]; if (i != j && clause1->try_offset >= clause2->try_offset && clause1->handler_offset <= clause2->handler_offset) ctx->nested_in [i] = g_slist_prepend_mempool (cfg->mempool, ctx->nested_in [i], GINT_TO_POINTER (j)); } } /* * For finally clauses, create an indicator variable telling OP_ENDFINALLY whenever * it needs to continue normally, or return back to the exception handling system. */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { char name [128]; if (!(bb->region != -1 && (bb->flags & BB_EXCEPTION_HANDLER))) continue; if (bb->in_scount == 0) { LLVMValueRef val; sprintf (name, "finally_ind_bb%d", bb->block_num); val = LLVMBuildAlloca (builder, LLVMInt32Type (), name); LLVMBuildStore (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), val); ctx->bblocks [bb->block_num].finally_ind = val; } else { /* Create a variable to hold the exception var */ if (!ctx->ex_var) ctx->ex_var = LLVMBuildAlloca (builder, ObjRefType (), "exvar"); } } ctx->builder = old_builder; } static gboolean needs_extra_arg (EmitContext *ctx, MonoMethod *method) { WrapperInfo *info = NULL; /* * When targeting wasm, the caller and callee signature has to match exactly. 
This means
 * that every method which can be called indirectly needs an extra arg since the caller
 * will call it through an ftnptr and will pass an extra arg.
 */
	if (!ctx->cfg->llvm_only || !ctx->emit_dummy_arg)
		return FALSE;

	if (method->wrapper_type)
		info = mono_marshal_get_wrapper_info (method);

	switch (method->wrapper_type) {
	case MONO_WRAPPER_OTHER:
		if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG)
			/* Already have an explicit extra arg */
			return FALSE;
		break;
	case MONO_WRAPPER_MANAGED_TO_NATIVE:
		if (strstr (method->name, "icall_wrapper"))
			/* These are JIT icall wrappers which are only called from JITted code directly */
			return FALSE;
		/* Normal icalls can be virtual methods which need an extra arg */
		break;
	case MONO_WRAPPER_RUNTIME_INVOKE:
	case MONO_WRAPPER_ALLOC:
	case MONO_WRAPPER_CASTCLASS:
	case MONO_WRAPPER_WRITE_BARRIER:
	case MONO_WRAPPER_NATIVE_TO_MANAGED:
		return FALSE;
	case MONO_WRAPPER_STELEMREF:
		if (info->subtype != WRAPPER_SUBTYPE_VIRTUAL_STELEMREF)
			return FALSE;
		break;
	case MONO_WRAPPER_MANAGED_TO_MANAGED:
		if (info->subtype == WRAPPER_SUBTYPE_STRING_CTOR)
			return FALSE;
		break;
	default:
		break;
	}
	if (method->string_ctor)
		return FALSE;

	/* These are called from gsharedvt code with an indirect call which doesn't pass an extra arg */
	if (method->klass == mono_get_string_class () && (strstr (method->name, "memcpy") || strstr (method->name, "bzero")))
		return FALSE;
	return TRUE;
}

static inline gboolean
is_supported_callconv (EmitContext *ctx, MonoCallInst *call)
{
#if defined(TARGET_WIN32) && defined(TARGET_AMD64)
	gboolean result = (call->signature->call_convention == MONO_CALL_DEFAULT) ||
			  (call->signature->call_convention == MONO_CALL_C) ||
			  (call->signature->call_convention == MONO_CALL_STDCALL);
#else
	gboolean result = (call->signature->call_convention == MONO_CALL_DEFAULT) || ((call->signature->call_convention == MONO_CALL_C) && ctx->llvm_only);
#endif
	return result;
}

static void
process_call (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, MonoInst *ins)
{
	MonoCompile *cfg = ctx->cfg;
	LLVMValueRef *values = ctx->values;
	LLVMValueRef *addresses = ctx->addresses;
	MonoCallInst *call = (MonoCallInst*)ins;
	MonoMethodSignature *sig = call->signature;
	LLVMValueRef callee = NULL, lcall;
	LLVMValueRef *args;
	LLVMCallInfo *cinfo;
	GSList *l;
	int i, len, nargs;
	gboolean vretaddr;
	LLVMTypeRef llvm_sig;
	gpointer target;
	gboolean is_virtual, calli;
	LLVMBuilderRef builder = *builder_ref;

	/* If both imt and rgctx arg are required, only pass the imt arg, the rgctx trampoline will pass the rgctx */
	if (call->imt_arg_reg)
		call->rgctx_arg_reg = 0;

	if (!is_supported_callconv (ctx, call)) {
		set_failure (ctx, "non-default callconv");
		return;
	}

	cinfo = call->cinfo;
	g_assert (cinfo);
	if (call->rgctx_arg_reg)
		cinfo->rgctx_arg = TRUE;
	if (call->imt_arg_reg)
		cinfo->imt_arg = TRUE;
	if (!call->rgctx_arg_reg && call->method && needs_extra_arg (ctx, call->method))
		cinfo->dummy_arg = TRUE;

	vretaddr = (cinfo->ret.storage == LLVMArgVtypeRetAddr || cinfo->ret.storage == LLVMArgVtypeByRef || cinfo->ret.storage == LLVMArgGsharedvtFixed || cinfo->ret.storage == LLVMArgGsharedvtVariable || cinfo->ret.storage == LLVMArgGsharedvtFixedVtype);

	llvm_sig = sig_to_llvm_sig_full (ctx, sig, cinfo);
	if (!ctx_ok (ctx))
		return;

	int const opcode = ins->opcode;

	is_virtual = opcode == OP_VOIDCALL_MEMBASE || opcode == OP_CALL_MEMBASE
		|| opcode == OP_VCALL_MEMBASE || opcode == OP_LCALL_MEMBASE
		|| opcode == OP_FCALL_MEMBASE || opcode == OP_RCALL_MEMBASE
		|| opcode
== OP_TAILCALL_MEMBASE; calli = !call->fptr_is_patch && (opcode == OP_VOIDCALL_REG || opcode == OP_CALL_REG || opcode == OP_VCALL_REG || opcode == OP_LCALL_REG || opcode == OP_FCALL_REG || opcode == OP_RCALL_REG || opcode == OP_TAILCALL_REG); /* FIXME: Avoid creating duplicate methods */ if (ins->flags & MONO_INST_HAS_METHOD) { if (is_virtual) { callee = NULL; } else { if (cfg->compile_aot) { callee = get_callee (ctx, llvm_sig, MONO_PATCH_INFO_METHOD, call->method); if (!callee) { set_failure (ctx, "can't encode patch"); return; } } else if (cfg->method == call->method) { callee = ctx->lmethod; } else { ERROR_DECL (error); static int tramp_index; char *name; name = g_strdup_printf ("[tramp_%d] %s", tramp_index, mono_method_full_name (call->method, TRUE)); tramp_index ++; /* * Use our trampoline infrastructure for lazy compilation instead of llvm's. * Make all calls through a global. The address of the global will be saved in * MonoJitDomainInfo.llvm_jit_callees and updated when the method it refers to is * compiled. */ LLVMValueRef tramp_var = (LLVMValueRef)g_hash_table_lookup (ctx->jit_callees, call->method); if (!tramp_var) { target = mono_create_jit_trampoline (call->method, error); if (!is_ok (error)) { set_failure (ctx, mono_error_get_message (error)); mono_error_cleanup (error); return; } tramp_var = LLVMAddGlobal (ctx->lmodule, LLVMPointerType (llvm_sig, 0), name); LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (LLVMConstInt (LLVMInt64Type (), (guint64)(size_t)target, FALSE), LLVMPointerType (llvm_sig, 0))); LLVMSetLinkage (tramp_var, LLVMExternalLinkage); g_hash_table_insert (ctx->jit_callees, call->method, tramp_var); } callee = LLVMBuildLoad (builder, tramp_var, ""); } } if (!cfg->llvm_only && call->method && strstr (m_class_get_name (call->method->klass), "AsyncVoidMethodBuilder")) { /* LLVM miscompiles async methods */ set_failure (ctx, "#13734"); return; } } else if (calli) { } else { const MonoJitICallId jit_icall_id = call->jit_icall_id; if (jit_icall_id) { if (cfg->compile_aot) { callee = get_callee (ctx, llvm_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (jit_icall_id)); if (!callee) { set_failure (ctx, "can't encode patch"); return; } } else { callee = get_jit_callee (ctx, "", llvm_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (jit_icall_id)); } } else { if (cfg->compile_aot) { callee = NULL; if (cfg->abs_patches) { MonoJumpInfo *abs_ji = (MonoJumpInfo*)g_hash_table_lookup (cfg->abs_patches, call->fptr); if (abs_ji) { callee = get_callee (ctx, llvm_sig, abs_ji->type, abs_ji->data.target); if (!callee) { set_failure (ctx, "can't encode patch"); return; } } } if (!callee) { set_failure (ctx, "aot"); return; } } else { if (cfg->abs_patches) { MonoJumpInfo *abs_ji = (MonoJumpInfo*)g_hash_table_lookup (cfg->abs_patches, call->fptr); if (abs_ji) { ERROR_DECL (error); target = mono_resolve_patch_target (cfg->method, NULL, abs_ji, FALSE, error); mono_error_assert_ok (error); callee = get_jit_callee (ctx, "", llvm_sig, abs_ji->type, abs_ji->data.target); } else { g_assert_not_reached (); } } else { g_assert_not_reached (); } } } } if (is_virtual) { int size = TARGET_SIZEOF_VOID_P; LLVMValueRef index; g_assert (ins->inst_offset % size == 0); index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE); callee = convert (ctx, LLVMBuildLoad (builder, LLVMBuildGEP (builder, convert (ctx, values [ins->inst_basereg], LLVMPointerType (LLVMPointerType (IntPtrType (), 0), 0)), &index, 1, ""), ""), LLVMPointerType (llvm_sig, 0)); } else if (calli) { callee = convert 
(ctx, values [ins->sreg1], LLVMPointerType (llvm_sig, 0));
	} else {
		if (ins->flags & MONO_INST_HAS_METHOD) {
		}
	}

	/*
	 * Collect and convert arguments
	 */
	nargs = (sig->param_count * 16) + sig->hasthis + vretaddr + call->rgctx_reg + call->imt_arg_reg + call->cinfo->dummy_arg + 1;
	len = sizeof (LLVMValueRef) * nargs;
	args = g_newa (LLVMValueRef, nargs);
	memset (args, 0, len);
	l = call->out_ireg_args;

	if (call->rgctx_arg_reg) {
		g_assert (values [call->rgctx_arg_reg]);
		g_assert (cinfo->rgctx_arg_pindex < nargs);
		/*
		 * On ARM, the imt/rgctx argument is passed in a caller save register, but some of our trampolines etc. clobber it, leading to
		 * problems if LLVM moves the arg assignment earlier. To work around this, save the argument into a stack slot and load
		 * it using a volatile load.
		 */
#ifdef TARGET_ARM
		if (!ctx->imt_rgctx_loc)
			ctx->imt_rgctx_loc = build_alloca_llvm_type (ctx, ctx->module->ptr_type, TARGET_SIZEOF_VOID_P);
		LLVMBuildStore (builder, convert (ctx, ctx->values [call->rgctx_arg_reg], ctx->module->ptr_type), ctx->imt_rgctx_loc);
		args [cinfo->rgctx_arg_pindex] = mono_llvm_build_load (builder, ctx->imt_rgctx_loc, "", TRUE);
#else
		args [cinfo->rgctx_arg_pindex] = convert (ctx, values [call->rgctx_arg_reg], ctx->module->ptr_type);
#endif
	}
	if (call->imt_arg_reg) {
		g_assert (!ctx->llvm_only);
		g_assert (values [call->imt_arg_reg]);
		g_assert (cinfo->imt_arg_pindex < nargs);
#ifdef TARGET_ARM
		if (!ctx->imt_rgctx_loc)
			ctx->imt_rgctx_loc = build_alloca_llvm_type (ctx, ctx->module->ptr_type, TARGET_SIZEOF_VOID_P);
		LLVMBuildStore (builder, convert (ctx, ctx->values [call->imt_arg_reg], ctx->module->ptr_type), ctx->imt_rgctx_loc);
		args [cinfo->imt_arg_pindex] = mono_llvm_build_load (builder, ctx->imt_rgctx_loc, "", TRUE);
#else
		args [cinfo->imt_arg_pindex] = convert (ctx, values [call->imt_arg_reg], ctx->module->ptr_type);
#endif
	}

	switch (cinfo->ret.storage) {
	case LLVMArgGsharedvtVariable: {
		MonoInst *var = get_vreg_to_inst (cfg, call->inst.dreg);

		if (var && var->opcode == OP_GSHAREDVT_LOCAL) {
			args [cinfo->vret_arg_pindex] = convert (ctx, emit_gsharedvt_ldaddr (ctx, var->dreg), IntPtrType ());
		} else {
			g_assert (addresses [call->inst.dreg]);
			args [cinfo->vret_arg_pindex] = convert (ctx, addresses [call->inst.dreg], IntPtrType ());
		}
		break;
	}
	default:
		if (vretaddr) {
			if (!addresses [call->inst.dreg])
				addresses [call->inst.dreg] = build_alloca (ctx, sig->ret);
			g_assert (cinfo->vret_arg_pindex < nargs);
			if (cinfo->ret.storage == LLVMArgVtypeByRef)
				args [cinfo->vret_arg_pindex] = addresses [call->inst.dreg];
			else
				args [cinfo->vret_arg_pindex] = LLVMBuildPtrToInt (builder, addresses [call->inst.dreg], IntPtrType (), "");
		}
		break;
	}

	/*
	 * Sometimes the same method is called with two different signatures (i.e. with and without 'this'), so
	 * use the real callee for argument type conversion.
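	 * Concretely: query the parameter types of the LLVM function actually being
	 * called and convert each argument to those, instead of relying only on the
	 * types derived from 'sig'.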
*/ LLVMTypeRef callee_type = LLVMGetElementType (LLVMTypeOf (callee)); LLVMTypeRef *param_types = (LLVMTypeRef*)g_alloca (sizeof (LLVMTypeRef) * LLVMCountParamTypes (callee_type)); LLVMGetParamTypes (callee_type, param_types); for (i = 0; i < sig->param_count + sig->hasthis; ++i) { guint32 regpair; int reg, pindex; LLVMArgInfo *ainfo = &call->cinfo->args [i]; pindex = ainfo->pindex; regpair = (guint32)(gssize)(l->data); reg = regpair & 0xffffff; args [pindex] = values [reg]; switch (ainfo->storage) { case LLVMArgVtypeInReg: case LLVMArgAsFpArgs: { guint32 nargs; int j; for (j = 0; j < ainfo->ndummy_fpargs; ++j) args [pindex + j] = LLVMConstNull (LLVMDoubleType ()); pindex += ainfo->ndummy_fpargs; g_assert (addresses [reg]); emit_vtype_to_args (ctx, builder, ainfo->type, addresses [reg], ainfo, args + pindex, &nargs); pindex += nargs; // FIXME: alignment // FIXME: Get rid of the VMOVE break; } case LLVMArgVtypeByVal: g_assert (addresses [reg]); args [pindex] = addresses [reg]; break; case LLVMArgVtypeAddr : case LLVMArgVtypeByRef: { g_assert (addresses [reg]); args [pindex] = convert (ctx, addresses [reg], LLVMPointerType (type_to_llvm_arg_type (ctx, ainfo->type), 0)); break; } case LLVMArgAsIArgs: g_assert (addresses [reg]); if (ainfo->esize == 8) args [pindex] = LLVMBuildLoad (ctx->builder, convert (ctx, addresses [reg], LLVMPointerType (LLVMArrayType (LLVMInt64Type (), ainfo->nslots), 0)), ""); else args [pindex] = LLVMBuildLoad (ctx->builder, convert (ctx, addresses [reg], LLVMPointerType (LLVMArrayType (IntPtrType (), ainfo->nslots), 0)), ""); break; case LLVMArgVtypeAsScalar: g_assert_not_reached (); break; case LLVMArgWasmVtypeAsScalar: g_assert (addresses [reg]); args [pindex] = LLVMBuildLoad (ctx->builder, convert (ctx, addresses [reg], LLVMPointerType (LLVMIntType (ainfo->esize * 8), 0)), ""); break; case LLVMArgGsharedvtFixed: case LLVMArgGsharedvtFixedVtype: g_assert (addresses [reg]); args [pindex] = convert (ctx, addresses [reg], LLVMPointerType (type_to_llvm_arg_type (ctx, ainfo->type), 0)); break; case LLVMArgGsharedvtVariable: g_assert (addresses [reg]); args [pindex] = convert (ctx, addresses [reg], LLVMPointerType (IntPtrType (), 0)); break; default: g_assert (args [pindex]); if (i == 0 && sig->hasthis) args [pindex] = convert (ctx, args [pindex], param_types [pindex]); else args [pindex] = convert (ctx, args [pindex], type_to_llvm_arg_type (ctx, ainfo->type)); break; } g_assert (pindex <= nargs); l = l->next; } if (call->cinfo->dummy_arg) { g_assert (call->cinfo->dummy_arg_pindex < nargs); args [call->cinfo->dummy_arg_pindex] = LLVMConstNull (ctx->module->ptr_type); } // FIXME: Align call sites /* * Emit the call */ lcall = emit_call (ctx, bb, &builder, callee, args, LLVMCountParamTypes (llvm_sig)); mono_llvm_nonnull_state_update (ctx, lcall, call->method, args, LLVMCountParamTypes (llvm_sig)); // If we just allocated an object, it's not null. 
if (call->method && call->method->wrapper_type == MONO_WRAPPER_ALLOC) { mono_llvm_set_call_nonnull_ret (lcall); } if (ins->opcode != OP_TAILCALL && ins->opcode != OP_TAILCALL_MEMBASE && LLVMGetInstructionOpcode (lcall) == LLVMCall) mono_llvm_set_call_notailcall (lcall); // Add original method name we are currently emitting as a custom string metadata (the only way to leave comments in LLVM IR) if (mono_debug_enabled () && call && call->method) mono_llvm_add_string_metadata (lcall, "managed_name", mono_method_full_name (call->method, TRUE)); // As per the LLVM docs, a function has a noalias return value if and only if // it is an allocation function. This is an allocation function. if (call->method && call->method->wrapper_type == MONO_WRAPPER_ALLOC) { mono_llvm_set_call_noalias_ret (lcall); // All objects are expected to be 8-byte aligned (SGEN_ALLOC_ALIGN) mono_llvm_set_alignment_ret (lcall, 8); } /* * Modify cconv and parameter attributes to pass rgctx/imt correctly. */ #if defined(MONO_ARCH_IMT_REG) && defined(MONO_ARCH_RGCTX_REG) g_assert (MONO_ARCH_IMT_REG == MONO_ARCH_RGCTX_REG); #endif /* The two can't be used together, so use only one LLVM calling conv to pass them */ g_assert (!(call->rgctx_arg_reg && call->imt_arg_reg)); if (!sig->pinvoke && !cfg->llvm_only) LLVMSetInstructionCallConv (lcall, LLVMMono1CallConv); if (cinfo->ret.storage == LLVMArgVtypeByRef) mono_llvm_add_instr_attr (lcall, 1 + cinfo->vret_arg_pindex, LLVM_ATTR_STRUCT_RET); if (!ctx->llvm_only && call->rgctx_arg_reg) mono_llvm_add_instr_attr (lcall, 1 + cinfo->rgctx_arg_pindex, LLVM_ATTR_IN_REG); if (call->imt_arg_reg) mono_llvm_add_instr_attr (lcall, 1 + cinfo->imt_arg_pindex, LLVM_ATTR_IN_REG); /* Add byval attributes if needed */ for (i = 0; i < sig->param_count; ++i) { LLVMArgInfo *ainfo = &call->cinfo->args [i + sig->hasthis]; if (ainfo && ainfo->storage == LLVMArgVtypeByVal) mono_llvm_add_instr_attr (lcall, 1 + ainfo->pindex, LLVM_ATTR_BY_VAL); #ifdef TARGET_WASM if (ainfo && ainfo->storage == LLVMArgVtypeByRef) /* This causes llvm to make a copy of the value which is what we need */ mono_llvm_add_instr_byval_attr (lcall, 1 + ainfo->pindex, LLVMGetElementType (param_types [ainfo->pindex])); #endif } gboolean is_simd = MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (sig->ret)); gboolean should_promote_to_value = FALSE; const char *load_name = NULL; /* * Convert the result. Non-SIMD value types are manipulated via an * indirection. SIMD value types are represented directly as LLVM vector * values, and must have a corresponding LLVM value definition in * `values`. */ switch (cinfo->ret.storage) { case LLVMArgAsIArgs: case LLVMArgFpStruct: if (!addresses [call->inst.dreg]) addresses [call->inst.dreg] = build_alloca (ctx, sig->ret); LLVMBuildStore (builder, lcall, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (LLVMTypeOf (lcall), 0), FALSE)); break; case LLVMArgVtypeByVal: /* * Only used by amd64 and x86. Only ever used when passing * arguments; never used for return values. 
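	 * Reaching this case for a return value would therefore indicate a
	 * calling-convention bug, hence the assertion below.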
*/ g_assert_not_reached (); break; case LLVMArgVtypeInReg: { if (LLVMTypeOf (lcall) == LLVMVoidType ()) /* Empty struct */ break; if (!addresses [ins->dreg]) addresses [ins->dreg] = build_alloca (ctx, sig->ret); LLVMValueRef regs [2] = { 0 }; regs [0] = LLVMBuildExtractValue (builder, lcall, 0, ""); if (cinfo->ret.pair_storage [1] != LLVMArgNone) regs [1] = LLVMBuildExtractValue (builder, lcall, 1, ""); emit_args_to_vtype (ctx, builder, sig->ret, addresses [ins->dreg], &cinfo->ret, regs); load_name = "process_call_vtype_in_reg"; should_promote_to_value = is_simd; break; } case LLVMArgVtypeAsScalar: if (!addresses [call->inst.dreg]) addresses [call->inst.dreg] = build_alloca (ctx, sig->ret); LLVMBuildStore (builder, lcall, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (LLVMTypeOf (lcall), 0), FALSE)); load_name = "process_call_vtype_as_scalar"; should_promote_to_value = is_simd; break; case LLVMArgVtypeRetAddr: case LLVMArgVtypeByRef: load_name = "process_call_vtype_ret_addr"; should_promote_to_value = is_simd; break; case LLVMArgGsharedvtVariable: break; case LLVMArgGsharedvtFixed: case LLVMArgGsharedvtFixedVtype: values [ins->dreg] = LLVMBuildLoad (builder, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (type_to_llvm_type (ctx, sig->ret), 0), FALSE), ""); break; case LLVMArgWasmVtypeAsScalar: if (!addresses [call->inst.dreg]) addresses [call->inst.dreg] = build_alloca (ctx, sig->ret); LLVMBuildStore (builder, lcall, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (LLVMTypeOf (lcall), 0), FALSE)); break; default: if (sig->ret->type != MONO_TYPE_VOID) /* If the method returns an unsigned value, need to zext it */ values [ins->dreg] = convert_full (ctx, lcall, llvm_type_to_stack_type (cfg, type_to_llvm_type (ctx, sig->ret)), type_is_unsigned (ctx, sig->ret)); break; } if (should_promote_to_value) { g_assert (addresses [call->inst.dreg]); LLVMTypeRef addr_type = LLVMPointerType (type_to_llvm_type (ctx, sig->ret), 0); LLVMValueRef addr = convert_full (ctx, addresses [call->inst.dreg], addr_type, FALSE); values [ins->dreg] = LLVMBuildLoad (builder, addr, load_name); } *builder_ref = ctx->builder; } static void emit_llvmonly_throw (EmitContext *ctx, MonoBasicBlock *bb, gboolean rethrow, LLVMValueRef exc) { MonoJitICallId icall_id = rethrow ? MONO_JIT_ICALL_mini_llvmonly_rethrow_exception : MONO_JIT_ICALL_mini_llvmonly_throw_exception; LLVMValueRef callee = rethrow ? ctx->module->rethrow : ctx->module->throw_icall; LLVMTypeRef exc_type = type_to_llvm_type (ctx, m_class_get_byval_arg (mono_get_exception_class ())); if (!callee) { LLVMTypeRef fun_sig = LLVMFunctionType1 (LLVMVoidType (), exc_type, FALSE); g_assert (ctx->cfg->compile_aot); callee = get_callee (ctx, fun_sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (icall_id)); } LLVMValueRef args [2]; args [0] = convert (ctx, exc, exc_type); emit_call (ctx, bb, &ctx->builder, callee, args, 1); LLVMBuildUnreachable (ctx->builder); ctx->builder = create_builder (ctx); } static void emit_throw (EmitContext *ctx, MonoBasicBlock *bb, gboolean rethrow, LLVMValueRef exc) { MonoMethodSignature *throw_sig; LLVMValueRef * const pcallee = rethrow ? &ctx->module->rethrow : &ctx->module->throw_icall; LLVMValueRef callee = *pcallee; char const * const icall_name = rethrow ? "mono_arch_rethrow_exception" : "mono_arch_throw_exception"; #ifndef TARGET_X86 const #endif MonoJitICallId icall_id = rethrow ? 
MONO_JIT_ICALL_mono_arch_rethrow_exception : MONO_JIT_ICALL_mono_arch_throw_exception; if (!callee) { throw_sig = mono_metadata_signature_alloc (mono_get_corlib (), 1); throw_sig->ret = m_class_get_byval_arg (mono_get_void_class ()); throw_sig->params [0] = m_class_get_byval_arg (mono_get_object_class ()); if (ctx->cfg->compile_aot) { callee = get_callee (ctx, sig_to_llvm_sig (ctx, throw_sig), MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); } else { #ifdef TARGET_X86 /* * LLVM doesn't push the exception argument, so we need a different * trampoline. */ icall_id = rethrow ? MONO_JIT_ICALL_mono_llvm_rethrow_exception_trampoline : MONO_JIT_ICALL_mono_llvm_throw_exception_trampoline; #endif callee = get_jit_callee (ctx, icall_name, sig_to_llvm_sig (ctx, throw_sig), MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); } mono_memory_barrier (); } LLVMValueRef arg; arg = convert (ctx, exc, type_to_llvm_type (ctx, m_class_get_byval_arg (mono_get_object_class ()))); emit_call (ctx, bb, &ctx->builder, callee, &arg, 1); } static void emit_resume_eh (EmitContext *ctx, MonoBasicBlock *bb) { const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_resume_exception; LLVMValueRef callee; LLVMTypeRef fun_sig = LLVMFunctionType0 (LLVMVoidType (), FALSE); g_assert (ctx->cfg->compile_aot); callee = get_callee (ctx, fun_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); emit_call (ctx, bb, &ctx->builder, callee, NULL, 0); LLVMBuildUnreachable (ctx->builder); ctx->builder = create_builder (ctx); } static LLVMValueRef mono_llvm_emit_clear_exception_call (EmitContext *ctx, LLVMBuilderRef builder) { const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_clear_exception; LLVMTypeRef call_sig = LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE); LLVMValueRef callee = NULL; if (!callee) { callee = get_callee (ctx, call_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); } g_assert (builder && callee); return LLVMBuildCall (builder, callee, NULL, 0, ""); } static LLVMValueRef mono_llvm_emit_load_exception_call (EmitContext *ctx, LLVMBuilderRef builder) { const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_load_exception; LLVMTypeRef call_sig = LLVMFunctionType (ObjRefType (), NULL, 0, FALSE); LLVMValueRef callee = NULL; g_assert (ctx->cfg->compile_aot); if (!callee) { callee = get_callee (ctx, call_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); } g_assert (builder && callee); return LLVMBuildCall (builder, callee, NULL, 0, "load_exception"); } static LLVMValueRef mono_llvm_emit_match_exception_call (EmitContext *ctx, LLVMBuilderRef builder, gint32 region_start, gint32 region_end) { const char *icall_name = "mini_llvmonly_match_exception"; const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_match_exception; ctx->builder = builder; LLVMValueRef args[5]; const int num_args = G_N_ELEMENTS (args); args [0] = convert (ctx, get_aotconst (ctx, MONO_PATCH_INFO_AOT_JIT_INFO, GINT_TO_POINTER (ctx->cfg->method_index), LLVMPointerType (IntPtrType (), 0)), IntPtrType ()); args [1] = LLVMConstInt (LLVMInt32Type (), region_start, 0); args [2] = LLVMConstInt (LLVMInt32Type (), region_end, 0); if (ctx->cfg->rgctx_var) { if (ctx->cfg->llvm_only) { args [3] = convert (ctx, ctx->rgctx_arg, IntPtrType ()); } else { LLVMValueRef rgctx_alloc = ctx->addresses [ctx->cfg->rgctx_var->dreg]; g_assert (rgctx_alloc); args [3] = LLVMBuildLoad (builder, convert (ctx, rgctx_alloc, LLVMPointerType (IntPtrType (), 0)), ""); } } else { args [3] = LLVMConstInt 
(IntPtrType (), 0, 0); } if (ctx->this_arg) args [4] = convert (ctx, ctx->this_arg, IntPtrType ()); else args [4] = LLVMConstInt (IntPtrType (), 0, 0); LLVMTypeRef match_sig = LLVMFunctionType5 (LLVMInt32Type (), IntPtrType (), LLVMInt32Type (), LLVMInt32Type (), IntPtrType (), IntPtrType (), FALSE); LLVMValueRef callee; g_assert (ctx->cfg->compile_aot); ctx->builder = builder; // get_callee expects ctx->builder to be the emitting builder callee = get_callee (ctx, match_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); g_assert (builder && callee); g_assert (ctx->ex_var); return LLVMBuildCall (builder, callee, args, num_args, icall_name); } // FIXME: This won't work because the code-finding makes this // not a constant. /*#define MONO_PERSONALITY_DEBUG*/ #ifdef MONO_PERSONALITY_DEBUG static const gboolean use_mono_personality_debug = TRUE; static const char *default_personality_name = "mono_debug_personality"; #else static const gboolean use_mono_personality_debug = FALSE; static const char *default_personality_name = "__gxx_personality_v0"; #endif static LLVMTypeRef default_cpp_lpad_exc_signature (void) { static LLVMTypeRef sig; if (!sig) { LLVMTypeRef signature [2]; signature [0] = LLVMPointerType (LLVMInt8Type (), 0); signature [1] = LLVMInt32Type (); sig = LLVMStructType (signature, 2, FALSE); } return sig; } static LLVMValueRef get_mono_personality (EmitContext *ctx) { LLVMValueRef personality = NULL; LLVMTypeRef personality_type = LLVMFunctionType (LLVMInt32Type (), NULL, 0, TRUE); g_assert (ctx->cfg->compile_aot); if (!use_mono_personality_debug) { personality = LLVMGetNamedFunction (ctx->lmodule, default_personality_name); } else { personality = get_callee (ctx, personality_type, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_debug_personality)); } g_assert (personality); return personality; } static LLVMBasicBlockRef emit_landing_pad (EmitContext *ctx, int group_index, int group_size) { MonoCompile *cfg = ctx->cfg; LLVMBuilderRef old_builder = ctx->builder; MonoExceptionClause *group_start = cfg->header->clauses + group_index; LLVMBuilderRef lpadBuilder = create_builder (ctx); ctx->builder = lpadBuilder; MonoBasicBlock *handler_bb = cfg->cil_offset_to_bb [CLAUSE_START (group_start)]; g_assert (handler_bb); // <resultval> = landingpad <somety> personality <type> <pers_fn> <clause>+ LLVMValueRef personality = get_mono_personality (ctx); g_assert (personality); char *bb_name = g_strdup_printf ("LPAD%d_BB", group_index); LLVMBasicBlockRef lpad_bb = gen_bb (ctx, bb_name); g_free (bb_name); LLVMPositionBuilderAtEnd (lpadBuilder, lpad_bb); LLVMValueRef landing_pad = LLVMBuildLandingPad (lpadBuilder, default_cpp_lpad_exc_signature (), personality, 0, ""); g_assert (landing_pad); LLVMValueRef cast = LLVMBuildBitCast (lpadBuilder, ctx->module->sentinel_exception, LLVMPointerType (LLVMInt8Type (), 0), "int8TypeInfo"); LLVMAddClause (landing_pad, cast); if (ctx->cfg->deopt) { /* * Call mini_llvmonly_resume_exception_il_state (lmf, il_state) * * The call will execute the catch clause and the rest of the method and store the return * value into ctx->il_state_ret. 
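		 * In other words, in the deopt case the interpreter finishes executing the
		 * method from the saved IL state; this LLVM frame only has to marshal the
		 * return value back out, which is what the switch on ret.storage below does.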
*/ if (!ctx->has_catch) { /* Unused */ LLVMBuildUnreachable (lpadBuilder); return lpad_bb; } const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_resume_exception_il_state; LLVMValueRef callee; LLVMValueRef args [2]; LLVMTypeRef fun_sig = LLVMFunctionType2 (LLVMVoidType (), IntPtrType (), IntPtrType (), FALSE); callee = get_callee (ctx, fun_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); g_assert (ctx->cfg->lmf_var); g_assert (ctx->addresses [ctx->cfg->lmf_var->dreg]); args [0] = LLVMBuildPtrToInt (ctx->builder, ctx->addresses [ctx->cfg->lmf_var->dreg], IntPtrType (), ""); args [1] = LLVMBuildPtrToInt (ctx->builder, ctx->il_state, IntPtrType (), ""); emit_call (ctx, NULL, &ctx->builder, callee, args, 2); /* Return the value set in ctx->il_state_ret */ LLVMTypeRef ret_type = LLVMGetReturnType (LLVMGetElementType (LLVMTypeOf (ctx->lmethod))); LLVMBuilderRef builder = ctx->builder; LLVMValueRef addr, retval, gep, indexes [2]; switch (ctx->linfo->ret.storage) { case LLVMArgNone: LLVMBuildRetVoid (builder); break; case LLVMArgNormal: case LLVMArgWasmVtypeAsScalar: case LLVMArgVtypeInReg: { if (ctx->sig->ret->type == MONO_TYPE_VOID) { LLVMBuildRetVoid (builder); break; } addr = ctx->il_state_ret; g_assert (addr); addr = convert (ctx, ctx->il_state_ret, LLVMPointerType (ret_type, 0)); indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); gep = LLVMBuildGEP (builder, addr, indexes, 1, ""); LLVMBuildRet (builder, LLVMBuildLoad (builder, gep, "")); break; } case LLVMArgVtypeRetAddr: { LLVMValueRef ret_addr; g_assert (cfg->vret_addr); ret_addr = ctx->values [cfg->vret_addr->dreg]; addr = ctx->il_state_ret; g_assert (addr); /* The ret value is in il_state_ret, copy it to the memory pointed to by the vret arg */ ret_type = type_to_llvm_type (ctx, ctx->sig->ret); indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); gep = LLVMBuildGEP (builder, addr, indexes, 1, ""); retval = convert (ctx, LLVMBuildLoad (builder, gep, ""), ret_type); LLVMBuildStore (builder, retval, convert (ctx, ret_addr, LLVMPointerType (ret_type, 0))); LLVMBuildRetVoid (builder); break; } default: g_assert_not_reached (); break; } return lpad_bb; } LLVMBasicBlockRef resume_bb = gen_bb (ctx, "RESUME_BB"); LLVMBuilderRef resume_builder = create_builder (ctx); ctx->builder = resume_builder; LLVMPositionBuilderAtEnd (resume_builder, resume_bb); emit_resume_eh (ctx, handler_bb); // Build match ctx->builder = lpadBuilder; LLVMPositionBuilderAtEnd (lpadBuilder, lpad_bb); gboolean finally_only = TRUE; MonoExceptionClause *group_cursor = group_start; for (int i = 0; i < group_size; i ++) { if (!(group_cursor->flags & MONO_EXCEPTION_CLAUSE_FINALLY || group_cursor->flags & MONO_EXCEPTION_CLAUSE_FAULT)) finally_only = FALSE; group_cursor++; } // FIXME: // Handle landing pad inlining if (!finally_only) { // So at each level of the exception stack we will match the exception again. // During that match, we need to compare against the handler types for the current // protected region. We send the try start and end so that we can only check against // handlers for this lexical protected region. 
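		// Roughly, the generated control flow is (a sketch, not literal IR):
		//
		//   switch (mini_llvmonly_match_exception (jinfo, try_start, try_end, rgctx, this)) {
		//   default: goto resume_bb;              // returned -1: not ours, keep unwinding
		//   case <clause_index>: goto handler_bb; // one case per clause in this group
		//   }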
LLVMValueRef match = mono_llvm_emit_match_exception_call (ctx, lpadBuilder, group_start->try_offset, group_start->try_offset + group_start->try_len); // if returns -1, resume LLVMValueRef switch_ins = LLVMBuildSwitch (lpadBuilder, match, resume_bb, group_size); // else move to that target bb for (int i = 0; i < group_size; i++) { MonoExceptionClause *clause = group_start + i; int clause_index = clause - cfg->header->clauses; MonoBasicBlock *handler_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->clause_to_handler, GINT_TO_POINTER (clause_index)); g_assert (handler_bb); g_assert (ctx->bblocks [handler_bb->block_num].call_handler_target_bb); LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), clause_index, FALSE), ctx->bblocks [handler_bb->block_num].call_handler_target_bb); } } else { int clause_index = group_start - cfg->header->clauses; MonoBasicBlock *finally_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->clause_to_handler, GINT_TO_POINTER (clause_index)); g_assert (finally_bb); LLVMBuildBr (ctx->builder, ctx->bblocks [finally_bb->block_num].call_handler_target_bb); } ctx->builder = old_builder; return lpad_bb; } static LLVMValueRef create_const_vector (LLVMTypeRef t, const int *vals, int count) { g_assert (count <= MAX_VECTOR_ELEMS); LLVMValueRef llvm_vals [MAX_VECTOR_ELEMS]; for (int i = 0; i < count; i++) llvm_vals [i] = LLVMConstInt (t, vals [i], FALSE); return LLVMConstVector (llvm_vals, count); } static LLVMValueRef create_const_vector_i32 (const int *mask, int count) { return create_const_vector (LLVMInt32Type (), mask, count); } static LLVMValueRef create_const_vector_4_i32 (int v0, int v1, int v2, int v3) { LLVMValueRef mask [4]; mask [0] = LLVMConstInt (LLVMInt32Type (), v0, FALSE); mask [1] = LLVMConstInt (LLVMInt32Type (), v1, FALSE); mask [2] = LLVMConstInt (LLVMInt32Type (), v2, FALSE); mask [3] = LLVMConstInt (LLVMInt32Type (), v3, FALSE); return LLVMConstVector (mask, 4); } static LLVMValueRef create_const_vector_2_i32 (int v0, int v1) { LLVMValueRef mask [2]; mask [0] = LLVMConstInt (LLVMInt32Type (), v0, FALSE); mask [1] = LLVMConstInt (LLVMInt32Type (), v1, FALSE); return LLVMConstVector (mask, 2); } static LLVMValueRef broadcast_element (EmitContext *ctx, LLVMValueRef elem, int count) { LLVMTypeRef t = LLVMTypeOf (elem); LLVMTypeRef init_vec_t = LLVMVectorType (t, 1); LLVMValueRef undef = LLVMGetUndef (init_vec_t); LLVMValueRef vec = LLVMBuildInsertElement (ctx->builder, undef, elem, const_int32 (0), ""); LLVMValueRef select_zero = LLVMConstNull (LLVMVectorType (LLVMInt32Type (), count)); return LLVMBuildShuffleVector (ctx->builder, vec, undef, select_zero, "broadcast"); } static LLVMValueRef broadcast_constant (int const_val, LLVMTypeRef elem_t, int count) { int vals [MAX_VECTOR_ELEMS]; for (int i = 0; i < count; ++i) vals [i] = const_val; return create_const_vector (elem_t, vals, count); } static LLVMValueRef create_shift_vector (EmitContext *ctx, LLVMValueRef type_donor, LLVMValueRef shiftamt) { LLVMTypeRef t = LLVMTypeOf (type_donor); unsigned int elems = LLVMGetVectorSize (t); LLVMTypeRef elem_t = LLVMGetElementType (t); shiftamt = convert_full (ctx, shiftamt, elem_t, TRUE); shiftamt = broadcast_element (ctx, shiftamt, elems); return shiftamt; } static LLVMTypeRef to_integral_vector_type (LLVMTypeRef t) { unsigned int elems = LLVMGetVectorSize (t); LLVMTypeRef elem_t = LLVMGetElementType (t); unsigned int bits = mono_llvm_get_prim_size_bits (elem_t); return LLVMVectorType (LLVMIntType (bits), elems); } static LLVMValueRef bitcast_to_integral (EmitContext 
*ctx, LLVMValueRef vec) { LLVMTypeRef src_t = LLVMTypeOf (vec); LLVMTypeRef dst_t = to_integral_vector_type (src_t); if (dst_t != src_t) return LLVMBuildBitCast (ctx->builder, vec, dst_t, "bc2i"); return vec; } static LLVMValueRef extract_high_elements (EmitContext *ctx, LLVMValueRef src_vec) { LLVMTypeRef src_t = LLVMTypeOf (src_vec); unsigned int src_elems = LLVMGetVectorSize (src_t); unsigned int dst_elems = src_elems / 2; int mask [MAX_VECTOR_ELEMS] = { 0 }; for (int i = 0; i < dst_elems; ++i) mask [i] = dst_elems + i; return LLVMBuildShuffleVector (ctx->builder, src_vec, LLVMGetUndef (src_t), create_const_vector_i32 (mask, dst_elems), "extract_high"); } static LLVMValueRef keep_lowest_element (EmitContext *ctx, LLVMTypeRef dst_t, LLVMValueRef vec) { LLVMTypeRef t = LLVMTypeOf (vec); g_assert (LLVMGetElementType (dst_t) == LLVMGetElementType (t)); unsigned int elems = LLVMGetVectorSize (dst_t); unsigned int src_elems = LLVMGetVectorSize (t); int mask [MAX_VECTOR_ELEMS] = { 0 }; mask [0] = 0; for (unsigned int i = 1; i < elems; ++i) mask [i] = src_elems; return LLVMBuildShuffleVector (ctx->builder, vec, LLVMConstNull (t), create_const_vector_i32 (mask, elems), "keep_lowest"); } static LLVMValueRef concatenate_vectors (EmitContext *ctx, LLVMValueRef xs, LLVMValueRef ys) { LLVMTypeRef t = LLVMTypeOf (xs); unsigned int elems = LLVMGetVectorSize (t) * 2; int mask [MAX_VECTOR_ELEMS] = { 0 }; for (int i = 0; i < elems; ++i) mask [i] = i; return LLVMBuildShuffleVector (ctx->builder, xs, ys, create_const_vector_i32 (mask, elems), "concat_vecs"); } static LLVMValueRef scalar_from_vector (EmitContext *ctx, LLVMValueRef xs) { return LLVMBuildExtractElement (ctx->builder, xs, const_int32 (0), "v2s"); } static LLVMValueRef vector_from_scalar (EmitContext *ctx, LLVMTypeRef type, LLVMValueRef x) { return LLVMBuildInsertElement (ctx->builder, LLVMConstNull (type), x, const_int32 (0), "s2v"); } typedef struct { EmitContext *ctx; MonoBasicBlock *bb; LLVMBasicBlockRef continuation; LLVMValueRef phi; LLVMValueRef switch_ins; LLVMBasicBlockRef tmp_block; LLVMBasicBlockRef default_case; LLVMTypeRef switch_index_type; const char *name; int max_cases; int i; } ImmediateUnrollCtx; static ImmediateUnrollCtx immediate_unroll_begin ( EmitContext *ctx, MonoBasicBlock *bb, int max_cases, LLVMValueRef switch_index, LLVMTypeRef return_type, const char *name) { LLVMBasicBlockRef default_case = gen_bb (ctx, name); LLVMBasicBlockRef continuation = gen_bb (ctx, name); LLVMValueRef switch_ins = LLVMBuildSwitch (ctx->builder, switch_index, default_case, max_cases); LLVMPositionBuilderAtEnd (ctx->builder, continuation); LLVMValueRef phi = LLVMBuildPhi (ctx->builder, return_type, name); ImmediateUnrollCtx ictx = { 0 }; ictx.ctx = ctx; ictx.bb = bb; ictx.continuation = continuation; ictx.phi = phi; ictx.switch_ins = switch_ins; ictx.default_case = default_case; ictx.switch_index_type = LLVMTypeOf (switch_index); ictx.name = name; ictx.max_cases = max_cases; return ictx; } static gboolean immediate_unroll_next (ImmediateUnrollCtx *ictx, int *i) { if (ictx->i >= ictx->max_cases) return FALSE; ictx->tmp_block = gen_bb (ictx->ctx, ictx->name); LLVMPositionBuilderAtEnd (ictx->ctx->builder, ictx->tmp_block); *i = ictx->i; ++ictx->i; return TRUE; } static void immediate_unroll_commit (ImmediateUnrollCtx *ictx, int switch_const, LLVMValueRef value) { LLVMBuildBr (ictx->ctx->builder, ictx->continuation); LLVMAddCase (ictx->switch_ins, LLVMConstInt (ictx->switch_index_type, switch_const, FALSE), ictx->tmp_block); LLVMAddIncoming 
(ictx->phi, &value, &ictx->tmp_block, 1); } static void immediate_unroll_default (ImmediateUnrollCtx *ictx) { LLVMPositionBuilderAtEnd (ictx->ctx->builder, ictx->default_case); } static void immediate_unroll_commit_default (ImmediateUnrollCtx *ictx, LLVMValueRef value) { LLVMBuildBr (ictx->ctx->builder, ictx->continuation); LLVMAddIncoming (ictx->phi, &value, &ictx->default_case, 1); } static void immediate_unroll_unreachable_default (ImmediateUnrollCtx *ictx) { immediate_unroll_default (ictx); LLVMBuildUnreachable (ictx->ctx->builder); } static LLVMValueRef immediate_unroll_end (ImmediateUnrollCtx *ictx, LLVMBasicBlockRef *continuation) { EmitContext *ctx = ictx->ctx; LLVMBuilderRef builder = ctx->builder; LLVMPositionBuilderAtEnd (builder, ictx->continuation); *continuation = ictx->continuation; ctx->bblocks [ictx->bb->block_num].end_bblock = ictx->continuation; return ictx->phi; } typedef struct { EmitContext *ctx; LLVMTypeRef intermediate_type; LLVMTypeRef return_type; gboolean needs_fake_scalar_op; llvm_ovr_tag_t ovr_tag; } ScalarOpFromVectorOpCtx; static inline gboolean check_needs_fake_scalar_op (MonoTypeEnum type) { #if defined(TARGET_ARM64) switch (type) { case MONO_TYPE_U1: case MONO_TYPE_I1: case MONO_TYPE_U2: case MONO_TYPE_I2: return TRUE; } #endif return FALSE; } static ScalarOpFromVectorOpCtx scalar_op_from_vector_op (EmitContext *ctx, LLVMTypeRef return_type, MonoInst *ins) { ScalarOpFromVectorOpCtx ret = { 0 }; ret.ctx = ctx; ret.intermediate_type = return_type; ret.return_type = return_type; ret.needs_fake_scalar_op = check_needs_fake_scalar_op (inst_c1_type (ins)); ret.ovr_tag = ovr_tag_from_llvm_type (return_type); if (!ret.needs_fake_scalar_op) { ret.ovr_tag = ovr_tag_force_scalar (ret.ovr_tag); ret.intermediate_type = ovr_tag_to_llvm_type (ret.ovr_tag); } return ret; } static void scalar_op_from_vector_op_process_args (ScalarOpFromVectorOpCtx *sctx, LLVMValueRef *args, int num_args) { if (!sctx->needs_fake_scalar_op) for (int i = 0; i < num_args; ++i) args [i] = scalar_from_vector (sctx->ctx, args [i]); } static LLVMValueRef scalar_op_from_vector_op_process_result (ScalarOpFromVectorOpCtx *sctx, LLVMValueRef result) { if (sctx->needs_fake_scalar_op) return keep_lowest_element (sctx->ctx, LLVMTypeOf (result), result); return vector_from_scalar (sctx->ctx, sctx->return_type, result); } static void emit_llvmonly_handler_start (EmitContext *ctx, MonoBasicBlock *bb, LLVMBasicBlockRef cbb) { int clause_index = MONO_REGION_CLAUSE_INDEX (bb->region); MonoExceptionClause *clause = &ctx->cfg->header->clauses [clause_index]; // Make exception available to catch blocks if (!(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY || clause->flags & MONO_EXCEPTION_CLAUSE_FAULT)) { LLVMValueRef mono_exc = mono_llvm_emit_load_exception_call (ctx, ctx->builder); g_assert (ctx->ex_var); LLVMBuildStore (ctx->builder, LLVMBuildBitCast (ctx->builder, mono_exc, ObjRefType (), ""), ctx->ex_var); if (bb->in_scount == 1) { MonoInst *exvar = bb->in_stack [0]; g_assert (!ctx->values [exvar->dreg]); g_assert (ctx->ex_var); ctx->values [exvar->dreg] = LLVMBuildLoad (ctx->builder, ctx->ex_var, "save_exception"); emit_volatile_store (ctx, exvar->dreg); } mono_llvm_emit_clear_exception_call (ctx, ctx->builder); } #ifdef TARGET_WASM if (ctx->cfg->lmf_var && !ctx->cfg->deopt) { LLVMValueRef callee; LLVMValueRef args [1]; LLVMTypeRef sig = LLVMFunctionType1 (LLVMVoidType (), ctx->module->ptr_type, FALSE); /* * There might be an LMF on the stack inserted to enable stack walking, see * method_needs_stack_walk (). 
If an exception is thrown, the LMF popping code
	 * is not executed, so do it here.
	 */
		g_assert (ctx->addresses [ctx->cfg->lmf_var->dreg]);
		callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_pop_lmf));
		args [0] = convert (ctx, ctx->addresses [ctx->cfg->lmf_var->dreg], ctx->module->ptr_type);
		emit_call (ctx, bb, &ctx->builder, callee, args, 1);
	}
#endif

	LLVMBuilderRef handler_builder = create_builder (ctx);
	LLVMBasicBlockRef target_bb = ctx->bblocks [bb->block_num].call_handler_target_bb;
	LLVMPositionBuilderAtEnd (handler_builder, target_bb);

	// Make the handler code end with a jump to cbb
	LLVMBuildBr (handler_builder, cbb);
}

static void
emit_handler_start (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef builder)
{
	MonoCompile *cfg = ctx->cfg;
	LLVMValueRef *values = ctx->values;
	LLVMModuleRef lmodule = ctx->lmodule;
	BBInfo *bblocks = ctx->bblocks;
	LLVMTypeRef i8ptr;
	LLVMValueRef personality;
	LLVMValueRef landing_pad;
	LLVMBasicBlockRef target_bb;
	MonoInst *exvar;
	static int ti_generator;
	char ti_name [128];
	LLVMValueRef type_info;
	int clause_index;
	GSList *l;

	// <resultval> = landingpad <somety> personality <type> <pers_fn> <clause>+

	if (cfg->compile_aot) {
		/* Use a dummy personality function */
		personality = LLVMGetNamedFunction (lmodule, "mono_personality");
		g_assert (personality);
	} else {
		/* Can't cache this as each method is in its own llvm module */
		LLVMTypeRef personality_type = LLVMFunctionType (LLVMInt32Type (), NULL, 0, TRUE);
		personality = LLVMAddFunction (ctx->lmodule, "mono_personality", personality_type);
		mono_llvm_add_func_attr (personality, LLVM_ATTR_NO_UNWIND);
		LLVMBasicBlockRef entry_bb = LLVMAppendBasicBlock (personality, "ENTRY");
		LLVMBuilderRef builder2 = LLVMCreateBuilder ();
		LLVMPositionBuilderAtEnd (builder2, entry_bb);
		LLVMBuildRet (builder2, LLVMConstInt (LLVMInt32Type (), 0, FALSE));
		LLVMDisposeBuilder (builder2);
	}

	i8ptr = LLVMPointerType (LLVMInt8Type (), 0);

	clause_index = (mono_get_block_region_notry (cfg, bb->region) >> 8) - 1;

	/*
	 * Create the type info
	 */
	sprintf (ti_name, "type_info_%d", ti_generator);
	ti_generator ++;

	if (cfg->compile_aot) {
		/* decode_eh_frame () in aot-runtime.c will decode this */
		type_info = LLVMAddGlobal (lmodule, LLVMInt32Type (), ti_name);
		LLVMSetInitializer (type_info, LLVMConstInt (LLVMInt32Type (), clause_index, FALSE));

		/*
		 * These symbols are not really used, the clause_index is embedded into the EH tables generated by DwarfMonoException in LLVM.
		 */
		LLVMSetLinkage (type_info, LLVMInternalLinkage);
	} else {
		type_info = LLVMAddGlobal (lmodule, LLVMInt32Type (), ti_name);
		LLVMSetInitializer (type_info, LLVMConstInt (LLVMInt32Type (), clause_index, FALSE));
	}

	{
		LLVMTypeRef members [2], ret_type;

		members [0] = i8ptr;
		members [1] = LLVMInt32Type ();
		ret_type = LLVMStructType (members, 2, FALSE);

		landing_pad = LLVMBuildLandingPad (builder, ret_type, personality, 1, "");
		LLVMAddClause (landing_pad, type_info);

		/* Store the exception into the exvar */
		if (ctx->ex_var)
			LLVMBuildStore (builder, convert (ctx, LLVMBuildExtractValue (builder, landing_pad, 0, "ex_obj"), ObjRefType ()), ctx->ex_var);
	}

	/*
	 * LLVM throw sites are associated with one landing pad, and LLVM-generated
	 * code expects control to be transferred to this landing pad even in the
	 * presence of nested clauses.
The landing pad needs to branch to the landing * pads belonging to nested clauses based on the selector value returned by * the landing pad instruction, which is passed to the landing pad in a * register by the EH code. */ target_bb = bblocks [bb->block_num].call_handler_target_bb; g_assert (target_bb); /* * Branch to the correct landing pad */ LLVMValueRef ex_selector = LLVMBuildExtractValue (builder, landing_pad, 1, "ex_selector"); LLVMValueRef switch_ins = LLVMBuildSwitch (builder, ex_selector, target_bb, 0); for (l = ctx->nested_in [clause_index]; l; l = l->next) { int nesting_clause_index = GPOINTER_TO_INT (l->data); MonoBasicBlock *handler_bb; handler_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->clause_to_handler, GINT_TO_POINTER (nesting_clause_index)); g_assert (handler_bb); g_assert (ctx->bblocks [handler_bb->block_num].call_handler_target_bb); LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), nesting_clause_index, FALSE), ctx->bblocks [handler_bb->block_num].call_handler_target_bb); } /* Start a new bblock which CALL_HANDLER can branch to */ ctx->builder = builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, target_bb); ctx->bblocks [bb->block_num].end_bblock = target_bb; /* Store the exception into the IL level exvar */ if (bb->in_scount == 1) { g_assert (bb->in_scount == 1); exvar = bb->in_stack [0]; // FIXME: This is shared with filter clauses ? g_assert (!values [exvar->dreg]); g_assert (ctx->ex_var); values [exvar->dreg] = LLVMBuildLoad (builder, ctx->ex_var, ""); emit_volatile_store (ctx, exvar->dreg); } /* Make normal branches to the start of the clause branch to the new bblock */ bblocks [bb->block_num].bblock = target_bb; } static LLVMValueRef get_double_const (MonoCompile *cfg, double val) { //#ifdef TARGET_WASM #if 0 //Wasm requires us to canonicalize NaNs. 
static LLVMValueRef
get_double_const (MonoCompile *cfg, double val)
{
	//#ifdef TARGET_WASM
#if 0
	//Wasm requires us to canonicalize NaNs.
	if (mono_isnan (val))
		*(gint64 *)&val = 0x7FF8000000000000ll;
#endif
	return LLVMConstReal (LLVMDoubleType (), val);
}

static LLVMValueRef
get_float_const (MonoCompile *cfg, float val)
{
	//#ifdef TARGET_WASM
#if 0
	if (mono_isnan (val))
		*(int *)&val = 0x7FC00000;
#endif
	if (cfg->r4fp)
		return LLVMConstReal (LLVMFloatType (), val);
	else
		return LLVMConstFPExt (LLVMConstReal (LLVMFloatType (), val), LLVMDoubleType ());
}

static LLVMValueRef
call_overloaded_intrins (EmitContext *ctx, int id, llvm_ovr_tag_t ovr_tag, LLVMValueRef *args, const char *name)
{
	int key = key_from_id_and_tag (id, ovr_tag);
	LLVMValueRef intrins = get_intrins (ctx, key);
	int nargs = LLVMCountParamTypes (LLVMGetElementType (LLVMTypeOf (intrins)));
	for (int i = 0; i < nargs; ++i) {
		LLVMTypeRef t1 = LLVMTypeOf (args [i]);
		LLVMTypeRef t2 = LLVMTypeOf (LLVMGetParam (intrins, i));
		if (t1 != t2)
			args [i] = convert (ctx, args [i], t2);
	}
	return LLVMBuildCall (ctx->builder, intrins, args, nargs, name);
}

static LLVMValueRef
call_intrins (EmitContext *ctx, int id, LLVMValueRef *args, const char *name)
{
	return call_overloaded_intrins (ctx, id, 0, args, name);
}

static void
process_bb (EmitContext *ctx, MonoBasicBlock *bb)
{
	MonoCompile *cfg = ctx->cfg;
	MonoMethodSignature *sig = ctx->sig;
	LLVMValueRef method = ctx->lmethod;
	LLVMValueRef *values = ctx->values;
	LLVMValueRef *addresses = ctx->addresses;
	LLVMCallInfo *linfo = ctx->linfo;
	BBInfo *bblocks = ctx->bblocks;
	MonoInst *ins;
	LLVMBasicBlockRef cbb;
	LLVMBuilderRef builder;
	gboolean has_terminator;
	LLVMValueRef v;
	LLVMValueRef lhs, rhs, arg3;
	int nins = 0;

	cbb = get_end_bb (ctx, bb);

	builder = create_builder (ctx);
	ctx->builder = builder;
	LLVMPositionBuilderAtEnd (builder, cbb);

	if (!ctx_ok (ctx))
		return;

	if (cfg->interp_entry_only && bb != cfg->bb_init && bb != cfg->bb_entry && bb != cfg->bb_exit) {
		/* The interp entry code is in bb_entry, skip the rest as we might not be able to compile it */
		LLVMBuildUnreachable (builder);
		return;
	}

	if (bb->flags & BB_EXCEPTION_HANDLER) {
		if (!ctx->llvm_only && !bblocks [bb->block_num].invoke_target) {
			set_failure (ctx, "handler without invokes");
			return;
		}

		if (ctx->llvm_only)
			emit_llvmonly_handler_start (ctx, bb, cbb);
		else
			emit_handler_start (ctx, bb, builder);
		if (!ctx_ok (ctx))
			return;
		builder = ctx->builder;
	}

	/* Handle PHI nodes first */
	/* They should be grouped at the start of the bb */
	for (ins = bb->code; ins; ins = ins->next) {
		emit_dbg_loc (ctx, builder, ins->cil_code);

		if (ins->opcode == OP_NOP)
			continue;
		if (!MONO_IS_PHI (ins))
			break;

		if (cfg->interp_entry_only)
			break;

		int i;
		gboolean empty = TRUE;

		/* Check that all input bblocks really branch to us */
		for (i = 0; i < bb->in_count; ++i) {
			if (bb->in_bb [i]->last_ins && bb->in_bb [i]->last_ins->opcode == OP_NOT_REACHED)
				ins->inst_phi_args [i + 1] = -1;
			else
				empty = FALSE;
		}

		if (empty) {
			/* LLVM doesn't like phi instructions with zero operands */
			ctx->is_dead [ins->dreg] = TRUE;
			continue;
		}

		/* Created earlier, insert it now */
		LLVMInsertIntoBuilder (builder, values [ins->dreg]);

		for (i = 0; i < ins->inst_phi_args [0]; i++) {
			int sreg1 = ins->inst_phi_args [i + 1];
			int count, j;

			/*
			 * Count the number of times the incoming bblock branches to us,
			 * since llvm requires a separate entry for each.
			 */
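			/*
			 * Editor's illustration (an assumption, not original text): if the
			 * predecessor ends in a switch with two edges into this block, e.g.
			 *
			 *   switch i32 %x, label %DEF [ i32 0, label %BB7
			 *                               i32 1, label %BB7 ]
			 *
			 * then the phi in BB7 needs one incoming entry per edge:
			 *
			 *   %p = phi i32 [ %v, %PRED ], [ %v, %PRED ]
			 */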
			if (bb->in_bb [i]->last_ins && bb->in_bb [i]->last_ins->opcode == OP_SWITCH) {
				MonoInst *switch_ins = bb->in_bb [i]->last_ins;

				count = 0;
				for (j = 0; j < GPOINTER_TO_UINT (switch_ins->klass); ++j) {
					if (switch_ins->inst_many_bb [j] == bb)
						count ++;
				}
			} else {
				count = 1;
			}

			/* Remember for later */
			for (j = 0; j < count; ++j) {
				PhiNode *node = (PhiNode*)mono_mempool_alloc0 (ctx->mempool, sizeof (PhiNode));
				node->bb = bb;
				node->phi = ins;
				node->in_bb = bb->in_bb [i];
				node->sreg = sreg1;
				bblocks [bb->in_bb [i]->block_num].phi_nodes = g_slist_prepend_mempool (ctx->mempool, bblocks [bb->in_bb [i]->block_num].phi_nodes, node);
			}
		}
	}

	// Add volatile stores for PHI nodes
	// These need to be emitted after the PHI nodes
	for (ins = bb->code; ins; ins = ins->next) {
		const char *spec = LLVM_INS_INFO (ins->opcode);

		if (ins->opcode == OP_NOP)
			continue;
		if (!MONO_IS_PHI (ins))
			break;

		if (spec [MONO_INST_DEST] != 'v')
			emit_volatile_store (ctx, ins->dreg);
	}

	has_terminator = FALSE;
	for (ins = bb->code; ins; ins = ins->next) {
		const char *spec = LLVM_INS_INFO (ins->opcode);
		char *dname = NULL;
		char dname_buf [128];

		emit_dbg_loc (ctx, builder, ins->cil_code);

		nins ++;
		if (nins > 1000) {
			/*
			 * Some steps in llc are non-linear in the size of basic blocks, see #5714.
			 * Start a new bblock.
			 * Prevent the bblocks from being merged by doing a volatile load + cond branch
			 * from localloc-ed memory.
			 */
			if (!cfg->llvm_only)
				;//set_failure (ctx, "basic block too long");

			if (!ctx->long_bb_break_var) {
				ctx->long_bb_break_var = build_alloca_llvm_type_name (ctx, LLVMInt32Type (), 0, "long_bb_break");
				mono_llvm_build_store (ctx->alloca_builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ctx->long_bb_break_var, TRUE, LLVM_BARRIER_NONE);
			}

			cbb = gen_bb (ctx, "CONT_LONG_BB");
			LLVMBasicBlockRef dummy_bb = gen_bb (ctx, "CONT_LONG_BB_DUMMY");

			LLVMValueRef load = mono_llvm_build_load (builder, ctx->long_bb_break_var, "", TRUE);
			/*
			 * The long_bb_break_var is initialized to 0 in the prolog, so this branch will always go to 'cbb'
			 * but llvm doesn't know that, so the branch is not going to be eliminated.
			 */
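			/*
			 * Editor's sketch of the resulting IR (value names are assumptions):
			 *
			 *   %load = load volatile i32, i32* %long_bb_break
			 *   %cmp = icmp eq i32 %load, 0
			 *   br i1 %cmp, label %CONT_LONG_BB, label %CONT_LONG_BB_DUMMY
			 *
			 * Because %long_bb_break is always 0 at runtime, only the dummy
			 * block keeps the two halves from being merged back together.
			 */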
			LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntEQ, load, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
			LLVMBuildCondBr (builder, cmp, cbb, dummy_bb);

			/* Emit a dummy false bblock which does nothing but contains a volatile store so it cannot be eliminated */
			ctx->builder = builder = create_builder (ctx);
			LLVMPositionBuilderAtEnd (builder, dummy_bb);
			mono_llvm_build_store (builder, LLVMConstInt (LLVMInt32Type (), 1, FALSE), ctx->long_bb_break_var, TRUE, LLVM_BARRIER_NONE);
			LLVMBuildBr (builder, cbb);

			ctx->builder = builder = create_builder (ctx);
			LLVMPositionBuilderAtEnd (builder, cbb);
			ctx->bblocks [bb->block_num].end_bblock = cbb;

			nins = 0;

			emit_dbg_loc (ctx, builder, ins->cil_code);
		}

		if (has_terminator)
			/* There could be instructions after a terminator, skip them */
			break;

		if (spec [MONO_INST_DEST] != ' ' && !MONO_IS_STORE_MEMBASE (ins)) {
			sprintf (dname_buf, "t%d", ins->dreg);
			dname = dname_buf;
		}

		if (spec [MONO_INST_SRC1] != ' ' && spec [MONO_INST_SRC1] != 'v') {
			MonoInst *var = get_vreg_to_inst (cfg, ins->sreg1);

			if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) && var->opcode != OP_GSHAREDVT_ARG_REGOFFSET) {
				lhs = emit_volatile_load (ctx, ins->sreg1);
			} else {
				/* It is ok for SETRET to have an uninitialized argument */
				if (!values [ins->sreg1] && ins->opcode != OP_SETRET) {
					set_failure (ctx, "sreg1");
					return;
				}
				lhs = values [ins->sreg1];
			}
		} else {
			lhs = NULL;
		}

		if (spec [MONO_INST_SRC2] != ' ' && spec [MONO_INST_SRC2] != 'v') {
			MonoInst *var = get_vreg_to_inst (cfg, ins->sreg2);

			if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) {
				rhs = emit_volatile_load (ctx, ins->sreg2);
			} else {
				if (!values [ins->sreg2]) {
					set_failure (ctx, "sreg2");
					return;
				}
				rhs = values [ins->sreg2];
			}
		} else {
			rhs = NULL;
		}

		if (spec [MONO_INST_SRC3] != ' ' && spec [MONO_INST_SRC3] != 'v') {
			MonoInst *var = get_vreg_to_inst (cfg, ins->sreg3);

			if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) {
				arg3 = emit_volatile_load (ctx, ins->sreg3);
			} else {
				if (!values [ins->sreg3]) {
					set_failure (ctx, "sreg3");
					return;
				}
				arg3 = values [ins->sreg3];
			}
		} else {
			arg3 = NULL;
		}

		//mono_print_ins (ins);

		gboolean skip_volatile_store = FALSE;
		switch (ins->opcode) {
		case OP_NOP:
		case OP_NOT_NULL:
		case OP_LIVERANGE_START:
		case OP_LIVERANGE_END:
			break;
		case OP_ICONST:
			values [ins->dreg] = LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE);
			break;
		case OP_I8CONST:
#if TARGET_SIZEOF_VOID_P == 4
			values [ins->dreg] = LLVMConstInt (LLVMInt64Type (), GET_LONG_IMM (ins), FALSE);
#else
			values [ins->dreg] = LLVMConstInt (LLVMInt64Type (), (gint64)ins->inst_c0, FALSE);
#endif
			break;
		case OP_R8CONST:
			values [ins->dreg] = get_double_const (cfg, *(double*)ins->inst_p0);
			break;
		case OP_R4CONST:
			values [ins->dreg] = get_float_const (cfg, *(float*)ins->inst_p0);
			break;
		case OP_DUMMY_ICONST:
			values [ins->dreg] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
			break;
		case OP_DUMMY_I8CONST:
			values [ins->dreg] = LLVMConstInt (LLVMInt64Type (), 0, FALSE);
			break;
		case OP_DUMMY_R8CONST:
			values [ins->dreg] = LLVMConstReal (LLVMDoubleType (), 0.0f);
			break;
		case OP_BR: {
			LLVMBasicBlockRef target_bb = get_bb (ctx, ins->inst_target_bb);
			LLVMBuildBr (builder, target_bb);
			has_terminator = TRUE;
			break;
		}
		case OP_SWITCH: {
			int i;
			LLVMValueRef v;
			char bb_name [128];
			LLVMBasicBlockRef new_bb;
			LLVMBuilderRef new_builder;

			// The default branch is already handled
			// FIXME: Handle it here

			/* Start new bblock */
			sprintf (bb_name, "SWITCH_DEFAULT_BB%d", ctx->default_index ++);
			new_bb = LLVMAppendBasicBlock (ctx->lmethod,
bb_name); lhs = convert (ctx, lhs, LLVMInt32Type ()); v = LLVMBuildSwitch (builder, lhs, new_bb, GPOINTER_TO_UINT (ins->klass)); for (i = 0; i < GPOINTER_TO_UINT (ins->klass); ++i) { MonoBasicBlock *target_bb = ins->inst_many_bb [i]; LLVMAddCase (v, LLVMConstInt (LLVMInt32Type (), i, FALSE), get_bb (ctx, target_bb)); } new_builder = create_builder (ctx); LLVMPositionBuilderAtEnd (new_builder, new_bb); LLVMBuildUnreachable (new_builder); has_terminator = TRUE; g_assert (!ins->next); break; } case OP_SETRET: switch (linfo->ret.storage) { case LLVMArgNormal: case LLVMArgVtypeInReg: case LLVMArgVtypeAsScalar: case LLVMArgWasmVtypeAsScalar: { LLVMTypeRef ret_type = LLVMGetReturnType (LLVMGetElementType (LLVMTypeOf (method))); LLVMValueRef retval = LLVMGetUndef (ret_type); gboolean src_in_reg = FALSE; gboolean is_simd = MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (sig->ret)); switch (linfo->ret.storage) { case LLVMArgNormal: src_in_reg = TRUE; break; case LLVMArgVtypeInReg: case LLVMArgVtypeAsScalar: src_in_reg = is_simd; break; } if (src_in_reg && (!lhs || ctx->is_dead [ins->sreg1])) { /* * The method did not set its return value, probably because it * ends with a throw. */ LLVMBuildRet (builder, retval); break; } switch (linfo->ret.storage) { case LLVMArgNormal: retval = convert (ctx, lhs, type_to_llvm_type (ctx, sig->ret)); break; case LLVMArgVtypeInReg: if (is_simd) { /* The return type is an LLVM aggregate type, so a bare bitcast cannot be used to do this conversion. */ int width = mono_type_size (sig->ret, NULL); int elems = width / TARGET_SIZEOF_VOID_P; /* The return value might not be set if there is a throw */ LLVMValueRef val = LLVMBuildBitCast (builder, lhs, LLVMVectorType (IntPtrType (), elems), ""); for (int i = 0; i < elems; ++i) { LLVMValueRef element = LLVMBuildExtractElement (builder, val, const_int32 (i), ""); retval = LLVMBuildInsertValue (builder, retval, element, i, "setret_simd_vtype_in_reg"); } } else { LLVMValueRef addr = LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (ret_type, 0), ""); for (int i = 0; i < 2; ++i) { if (linfo->ret.pair_storage [i] == LLVMArgInIReg) { LLVMValueRef indexes [2], part_addr; indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); indexes [1] = LLVMConstInt (LLVMInt32Type (), i, FALSE); part_addr = LLVMBuildGEP (builder, addr, indexes, 2, ""); retval = LLVMBuildInsertValue (builder, retval, LLVMBuildLoad (builder, part_addr, ""), i, ""); } else { g_assert (linfo->ret.pair_storage [i] == LLVMArgNone); } } } break; case LLVMArgVtypeAsScalar: if (is_simd) { retval = LLVMBuildBitCast (builder, values [ins->sreg1], ret_type, "setret_simd_vtype_as_scalar"); } else { g_assert (addresses [ins->sreg1]); retval = LLVMBuildLoad (builder, LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (ret_type, 0), ""), ""); } break; case LLVMArgWasmVtypeAsScalar: g_assert (addresses [ins->sreg1]); retval = LLVMBuildLoad (builder, LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (ret_type, 0), ""), ""); break; } LLVMBuildRet (builder, retval); break; } case LLVMArgVtypeByRef: { LLVMBuildRetVoid (builder); break; } case LLVMArgGsharedvtFixed: { LLVMTypeRef ret_type = type_to_llvm_type (ctx, sig->ret); /* The return value is in lhs, need to store to the vret argument */ /* sreg1 might not be set */ if (lhs) { g_assert (cfg->vret_addr); g_assert (values [cfg->vret_addr->dreg]); LLVMBuildStore (builder, convert (ctx, lhs, ret_type), convert (ctx, values [cfg->vret_addr->dreg], LLVMPointerType 
(ret_type, 0))); } LLVMBuildRetVoid (builder); break; } case LLVMArgGsharedvtFixedVtype: { /* Already set */ LLVMBuildRetVoid (builder); break; } case LLVMArgGsharedvtVariable: { /* Already set */ LLVMBuildRetVoid (builder); break; } case LLVMArgVtypeRetAddr: { LLVMBuildRetVoid (builder); break; } case LLVMArgAsIArgs: case LLVMArgFpStruct: { LLVMTypeRef ret_type = LLVMGetReturnType (LLVMGetElementType (LLVMTypeOf (method))); LLVMValueRef retval; g_assert (addresses [ins->sreg1]); retval = LLVMBuildLoad (builder, convert (ctx, addresses [ins->sreg1], LLVMPointerType (ret_type, 0)), ""); LLVMBuildRet (builder, retval); break; } case LLVMArgNone: LLVMBuildRetVoid (builder); break; default: g_assert_not_reached (); break; } has_terminator = TRUE; break; case OP_ICOMPARE: case OP_FCOMPARE: case OP_RCOMPARE: case OP_LCOMPARE: case OP_COMPARE: case OP_ICOMPARE_IMM: case OP_LCOMPARE_IMM: case OP_COMPARE_IMM: { CompRelation rel; LLVMValueRef cmp, args [16]; gboolean likely = (ins->flags & MONO_INST_LIKELY) != 0; gboolean unlikely = FALSE; if (MONO_IS_COND_BRANCH_OP (ins->next)) { if (ins->next->inst_false_bb->out_of_line) likely = TRUE; else if (ins->next->inst_true_bb->out_of_line) unlikely = TRUE; } if (ins->next->opcode == OP_NOP) break; if (ins->next->opcode == OP_BR) /* The comparison result is not needed */ continue; rel = mono_opcode_to_cond (ins->next->opcode); if (ins->opcode == OP_ICOMPARE_IMM) { lhs = convert (ctx, lhs, LLVMInt32Type ()); rhs = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE); } if (ins->opcode == OP_LCOMPARE_IMM) { lhs = convert (ctx, lhs, LLVMInt64Type ()); rhs = LLVMConstInt (LLVMInt64Type (), GET_LONG_IMM (ins), FALSE); } if (ins->opcode == OP_LCOMPARE) { lhs = convert (ctx, lhs, LLVMInt64Type ()); rhs = convert (ctx, rhs, LLVMInt64Type ()); } if (ins->opcode == OP_ICOMPARE) { lhs = convert (ctx, lhs, LLVMInt32Type ()); rhs = convert (ctx, rhs, LLVMInt32Type ()); } if (lhs && rhs) { if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind) rhs = convert (ctx, rhs, LLVMTypeOf (lhs)); else if (LLVMGetTypeKind (LLVMTypeOf (rhs)) == LLVMPointerTypeKind) lhs = convert (ctx, lhs, LLVMTypeOf (rhs)); } /* We use COMPARE+SETcc/Bcc, llvm uses SETcc+br cond */ if (ins->opcode == OP_FCOMPARE) { cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMDoubleType ()), convert (ctx, rhs, LLVMDoubleType ()), ""); } else if (ins->opcode == OP_RCOMPARE) { cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMFloatType ()), convert (ctx, rhs, LLVMFloatType ()), ""); } else if (ins->opcode == OP_COMPARE_IMM) { LLVMIntPredicate llvm_pred = cond_to_llvm_cond [rel]; if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind && ins->inst_imm == 0) { // We are emitting a NULL check for a pointer gboolean nonnull = mono_llvm_is_nonnull (lhs); if (nonnull && llvm_pred == LLVMIntEQ) cmp = LLVMConstInt (LLVMInt1Type (), FALSE, FALSE); else if (nonnull && llvm_pred == LLVMIntNE) cmp = LLVMConstInt (LLVMInt1Type (), TRUE, FALSE); else cmp = LLVMBuildICmp (builder, llvm_pred, lhs, LLVMConstNull (LLVMTypeOf (lhs)), ""); } else { cmp = LLVMBuildICmp (builder, llvm_pred, convert (ctx, lhs, IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), ""); } } else if (ins->opcode == OP_LCOMPARE_IMM) { cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], lhs, rhs, ""); } else if (ins->opcode == OP_COMPARE) { if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind && LLVMTypeOf (lhs) == LLVMTypeOf (rhs)) cmp = LLVMBuildICmp 
(builder, cond_to_llvm_cond [rel], lhs, rhs, ""); else cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], convert (ctx, lhs, IntPtrType ()), convert (ctx, rhs, IntPtrType ()), ""); } else cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], lhs, rhs, ""); if (likely || unlikely) { args [0] = cmp; args [1] = LLVMConstInt (LLVMInt1Type (), likely ? 1 : 0, FALSE); cmp = call_intrins (ctx, INTRINS_EXPECT_I1, args, ""); } if (MONO_IS_COND_BRANCH_OP (ins->next)) { if (ins->next->inst_true_bb == ins->next->inst_false_bb) { /* * If the target bb contains PHI instructions, LLVM requires * two PHI entries for this bblock, while we only generate one. * So convert this to an unconditional bblock. (bxc #171). */ LLVMBuildBr (builder, get_bb (ctx, ins->next->inst_true_bb)); } else { LLVMBuildCondBr (builder, cmp, get_bb (ctx, ins->next->inst_true_bb), get_bb (ctx, ins->next->inst_false_bb)); } has_terminator = TRUE; } else if (MONO_IS_SETCC (ins->next)) { sprintf (dname_buf, "t%d", ins->next->dreg); dname = dname_buf; values [ins->next->dreg] = LLVMBuildZExt (builder, cmp, LLVMInt32Type (), dname); /* Add stores for volatile variables */ emit_volatile_store (ctx, ins->next->dreg); } else if (MONO_IS_COND_EXC (ins->next)) { gboolean force_explicit_branch = FALSE; if (bb->region != -1) { /* Don't tag null check branches in exception-handling * regions with `make.implicit`. */ force_explicit_branch = TRUE; } emit_cond_system_exception (ctx, bb, (const char*)ins->next->inst_p1, cmp, force_explicit_branch); if (!ctx_ok (ctx)) break; builder = ctx->builder; } else { set_failure (ctx, "next"); break; } ins = ins->next; break; } case OP_FCEQ: case OP_FCNEQ: case OP_FCLT: case OP_FCLT_UN: case OP_FCGT: case OP_FCGT_UN: case OP_FCGE: case OP_FCLE: { CompRelation rel; LLVMValueRef cmp; rel = mono_opcode_to_cond (ins->opcode); cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMDoubleType ()), convert (ctx, rhs, LLVMDoubleType ()), ""); values [ins->dreg] = LLVMBuildZExt (builder, cmp, LLVMInt32Type (), dname); break; } case OP_RCEQ: case OP_RCNEQ: case OP_RCLT: case OP_RCLT_UN: case OP_RCGT: case OP_RCGT_UN: { CompRelation rel; LLVMValueRef cmp; rel = mono_opcode_to_cond (ins->opcode); cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMFloatType ()), convert (ctx, rhs, LLVMFloatType ()), ""); values [ins->dreg] = LLVMBuildZExt (builder, cmp, LLVMInt32Type (), dname); break; } case OP_PHI: case OP_FPHI: case OP_VPHI: case OP_XPHI: { // Handled above skip_volatile_store = TRUE; break; } case OP_MOVE: case OP_LMOVE: case OP_XMOVE: case OP_SETFRET: g_assert (lhs); values [ins->dreg] = lhs; break; case OP_FMOVE: case OP_RMOVE: { MonoInst *var = get_vreg_to_inst (cfg, ins->dreg); g_assert (lhs); values [ins->dreg] = lhs; if (var && m_class_get_byval_arg (var->klass)->type == MONO_TYPE_R4) { /* * This is added by the spilling pass in case of the JIT, * but we have to do it ourselves. 
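			 * (Editor's note, an assumption rather than original text: the JIT's
			 * spill pass normally narrows R4 locals back to single precision when
			 * they are spilled; since LLVM-compiled code does not run that pass,
			 * the explicit convert () to LLVMFloatType () below plays the same role.)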
*/ values [ins->dreg] = convert (ctx, values [ins->dreg], LLVMFloatType ()); } break; } case OP_MOVE_F_TO_I4: { values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildFPTrunc (builder, lhs, LLVMFloatType (), ""), LLVMInt32Type (), ""); break; } case OP_MOVE_I4_TO_F: { values [ins->dreg] = LLVMBuildFPExt (builder, LLVMBuildBitCast (builder, lhs, LLVMFloatType (), ""), LLVMDoubleType (), ""); break; } case OP_MOVE_F_TO_I8: { values [ins->dreg] = LLVMBuildBitCast (builder, lhs, LLVMInt64Type (), ""); break; } case OP_MOVE_I8_TO_F: { values [ins->dreg] = LLVMBuildBitCast (builder, lhs, LLVMDoubleType (), ""); break; } case OP_IADD: case OP_ISUB: case OP_IAND: case OP_IMUL: case OP_IDIV: case OP_IDIV_UN: case OP_IREM: case OP_IREM_UN: case OP_IOR: case OP_IXOR: case OP_ISHL: case OP_ISHR: case OP_ISHR_UN: case OP_FADD: case OP_FSUB: case OP_FMUL: case OP_FDIV: case OP_LADD: case OP_LSUB: case OP_LMUL: case OP_LDIV: case OP_LDIV_UN: case OP_LREM: case OP_LREM_UN: case OP_LAND: case OP_LOR: case OP_LXOR: case OP_LSHL: case OP_LSHR: case OP_LSHR_UN: lhs = convert (ctx, lhs, regtype_to_llvm_type (spec [MONO_INST_DEST])); rhs = convert (ctx, rhs, regtype_to_llvm_type (spec [MONO_INST_DEST])); emit_div_check (ctx, builder, bb, ins, lhs, rhs); if (!ctx_ok (ctx)) break; builder = ctx->builder; switch (ins->opcode) { case OP_IADD: case OP_LADD: values [ins->dreg] = LLVMBuildAdd (builder, lhs, rhs, dname); break; case OP_ISUB: case OP_LSUB: values [ins->dreg] = LLVMBuildSub (builder, lhs, rhs, dname); break; case OP_IMUL: case OP_LMUL: values [ins->dreg] = LLVMBuildMul (builder, lhs, rhs, dname); break; case OP_IREM: case OP_LREM: values [ins->dreg] = LLVMBuildSRem (builder, lhs, rhs, dname); break; case OP_IREM_UN: case OP_LREM_UN: values [ins->dreg] = LLVMBuildURem (builder, lhs, rhs, dname); break; case OP_IDIV: case OP_LDIV: values [ins->dreg] = LLVMBuildSDiv (builder, lhs, rhs, dname); break; case OP_IDIV_UN: case OP_LDIV_UN: values [ins->dreg] = LLVMBuildUDiv (builder, lhs, rhs, dname); break; case OP_FDIV: case OP_RDIV: values [ins->dreg] = LLVMBuildFDiv (builder, lhs, rhs, dname); break; case OP_IAND: case OP_LAND: values [ins->dreg] = LLVMBuildAnd (builder, lhs, rhs, dname); break; case OP_IOR: case OP_LOR: values [ins->dreg] = LLVMBuildOr (builder, lhs, rhs, dname); break; case OP_IXOR: case OP_LXOR: values [ins->dreg] = LLVMBuildXor (builder, lhs, rhs, dname); break; case OP_ISHL: case OP_LSHL: values [ins->dreg] = LLVMBuildShl (builder, lhs, rhs, dname); break; case OP_ISHR: case OP_LSHR: values [ins->dreg] = LLVMBuildAShr (builder, lhs, rhs, dname); break; case OP_ISHR_UN: case OP_LSHR_UN: values [ins->dreg] = LLVMBuildLShr (builder, lhs, rhs, dname); break; case OP_FADD: values [ins->dreg] = LLVMBuildFAdd (builder, lhs, rhs, dname); break; case OP_FSUB: values [ins->dreg] = LLVMBuildFSub (builder, lhs, rhs, dname); break; case OP_FMUL: values [ins->dreg] = LLVMBuildFMul (builder, lhs, rhs, dname); break; default: g_assert_not_reached (); } break; case OP_RADD: case OP_RSUB: case OP_RMUL: case OP_RDIV: { lhs = convert (ctx, lhs, LLVMFloatType ()); rhs = convert (ctx, rhs, LLVMFloatType ()); switch (ins->opcode) { case OP_RADD: values [ins->dreg] = LLVMBuildFAdd (builder, lhs, rhs, dname); break; case OP_RSUB: values [ins->dreg] = LLVMBuildFSub (builder, lhs, rhs, dname); break; case OP_RMUL: values [ins->dreg] = LLVMBuildFMul (builder, lhs, rhs, dname); break; case OP_RDIV: values [ins->dreg] = LLVMBuildFDiv (builder, lhs, rhs, dname); break; default: g_assert_not_reached (); break; } 
break; } case OP_IADD_IMM: case OP_ISUB_IMM: case OP_IMUL_IMM: case OP_IREM_IMM: case OP_IREM_UN_IMM: case OP_IDIV_IMM: case OP_IDIV_UN_IMM: case OP_IAND_IMM: case OP_IOR_IMM: case OP_IXOR_IMM: case OP_ISHL_IMM: case OP_ISHR_IMM: case OP_ISHR_UN_IMM: case OP_LADD_IMM: case OP_LSUB_IMM: case OP_LMUL_IMM: case OP_LREM_IMM: case OP_LAND_IMM: case OP_LOR_IMM: case OP_LXOR_IMM: case OP_LSHL_IMM: case OP_LSHR_IMM: case OP_LSHR_UN_IMM: case OP_ADD_IMM: case OP_AND_IMM: case OP_MUL_IMM: case OP_SHL_IMM: case OP_SHR_IMM: case OP_SHR_UN_IMM: { LLVMValueRef imm; if (spec [MONO_INST_SRC1] == 'l') { imm = LLVMConstInt (LLVMInt64Type (), GET_LONG_IMM (ins), FALSE); } else { imm = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE); } emit_div_check (ctx, builder, bb, ins, lhs, imm); if (!ctx_ok (ctx)) break; builder = ctx->builder; #if TARGET_SIZEOF_VOID_P == 4 if (ins->opcode == OP_LSHL_IMM || ins->opcode == OP_LSHR_IMM || ins->opcode == OP_LSHR_UN_IMM) imm = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE); #endif if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind) lhs = convert (ctx, lhs, IntPtrType ()); imm = convert (ctx, imm, LLVMTypeOf (lhs)); switch (ins->opcode) { case OP_IADD_IMM: case OP_LADD_IMM: case OP_ADD_IMM: values [ins->dreg] = LLVMBuildAdd (builder, lhs, imm, dname); break; case OP_ISUB_IMM: case OP_LSUB_IMM: values [ins->dreg] = LLVMBuildSub (builder, lhs, imm, dname); break; case OP_IMUL_IMM: case OP_MUL_IMM: case OP_LMUL_IMM: values [ins->dreg] = LLVMBuildMul (builder, lhs, imm, dname); break; case OP_IDIV_IMM: case OP_LDIV_IMM: values [ins->dreg] = LLVMBuildSDiv (builder, lhs, imm, dname); break; case OP_IDIV_UN_IMM: case OP_LDIV_UN_IMM: values [ins->dreg] = LLVMBuildUDiv (builder, lhs, imm, dname); break; case OP_IREM_IMM: case OP_LREM_IMM: values [ins->dreg] = LLVMBuildSRem (builder, lhs, imm, dname); break; case OP_IREM_UN_IMM: values [ins->dreg] = LLVMBuildURem (builder, lhs, imm, dname); break; case OP_IAND_IMM: case OP_LAND_IMM: case OP_AND_IMM: values [ins->dreg] = LLVMBuildAnd (builder, lhs, imm, dname); break; case OP_IOR_IMM: case OP_LOR_IMM: values [ins->dreg] = LLVMBuildOr (builder, lhs, imm, dname); break; case OP_IXOR_IMM: case OP_LXOR_IMM: values [ins->dreg] = LLVMBuildXor (builder, lhs, imm, dname); break; case OP_ISHL_IMM: case OP_LSHL_IMM: values [ins->dreg] = LLVMBuildShl (builder, lhs, imm, dname); break; case OP_SHL_IMM: if (TARGET_SIZEOF_VOID_P == 8) { /* The IL is not regular */ lhs = convert (ctx, lhs, LLVMInt64Type ()); imm = convert (ctx, imm, LLVMInt64Type ()); } values [ins->dreg] = LLVMBuildShl (builder, lhs, imm, dname); break; case OP_ISHR_IMM: case OP_LSHR_IMM: case OP_SHR_IMM: values [ins->dreg] = LLVMBuildAShr (builder, lhs, imm, dname); break; case OP_ISHR_UN_IMM: /* This is used to implement conv.u4, so the lhs could be an i8 */ lhs = convert (ctx, lhs, LLVMInt32Type ()); imm = convert (ctx, imm, LLVMInt32Type ()); values [ins->dreg] = LLVMBuildLShr (builder, lhs, imm, dname); break; case OP_LSHR_UN_IMM: case OP_SHR_UN_IMM: values [ins->dreg] = LLVMBuildLShr (builder, lhs, imm, dname); break; default: g_assert_not_reached (); } break; } case OP_INEG: values [ins->dreg] = LLVMBuildSub (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), convert (ctx, lhs, LLVMInt32Type ()), dname); break; case OP_LNEG: if (LLVMTypeOf (lhs) != LLVMInt64Type ()) lhs = convert (ctx, lhs, LLVMInt64Type ()); values [ins->dreg] = LLVMBuildSub (builder, LLVMConstInt (LLVMInt64Type (), 0, FALSE), lhs, dname); break; case OP_FNEG: lhs = convert (ctx, 
lhs, LLVMDoubleType ()); values [ins->dreg] = LLVMBuildFNeg (builder, lhs, dname); break; case OP_RNEG: lhs = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = LLVMBuildFNeg (builder, lhs, dname); break; case OP_INOT: { guint32 v = 0xffffffff; values [ins->dreg] = LLVMBuildXor (builder, LLVMConstInt (LLVMInt32Type (), v, FALSE), convert (ctx, lhs, LLVMInt32Type ()), dname); break; } case OP_LNOT: { if (LLVMTypeOf (lhs) != LLVMInt64Type ()) lhs = convert (ctx, lhs, LLVMInt64Type ()); guint64 v = 0xffffffffffffffffLL; values [ins->dreg] = LLVMBuildXor (builder, LLVMConstInt (LLVMInt64Type (), v, FALSE), lhs, dname); break; } #if defined(TARGET_X86) || defined(TARGET_AMD64) case OP_X86_LEA: { LLVMValueRef v1, v2; rhs = LLVMBuildSExt (builder, convert (ctx, rhs, LLVMInt32Type ()), LLVMInt64Type (), ""); v1 = LLVMBuildMul (builder, convert (ctx, rhs, IntPtrType ()), LLVMConstInt (IntPtrType (), ((unsigned long long)1 << ins->backend.shift_amount), FALSE), ""); v2 = LLVMBuildAdd (builder, convert (ctx, lhs, IntPtrType ()), v1, ""); values [ins->dreg] = LLVMBuildAdd (builder, v2, LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), dname); break; } case OP_X86_BSF32: case OP_X86_BSF64: { LLVMValueRef args [] = { lhs, LLVMConstInt (LLVMInt1Type (), 1, TRUE), }; int op = ins->opcode == OP_X86_BSF32 ? INTRINS_CTTZ_I32 : INTRINS_CTTZ_I64; values [ins->dreg] = call_intrins (ctx, op, args, dname); break; } case OP_X86_BSR32: case OP_X86_BSR64: { LLVMValueRef args [] = { lhs, LLVMConstInt (LLVMInt1Type (), 1, TRUE), }; int op = ins->opcode == OP_X86_BSR32 ? INTRINS_CTLZ_I32 : INTRINS_CTLZ_I64; LLVMValueRef width = ins->opcode == OP_X86_BSR32 ? const_int32 (31) : const_int64 (63); LLVMValueRef tz = call_intrins (ctx, op, args, ""); values [ins->dreg] = LLVMBuildXor (builder, tz, width, dname); break; } #endif case OP_ICONV_TO_I1: case OP_ICONV_TO_I2: case OP_ICONV_TO_I4: case OP_ICONV_TO_U1: case OP_ICONV_TO_U2: case OP_ICONV_TO_U4: case OP_LCONV_TO_I1: case OP_LCONV_TO_I2: case OP_LCONV_TO_U1: case OP_LCONV_TO_U2: case OP_LCONV_TO_U4: { gboolean sign; sign = (ins->opcode == OP_ICONV_TO_I1) || (ins->opcode == OP_ICONV_TO_I2) || (ins->opcode == OP_ICONV_TO_I4) || (ins->opcode == OP_LCONV_TO_I1) || (ins->opcode == OP_LCONV_TO_I2); /* Have to do two casts since our vregs have type int */ v = LLVMBuildTrunc (builder, lhs, op_to_llvm_type (ins->opcode), ""); if (sign) values [ins->dreg] = LLVMBuildSExt (builder, v, LLVMInt32Type (), dname); else values [ins->dreg] = LLVMBuildZExt (builder, v, LLVMInt32Type (), dname); break; } case OP_ICONV_TO_I8: values [ins->dreg] = LLVMBuildSExt (builder, lhs, LLVMInt64Type (), dname); break; case OP_ICONV_TO_U8: values [ins->dreg] = LLVMBuildZExt (builder, lhs, LLVMInt64Type (), dname); break; case OP_FCONV_TO_I4: case OP_RCONV_TO_I4: values [ins->dreg] = LLVMBuildFPToSI (builder, lhs, LLVMInt32Type (), dname); break; case OP_FCONV_TO_I1: case OP_RCONV_TO_I1: values [ins->dreg] = LLVMBuildSExt (builder, LLVMBuildFPToSI (builder, lhs, LLVMInt8Type (), dname), LLVMInt32Type (), ""); break; case OP_FCONV_TO_U1: case OP_RCONV_TO_U1: values [ins->dreg] = LLVMBuildZExt (builder, LLVMBuildTrunc (builder, LLVMBuildFPToUI (builder, lhs, IntPtrType (), dname), LLVMInt8Type (), ""), LLVMInt32Type (), ""); break; case OP_FCONV_TO_I2: case OP_RCONV_TO_I2: values [ins->dreg] = LLVMBuildSExt (builder, LLVMBuildFPToSI (builder, lhs, LLVMInt16Type (), dname), LLVMInt32Type (), ""); break; case OP_FCONV_TO_U2: case OP_RCONV_TO_U2: values [ins->dreg] = LLVMBuildZExt (builder, 
LLVMBuildFPToUI (builder, lhs, LLVMInt16Type (), dname), LLVMInt32Type (), ""); break; case OP_FCONV_TO_U4: case OP_RCONV_TO_U4: values [ins->dreg] = LLVMBuildFPToUI (builder, lhs, LLVMInt32Type (), dname); break; case OP_FCONV_TO_U8: case OP_RCONV_TO_U8: values [ins->dreg] = LLVMBuildFPToUI (builder, lhs, LLVMInt64Type (), dname); break; case OP_FCONV_TO_I8: case OP_RCONV_TO_I8: values [ins->dreg] = LLVMBuildFPToSI (builder, lhs, LLVMInt64Type (), dname); break; case OP_FCONV_TO_I: case OP_RCONV_TO_I: values [ins->dreg] = LLVMBuildFPToSI (builder, lhs, IntPtrType (), dname); break; case OP_ICONV_TO_R8: case OP_LCONV_TO_R8: values [ins->dreg] = LLVMBuildSIToFP (builder, lhs, LLVMDoubleType (), dname); break; case OP_ICONV_TO_R_UN: case OP_LCONV_TO_R_UN: values [ins->dreg] = LLVMBuildUIToFP (builder, lhs, LLVMDoubleType (), dname); break; #if TARGET_SIZEOF_VOID_P == 4 case OP_LCONV_TO_U: #endif case OP_LCONV_TO_I4: values [ins->dreg] = LLVMBuildTrunc (builder, lhs, LLVMInt32Type (), dname); break; case OP_ICONV_TO_R4: case OP_LCONV_TO_R4: v = LLVMBuildSIToFP (builder, lhs, LLVMFloatType (), ""); if (cfg->r4fp) values [ins->dreg] = v; else values [ins->dreg] = LLVMBuildFPExt (builder, v, LLVMDoubleType (), dname); break; case OP_FCONV_TO_R4: v = LLVMBuildFPTrunc (builder, lhs, LLVMFloatType (), ""); if (cfg->r4fp) values [ins->dreg] = v; else values [ins->dreg] = LLVMBuildFPExt (builder, v, LLVMDoubleType (), dname); break; case OP_RCONV_TO_R8: values [ins->dreg] = LLVMBuildFPExt (builder, lhs, LLVMDoubleType (), dname); break; case OP_RCONV_TO_R4: values [ins->dreg] = lhs; break; case OP_SEXT_I4: values [ins->dreg] = LLVMBuildSExt (builder, convert (ctx, lhs, LLVMInt32Type ()), LLVMInt64Type (), dname); break; case OP_ZEXT_I4: values [ins->dreg] = LLVMBuildZExt (builder, convert (ctx, lhs, LLVMInt32Type ()), LLVMInt64Type (), dname); break; case OP_TRUNC_I4: values [ins->dreg] = LLVMBuildTrunc (builder, lhs, LLVMInt32Type (), dname); break; case OP_LOCALLOC_IMM: { LLVMValueRef v; guint32 size = ins->inst_imm; size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1); v = mono_llvm_build_alloca (builder, LLVMInt8Type (), LLVMConstInt (LLVMInt32Type (), size, FALSE), MONO_ARCH_FRAME_ALIGNMENT, ""); if (ins->flags & MONO_INST_INIT) emit_memset (ctx, builder, v, const_int32 (size), MONO_ARCH_FRAME_ALIGNMENT); values [ins->dreg] = v; break; } case OP_LOCALLOC: { LLVMValueRef v, size; size = LLVMBuildAnd (builder, LLVMBuildAdd (builder, convert (ctx, lhs, LLVMInt32Type ()), LLVMConstInt (LLVMInt32Type (), MONO_ARCH_FRAME_ALIGNMENT - 1, FALSE), ""), LLVMConstInt (LLVMInt32Type (), ~ (MONO_ARCH_FRAME_ALIGNMENT - 1), FALSE), ""); v = mono_llvm_build_alloca (builder, LLVMInt8Type (), size, MONO_ARCH_FRAME_ALIGNMENT, ""); if (ins->flags & MONO_INST_INIT) emit_memset (ctx, builder, v, size, MONO_ARCH_FRAME_ALIGNMENT); values [ins->dreg] = v; break; } case OP_LOADI1_MEMBASE: case OP_LOADU1_MEMBASE: case OP_LOADI2_MEMBASE: case OP_LOADU2_MEMBASE: case OP_LOADI4_MEMBASE: case OP_LOADU4_MEMBASE: case OP_LOADI8_MEMBASE: case OP_LOADR4_MEMBASE: case OP_LOADR8_MEMBASE: case OP_LOAD_MEMBASE: case OP_LOADI8_MEM: case OP_LOADU1_MEM: case OP_LOADU2_MEM: case OP_LOADI4_MEM: case OP_LOADU4_MEM: case OP_LOAD_MEM: { int size = 8; LLVMValueRef base, index, addr; LLVMTypeRef t; gboolean sext = FALSE, zext = FALSE; gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0; gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0; gboolean is_unaligned = (ins->flags & MONO_INST_UNALIGNED) 
!= 0; t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext); if (sext || zext) dname = (char*)""; if ((ins->opcode == OP_LOADI8_MEM) || (ins->opcode == OP_LOAD_MEM) || (ins->opcode == OP_LOADI4_MEM) || (ins->opcode == OP_LOADU4_MEM) || (ins->opcode == OP_LOADU1_MEM) || (ins->opcode == OP_LOADU2_MEM)) { addr = LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE); base = addr; } else { /* _MEMBASE */ base = lhs; if (ins->inst_offset == 0) { LLVMValueRef gep_base, gep_offset; if (mono_llvm_can_be_gep (base, &gep_base, &gep_offset)) { addr = LLVMBuildGEP (builder, convert (ctx, gep_base, LLVMPointerType (LLVMInt8Type (), 0)), &gep_offset, 1, ""); } else { addr = base; } } else if (ins->inst_offset % size != 0) { /* Unaligned load */ index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (LLVMInt8Type (), 0)), &index, 1, ""); } else { index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, ""); } } addr = convert (ctx, addr, LLVMPointerType (t, 0)); if (is_unaligned) values [ins->dreg] = mono_llvm_build_aligned_load (builder, addr, dname, is_volatile, 1); else values [ins->dreg] = emit_load (ctx, bb, &builder, size, addr, base, dname, is_faulting, is_volatile, LLVM_BARRIER_NONE); if (!(is_faulting || is_volatile) && (ins->flags & MONO_INST_INVARIANT_LOAD)) { /* * These will signal LLVM that these loads do not alias any stores, and * they can't fail, allowing them to be hoisted out of loops. */ set_invariant_load_flag (values [ins->dreg]); } if (sext) values [ins->dreg] = LLVMBuildSExt (builder, values [ins->dreg], LLVMInt32Type (), dname); else if (zext) values [ins->dreg] = LLVMBuildZExt (builder, values [ins->dreg], LLVMInt32Type (), dname); else if (!cfg->r4fp && ins->opcode == OP_LOADR4_MEMBASE) values [ins->dreg] = LLVMBuildFPExt (builder, values [ins->dreg], LLVMDoubleType (), dname); break; } case OP_STOREI1_MEMBASE_REG: case OP_STOREI2_MEMBASE_REG: case OP_STOREI4_MEMBASE_REG: case OP_STOREI8_MEMBASE_REG: case OP_STORER4_MEMBASE_REG: case OP_STORER8_MEMBASE_REG: case OP_STORE_MEMBASE_REG: { int size = 8; LLVMValueRef index, addr, base; LLVMTypeRef t; gboolean sext = FALSE, zext = FALSE; gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0; gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0; gboolean is_unaligned = (ins->flags & MONO_INST_UNALIGNED) != 0; if (!values [ins->inst_destbasereg]) { set_failure (ctx, "inst_destbasereg"); break; } t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext); base = values [ins->inst_destbasereg]; LLVMValueRef gep_base, gep_offset; if (ins->inst_offset == 0 && mono_llvm_can_be_gep (base, &gep_base, &gep_offset)) { addr = LLVMBuildGEP (builder, convert (ctx, gep_base, LLVMPointerType (LLVMInt8Type (), 0)), &gep_offset, 1, ""); } else if (ins->inst_offset % size != 0) { /* Unaligned store */ index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (LLVMInt8Type (), 0)), &index, 1, ""); } else { index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, ""); } if (is_volatile && LLVMGetInstructionOpcode (base) == LLVMAlloca && !(ins->flags & MONO_INST_VOLATILE)) /* Storing to an alloca cannot fail */ is_volatile = FALSE; LLVMValueRef srcval = convert (ctx, values 
[ins->sreg1], t); LLVMValueRef ptrdst = convert (ctx, addr, LLVMPointerType (t, 0)); if (is_unaligned) mono_llvm_build_aligned_store (builder, srcval, ptrdst, is_volatile, 1); else emit_store (ctx, bb, &builder, size, srcval, ptrdst, base, is_faulting, is_volatile); break; } case OP_STOREI1_MEMBASE_IMM: case OP_STOREI2_MEMBASE_IMM: case OP_STOREI4_MEMBASE_IMM: case OP_STOREI8_MEMBASE_IMM: case OP_STORE_MEMBASE_IMM: { int size = 8; LLVMValueRef index, addr, base; LLVMTypeRef t; gboolean sext = FALSE, zext = FALSE; gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0; gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0; gboolean is_unaligned = (ins->flags & MONO_INST_UNALIGNED) != 0; t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext); base = values [ins->inst_destbasereg]; LLVMValueRef gep_base, gep_offset; if (ins->inst_offset == 0 && mono_llvm_can_be_gep (base, &gep_base, &gep_offset)) { addr = LLVMBuildGEP (builder, convert (ctx, gep_base, LLVMPointerType (LLVMInt8Type (), 0)), &gep_offset, 1, ""); } else if (ins->inst_offset % size != 0) { /* Unaligned store */ index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (LLVMInt8Type (), 0)), &index, 1, ""); } else { index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, ""); } LLVMValueRef srcval = convert (ctx, LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), t); LLVMValueRef ptrdst = convert (ctx, addr, LLVMPointerType (t, 0)); if (is_unaligned) mono_llvm_build_aligned_store (builder, srcval, ptrdst, is_volatile, 1); else emit_store (ctx, bb, &builder, size, srcval, ptrdst, base, is_faulting, is_volatile); break; } case OP_CHECK_THIS: emit_load (ctx, bb, &builder, TARGET_SIZEOF_VOID_P, convert (ctx, lhs, LLVMPointerType (IntPtrType (), 0)), lhs, "", TRUE, FALSE, LLVM_BARRIER_NONE); break; case OP_OUTARG_VTRETADDR: break; case OP_VOIDCALL: case OP_CALL: case OP_LCALL: case OP_FCALL: case OP_RCALL: case OP_VCALL: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_FCALL_MEMBASE: case OP_RCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VOIDCALL_REG: case OP_CALL_REG: case OP_LCALL_REG: case OP_FCALL_REG: case OP_RCALL_REG: case OP_VCALL_REG: { process_call (ctx, bb, &builder, ins); break; } case OP_AOTCONST: { MonoJumpInfoType ji_type = ins->inst_c1; gpointer ji_data = ins->inst_p0; if (ji_type == MONO_PATCH_INFO_ICALL_ADDR) { char *symbol = mono_aot_get_direct_call_symbol (MONO_PATCH_INFO_ICALL_ADDR_CALL, ji_data); if (symbol) { /* * Avoid emitting a got entry for these since the method is directly called, and it might not be * resolvable at runtime using dlsym (). 
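				 * (Editor's note, an assumption: when mono_aot_get_direct_call_symbol ()
				 * returns a name, the call site is emitted as a direct call to that
				 * symbol elsewhere, so the address value produced here is unused and
				 * a GOT entry would only create an unresolvable symbol reference.)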
*/ g_free (symbol); values [ins->dreg] = LLVMConstInt (IntPtrType (), 0, FALSE); break; } } values [ins->dreg] = get_aotconst (ctx, ji_type, ji_data, LLVMPointerType (IntPtrType (), 0)); break; } case OP_MEMMOVE: { int argn = 0; LLVMValueRef args [5]; args [argn++] = convert (ctx, values [ins->sreg1], LLVMPointerType (LLVMInt8Type (), 0)); args [argn++] = convert (ctx, values [ins->sreg2], LLVMPointerType (LLVMInt8Type (), 0)); args [argn++] = convert (ctx, values [ins->sreg3], LLVMInt64Type ()); args [argn++] = LLVMConstInt (LLVMInt1Type (), 0, FALSE); // is_volatile call_intrins (ctx, INTRINS_MEMMOVE, args, ""); break; } case OP_NOT_REACHED: LLVMBuildUnreachable (builder); has_terminator = TRUE; g_assert (bb->block_num < cfg->max_block_num); ctx->unreachable [bb->block_num] = TRUE; /* Might have instructions after this */ while (ins->next) { MonoInst *next = ins->next; /* * FIXME: If later code uses the regs defined by these instructions, * compilation will fail. */ const char *spec = INS_INFO (next->opcode); if (spec [MONO_INST_DEST] == 'i' && !MONO_IS_STORE_MEMBASE (next)) ctx->values [next->dreg] = LLVMConstNull (LLVMInt32Type ()); MONO_DELETE_INS (bb, next); } break; case OP_LDADDR: { MonoInst *var = ins->inst_i0; MonoClass *klass = var->klass; if (var->opcode == OP_VTARG_ADDR && !MONO_CLASS_IS_SIMD(cfg, klass)) { /* The variable contains the vtype address */ values [ins->dreg] = values [var->dreg]; } else if (var->opcode == OP_GSHAREDVT_LOCAL) { values [ins->dreg] = emit_gsharedvt_ldaddr (ctx, var->dreg); } else { values [ins->dreg] = addresses [var->dreg]; } break; } case OP_SIN: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_SIN, args, dname); break; } case OP_SINF: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_SINF, args, dname); break; } case OP_EXP: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_EXP, args, dname); break; } case OP_EXPF: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_EXPF, args, dname); break; } case OP_LOG2: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_LOG2, args, dname); break; } case OP_LOG2F: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_LOG2F, args, dname); break; } case OP_LOG10: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_LOG10, args, dname); break; } case OP_LOG10F: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_LOG10F, args, dname); break; } case OP_LOG: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_LOG, args, dname); break; } case OP_TRUNC: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_TRUNC, args, dname); break; } case OP_TRUNCF: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_TRUNCF, args, dname); break; } case OP_COS: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins 
(ctx, INTRINS_COS, args, dname); break; } case OP_COSF: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_COSF, args, dname); break; } case OP_SQRT: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_SQRT, args, dname); break; } case OP_SQRTF: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_SQRTF, args, dname); break; } case OP_FLOOR: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_FLOOR, args, dname); break; } case OP_FLOORF: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_FLOORF, args, dname); break; } case OP_CEIL: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_CEIL, args, dname); break; } case OP_CEILF: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_CEILF, args, dname); break; } case OP_FMA: { LLVMValueRef args [3]; args [0] = convert (ctx, values [ins->sreg1], LLVMDoubleType ()); args [1] = convert (ctx, values [ins->sreg2], LLVMDoubleType ()); args [2] = convert (ctx, values [ins->sreg3], LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_FMA, args, dname); break; } case OP_FMAF: { LLVMValueRef args [3]; args [0] = convert (ctx, values [ins->sreg1], LLVMFloatType ()); args [1] = convert (ctx, values [ins->sreg2], LLVMFloatType ()); args [2] = convert (ctx, values [ins->sreg3], LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_FMAF, args, dname); break; } case OP_ABS: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_FABS, args, dname); break; } case OP_ABSF: { LLVMValueRef args [1]; #ifdef TARGET_AMD64 args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_ABSF, args, dname); #else /* llvm.fabs not supported on all platforms */ args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_FABS, args, dname); values [ins->dreg] = convert (ctx, values [ins->dreg], LLVMFloatType ()); #endif break; } case OP_RPOW: { LLVMValueRef args [2]; args [0] = convert (ctx, lhs, LLVMFloatType ()); args [1] = convert (ctx, rhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_POWF, args, dname); break; } case OP_FPOW: { LLVMValueRef args [2]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); args [1] = convert (ctx, rhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_POW, args, dname); break; } case OP_FCOPYSIGN: { LLVMValueRef args [2]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); args [1] = convert (ctx, rhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_COPYSIGN, args, dname); break; } case OP_RCOPYSIGN: { LLVMValueRef args [2]; args [0] = convert (ctx, lhs, LLVMFloatType ()); args [1] = convert (ctx, rhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_COPYSIGNF, args, dname); break; } case OP_IMIN: case OP_LMIN: case OP_IMAX: case OP_LMAX: case OP_IMIN_UN: case OP_LMIN_UN: case OP_IMAX_UN: case OP_LMAX_UN: case OP_FMIN: case OP_FMAX: case OP_RMIN: case OP_RMAX: { LLVMValueRef v; lhs = convert (ctx, lhs, 
regtype_to_llvm_type (spec [MONO_INST_DEST])); rhs = convert (ctx, rhs, regtype_to_llvm_type (spec [MONO_INST_DEST])); switch (ins->opcode) { case OP_IMIN: case OP_LMIN: v = LLVMBuildICmp (builder, LLVMIntSLE, lhs, rhs, ""); break; case OP_IMAX: case OP_LMAX: v = LLVMBuildICmp (builder, LLVMIntSGE, lhs, rhs, ""); break; case OP_IMIN_UN: case OP_LMIN_UN: v = LLVMBuildICmp (builder, LLVMIntULE, lhs, rhs, ""); break; case OP_IMAX_UN: case OP_LMAX_UN: v = LLVMBuildICmp (builder, LLVMIntUGE, lhs, rhs, ""); break; case OP_FMAX: case OP_RMAX: v = LLVMBuildFCmp (builder, LLVMRealUGE, lhs, rhs, ""); break; case OP_FMIN: case OP_RMIN: v = LLVMBuildFCmp (builder, LLVMRealULE, lhs, rhs, ""); break; default: g_assert_not_reached (); break; } values [ins->dreg] = LLVMBuildSelect (builder, v, lhs, rhs, dname); break; } /* * See the ARM64 comment in mono/utils/atomic.h for an explanation of why this * hack is necessary (for now). */ #ifdef TARGET_ARM64 #define ARM64_ATOMIC_FENCE_FIX mono_llvm_build_fence (builder, LLVM_BARRIER_SEQ) #else #define ARM64_ATOMIC_FENCE_FIX #endif case OP_ATOMIC_EXCHANGE_I4: case OP_ATOMIC_EXCHANGE_I8: { LLVMValueRef args [2]; LLVMTypeRef t; if (ins->opcode == OP_ATOMIC_EXCHANGE_I4) t = LLVMInt32Type (); else t = LLVMInt64Type (); g_assert (ins->inst_offset == 0); args [0] = convert (ctx, lhs, LLVMPointerType (t, 0)); args [1] = convert (ctx, rhs, t); ARM64_ATOMIC_FENCE_FIX; values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_XCHG, args [0], args [1]); ARM64_ATOMIC_FENCE_FIX; break; } case OP_ATOMIC_ADD_I4: case OP_ATOMIC_ADD_I8: case OP_ATOMIC_AND_I4: case OP_ATOMIC_AND_I8: case OP_ATOMIC_OR_I4: case OP_ATOMIC_OR_I8: { LLVMValueRef args [2]; LLVMTypeRef t; if (ins->type == STACK_I4) t = LLVMInt32Type (); else t = LLVMInt64Type (); g_assert (ins->inst_offset == 0); args [0] = convert (ctx, lhs, LLVMPointerType (t, 0)); args [1] = convert (ctx, rhs, t); ARM64_ATOMIC_FENCE_FIX; if (ins->opcode == OP_ATOMIC_ADD_I4 || ins->opcode == OP_ATOMIC_ADD_I8) // Interlocked.Add returns new value (that's why we emit additional Add here) // see https://github.com/dotnet/runtime/pull/33102 values [ins->dreg] = LLVMBuildAdd (builder, mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_ADD, args [0], args [1]), args [1], dname); else if (ins->opcode == OP_ATOMIC_AND_I4 || ins->opcode == OP_ATOMIC_AND_I8) values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_AND, args [0], args [1]); else if (ins->opcode == OP_ATOMIC_OR_I4 || ins->opcode == OP_ATOMIC_OR_I8) values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_OR, args [0], args [1]); else g_assert_not_reached (); ARM64_ATOMIC_FENCE_FIX; break; } case OP_ATOMIC_CAS_I4: case OP_ATOMIC_CAS_I8: { LLVMValueRef args [3], val; LLVMTypeRef t; if (ins->opcode == OP_ATOMIC_CAS_I4) t = LLVMInt32Type (); else t = LLVMInt64Type (); args [0] = convert (ctx, lhs, LLVMPointerType (t, 0)); /* comparand */ args [1] = convert (ctx, values [ins->sreg3], t); /* new value */ args [2] = convert (ctx, values [ins->sreg2], t); ARM64_ATOMIC_FENCE_FIX; val = mono_llvm_build_cmpxchg (builder, args [0], args [1], args [2]); ARM64_ATOMIC_FENCE_FIX; /* cmpxchg returns a pair */ values [ins->dreg] = LLVMBuildExtractValue (builder, val, 0, ""); break; } case OP_MEMORY_BARRIER: { mono_llvm_build_fence (builder, (BarrierKind) ins->backend.memory_barrier_kind); break; } case OP_ATOMIC_LOAD_I1: case OP_ATOMIC_LOAD_I2: case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_LOAD_I8: case OP_ATOMIC_LOAD_U1: case OP_ATOMIC_LOAD_U2: 
case OP_ATOMIC_LOAD_U4: case OP_ATOMIC_LOAD_U8: case OP_ATOMIC_LOAD_R4: case OP_ATOMIC_LOAD_R8: { int size; gboolean sext, zext; LLVMTypeRef t; gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0; gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0; BarrierKind barrier = (BarrierKind) ins->backend.memory_barrier_kind; LLVMValueRef index, addr; t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext); if (sext || zext) dname = (char *)""; if (ins->inst_offset != 0) { index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, lhs, LLVMPointerType (t, 0)), &index, 1, ""); } else { addr = lhs; } addr = convert (ctx, addr, LLVMPointerType (t, 0)); ARM64_ATOMIC_FENCE_FIX; values [ins->dreg] = emit_load (ctx, bb, &builder, size, addr, lhs, dname, is_faulting, is_volatile, barrier); ARM64_ATOMIC_FENCE_FIX; if (sext) values [ins->dreg] = LLVMBuildSExt (builder, values [ins->dreg], LLVMInt32Type (), dname); else if (zext) values [ins->dreg] = LLVMBuildZExt (builder, values [ins->dreg], LLVMInt32Type (), dname); break; } case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_I8: case OP_ATOMIC_STORE_U1: case OP_ATOMIC_STORE_U2: case OP_ATOMIC_STORE_U4: case OP_ATOMIC_STORE_U8: case OP_ATOMIC_STORE_R4: case OP_ATOMIC_STORE_R8: { int size; gboolean sext, zext; LLVMTypeRef t; gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0; gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0; BarrierKind barrier = (BarrierKind) ins->backend.memory_barrier_kind; LLVMValueRef index, addr, value, base; if (!values [ins->inst_destbasereg]) { set_failure (ctx, "inst_destbasereg"); break; } t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext); base = values [ins->inst_destbasereg]; index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, ""); value = convert (ctx, values [ins->sreg1], t); ARM64_ATOMIC_FENCE_FIX; emit_store_general (ctx, bb, &builder, size, value, addr, base, is_faulting, is_volatile, barrier); ARM64_ATOMIC_FENCE_FIX; break; } case OP_RELAXED_NOP: { #if defined(TARGET_AMD64) || defined(TARGET_X86) call_intrins (ctx, INTRINS_SSE_PAUSE, NULL, ""); break; #else break; #endif } case OP_TLS_GET: { #if (defined(TARGET_AMD64) || defined(TARGET_X86)) && defined(__linux__) #ifdef TARGET_AMD64 // 257 == FS segment register LLVMTypeRef ptrtype = LLVMPointerType (IntPtrType (), 257); #else // 256 == GS segment register LLVMTypeRef ptrtype = LLVMPointerType (IntPtrType (), 256); #endif // FIXME: XEN values [ins->dreg] = LLVMBuildLoad (builder, LLVMBuildIntToPtr (builder, LLVMConstInt (IntPtrType (), ins->inst_offset, TRUE), ptrtype, ""), ""); #elif defined(TARGET_AMD64) && defined(TARGET_OSX) /* See mono_amd64_emit_tls_get () */ int offset = mono_amd64_get_tls_gs_offset () + (ins->inst_offset * 8); // 256 == GS segment register LLVMTypeRef ptrtype = LLVMPointerType (IntPtrType (), 256); values [ins->dreg] = LLVMBuildLoad (builder, LLVMBuildIntToPtr (builder, LLVMConstInt (IntPtrType (), offset, TRUE), ptrtype, ""), ""); #else set_failure (ctx, "opcode tls-get"); break; #endif break; } case OP_GC_SAFE_POINT: { LLVMValueRef val, cmp, callee, call; LLVMBasicBlockRef poll_bb, cont_bb; LLVMValueRef args [2]; static LLVMTypeRef sig; const char *icall_name = "mono_threads_state_poll"; /* * Create the cold wrapper around the icall, along with a managed method for it so * unwinding 
works. */ if (!cfg->compile_aot && !ctx->module->gc_poll_cold_wrapper_compiled) { ERROR_DECL (error); /* Compiling a method here is a bit ugly, but it works */ MonoMethod *wrapper = mono_marshal_get_llvm_func_wrapper (LLVM_FUNC_WRAPPER_GC_POLL); ctx->module->gc_poll_cold_wrapper_compiled = mono_jit_compile_method (wrapper, error); mono_error_assert_ok (error); } if (!sig) sig = LLVMFunctionType0 (LLVMVoidType (), FALSE); /* * if (!*sreg1) * mono_threads_state_poll (); */ val = mono_llvm_build_load (builder, convert (ctx, lhs, LLVMPointerType (IntPtrType (), 0)), "", TRUE); cmp = LLVMBuildICmp (builder, LLVMIntEQ, val, LLVMConstNull (LLVMTypeOf (val)), ""); poll_bb = gen_bb (ctx, "POLL_BB"); cont_bb = gen_bb (ctx, "CONT_BB"); args [0] = cmp; args [1] = LLVMConstInt (LLVMInt1Type (), 1, FALSE); cmp = call_intrins (ctx, INTRINS_EXPECT_I1, args, ""); mono_llvm_build_weighted_branch (builder, cmp, cont_bb, poll_bb, 1000, 1); ctx->builder = builder = create_builder (ctx); LLVMPositionBuilderAtEnd (builder, poll_bb); if (ctx->cfg->compile_aot) { callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll)); call = LLVMBuildCall (builder, callee, NULL, 0, ""); } else { callee = get_jit_callee (ctx, icall_name, sig, MONO_PATCH_INFO_ABS, ctx->module->gc_poll_cold_wrapper_compiled); call = LLVMBuildCall (builder, callee, NULL, 0, ""); set_call_cold_cconv (call); } LLVMBuildBr (builder, cont_bb); ctx->builder = builder = create_builder (ctx); LLVMPositionBuilderAtEnd (builder, cont_bb); ctx->bblocks [bb->block_num].end_bblock = cont_bb; break; } /* * Overflow opcodes. */ case OP_IADD_OVF: case OP_IADD_OVF_UN: case OP_ISUB_OVF: case OP_ISUB_OVF_UN: case OP_IMUL_OVF: case OP_IMUL_OVF_UN: case OP_LADD_OVF: case OP_LADD_OVF_UN: case OP_LSUB_OVF: case OP_LSUB_OVF_UN: case OP_LMUL_OVF: case OP_LMUL_OVF_UN: { LLVMValueRef args [2], val, ovf; IntrinsicId intrins; args [0] = convert (ctx, lhs, op_to_llvm_type (ins->opcode)); args [1] = convert (ctx, rhs, op_to_llvm_type (ins->opcode)); intrins = ovf_op_to_intrins (ins->opcode); val = call_intrins (ctx, intrins, args, ""); values [ins->dreg] = LLVMBuildExtractValue (builder, val, 0, dname); ovf = LLVMBuildExtractValue (builder, val, 1, ""); emit_cond_system_exception (ctx, bb, ins->inst_exc_name, ovf, FALSE); if (!ctx_ok (ctx)) break; builder = ctx->builder; break; } /* * Valuetypes. * We currently model them using arrays. Promotion to local vregs is * disabled for them in mono_handle_global_vregs () in the LLVM case, * so we always have an entry in cfg->varinfo for them. * FIXME: Is this needed ? 
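	 * (Editor's sketch, an assumption for illustration: a 16 byte struct local is
	 * modelled as an alloca such as
	 *   %addr = alloca [16 x i8]
	 * tracked in addresses [vreg], rather than as an SSA value in values [vreg].)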
*/ case OP_VZERO: { MonoClass *klass = ins->klass; if (!klass) { // FIXME: set_failure (ctx, "!klass"); break; } if (!addresses [ins->dreg]) addresses [ins->dreg] = build_named_alloca (ctx, m_class_get_byval_arg (klass), "vzero"); LLVMValueRef ptr = LLVMBuildBitCast (builder, addresses [ins->dreg], LLVMPointerType (LLVMInt8Type (), 0), ""); emit_memset (ctx, builder, ptr, const_int32 (mono_class_value_size (klass, NULL)), 0); break; } case OP_DUMMY_VZERO: break; case OP_STOREV_MEMBASE: case OP_LOADV_MEMBASE: case OP_VMOVE: { MonoClass *klass = ins->klass; LLVMValueRef src = NULL, dst, args [5]; gboolean done = FALSE; gboolean is_volatile = FALSE; if (!klass) { // FIXME: set_failure (ctx, "!klass"); break; } if (mini_is_gsharedvt_klass (klass)) { // FIXME: set_failure (ctx, "gsharedvt"); break; } switch (ins->opcode) { case OP_STOREV_MEMBASE: if (cfg->gen_write_barriers && m_class_has_references (klass) && ins->inst_destbasereg != cfg->frame_reg && LLVMGetInstructionOpcode (values [ins->inst_destbasereg]) != LLVMAlloca) { /* Decomposed earlier */ g_assert_not_reached (); break; } if (!addresses [ins->sreg1]) { /* SIMD */ g_assert (values [ins->sreg1]); dst = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_destbasereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (type_to_llvm_type (ctx, m_class_get_byval_arg (klass)), 0)); LLVMBuildStore (builder, values [ins->sreg1], dst); done = TRUE; } else { src = LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (LLVMInt8Type (), 0), ""); dst = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_destbasereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (LLVMInt8Type (), 0)); } break; case OP_LOADV_MEMBASE: if (!addresses [ins->dreg]) addresses [ins->dreg] = build_alloca (ctx, m_class_get_byval_arg (klass)); src = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_basereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (LLVMInt8Type (), 0)); dst = LLVMBuildBitCast (builder, addresses [ins->dreg], LLVMPointerType (LLVMInt8Type (), 0), ""); break; case OP_VMOVE: if (!addresses [ins->sreg1]) addresses [ins->sreg1] = build_alloca (ctx, m_class_get_byval_arg (klass)); if (!addresses [ins->dreg]) addresses [ins->dreg] = build_alloca (ctx, m_class_get_byval_arg (klass)); src = LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (LLVMInt8Type (), 0), ""); dst = LLVMBuildBitCast (builder, addresses [ins->dreg], LLVMPointerType (LLVMInt8Type (), 0), ""); break; default: g_assert_not_reached (); } if (!ctx_ok (ctx)) break; if (done) break; #ifdef TARGET_WASM is_volatile = m_class_has_references (klass); #endif int aindex = 0; args [aindex ++] = dst; args [aindex ++] = src; args [aindex ++] = LLVMConstInt (LLVMInt32Type (), mono_class_value_size (klass, NULL), FALSE); args [aindex ++] = LLVMConstInt (LLVMInt1Type (), is_volatile ? 
1 : 0, FALSE); call_intrins (ctx, INTRINS_MEMCPY, args, ""); break; } case OP_LLVM_OUTARG_VT: { LLVMArgInfo *ainfo = (LLVMArgInfo*)ins->inst_p0; MonoType *t = mini_get_underlying_type (ins->inst_vtype); if (ainfo->storage == LLVMArgGsharedvtVariable) { MonoInst *var = get_vreg_to_inst (cfg, ins->sreg1); if (var && var->opcode == OP_GSHAREDVT_LOCAL) { addresses [ins->dreg] = convert (ctx, emit_gsharedvt_ldaddr (ctx, var->dreg), LLVMPointerType (IntPtrType (), 0)); } else { g_assert (addresses [ins->sreg1]); addresses [ins->dreg] = addresses [ins->sreg1]; } } else if (ainfo->storage == LLVMArgGsharedvtFixed) { if (!addresses [ins->sreg1]) { addresses [ins->sreg1] = build_alloca (ctx, t); g_assert (values [ins->sreg1]); } LLVMBuildStore (builder, convert (ctx, values [ins->sreg1], LLVMGetElementType (LLVMTypeOf (addresses [ins->sreg1]))), addresses [ins->sreg1]); addresses [ins->dreg] = addresses [ins->sreg1]; } else { if (!addresses [ins->sreg1]) { addresses [ins->sreg1] = build_named_alloca (ctx, t, "llvm_outarg_vt"); g_assert (values [ins->sreg1]); LLVMBuildStore (builder, convert (ctx, values [ins->sreg1], type_to_llvm_type (ctx, t)), addresses [ins->sreg1]); addresses [ins->dreg] = addresses [ins->sreg1]; } else if (ainfo->storage == LLVMArgVtypeAddr || values [ins->sreg1] == addresses [ins->sreg1]) { /* LLVMArgVtypeByRef/LLVMArgVtypeAddr, have to make a copy */ addresses [ins->dreg] = build_alloca (ctx, t); LLVMValueRef v = LLVMBuildLoad (builder, addresses [ins->sreg1], "llvm_outarg_vt_copy"); LLVMBuildStore (builder, convert (ctx, v, type_to_llvm_type (ctx, t)), addresses [ins->dreg]); } else { if (values [ins->sreg1]) { LLVMTypeRef src_t = LLVMTypeOf (values [ins->sreg1]); LLVMValueRef dst = convert (ctx, addresses [ins->sreg1], LLVMPointerType (src_t, 0)); LLVMBuildStore (builder, values [ins->sreg1], dst); } addresses [ins->dreg] = addresses [ins->sreg1]; } } break; } case OP_OBJC_GET_SELECTOR: { const char *name = (const char*)ins->inst_p0; LLVMValueRef var; if (!ctx->module->objc_selector_to_var) { ctx->module->objc_selector_to_var = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); LLVMValueRef info_var = LLVMAddGlobal (ctx->lmodule, LLVMArrayType (LLVMInt8Type (), 8), "@OBJC_IMAGE_INFO"); int32_t objc_imageinfo [] = { 0, 16 }; LLVMSetInitializer (info_var, mono_llvm_create_constant_data_array ((uint8_t *) &objc_imageinfo, 8)); LLVMSetLinkage (info_var, LLVMPrivateLinkage); LLVMSetExternallyInitialized (info_var, TRUE); LLVMSetSection (info_var, "__DATA, __objc_imageinfo,regular,no_dead_strip"); LLVMSetAlignment (info_var, sizeof (target_mgreg_t)); mark_as_used (ctx->module, info_var); } var = (LLVMValueRef)g_hash_table_lookup (ctx->module->objc_selector_to_var, name); if (!var) { LLVMValueRef indexes [16]; LLVMValueRef name_var = LLVMAddGlobal (ctx->lmodule, LLVMArrayType (LLVMInt8Type (), strlen (name) + 1), "@OBJC_METH_VAR_NAME_"); LLVMSetInitializer (name_var, mono_llvm_create_constant_data_array ((const uint8_t*)name, strlen (name) + 1)); LLVMSetLinkage (name_var, LLVMPrivateLinkage); LLVMSetSection (name_var, "__TEXT,__objc_methname,cstring_literals"); mark_as_used (ctx->module, name_var); LLVMValueRef ref_var = LLVMAddGlobal (ctx->lmodule, LLVMPointerType (LLVMInt8Type (), 0), "@OBJC_SELECTOR_REFERENCES_"); indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, 0); indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, 0); LLVMSetInitializer (ref_var, LLVMConstGEP (name_var, indexes, 2)); LLVMSetLinkage (ref_var, LLVMPrivateLinkage); LLVMSetExternallyInitialized 
(ref_var, TRUE); LLVMSetSection (ref_var, "__DATA, __objc_selrefs, literal_pointers, no_dead_strip"); LLVMSetAlignment (ref_var, sizeof (target_mgreg_t)); mark_as_used (ctx->module, ref_var); g_hash_table_insert (ctx->module->objc_selector_to_var, g_strdup (name), ref_var); var = ref_var; } values [ins->dreg] = LLVMBuildLoad (builder, var, ""); break; } #if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_WASM) case OP_EXTRACTX_U2: case OP_XEXTRACT_I1: case OP_XEXTRACT_I2: case OP_XEXTRACT_I4: case OP_XEXTRACT_I8: case OP_XEXTRACT_R4: case OP_XEXTRACT_R8: case OP_EXTRACT_I1: case OP_EXTRACT_I2: case OP_EXTRACT_I4: case OP_EXTRACT_I8: case OP_EXTRACT_R4: case OP_EXTRACT_R8: { MonoTypeEnum mono_elt_t = inst_c1_type (ins); LLVMTypeRef elt_t = primitive_type_to_llvm_type (mono_elt_t); gboolean sext = FALSE; gboolean zext = FALSE; switch (mono_elt_t) { case MONO_TYPE_I1: case MONO_TYPE_I2: sext = TRUE; break; case MONO_TYPE_U1: case MONO_TYPE_U2: zext = TRUE; break; } LLVMValueRef element_ix = NULL; switch (ins->opcode) { case OP_XEXTRACT_I1: case OP_XEXTRACT_I2: case OP_XEXTRACT_I4: case OP_XEXTRACT_R4: case OP_XEXTRACT_R8: case OP_XEXTRACT_I8: element_ix = rhs; break; default: element_ix = const_int32 (ins->inst_c0); } LLVMTypeRef lhs_t = LLVMTypeOf (lhs); int vec_width = mono_llvm_get_prim_size_bits (lhs_t); int elem_width = mono_llvm_get_prim_size_bits (elt_t); int elements = vec_width / elem_width; element_ix = LLVMBuildAnd (builder, element_ix, const_int32 (elements - 1), "extract"); LLVMTypeRef ret_t = LLVMVectorType (elt_t, elements); LLVMValueRef src = LLVMBuildBitCast (builder, lhs, ret_t, "extract"); LLVMValueRef result = LLVMBuildExtractElement (builder, src, element_ix, "extract"); if (zext) result = LLVMBuildZExt (builder, result, i4_t, "extract_zext"); else if (sext) result = LLVMBuildSExt (builder, result, i4_t, "extract_sext"); values [ins->dreg] = result; break; } case OP_XINSERT_I1: case OP_XINSERT_I2: case OP_XINSERT_I4: case OP_XINSERT_I8: case OP_XINSERT_R4: case OP_XINSERT_R8: { MonoTypeEnum primty = inst_c1_type (ins); LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMTypeRef elem_t = LLVMGetElementType (ret_t); int elements = LLVMGetVectorSize (ret_t); LLVMValueRef element_ix = LLVMBuildAnd (builder, arg3, const_int32 (elements - 1), "xinsert"); LLVMValueRef vec = convert (ctx, lhs, ret_t); LLVMValueRef val = convert_full (ctx, rhs, elem_t, primitive_type_is_unsigned (primty)); LLVMValueRef result = LLVMBuildInsertElement (builder, vec, val, element_ix, "xinsert"); values [ins->dreg] = result; break; } case OP_EXPAND_I1: case OP_EXPAND_I2: case OP_EXPAND_I4: case OP_EXPAND_I8: case OP_EXPAND_R4: case OP_EXPAND_R8: { LLVMTypeRef t; LLVMValueRef mask [MAX_VECTOR_ELEMS], v; int i; t = simd_class_to_llvm_type (ctx, ins->klass); for (i = 0; i < MAX_VECTOR_ELEMS; ++i) mask [i] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); v = convert (ctx, values [ins->sreg1], LLVMGetElementType (t)); values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (t), v, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); values [ins->dreg] = LLVMBuildShuffleVector (builder, values [ins->dreg], LLVMGetUndef (t), LLVMConstVector (mask, LLVMGetVectorSize (t)), ""); break; } case OP_XZERO: { values [ins->dreg] = LLVMConstNull (type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass))); break; } case OP_LOADX_MEMBASE: { LLVMTypeRef t = type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass)); LLVMValueRef src; src = convert (ctx, 
LLVMBuildAdd (builder, convert (ctx, values [ins->inst_basereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (t, 0)); values [ins->dreg] = mono_llvm_build_aligned_load (builder, src, "", FALSE, 1); break; } case OP_STOREX_MEMBASE: { LLVMTypeRef t = LLVMTypeOf (values [ins->sreg1]); LLVMValueRef dest; dest = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_destbasereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (t, 0)); mono_llvm_build_aligned_store (builder, values [ins->sreg1], dest, FALSE, 1); break; } case OP_XBINOP: case OP_XBINOP_SCALAR: case OP_XBINOP_BYSCALAR: { gboolean scalar = ins->opcode == OP_XBINOP_SCALAR; gboolean byscalar = ins->opcode == OP_XBINOP_BYSCALAR; LLVMValueRef result = NULL; LLVMValueRef args [] = { lhs, rhs }; if (scalar) for (int i = 0; i < 2; ++i) args [i] = scalar_from_vector (ctx, args [i]); if (byscalar) { LLVMTypeRef t = LLVMTypeOf (args [0]); unsigned int elems = LLVMGetVectorSize (t); args [1] = broadcast_element (ctx, scalar_from_vector (ctx, args [1]), elems); } LLVMValueRef l = args [0]; LLVMValueRef r = args [1]; switch (ins->inst_c0) { case OP_IADD: result = LLVMBuildAdd (builder, l, r, ""); break; case OP_ISUB: result = LLVMBuildSub (builder, l, r, ""); break; case OP_IMUL: result = LLVMBuildMul (builder, l, r, ""); break; case OP_IAND: result = LLVMBuildAnd (builder, l, r, ""); break; case OP_IOR: result = LLVMBuildOr (builder, l, r, ""); break; case OP_IXOR: result = LLVMBuildXor (builder, l, r, ""); break; case OP_FADD: result = LLVMBuildFAdd (builder, l, r, ""); break; case OP_FSUB: result = LLVMBuildFSub (builder, l, r, ""); break; case OP_FMUL: result = LLVMBuildFMul (builder, l, r, ""); break; case OP_FDIV: result = LLVMBuildFDiv (builder, l, r, ""); break; case OP_FMAX: case OP_FMIN: { LLVMValueRef args [] = { l, r }; #if defined(TARGET_X86) || defined(TARGET_AMD64) LLVMTypeRef t = LLVMTypeOf (l); LLVMTypeRef elem_t = LLVMGetElementType (t); unsigned int elems = LLVMGetVectorSize (t); unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t); unsigned int v_size = elems * elem_bits; if (v_size == 128) { gboolean is_r4 = ins->inst_c1 == MONO_TYPE_R4; int iid = -1; if (ins->inst_c0 == OP_FMAX) { if (elems == 1) iid = is_r4 ? INTRINS_SSE_MAXSS : INTRINS_SSE_MAXSD; else iid = is_r4 ? INTRINS_SSE_MAXPS : INTRINS_SSE_MAXPD; } else { if (elems == 1) iid = is_r4 ? INTRINS_SSE_MINSS : INTRINS_SSE_MINSD; else iid = is_r4 ? INTRINS_SSE_MINPS : INTRINS_SSE_MINPD; } result = call_intrins (ctx, iid, args, dname); } else { LLVMRealPredicate op = ins->inst_c0 == OP_FMAX ? LLVMRealUGE : LLVMRealULE; LLVMValueRef cmp = LLVMBuildFCmp (builder, op, l, r, ""); result = LLVMBuildSelect (builder, cmp, l, r, ""); } #elif defined(TARGET_ARM64) IntrinsicId iid = ins->inst_c0 == OP_FMAX ? INTRINS_AARCH64_ADV_SIMD_FMAX : INTRINS_AARCH64_ADV_SIMD_FMIN; llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); result = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); #else NOT_IMPLEMENTED; #endif break; } case OP_IMAX: case OP_IMIN: { gboolean is_unsigned = ins->inst_c1 == MONO_TYPE_U1 || ins->inst_c1 == MONO_TYPE_U2 || ins->inst_c1 == MONO_TYPE_U4 || ins->inst_c1 == MONO_TYPE_U8; LLVMIntPredicate op; switch (ins->inst_c0) { case OP_IMAX: op = is_unsigned ? LLVMIntUGT : LLVMIntSGT; break; case OP_IMIN: op = is_unsigned ? 
LLVMIntULT : LLVMIntSLT; break; default: g_assert_not_reached (); } #if defined(TARGET_ARM64) if ((ins->inst_c1 == MONO_TYPE_U8) || (ins->inst_c1 == MONO_TYPE_I8)) { LLVMValueRef cmp = LLVMBuildICmp (builder, op, l, r, ""); result = LLVMBuildSelect (builder, cmp, l, r, ""); } else { IntrinsicId iid; switch (ins->inst_c0) { case OP_IMAX: iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UMAX : INTRINS_AARCH64_ADV_SIMD_SMAX; break; case OP_IMIN: iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UMIN : INTRINS_AARCH64_ADV_SIMD_SMIN; break; default: g_assert_not_reached (); } LLVMValueRef args [] = { l, r }; llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); result = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); } #else LLVMValueRef cmp = LLVMBuildICmp (builder, op, l, r, ""); result = LLVMBuildSelect (builder, cmp, l, r, ""); #endif break; } default: g_assert_not_reached (); } if (scalar) result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result); values [ins->dreg] = result; break; } case OP_XBINOP_FORCEINT: { LLVMTypeRef t = LLVMTypeOf (lhs); LLVMTypeRef elem_t = LLVMGetElementType (t); unsigned int elems = LLVMGetVectorSize (t); unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t); LLVMTypeRef intermediate_elem_t = LLVMIntType (elem_bits); LLVMTypeRef intermediate_t = LLVMVectorType (intermediate_elem_t, elems); LLVMValueRef lhs_int = convert (ctx, lhs, intermediate_t); LLVMValueRef rhs_int = convert (ctx, rhs, intermediate_t); LLVMValueRef result = NULL; switch (ins->inst_c0) { case XBINOP_FORCEINT_and: result = LLVMBuildAnd (builder, lhs_int, rhs_int, ""); break; case XBINOP_FORCEINT_or: result = LLVMBuildOr (builder, lhs_int, rhs_int, ""); break; case XBINOP_FORCEINT_ornot: result = LLVMBuildNot (builder, rhs_int, ""); result = LLVMBuildOr (builder, result, lhs_int, ""); break; case XBINOP_FORCEINT_xor: result = LLVMBuildXor (builder, lhs_int, rhs_int, ""); break; } values [ins->dreg] = LLVMBuildBitCast (builder, result, t, ""); break; } case OP_CREATE_SCALAR: case OP_CREATE_SCALAR_UNSAFE: { MonoTypeEnum primty = inst_c1_type (ins); LLVMTypeRef type = simd_class_to_llvm_type (ctx, ins->klass); // use undef vector (most likely empty but may contain garbage values) for OP_CREATE_SCALAR_UNSAFE // and zero one for OP_CREATE_SCALAR LLVMValueRef vector = (ins->opcode == OP_CREATE_SCALAR) ? 
LLVMConstNull (type) : LLVMGetUndef (type); LLVMValueRef val = convert_full (ctx, lhs, primitive_type_to_llvm_type (primty), primitive_type_is_unsigned (primty)); values [ins->dreg] = LLVMBuildInsertElement (builder, vector, val, const_int32 (0), ""); break; } case OP_INSERT_I1: values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt8Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname); break; case OP_INSERT_I2: values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt16Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname); break; case OP_INSERT_I4: values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt32Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname); break; case OP_INSERT_I8: values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt64Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname); break; case OP_INSERT_R4: values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMFloatType ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname); break; case OP_INSERT_R8: values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMDoubleType ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname); break; case OP_XCAST: { LLVMTypeRef t = simd_class_to_llvm_type (ctx, ins->klass); values [ins->dreg] = LLVMBuildBitCast (builder, lhs, t, ""); break; } case OP_XCONCAT: { values [ins->dreg] = concatenate_vectors (ctx, lhs, rhs); break; } case OP_XINSERT_LOWER: case OP_XINSERT_UPPER: { const char *oname = ins->opcode == OP_XINSERT_LOWER ? "xinsert_lower" : "xinsert_upper"; int ix = ins->opcode == OP_XINSERT_LOWER ? 0 : 1; LLVMTypeRef src_t = LLVMTypeOf (lhs); unsigned int width = mono_llvm_get_prim_size_bits (src_t); LLVMTypeRef int_t = LLVMIntType (width / 2); LLVMTypeRef intvec_t = LLVMVectorType (int_t, 2); LLVMValueRef insval = LLVMBuildBitCast (builder, rhs, int_t, oname); LLVMValueRef val = LLVMBuildBitCast (builder, lhs, intvec_t, oname); val = LLVMBuildInsertElement (builder, val, insval, const_int32 (ix), oname); val = LLVMBuildBitCast (builder, val, src_t, oname); values [ins->dreg] = val; break; } case OP_XLOWER: case OP_XUPPER: { const char *oname = ins->opcode == OP_XLOWER ? "xlower" : "xupper"; LLVMTypeRef src_t = LLVMTypeOf (lhs); unsigned int elems = LLVMGetVectorSize (src_t); g_assert (elems >= 2 && elems <= MAX_VECTOR_ELEMS); unsigned int ret_elems = elems / 2; int startix = ins->opcode == OP_XLOWER ? 0 : ret_elems; LLVMValueRef val = LLVMBuildShuffleVector (builder, lhs, LLVMGetUndef (src_t), create_const_vector_i32 (&mask_0_incr_1 [startix], ret_elems), oname); values [ins->dreg] = val; break; } case OP_XWIDEN: case OP_XWIDEN_UNSAFE: { const char *oname = ins->opcode == OP_XWIDEN ? "xwiden" : "xwiden_unsafe"; LLVMTypeRef src_t = LLVMTypeOf (lhs); unsigned int elems = LLVMGetVectorSize (src_t); g_assert (elems <= MAX_VECTOR_ELEMS / 2); unsigned int ret_elems = elems * 2; LLVMValueRef upper = ins->opcode == OP_XWIDEN ? 
LLVMConstNull (src_t) : LLVMGetUndef (src_t); LLVMValueRef val = LLVMBuildShuffleVector (builder, lhs, upper, create_const_vector_i32 (mask_0_incr_1, ret_elems), oname); values [ins->dreg] = val; break; } #endif // defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_WASM) #if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_WASM) case OP_PADDB: case OP_PADDW: case OP_PADDD: case OP_PADDQ: values [ins->dreg] = LLVMBuildAdd (builder, lhs, rhs, ""); break; case OP_ADDPD: case OP_ADDPS: values [ins->dreg] = LLVMBuildFAdd (builder, lhs, rhs, ""); break; case OP_PSUBB: case OP_PSUBW: case OP_PSUBD: case OP_PSUBQ: values [ins->dreg] = LLVMBuildSub (builder, lhs, rhs, ""); break; case OP_SUBPD: case OP_SUBPS: values [ins->dreg] = LLVMBuildFSub (builder, lhs, rhs, ""); break; case OP_MULPD: case OP_MULPS: values [ins->dreg] = LLVMBuildFMul (builder, lhs, rhs, ""); break; case OP_DIVPD: case OP_DIVPS: values [ins->dreg] = LLVMBuildFDiv (builder, lhs, rhs, ""); break; case OP_PAND: values [ins->dreg] = LLVMBuildAnd (builder, lhs, rhs, ""); break; case OP_POR: values [ins->dreg] = LLVMBuildOr (builder, lhs, rhs, ""); break; case OP_PXOR: values [ins->dreg] = LLVMBuildXor (builder, lhs, rhs, ""); break; case OP_PMULW: case OP_PMULD: values [ins->dreg] = LLVMBuildMul (builder, lhs, rhs, ""); break; case OP_ANDPS: case OP_ANDNPS: case OP_ORPS: case OP_XORPS: case OP_ANDPD: case OP_ANDNPD: case OP_ORPD: case OP_XORPD: { LLVMTypeRef t, rt; LLVMValueRef v = NULL; switch (ins->opcode) { case OP_ANDPS: case OP_ANDNPS: case OP_ORPS: case OP_XORPS: t = LLVMVectorType (LLVMInt32Type (), 4); rt = LLVMVectorType (LLVMFloatType (), 4); break; case OP_ANDPD: case OP_ANDNPD: case OP_ORPD: case OP_XORPD: t = LLVMVectorType (LLVMInt64Type (), 2); rt = LLVMVectorType (LLVMDoubleType (), 2); break; default: t = LLVMInt32Type (); rt = LLVMInt32Type (); g_assert_not_reached (); } lhs = LLVMBuildBitCast (builder, lhs, t, ""); rhs = LLVMBuildBitCast (builder, rhs, t, ""); switch (ins->opcode) { case OP_ANDPS: case OP_ANDPD: v = LLVMBuildAnd (builder, lhs, rhs, ""); break; case OP_ORPS: case OP_ORPD: v = LLVMBuildOr (builder, lhs, rhs, ""); break; case OP_XORPS: case OP_XORPD: v = LLVMBuildXor (builder, lhs, rhs, ""); break; case OP_ANDNPS: case OP_ANDNPD: v = LLVMBuildAnd (builder, rhs, LLVMBuildNot (builder, lhs, ""), ""); break; } values [ins->dreg] = LLVMBuildBitCast (builder, v, rt, ""); break; } case OP_PMIND_UN: case OP_PMINW_UN: case OP_PMINB_UN: { LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntULT, lhs, rhs, ""); values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, ""); break; } case OP_PMAXD_UN: case OP_PMAXW_UN: case OP_PMAXB_UN: { LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntUGT, lhs, rhs, ""); values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, ""); break; } case OP_PMINW: { LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSLT, lhs, rhs, ""); values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, ""); break; } case OP_PMAXW: { LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSGT, lhs, rhs, ""); values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, ""); break; } case OP_PAVGB_UN: case OP_PAVGW_UN: { LLVMValueRef ones_vec; LLVMValueRef ones [MAX_VECTOR_ELEMS]; int vector_size = LLVMGetVectorSize (LLVMTypeOf (lhs)); LLVMTypeRef ext_elem_type = vector_size == 16 ? 
LLVMInt16Type () : LLVMInt32Type (); for (int i = 0; i < MAX_VECTOR_ELEMS; ++i) ones [i] = LLVMConstInt (ext_elem_type, 1, FALSE); ones_vec = LLVMConstVector (ones, vector_size); LLVMValueRef val; LLVMTypeRef ext_type = LLVMVectorType (ext_elem_type, vector_size); /* Have to increase the vector element size to prevent overflows */ /* res = trunc ((zext (lhs) + zext (rhs) + 1) >> 1) */ val = LLVMBuildAdd (builder, LLVMBuildZExt (builder, lhs, ext_type, ""), LLVMBuildZExt (builder, rhs, ext_type, ""), ""); val = LLVMBuildAdd (builder, val, ones_vec, ""); val = LLVMBuildLShr (builder, val, ones_vec, ""); values [ins->dreg] = LLVMBuildTrunc (builder, val, LLVMTypeOf (lhs), ""); break; } case OP_PCMPEQB: case OP_PCMPEQW: case OP_PCMPEQD: case OP_PCMPEQQ: case OP_PCMPGTB: { LLVMValueRef pcmp; LLVMTypeRef retType; LLVMIntPredicate cmpOp; if (ins->opcode == OP_PCMPGTB) cmpOp = LLVMIntSGT; else cmpOp = LLVMIntEQ; if (LLVMTypeOf (lhs) == LLVMTypeOf (rhs)) { pcmp = LLVMBuildICmp (builder, cmpOp, lhs, rhs, ""); retType = LLVMTypeOf (lhs); } else { LLVMTypeRef flatType = LLVMVectorType (LLVMInt8Type (), 16); LLVMValueRef flatRHS = convert (ctx, rhs, flatType); LLVMValueRef flatLHS = convert (ctx, lhs, flatType); pcmp = LLVMBuildICmp (builder, cmpOp, flatLHS, flatRHS, ""); retType = flatType; } values [ins->dreg] = LLVMBuildSExt (builder, pcmp, retType, ""); break; } case OP_CVTDQ2PS: { LLVMValueRef i4 = LLVMBuildBitCast (builder, lhs, sse_i4_t, ""); values [ins->dreg] = LLVMBuildSIToFP (builder, i4, sse_r4_t, dname); break; } case OP_CVTDQ2PD: { LLVMValueRef indexes [16]; indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); indexes [1] = LLVMConstInt (LLVMInt32Type (), 1, FALSE); LLVMValueRef mask = LLVMConstVector (indexes, 2); LLVMValueRef shuffle = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), mask, ""); values [ins->dreg] = LLVMBuildSIToFP (builder, shuffle, LLVMVectorType (LLVMDoubleType (), 2), dname); break; } case OP_SSE2_CVTSS2SD: { LLVMValueRef rhs_elem = LLVMBuildExtractElement (builder, rhs, const_int32 (0), ""); LLVMValueRef fpext = LLVMBuildFPExt (builder, rhs_elem, LLVMDoubleType (), dname); values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, fpext, const_int32 (0), ""); break; } case OP_CVTPS2PD: { LLVMValueRef indexes [16]; indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); indexes [1] = LLVMConstInt (LLVMInt32Type (), 1, FALSE); LLVMValueRef mask = LLVMConstVector (indexes, 2); LLVMValueRef shuffle = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), mask, ""); values [ins->dreg] = LLVMBuildFPExt (builder, shuffle, LLVMVectorType (LLVMDoubleType (), 2), dname); break; } case OP_CVTTPS2DQ: values [ins->dreg] = LLVMBuildFPToSI (builder, lhs, LLVMVectorType (LLVMInt32Type (), 4), dname); break; case OP_CVTPD2DQ: case OP_CVTPS2DQ: case OP_CVTPD2PS: case OP_CVTTPD2DQ: { LLVMValueRef v; v = convert (ctx, values [ins->sreg1], simd_op_to_llvm_type (ins->opcode)); values [ins->dreg] = call_intrins (ctx, simd_ins_to_intrins (ins->opcode), &v, dname); break; } case OP_COMPPS: case OP_COMPPD: { LLVMRealPredicate op; switch (ins->inst_c0) { case SIMD_COMP_EQ: op = LLVMRealOEQ; break; case SIMD_COMP_LT: op = LLVMRealOLT; break; case SIMD_COMP_LE: op = LLVMRealOLE; break; case SIMD_COMP_UNORD: op = LLVMRealUNO; break; case SIMD_COMP_NEQ: op = LLVMRealUNE; break; case SIMD_COMP_NLT: op = LLVMRealUGE; break; case SIMD_COMP_NLE: op = LLVMRealUGT; break; case SIMD_COMP_ORD: op = LLVMRealORD; break; default: g_assert_not_reached (); } 
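/* Sign-extend the i1 compare mask so each lane becomes all-ones/all-zeros, matching the CMPPS/CMPPD result semantics, then bitcast back to the operand type. */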
LLVMValueRef cmp = LLVMBuildFCmp (builder, op, lhs, rhs, ""); if (ins->opcode == OP_COMPPD) values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt64Type (), 2), ""), LLVMTypeOf (lhs), ""); else values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt32Type (), 4), ""), LLVMTypeOf (lhs), ""); break; } case OP_ICONV_TO_X: /* This is only used for implementing shifts by non-immediate */ values [ins->dreg] = lhs; break; case OP_SHUFPS: case OP_SHUFPD: case OP_PSHUFLED: case OP_PSHUFLEW_LOW: case OP_PSHUFLEW_HIGH: { int mask [16]; LLVMValueRef v1 = NULL, v2 = NULL, mask_values [16]; int i, mask_size = 0; int imask = ins->inst_c0; /* Convert the x86 shuffle mask to LLVM's */ switch (ins->opcode) { case OP_SHUFPS: mask_size = 4; mask [0] = ((imask >> 0) & 3); mask [1] = ((imask >> 2) & 3); mask [2] = ((imask >> 4) & 3) + 4; mask [3] = ((imask >> 6) & 3) + 4; v1 = values [ins->sreg1]; v2 = values [ins->sreg2]; break; case OP_SHUFPD: mask_size = 2; mask [0] = ((imask >> 0) & 1); mask [1] = ((imask >> 1) & 1) + 2; v1 = values [ins->sreg1]; v2 = values [ins->sreg2]; break; case OP_PSHUFLEW_LOW: mask_size = 8; mask [0] = ((imask >> 0) & 3); mask [1] = ((imask >> 2) & 3); mask [2] = ((imask >> 4) & 3); mask [3] = ((imask >> 6) & 3); mask [4] = 4 + 0; mask [5] = 4 + 1; mask [6] = 4 + 2; mask [7] = 4 + 3; v1 = values [ins->sreg1]; v2 = LLVMGetUndef (LLVMTypeOf (v1)); break; case OP_PSHUFLEW_HIGH: mask_size = 8; mask [0] = 0; mask [1] = 1; mask [2] = 2; mask [3] = 3; mask [4] = 4 + ((imask >> 0) & 3); mask [5] = 4 + ((imask >> 2) & 3); mask [6] = 4 + ((imask >> 4) & 3); mask [7] = 4 + ((imask >> 6) & 3); v1 = values [ins->sreg1]; v2 = LLVMGetUndef (LLVMTypeOf (v1)); break; case OP_PSHUFLED: mask_size = 4; mask [0] = ((imask >> 0) & 3); mask [1] = ((imask >> 2) & 3); mask [2] = ((imask >> 4) & 3); mask [3] = ((imask >> 6) & 3); v1 = values [ins->sreg1]; v2 = LLVMGetUndef (LLVMTypeOf (v1)); break; default: g_assert_not_reached (); } for (i = 0; i < mask_size; ++i) mask_values [i] = LLVMConstInt (LLVMInt32Type (), mask [i], FALSE); values [ins->dreg] = LLVMBuildShuffleVector (builder, v1, v2, LLVMConstVector (mask_values, mask_size), dname); break; } case OP_UNPACK_LOWB: case OP_UNPACK_LOWW: case OP_UNPACK_LOWD: case OP_UNPACK_LOWQ: case OP_UNPACK_LOWPS: case OP_UNPACK_LOWPD: case OP_UNPACK_HIGHB: case OP_UNPACK_HIGHW: case OP_UNPACK_HIGHD: case OP_UNPACK_HIGHQ: case OP_UNPACK_HIGHPS: case OP_UNPACK_HIGHPD: { int mask [16]; LLVMValueRef mask_values [16]; int i, mask_size = 0; gboolean low = FALSE; switch (ins->opcode) { case OP_UNPACK_LOWB: mask_size = 16; low = TRUE; break; case OP_UNPACK_LOWW: mask_size = 8; low = TRUE; break; case OP_UNPACK_LOWD: case OP_UNPACK_LOWPS: mask_size = 4; low = TRUE; break; case OP_UNPACK_LOWQ: case OP_UNPACK_LOWPD: mask_size = 2; low = TRUE; break; case OP_UNPACK_HIGHB: mask_size = 16; break; case OP_UNPACK_HIGHW: mask_size = 8; break; case OP_UNPACK_HIGHD: case OP_UNPACK_HIGHPS: mask_size = 4; break; case OP_UNPACK_HIGHQ: case OP_UNPACK_HIGHPD: mask_size = 2; break; default: g_assert_not_reached (); } if (low) { for (i = 0; i < (mask_size / 2); ++i) { mask [(i * 2)] = i; mask [(i * 2) + 1] = mask_size + i; } } else { for (i = 0; i < (mask_size / 2); ++i) { mask [(i * 2)] = (mask_size / 2) + i; mask [(i * 2) + 1] = mask_size + (mask_size / 2) + i; } } for (i = 0; i < mask_size; ++i) mask_values [i] = LLVMConstInt (LLVMInt32Type (), mask [i], FALSE); values [ins->dreg] = 
LLVMBuildShuffleVector (builder, values [ins->sreg1], values [ins->sreg2], LLVMConstVector (mask_values, mask_size), dname); break; } case OP_DUPPD: { LLVMTypeRef t = simd_op_to_llvm_type (ins->opcode); LLVMValueRef v, val; v = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); val = LLVMConstNull (t); val = LLVMBuildInsertElement (builder, val, v, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); val = LLVMBuildInsertElement (builder, val, v, LLVMConstInt (LLVMInt32Type (), 1, FALSE), dname); values [ins->dreg] = val; break; } case OP_DUPPS_LOW: case OP_DUPPS_HIGH: { LLVMTypeRef t = simd_op_to_llvm_type (ins->opcode); LLVMValueRef v1, v2, val; if (ins->opcode == OP_DUPPS_LOW) { v1 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); v2 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 2, FALSE), ""); } else { v1 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 1, FALSE), ""); v2 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 3, FALSE), ""); } val = LLVMConstNull (t); val = LLVMBuildInsertElement (builder, val, v1, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); val = LLVMBuildInsertElement (builder, val, v1, LLVMConstInt (LLVMInt32Type (), 1, FALSE), ""); val = LLVMBuildInsertElement (builder, val, v2, LLVMConstInt (LLVMInt32Type (), 2, FALSE), ""); val = LLVMBuildInsertElement (builder, val, v2, LLVMConstInt (LLVMInt32Type (), 3, FALSE), ""); values [ins->dreg] = val; break; } case OP_FCONV_TO_R8_X: { values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (sse_r8_t), lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); break; } case OP_FCONV_TO_R4_X: { values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (sse_r4_t), lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); break; } #if defined(TARGET_X86) || defined(TARGET_AMD64) case OP_SSE_MOVMSK: { LLVMValueRef args [1]; if (ins->inst_c1 == MONO_TYPE_R4) { args [0] = lhs; values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_MOVMSK_PS, args, dname); } else if (ins->inst_c1 == MONO_TYPE_R8) { args [0] = lhs; values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_MOVMSK_PD, args, dname); } else { args [0] = convert (ctx, lhs, sse_i1_t); values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_PMOVMSKB, args, dname); } break; } case OP_SSE_MOVS: case OP_SSE_MOVS2: { if (ins->inst_c1 == MONO_TYPE_R4) values [ins->dreg] = LLVMBuildShuffleVector (builder, rhs, lhs, create_const_vector_4_i32 (0, 5, 6, 7), ""); else if (ins->inst_c1 == MONO_TYPE_R8) values [ins->dreg] = LLVMBuildShuffleVector (builder, rhs, lhs, create_const_vector_2_i32 (0, 3), ""); else if (ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8) values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, LLVMConstInt (LLVMInt64Type (), 0, FALSE), LLVMConstInt (LLVMInt32Type (), 1, FALSE), ""); else g_assert_not_reached (); // will be needed for other types later break; } case OP_SSE_MOVEHL: { if (ins->inst_c1 == MONO_TYPE_R4) values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (6, 7, 2, 3), ""); else g_assert_not_reached (); break; } case OP_SSE_MOVELH: { if (ins->inst_c1 == MONO_TYPE_R4) values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (0, 1, 4, 5), ""); else g_assert_not_reached (); break; } case OP_SSE_UNPACKLO: { if (ins->inst_c1 == MONO_TYPE_R8 || ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8) { values [ins->dreg] = 
LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_2_i32 (0, 2), ""); } else if (ins->inst_c1 == MONO_TYPE_R4 || ins->inst_c1 == MONO_TYPE_I4 || ins->inst_c1 == MONO_TYPE_U4) { values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (0, 4, 1, 5), ""); } else if (ins->inst_c1 == MONO_TYPE_I2 || ins->inst_c1 == MONO_TYPE_U2) { const int mask_values [] = { 0, 8, 1, 9, 2, 10, 3, 11 }; LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, convert (ctx, lhs, sse_i2_t), convert (ctx, rhs, sse_i2_t), create_const_vector_i32 (mask_values, 8), ""); values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1)); } else if (ins->inst_c1 == MONO_TYPE_I1 || ins->inst_c1 == MONO_TYPE_U1) { const int mask_values [] = { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 }; LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, convert (ctx, lhs, sse_i1_t), convert (ctx, rhs, sse_i1_t), create_const_vector_i32 (mask_values, 16), ""); values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1)); } else { g_assert_not_reached (); } break; } case OP_SSE_UNPACKHI: { if (ins->inst_c1 == MONO_TYPE_R8 || ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8) { values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_2_i32 (1, 3), ""); } else if (ins->inst_c1 == MONO_TYPE_R4 || ins->inst_c1 == MONO_TYPE_I4 || ins->inst_c1 == MONO_TYPE_U4) { values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (2, 6, 3, 7), ""); } else if (ins->inst_c1 == MONO_TYPE_I2 || ins->inst_c1 == MONO_TYPE_U2) { const int mask_values [] = { 4, 12, 5, 13, 6, 14, 7, 15 }; LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, convert (ctx, lhs, sse_i2_t), convert (ctx, rhs, sse_i2_t), create_const_vector_i32 (mask_values, 8), ""); values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1)); } else if (ins->inst_c1 == MONO_TYPE_I1 || ins->inst_c1 == MONO_TYPE_U1) { const int mask_values [] = { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 }; LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, convert (ctx, lhs, sse_i1_t), convert (ctx, rhs, sse_i1_t), create_const_vector_i32 (mask_values, 16), ""); values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1)); } else { g_assert_not_reached (); } break; } case OP_SSE_LOADU: { LLVMValueRef dst_ptr = convert (ctx, lhs, LLVMPointerType (primitive_type_to_llvm_type (inst_c1_type (ins)), 0)); LLVMValueRef dst_vec = LLVMBuildBitCast (builder, dst_ptr, LLVMPointerType (type_to_sse_type (ins->inst_c1), 0), ""); values [ins->dreg] = mono_llvm_build_aligned_load (builder, dst_vec, "", FALSE, ins->inst_c0); // inst_c0 is alignment break; } case OP_SSE_MOVSS: { LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMFloatType (), 0)); LLVMValueRef val = mono_llvm_build_load (builder, addr, "", FALSE); values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (type_to_sse_type (ins->inst_c1)), val, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); break; } case OP_SSE_MOVSS_STORE: { LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMFloatType (), 0)); LLVMValueRef val = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); mono_llvm_build_store (builder, val, addr, FALSE, LLVM_BARRIER_NONE); break; } case OP_SSE2_MOVD: case OP_SSE2_MOVQ: case OP_SSE2_MOVUPD: { LLVMTypeRef rty = NULL; switch (ins->opcode) { case OP_SSE2_MOVD: rty = sse_i4_t; 
break; case OP_SSE2_MOVQ: rty = sse_i8_t; break; case OP_SSE2_MOVUPD: rty = sse_r8_t; break; } LLVMTypeRef srcty = LLVMGetElementType (rty); LLVMValueRef zero = LLVMConstNull (rty); LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (srcty, 0)); LLVMValueRef val = mono_llvm_build_aligned_load (builder, addr, "", FALSE, 1); values [ins->dreg] = LLVMBuildInsertElement (builder, zero, val, const_int32 (0), dname); break; } case OP_SSE_MOVLPS_LOAD: case OP_SSE_MOVHPS_LOAD: { LLVMTypeRef t = LLVMFloatType (); int size = 4; gboolean high = ins->opcode == OP_SSE_MOVHPS_LOAD; /* Load two floats from rhs and store them in the low/high part of lhs */ LLVMValueRef addr = rhs; LLVMValueRef addr1 = convert (ctx, addr, LLVMPointerType (t, 0)); LLVMValueRef addr2 = convert (ctx, LLVMBuildAdd (builder, convert (ctx, addr, IntPtrType ()), convert (ctx, LLVMConstInt (LLVMInt32Type (), size, FALSE), IntPtrType ()), ""), LLVMPointerType (t, 0)); LLVMValueRef val1 = mono_llvm_build_load (builder, addr1, "", FALSE); LLVMValueRef val2 = mono_llvm_build_load (builder, addr2, "", FALSE); int index1, index2; index1 = high ? 2 : 0; index2 = high ? 3 : 1; values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMBuildInsertElement (builder, lhs, val1, LLVMConstInt (LLVMInt32Type (), index1, FALSE), ""), val2, LLVMConstInt (LLVMInt32Type (), index2, FALSE), ""); break; } case OP_SSE2_MOVLPD_LOAD: case OP_SSE2_MOVHPD_LOAD: { LLVMTypeRef t = LLVMDoubleType (); LLVMValueRef addr = convert (ctx, rhs, LLVMPointerType (t, 0)); LLVMValueRef val = mono_llvm_build_load (builder, addr, "", FALSE); int index = ins->opcode == OP_SSE2_MOVHPD_LOAD ? 1 : 0; values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, val, const_int32 (index), ""); break; } case OP_SSE_MOVLPS_STORE: case OP_SSE_MOVHPS_STORE: { /* Store two floats from the low/high part of rhs into lhs */ LLVMValueRef addr = lhs; LLVMValueRef addr1 = convert (ctx, addr, LLVMPointerType (LLVMFloatType (), 0)); LLVMValueRef addr2 = convert (ctx, LLVMBuildAdd (builder, convert (ctx, addr, IntPtrType ()), convert (ctx, LLVMConstInt (LLVMInt32Type (), 4, FALSE), IntPtrType ()), ""), LLVMPointerType (LLVMFloatType (), 0)); int index1 = ins->opcode == OP_SSE_MOVLPS_STORE ? 0 : 2; int index2 = ins->opcode == OP_SSE_MOVLPS_STORE ? 1 : 3; LLVMValueRef val1 = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), index1, FALSE), ""); LLVMValueRef val2 = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), index2, FALSE), ""); mono_llvm_build_store (builder, val1, addr1, FALSE, LLVM_BARRIER_NONE); mono_llvm_build_store (builder, val2, addr2, FALSE, LLVM_BARRIER_NONE); break; } case OP_SSE2_MOVLPD_STORE: case OP_SSE2_MOVHPD_STORE: { LLVMTypeRef t = LLVMDoubleType (); LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (t, 0)); int index = ins->opcode == OP_SSE2_MOVHPD_STORE ?
1 : 0; LLVMValueRef val = LLVMBuildExtractElement (builder, rhs, const_int32 (index), ""); mono_llvm_build_store (builder, val, addr, FALSE, LLVM_BARRIER_NONE); break; } case OP_SSE_STORE: { LLVMValueRef dst_vec = convert (ctx, lhs, LLVMPointerType (LLVMTypeOf (rhs), 0)); mono_llvm_build_aligned_store (builder, rhs, dst_vec, FALSE, ins->inst_c0); break; } case OP_SSE_STORES: { LLVMValueRef first_elem = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); LLVMValueRef dst = convert (ctx, lhs, LLVMPointerType (LLVMTypeOf (first_elem), 0)); mono_llvm_build_aligned_store (builder, first_elem, dst, FALSE, 1); break; } case OP_SSE_MOVNTPS: { LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMTypeOf (rhs), 0)); LLVMValueRef store = mono_llvm_build_aligned_store (builder, rhs, addr, FALSE, ins->inst_c0); set_nontemporal_flag (store); break; } case OP_SSE_PREFETCHT0: { LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0)); LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (3), const_int32 (1) }; call_intrins (ctx, INTRINS_PREFETCH, args, ""); break; } case OP_SSE_PREFETCHT1: { LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0)); LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (2), const_int32 (1) }; call_intrins (ctx, INTRINS_PREFETCH, args, ""); break; } case OP_SSE_PREFETCHT2: { LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0)); LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (1), const_int32 (1) }; call_intrins (ctx, INTRINS_PREFETCH, args, ""); break; } case OP_SSE_PREFETCHNTA: { LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0)); LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (0), const_int32 (1) }; call_intrins (ctx, INTRINS_PREFETCH, args, ""); break; } case OP_SSE_OR: { LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t); LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t); LLVMValueRef vec_and = LLVMBuildOr (builder, vec_lhs_i64, vec_rhs_i64, ""); values [ins->dreg] = LLVMBuildBitCast (builder, vec_and, type_to_sse_type (ins->inst_c1), ""); break; } case OP_SSE_XOR: { LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t); LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t); LLVMValueRef vec_and = LLVMBuildXor (builder, vec_lhs_i64, vec_rhs_i64, ""); values [ins->dreg] = LLVMBuildBitCast (builder, vec_and, type_to_sse_type (ins->inst_c1), ""); break; } case OP_SSE_AND: { LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t); LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t); LLVMValueRef vec_and = LLVMBuildAnd (builder, vec_lhs_i64, vec_rhs_i64, ""); values [ins->dreg] = LLVMBuildBitCast (builder, vec_and, type_to_sse_type (ins->inst_c1), ""); break; } case OP_SSE_ANDN: { LLVMValueRef minus_one [2]; minus_one [0] = LLVMConstInt (LLVMInt64Type (), -1, FALSE); minus_one [1] = LLVMConstInt (LLVMInt64Type (), -1, FALSE); LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t); LLVMValueRef vec_xor = LLVMBuildXor (builder, vec_lhs_i64, LLVMConstVector (minus_one, 2), ""); LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t); LLVMValueRef vec_and = LLVMBuildAnd (builder, vec_rhs_i64, vec_xor, ""); values [ins->dreg] = LLVMBuildBitCast (builder, vec_and, type_to_sse_type (ins->inst_c1), ""); break; } case OP_SSE_ADDSS: case OP_SSE_SUBSS: case OP_SSE_DIVSS: case OP_SSE_MULSS: case OP_SSE2_ADDSD: case OP_SSE2_SUBSD: case OP_SSE2_DIVSD: case OP_SSE2_MULSD: { LLVMValueRef v1 = 
LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); LLVMValueRef v2 = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); LLVMValueRef v = NULL; switch (ins->opcode) { case OP_SSE_ADDSS: case OP_SSE2_ADDSD: v = LLVMBuildFAdd (builder, v1, v2, ""); break; case OP_SSE_SUBSS: case OP_SSE2_SUBSD: v = LLVMBuildFSub (builder, v1, v2, ""); break; case OP_SSE_DIVSS: case OP_SSE2_DIVSD: v = LLVMBuildFDiv (builder, v1, v2, ""); break; case OP_SSE_MULSS: case OP_SSE2_MULSD: v = LLVMBuildFMul (builder, v1, v2, ""); break; default: g_assert_not_reached (); } values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, v, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); break; } case OP_SSE_CMPSS: case OP_SSE2_CMPSD: { int imm = -1; gboolean swap = FALSE; switch (ins->inst_c0) { case CMP_EQ: imm = SSE_eq_ord_nosignal; break; case CMP_GT: imm = SSE_lt_ord_signal; swap = TRUE; break; case CMP_GE: imm = SSE_le_ord_signal; swap = TRUE; break; case CMP_LT: imm = SSE_lt_ord_signal; break; case CMP_LE: imm = SSE_le_ord_signal; break; case CMP_GT_UN: imm = SSE_nle_unord_signal; break; case CMP_GE_UN: imm = SSE_nlt_unord_signal; break; case CMP_LT_UN: imm = SSE_nle_unord_signal; swap = TRUE; break; case CMP_LE_UN: imm = SSE_nlt_unord_signal; swap = TRUE; break; case CMP_NE: imm = SSE_neq_unord_nosignal; break; case CMP_ORD: imm = SSE_ord_nosignal; break; case CMP_UNORD: imm = SSE_unord_nosignal; break; default: g_assert_not_reached (); break; } LLVMValueRef cmp = LLVMConstInt (LLVMInt8Type (), imm, FALSE); LLVMValueRef args [] = { lhs, rhs, cmp }; if (swap) { args [0] = rhs; args [1] = lhs; } IntrinsicId id = (IntrinsicId) 0; switch (ins->opcode) { case OP_SSE_CMPSS: id = INTRINS_SSE_CMPSS; break; case OP_SSE2_CMPSD: id = INTRINS_SSE_CMPSD; break; default: g_assert_not_reached (); break; } int elements = LLVMGetVectorSize (LLVMTypeOf (lhs)); int mask_values [MAX_VECTOR_ELEMS] = { 0 }; for (int i = 1; i < elements; ++i) { mask_values [i] = elements + i; } LLVMValueRef result = call_intrins (ctx, id, args, ""); result = LLVMBuildShuffleVector (builder, result, lhs, create_const_vector_i32 (mask_values, elements), ""); values [ins->dreg] = result; break; } case OP_SSE_COMISS: { LLVMValueRef args [] = { lhs, rhs }; IntrinsicId id = (IntrinsicId)0; switch (ins->inst_c0) { case CMP_EQ: id = INTRINS_SSE_COMIEQ_SS; break; case CMP_GT: id = INTRINS_SSE_COMIGT_SS; break; case CMP_GE: id = INTRINS_SSE_COMIGE_SS; break; case CMP_LT: id = INTRINS_SSE_COMILT_SS; break; case CMP_LE: id = INTRINS_SSE_COMILE_SS; break; case CMP_NE: id = INTRINS_SSE_COMINEQ_SS; break; default: g_assert_not_reached (); break; } values [ins->dreg] = call_intrins (ctx, id, args, ""); break; } case OP_SSE_UCOMISS: { LLVMValueRef args [] = { lhs, rhs }; IntrinsicId id = (IntrinsicId)0; switch (ins->inst_c0) { case CMP_EQ: id = INTRINS_SSE_UCOMIEQ_SS; break; case CMP_GT: id = INTRINS_SSE_UCOMIGT_SS; break; case CMP_GE: id = INTRINS_SSE_UCOMIGE_SS; break; case CMP_LT: id = INTRINS_SSE_UCOMILT_SS; break; case CMP_LE: id = INTRINS_SSE_UCOMILE_SS; break; case CMP_NE: id = INTRINS_SSE_UCOMINEQ_SS; break; default: g_assert_not_reached (); break; } values [ins->dreg] = call_intrins (ctx, id, args, ""); break; } case OP_SSE2_COMISD: { LLVMValueRef args [] = { lhs, rhs }; IntrinsicId id = (IntrinsicId)0; switch (ins->inst_c0) { case CMP_EQ: id = INTRINS_SSE_COMIEQ_SD; break; case CMP_GT: id = INTRINS_SSE_COMIGT_SD; break; case CMP_GE: id = INTRINS_SSE_COMIGE_SD; break; case CMP_LT: id = 
INTRINS_SSE_COMILT_SD; break; case CMP_LE: id = INTRINS_SSE_COMILE_SD; break; case CMP_NE: id = INTRINS_SSE_COMINEQ_SD; break; default: g_assert_not_reached (); break; } values [ins->dreg] = call_intrins (ctx, id, args, ""); break; } case OP_SSE2_UCOMISD: { LLVMValueRef args [] = { lhs, rhs }; IntrinsicId id = (IntrinsicId)0; switch (ins->inst_c0) { case CMP_EQ: id = INTRINS_SSE_UCOMIEQ_SD; break; case CMP_GT: id = INTRINS_SSE_UCOMIGT_SD; break; case CMP_GE: id = INTRINS_SSE_UCOMIGE_SD; break; case CMP_LT: id = INTRINS_SSE_UCOMILT_SD; break; case CMP_LE: id = INTRINS_SSE_UCOMILE_SD; break; case CMP_NE: id = INTRINS_SSE_UCOMINEQ_SD; break; default: g_assert_not_reached (); break; } values [ins->dreg] = call_intrins (ctx, id, args, ""); break; } case OP_SSE_CVTSI2SS: case OP_SSE_CVTSI2SS64: case OP_SSE2_CVTSI2SD: case OP_SSE2_CVTSI2SD64: { LLVMTypeRef ty = LLVMFloatType (); switch (ins->opcode) { case OP_SSE2_CVTSI2SD: case OP_SSE2_CVTSI2SD64: ty = LLVMDoubleType (); break; } LLVMValueRef fp = LLVMBuildSIToFP (builder, rhs, ty, ""); values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, fp, const_int32 (0), dname); break; } case OP_SSE2_PMULUDQ: { LLVMValueRef i32_max = LLVMConstInt (LLVMInt64Type (), UINT32_MAX, FALSE); LLVMValueRef maskvals [] = { i32_max, i32_max }; LLVMValueRef mask = LLVMConstVector (maskvals, 2); LLVMValueRef l = LLVMBuildAnd (builder, convert (ctx, lhs, sse_i8_t), mask, ""); LLVMValueRef r = LLVMBuildAnd (builder, convert (ctx, rhs, sse_i8_t), mask, ""); values [ins->dreg] = LLVMBuildNUWMul (builder, l, r, dname); break; } case OP_SSE_SQRTSS: case OP_SSE2_SQRTSD: { LLVMValueRef upper = values [ins->sreg1]; LLVMValueRef lower = values [ins->sreg2]; LLVMValueRef scalar = LLVMBuildExtractElement (builder, lower, const_int32 (0), ""); LLVMValueRef result = call_intrins (ctx, simd_ins_to_intrins (ins->opcode), &scalar, dname); values [ins->dreg] = LLVMBuildInsertElement (builder, upper, result, const_int32 (0), ""); break; } case OP_SSE_RCPSS: case OP_SSE_RSQRTSS: { IntrinsicId id = (IntrinsicId)0; switch (ins->opcode) { case OP_SSE_RCPSS: id = INTRINS_SSE_RCP_SS; break; case OP_SSE_RSQRTSS: id = INTRINS_SSE_RSQRT_SS; break; default: g_assert_not_reached (); break; }; LLVMValueRef result = call_intrins (ctx, id, &rhs, dname); const int mask[] = { 0, 5, 6, 7 }; LLVMValueRef shufmask = create_const_vector_i32 (mask, 4); values [ins->dreg] = LLVMBuildShuffleVector (builder, result, lhs, shufmask, ""); break; } case OP_XOP: { IntrinsicId id = (IntrinsicId)ins->inst_c0; call_intrins (ctx, id, NULL, ""); break; } case OP_XOP_X_I: case OP_XOP_X_X: case OP_XOP_I4_X: case OP_XOP_I8_X: case OP_XOP_X_X_X: case OP_XOP_X_X_I4: case OP_XOP_X_X_I8: { IntrinsicId id = (IntrinsicId)ins->inst_c0; LLVMValueRef args [] = { lhs, rhs }; values [ins->dreg] = call_intrins (ctx, id, args, ""); break; } case OP_XOP_I4_X_X: { gboolean to_i8_t = FALSE; gboolean ret_bool = FALSE; IntrinsicId id = (IntrinsicId)ins->inst_c0; switch (ins->inst_c0) { case INTRINS_SSE_TESTC: to_i8_t = TRUE; ret_bool = TRUE; break; case INTRINS_SSE_TESTZ: to_i8_t = TRUE; ret_bool = TRUE; break; case INTRINS_SSE_TESTNZ: to_i8_t = TRUE; ret_bool = TRUE; break; default: g_assert_not_reached (); break; } LLVMValueRef args [] = { lhs, rhs }; if (to_i8_t) { args [0] = convert (ctx, args [0], sse_i8_t); args [1] = convert (ctx, args [1], sse_i8_t); } LLVMValueRef call = call_intrins (ctx, id, args, ""); if (ret_bool) { // if return type is bool (it's still i32) we need to normalize it to 1/0 LLVMValueRef cmp_zero = 
LLVMBuildICmp (builder, LLVMIntNE, call, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); values [ins->dreg] = LLVMBuildZExt (builder, cmp_zero, LLVMInt8Type (), ""); } else { values [ins->dreg] = call; } break; } case OP_SSE2_MASKMOVDQU: { LLVMTypeRef i8ptr = LLVMPointerType (LLVMInt8Type (), 0); LLVMValueRef dstaddr = convert (ctx, values [ins->sreg3], i8ptr); LLVMValueRef src = convert (ctx, lhs, sse_i1_t); LLVMValueRef mask = convert (ctx, rhs, sse_i1_t); LLVMValueRef args[] = { src, mask, dstaddr }; call_intrins (ctx, INTRINS_SSE_MASKMOVDQU, args, ""); break; } case OP_PADDB_SAT: case OP_PADDW_SAT: case OP_PSUBB_SAT: case OP_PSUBW_SAT: case OP_PADDB_SAT_UN: case OP_PADDW_SAT_UN: case OP_PSUBB_SAT_UN: case OP_PSUBW_SAT_UN: case OP_SSE2_ADDS: case OP_SSE2_SUBS: { IntrinsicId id = (IntrinsicId)0; int type = 0; gboolean is_add = TRUE; switch (ins->opcode) { case OP_PADDB_SAT: type = MONO_TYPE_I1; break; case OP_PADDW_SAT: type = MONO_TYPE_I2; break; case OP_PSUBB_SAT: type = MONO_TYPE_I1; is_add = FALSE; break; case OP_PSUBW_SAT: type = MONO_TYPE_I2; is_add = FALSE; break; case OP_PADDB_SAT_UN: type = MONO_TYPE_U1; break; case OP_PADDW_SAT_UN: type = MONO_TYPE_U2; break; case OP_PSUBB_SAT_UN: type = MONO_TYPE_U1; is_add = FALSE; break; case OP_PSUBW_SAT_UN: type = MONO_TYPE_U2; is_add = FALSE; break; case OP_SSE2_ADDS: type = ins->inst_c1; break; case OP_SSE2_SUBS: type = ins->inst_c1; is_add = FALSE; break; default: g_assert_not_reached (); } if (is_add) { switch (type) { case MONO_TYPE_I1: id = INTRINS_SSE_SADD_SATI8; break; case MONO_TYPE_U1: id = INTRINS_SSE_UADD_SATI8; break; case MONO_TYPE_I2: id = INTRINS_SSE_SADD_SATI16; break; case MONO_TYPE_U2: id = INTRINS_SSE_UADD_SATI16; break; default: g_assert_not_reached (); break; } } else { switch (type) { case MONO_TYPE_I1: id = INTRINS_SSE_SSUB_SATI8; break; case MONO_TYPE_U1: id = INTRINS_SSE_USUB_SATI8; break; case MONO_TYPE_I2: id = INTRINS_SSE_SSUB_SATI16; break; case MONO_TYPE_U2: id = INTRINS_SSE_USUB_SATI16; break; default: g_assert_not_reached (); break; } } LLVMTypeRef vecty = type_to_sse_type (type); LLVMValueRef args [] = { convert (ctx, lhs, vecty), convert (ctx, rhs, vecty) }; LLVMValueRef result = call_intrins (ctx, id, args, dname); values [ins->dreg] = convert (ctx, result, vecty); break; } case OP_SSE2_PACKUS: { LLVMValueRef args [2]; args [0] = convert (ctx, lhs, sse_i2_t); args [1] = convert (ctx, rhs, sse_i2_t); values [ins->dreg] = convert (ctx, call_intrins (ctx, INTRINS_SSE_PACKUSWB, args, dname), type_to_sse_type (ins->inst_c1)); break; } case OP_SSE2_SRLI: { LLVMValueRef args [] = { lhs, rhs }; values [ins->dreg] = convert (ctx, call_intrins (ctx, INTRINS_SSE_PSRLI_W, args, dname), type_to_sse_type (ins->inst_c1)); break; } case OP_SSE2_PSLLDQ: case OP_SSE2_PSRLDQ: { LLVMBasicBlockRef bbs [16 + 1]; LLVMValueRef switch_ins; LLVMValueRef value = lhs; LLVMValueRef index = rhs; LLVMValueRef phi_values [16 + 1]; LLVMTypeRef t = sse_i1_t; int nelems = 16; int i; gboolean shift_right = (ins->opcode == OP_SSE2_PSRLDQ); value = convert (ctx, value, t); // No corresponding LLVM intrinsics // FIXME: Optimize const count for (i = 0; i < nelems; ++i) bbs [i] = gen_bb (ctx, "PSLLDQ_CASE_BB"); bbs [nelems] = gen_bb (ctx, "PSLLDQ_DEF_BB"); cbb = gen_bb (ctx, "PSLLDQ_COND_BB"); switch_ins = LLVMBuildSwitch (builder, index, bbs [nelems], 0); for (i = 0; i < nelems; ++i) { LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]); LLVMPositionBuilderAtEnd (builder, bbs [i]); int mask_values [16]; // Implement 
shift using a shuffle if (shift_right) { for (int j = 0; j < nelems - i; ++j) mask_values [j] = i + j; for (int j = nelems - i; j < nelems; ++j) mask_values [j] = nelems; } else { for (int j = 0; j < i; ++j) mask_values [j] = nelems; for (int j = 0; j < nelems - i; ++j) mask_values [j + i] = j; } phi_values [i] = LLVMBuildShuffleVector (builder, value, LLVMGetUndef (t), create_const_vector_i32 (mask_values, nelems), ""); LLVMBuildBr (builder, cbb); } /* Default case */ LLVMPositionBuilderAtEnd (builder, bbs [nelems]); phi_values [nelems] = LLVMConstNull (t); LLVMBuildBr (builder, cbb); LLVMPositionBuilderAtEnd (builder, cbb); values [ins->dreg] = LLVMBuildPhi (builder, LLVMTypeOf (phi_values [0]), ""); LLVMAddIncoming (values [ins->dreg], phi_values, bbs, nelems + 1); values [ins->dreg] = convert (ctx, values [ins->dreg], type_to_sse_type (ins->inst_c1)); ctx->bblocks [bb->block_num].end_bblock = cbb; break; } case OP_SSE2_PSRAW_IMM: case OP_SSE2_PSRAD_IMM: case OP_SSE2_PSRLW_IMM: case OP_SSE2_PSRLD_IMM: case OP_SSE2_PSRLQ_IMM: { LLVMValueRef value = lhs; LLVMValueRef index = rhs; IntrinsicId id; // FIXME: Optimize const index case /* Use the non-immediate version */ switch (ins->opcode) { case OP_SSE2_PSRAW_IMM: id = INTRINS_SSE_PSRA_W; break; case OP_SSE2_PSRAD_IMM: id = INTRINS_SSE_PSRA_D; break; case OP_SSE2_PSRLW_IMM: id = INTRINS_SSE_PSRL_W; break; case OP_SSE2_PSRLD_IMM: id = INTRINS_SSE_PSRL_D; break; case OP_SSE2_PSRLQ_IMM: id = INTRINS_SSE_PSRL_Q; break; default: g_assert_not_reached (); break; } LLVMTypeRef t = LLVMTypeOf (value); LLVMValueRef index_vect = LLVMBuildInsertElement (builder, LLVMConstNull (t), convert (ctx, index, LLVMGetElementType (t)), const_int32 (0), ""); LLVMValueRef args [] = { value, index_vect }; values [ins->dreg] = call_intrins (ctx, id, args, ""); break; } case OP_SSE_SHUFPS: case OP_SSE2_SHUFPD: case OP_SSE2_PSHUFD: case OP_SSE2_PSHUFHW: case OP_SSE2_PSHUFLW: { LLVMTypeRef ret_t = LLVMTypeOf (lhs); LLVMValueRef l = lhs; LLVMValueRef r = rhs; LLVMValueRef ctl = arg3; const char *oname = ""; int ncases = 0; switch (ins->opcode) { case OP_SSE_SHUFPS: ncases = 256; break; case OP_SSE2_SHUFPD: ncases = 4; break; case OP_SSE2_PSHUFD: case OP_SSE2_PSHUFHW: case OP_SSE2_PSHUFLW: ncases = 256; r = lhs; ctl = rhs; break; } switch (ins->opcode) { case OP_SSE_SHUFPS: oname = "sse_shufps"; break; case OP_SSE2_SHUFPD: oname = "sse2_shufpd"; break; case OP_SSE2_PSHUFD: oname = "sse2_pshufd"; break; case OP_SSE2_PSHUFHW: oname = "sse2_pshufhw"; break; case OP_SSE2_PSHUFLW: oname = "sse2_pshuflw"; break; } ctl = LLVMBuildAnd (builder, ctl, const_int32 (ncases - 1), ""); ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, ncases, ctl, ret_t, oname); int mask_values [8]; int mask_len = 0; int i = 0; while (immediate_unroll_next (&ictx, &i)) { switch (ins->opcode) { case OP_SSE_SHUFPS: mask_len = 4; mask_values [0] = ((i >> 0) & 0x3) + 0; // take two elements from lhs mask_values [1] = ((i >> 2) & 0x3) + 0; mask_values [2] = ((i >> 4) & 0x3) + 4; // and two from rhs mask_values [3] = ((i >> 6) & 0x3) + 4; break; case OP_SSE2_SHUFPD: mask_len = 2; mask_values [0] = ((i >> 0) & 0x1) + 0; mask_values [1] = ((i >> 1) & 0x1) + 2; break; case OP_SSE2_PSHUFD: /* * Each 2 bits in mask selects 1 dword from the source and copies it to the * destination.
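* For example, an imm8 of 0x1B (0b00011011) produces the mask {3,2,1,0}, reversing the four dwords.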
*/ mask_len = 4; for (int j = 0; j < 4; ++j) { int windex = (i >> (j * 2)) & 0x3; mask_values [j] = windex; } break; case OP_SSE2_PSHUFHW: /* * Each 2 bits in mask selects 1 word from the high quadword of the source and copies it to the * high quadword of the destination. */ mask_len = 8; /* The low quadword stays the same */ for (int j = 0; j < 4; ++j) mask_values [j] = j; for (int j = 0; j < 4; ++j) { int windex = (i >> (j * 2)) & 0x3; mask_values [j + 4] = 4 + windex; } break; case OP_SSE2_PSHUFLW: mask_len = 8; /* The high quadword stays the same */ for (int j = 0; j < 4; ++j) mask_values [j + 4] = j + 4; for (int j = 0; j < 4; ++j) { int windex = (i >> (j * 2)) & 0x3; mask_values [j] = windex; } break; } LLVMValueRef mask = create_const_vector_i32 (mask_values, mask_len); LLVMValueRef result = LLVMBuildShuffleVector (builder, l, r, mask, oname); immediate_unroll_commit (&ictx, i, result); } immediate_unroll_default (&ictx); immediate_unroll_commit_default (&ictx, LLVMGetUndef (ret_t)); values [ins->dreg] = immediate_unroll_end (&ictx, &cbb); break; } case OP_SSE3_MOVDDUP: { int mask [] = { 0, 0 }; values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, LLVMGetUndef (LLVMTypeOf (lhs)), create_const_vector_i32 (mask, 2), ""); break; } case OP_SSE3_MOVDDUP_MEM: { LLVMValueRef undef = LLVMGetUndef (v128_r8_t); LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (r8_t, 0)); LLVMValueRef elem = mono_llvm_build_aligned_load (builder, addr, "sse3_movddup_mem", FALSE, 1); LLVMValueRef val = LLVMBuildInsertElement (builder, undef, elem, const_int32 (0), "sse3_movddup_mem"); values [ins->dreg] = LLVMBuildShuffleVector (builder, val, undef, LLVMConstNull (LLVMVectorType (i4_t, 2)), "sse3_movddup_mem"); break; } case OP_SSE3_MOVSHDUP: { int mask [] = { 1, 1, 3, 3 }; values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), create_const_vector_i32 (mask, 4), ""); break; } case OP_SSE3_MOVSLDUP: { int mask [] = { 0, 0, 2, 2 }; values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), create_const_vector_i32 (mask, 4), ""); break; } case OP_SSSE3_SHUFFLE: { LLVMValueRef args [] = { lhs, rhs }; values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_PSHUFB, args, dname); break; } case OP_SSSE3_ABS: { // %sub = sub <16 x i8> zeroinitializer, %arg // %cmp = icmp sgt <16 x i8> %arg, zeroinitializer // %abs = select <16 x i1> %cmp, <16 x i8> %arg, <16 x i8> %sub LLVMTypeRef typ = type_to_sse_type (ins->inst_c1); LLVMValueRef sub = LLVMBuildSub(builder, LLVMConstNull(typ), lhs, ""); LLVMValueRef cmp = LLVMBuildICmp(builder, LLVMIntSGT, lhs, LLVMConstNull(typ), ""); LLVMValueRef abs = LLVMBuildSelect (builder, cmp, lhs, sub, ""); values [ins->dreg] = convert (ctx, abs, typ); break; } case OP_SSSE3_ALIGNR: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMValueRef zero = LLVMConstNull (v128_i1_t); LLVMValueRef hivec = convert (ctx, lhs, v128_i1_t); LLVMValueRef lovec = convert (ctx, rhs, v128_i1_t); LLVMValueRef rshift_amount = convert (ctx, arg3, i1_t); ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 32, rshift_amount, v128_i1_t, "ssse3_alignr"); LLVMValueRef mask_values [16]; // 128-bit vector, 8-bit elements, 16 total elements int i = 0; while (immediate_unroll_next (&ictx, &i)) { LLVMValueRef hi = NULL; LLVMValueRef lo = NULL; if (i <= 16) { for (int j = 0; j < 16; j++) mask_values [j] = const_int32 (i + j); lo = lovec; hi = hivec; } else { for (int j = 0; j < 16; j++) mask_values [j] = const_int32 (i + 
j - 16); lo = hivec; hi = zero; } LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, lo, hi, LLVMConstVector (mask_values, 16), "ssse3_alignr"); immediate_unroll_commit (&ictx, i, shuffled); } immediate_unroll_default (&ictx); immediate_unroll_commit_default (&ictx, zero); LLVMValueRef result = immediate_unroll_end (&ictx, &cbb); values [ins->dreg] = convert (ctx, result, ret_t); break; } case OP_SSE41_ROUNDP: { LLVMValueRef args [] = { lhs, LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE) }; values [ins->dreg] = call_intrins (ctx, ins->inst_c1 == MONO_TYPE_R4 ? INTRINS_SSE_ROUNDPS : INTRINS_SSE_ROUNDPD, args, dname); break; } case OP_SSE41_ROUNDS: { LLVMValueRef args [3]; args [0] = lhs; args [1] = rhs; args [2] = LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE); values [ins->dreg] = call_intrins (ctx, ins->inst_c1 == MONO_TYPE_R4 ? INTRINS_SSE_ROUNDSS : INTRINS_SSE_ROUNDSD, args, dname); break; } case OP_SSE41_DPPS: case OP_SSE41_DPPD: { /* Bits 0, 1, 4, 5 are meaningful for the control mask * in dppd; all bits are meaningful for dpps. */ LLVMTypeRef ret_t = NULL; LLVMValueRef mask = NULL; int mask_bits = 0; int high_shift = 0; int low_mask = 0; IntrinsicId iid = (IntrinsicId) 0; const char *oname = ""; switch (ins->opcode) { case OP_SSE41_DPPS: ret_t = v128_r4_t; mask = const_int8 (0xff); // 0b11111111
mask_bits = 8; high_shift = 4; low_mask = 0xf; iid = INTRINS_SSE_DPPS; oname = "sse41_dpps"; break; case OP_SSE41_DPPD: ret_t = v128_r8_t; mask = const_int8 (0x33); // 0b00110011
mask_bits = 4; high_shift = 2; low_mask = 0x3; iid = INTRINS_SSE_DPPD; oname = "sse41_dppd"; break; } LLVMValueRef args [] = { lhs, rhs, NULL }; LLVMValueRef index = LLVMBuildAnd (builder, convert (ctx, arg3, i1_t), mask, oname); ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 1 << mask_bits, index, ret_t, oname); int i = 0; while (immediate_unroll_next (&ictx, &i)) { int imm = ((i >> high_shift) << 4) | (i & low_mask); args [2] = const_int8 (imm); LLVMValueRef result = call_intrins (ctx, iid, args, dname); immediate_unroll_commit (&ictx, imm, result); } immediate_unroll_default (&ictx); immediate_unroll_commit_default (&ictx, LLVMGetUndef (ret_t)); values [ins->dreg] = immediate_unroll_end (&ictx, &cbb); break; } case OP_SSE41_MPSADBW: { LLVMValueRef args [] = { convert (ctx, lhs, sse_i1_t), convert (ctx, rhs, sse_i1_t), NULL, }; LLVMValueRef ctl = convert (ctx, arg3, i1_t); // Only 3 bits (bits 0-2) are used by mpsadbw and llvm.x86.sse41.mpsadbw
int used_bits = 0x7; ctl = LLVMBuildAnd (builder, ctl, const_int8 (used_bits), "sse41_mpsadbw"); ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, used_bits + 1, ctl, v128_i2_t, "sse41_mpsadbw"); int i = 0; while (immediate_unroll_next (&ictx, &i)) { args [2] = const_int8 (i); LLVMValueRef result = call_intrins (ctx, INTRINS_SSE_MPSADBW, args, "sse41_mpsadbw"); immediate_unroll_commit (&ictx, i, result); } immediate_unroll_unreachable_default (&ictx); values [ins->dreg] = immediate_unroll_end (&ictx, &cbb); break; } case OP_SSE41_INSERTPS: { LLVMValueRef ctl = convert (ctx, arg3, i1_t); LLVMValueRef args [] = { lhs, rhs, NULL }; ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 256, ctl, v128_r4_t, "sse41_insertps"); int i = 0; while (immediate_unroll_next (&ictx, &i)) { args [2] = const_int8 (i); LLVMValueRef result = call_intrins (ctx, INTRINS_SSE_INSERTPS, args, dname); immediate_unroll_commit (&ictx, i, result); } immediate_unroll_unreachable_default (&ictx); values [ins->dreg] = immediate_unroll_end (&ictx, &cbb); break; } 
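/* Note: as in the pshuf*/'/'dpps cases above, the blend control byte reaching here need
 * not be a compile-time constant, so immediate_unroll_* expands a switch over all
 * 1 << nelem possible masks. For example, with 4 lanes a control of 0b0101 yields the
 * shuffle indices { 4, 1, 6, 3 } over the concatenated <lhs, rhs> pair. */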
case OP_SSE41_BLEND: { LLVMTypeRef ret_t = LLVMTypeOf (lhs); int nelem = LLVMGetVectorSize (ret_t); g_assert (nelem >= 2 && nelem <= 8); // I2, U2, R4, R8
int unique_ctl_patterns = 1 << nelem; int ctlmask = unique_ctl_patterns - 1; LLVMValueRef ctl = convert (ctx, arg3, i1_t); ctl = LLVMBuildAnd (builder, ctl, const_int8 (ctlmask), "sse41_blend"); ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, unique_ctl_patterns, ctl, ret_t, "sse41_blend"); int i = 0; int mask_values [MAX_VECTOR_ELEMS] = { 0 }; while (immediate_unroll_next (&ictx, &i)) { for (int lane = 0; lane < nelem; ++lane) { // whether the n-th bit of inst_c0 (the control byte) is set to 1
gboolean bit_set = (i & (1 << lane)) >> lane; mask_values [lane] = lane + (bit_set ? nelem : 0); } LLVMValueRef mask = create_const_vector_i32 (mask_values, nelem); LLVMValueRef result = LLVMBuildShuffleVector (builder, lhs, rhs, mask, "sse41_blend"); immediate_unroll_commit (&ictx, i, result); } immediate_unroll_default (&ictx); immediate_unroll_commit_default (&ictx, LLVMGetUndef (ret_t)); values [ins->dreg] = immediate_unroll_end (&ictx, &cbb); break; } case OP_SSE41_BLENDV: { LLVMValueRef args [] = { lhs, rhs, values [ins->sreg3] }; if (ins->inst_c1 == MONO_TYPE_R4) { values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_BLENDVPS, args, dname); } else if (ins->inst_c1 == MONO_TYPE_R8) { values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_BLENDVPD, args, dname); } else { // for other non-fp types just convert to <16 x i8> and pass to @llvm.x86.sse41.pblendvb
args [0] = LLVMBuildBitCast (ctx->builder, args [0], sse_i1_t, ""); args [1] = LLVMBuildBitCast (ctx->builder, args [1], sse_i1_t, ""); args [2] = LLVMBuildBitCast (ctx->builder, args [2], sse_i1_t, ""); values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_PBLENDVB, args, dname); } break; } case OP_SSE_CVTII: { gboolean is_signed = (ins->inst_c1 == MONO_TYPE_I1) || (ins->inst_c1 == MONO_TYPE_I2) || (ins->inst_c1 == MONO_TYPE_I4); LLVMTypeRef vec_type; if ((ins->inst_c1 == MONO_TYPE_I1) || (ins->inst_c1 == MONO_TYPE_U1)) vec_type = sse_i1_t; else if ((ins->inst_c1 == MONO_TYPE_I2) || (ins->inst_c1 == MONO_TYPE_U2)) vec_type = sse_i2_t; else vec_type = sse_i4_t; LLVMValueRef value; if (LLVMGetTypeKind (LLVMTypeOf (lhs)) != LLVMVectorTypeKind) { LLVMValueRef bitcasted = LLVMBuildBitCast (ctx->builder, lhs, LLVMPointerType (vec_type, 0), ""); value = mono_llvm_build_aligned_load (builder, bitcasted, "", FALSE, 1); } else { value = LLVMBuildBitCast (ctx->builder, lhs, vec_type, ""); } LLVMValueRef mask_vec; LLVMTypeRef dst_type; if (ins->inst_c0 == MONO_TYPE_I2) { mask_vec = create_const_vector_i32 (mask_0_incr_1, 8); dst_type = sse_i2_t; } else if (ins->inst_c0 == MONO_TYPE_I4) { mask_vec = create_const_vector_i32 (mask_0_incr_1, 4); dst_type = sse_i4_t; } else { g_assert (ins->inst_c0 == MONO_TYPE_I8); mask_vec = create_const_vector_i32 (mask_0_incr_1, 2); dst_type = sse_i8_t; } LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, value, LLVMGetUndef (vec_type), mask_vec, ""); if (is_signed) values [ins->dreg] = LLVMBuildSExt (ctx->builder, shuffled, dst_type, ""); else values [ins->dreg] = LLVMBuildZExt (ctx->builder, shuffled, dst_type, ""); break; } case OP_SSE41_LOADANT: { LLVMValueRef dst_ptr = convert (ctx, lhs, LLVMPointerType (primitive_type_to_llvm_type (inst_c1_type (ins)), 0)); LLVMValueRef dst_vec = LLVMBuildBitCast (builder, dst_ptr, LLVMPointerType (type_to_sse_type (ins->inst_c1), 0), ""); LLVMValueRef load = mono_llvm_build_aligned_load (builder, dst_vec, "", FALSE, 16); 
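/* Tag the load as non-temporal (streaming) so the cache-bypassing movntdqa-style
 * semantics are preserved instead of an ordinary cached vector load. */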
set_nontemporal_flag (load); values [ins->dreg] = load; break; } case OP_SSE41_MUL: { const int shift_vals [] = { 32, 32 }; const LLVMValueRef args [] = { convert (ctx, lhs, sse_i8_t), convert (ctx, rhs, sse_i8_t), }; LLVMValueRef mul_args [2] = { 0 }; LLVMValueRef shift_vec = create_const_vector (LLVMInt64Type (), shift_vals, 2); for (int i = 0; i < 2; ++i) { LLVMValueRef padded = LLVMBuildShl (builder, args [i], shift_vec, ""); mul_args[i] = mono_llvm_build_exact_ashr (builder, padded, shift_vec); } values [ins->dreg] = LLVMBuildNSWMul (builder, mul_args [0], mul_args [1], dname); break; } case OP_SSE41_MULLO: { values [ins->dreg] = LLVMBuildMul (ctx->builder, lhs, rhs, ""); break; } case OP_SSE42_CRC32: case OP_SSE42_CRC64: { LLVMValueRef args [2]; args [0] = lhs; args [1] = convert (ctx, rhs, primitive_type_to_llvm_type (ins->inst_c0)); IntrinsicId id; switch (ins->inst_c0) { case MONO_TYPE_U1: id = INTRINS_SSE_CRC32_32_8; break; case MONO_TYPE_U2: id = INTRINS_SSE_CRC32_32_16; break; case MONO_TYPE_U4: id = INTRINS_SSE_CRC32_32_32; break; case MONO_TYPE_U8: id = INTRINS_SSE_CRC32_64_64; break; default: g_assert_not_reached (); break; } values [ins->dreg] = call_intrins (ctx, id, args, ""); break; } case OP_PCLMULQDQ: { LLVMValueRef args [] = { lhs, rhs, NULL }; LLVMValueRef ctl = convert (ctx, arg3, i1_t); // Only bits 0 and 4 of the immediate operand are used by PCLMULQDQ.
ctl = LLVMBuildAnd (builder, ctl, const_int8 (0x11), "pclmulqdq"); ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 1 << 2, ctl, v128_i8_t, "pclmulqdq"); int i = 0; while (immediate_unroll_next (&ictx, &i)) { int imm = ((i & 0x2) << 3) | (i & 0x1); args [2] = const_int8 (imm); LLVMValueRef result = call_intrins (ctx, INTRINS_PCLMULQDQ, args, "pclmulqdq"); immediate_unroll_commit (&ictx, imm, result); } immediate_unroll_unreachable_default (&ictx); values [ins->dreg] = immediate_unroll_end (&ictx, &cbb); break; } case OP_AES_KEYGENASSIST: { LLVMValueRef roundconstant = convert (ctx, rhs, i1_t); LLVMValueRef args [] = { convert (ctx, lhs, v128_i8_t), NULL }; ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 256, roundconstant, v128_i8_t, "aes_keygenassist"); int i = 0; while (immediate_unroll_next (&ictx, &i)) { args [1] = const_int8 (i); LLVMValueRef result = call_intrins (ctx, INTRINS_AESNI_AESKEYGENASSIST, args, "aes_keygenassist"); immediate_unroll_commit (&ictx, i, result); } immediate_unroll_unreachable_default (&ictx); LLVMValueRef result = immediate_unroll_end (&ictx, &cbb); values [ins->dreg] = convert (ctx, result, v128_i1_t); break; }
#endif
case OP_XCOMPARE_FP: { LLVMRealPredicate pred = fpcond_to_llvm_cond [ins->inst_c0]; LLVMValueRef cmp = LLVMBuildFCmp (builder, pred, lhs, rhs, ""); int nelems = LLVMGetVectorSize (LLVMTypeOf (cmp)); g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs)); if (ins->inst_c1 == MONO_TYPE_R8) values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt64Type (), nelems), ""), LLVMTypeOf (lhs), ""); else values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt32Type (), nelems), ""), LLVMTypeOf (lhs), ""); break; } case OP_XCOMPARE: { LLVMIntPredicate pred = cond_to_llvm_cond [ins->inst_c0]; LLVMValueRef cmp = LLVMBuildICmp (builder, pred, lhs, rhs, ""); g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs)); values [ins->dreg] = LLVMBuildSExt (builder, cmp, LLVMTypeOf (lhs), ""); break; } case OP_POPCNT32: values [ins->dreg] = call_intrins (ctx, INTRINS_CTPOP_I32, &lhs, ""); break; case 
OP_POPCNT64: values [ins->dreg] = call_intrins (ctx, INTRINS_CTPOP_I64, &lhs, ""); break; case OP_CTTZ32: case OP_CTTZ64: { LLVMValueRef args [2]; args [0] = lhs; args [1] = LLVMConstInt (LLVMInt1Type (), 0, FALSE); values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_CTTZ32 ? INTRINS_CTTZ_I32 : INTRINS_CTTZ_I64, args, ""); break; } case OP_BMI1_BEXTR32: case OP_BMI1_BEXTR64: { LLVMValueRef args [2]; args [0] = lhs; args [1] = convert (ctx, rhs, ins->opcode == OP_BMI1_BEXTR32 ? i4_t : i8_t); // cast ushort to u32/u64
values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_BMI1_BEXTR32 ? INTRINS_BEXTR_I32 : INTRINS_BEXTR_I64, args, ""); break; } case OP_BZHI32: case OP_BZHI64: { LLVMValueRef args [2]; args [0] = lhs; args [1] = rhs; values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_BZHI32 ? INTRINS_BZHI_I32 : INTRINS_BZHI_I64, args, ""); break; } case OP_MULX_H32: case OP_MULX_H64: case OP_MULX_HL32: case OP_MULX_HL64: { gboolean is_64 = ins->opcode == OP_MULX_H64 || ins->opcode == OP_MULX_HL64; gboolean only_high = ins->opcode == OP_MULX_H32 || ins->opcode == OP_MULX_H64; LLVMValueRef lx = LLVMBuildZExt (ctx->builder, lhs, LLVMInt128Type (), ""); LLVMValueRef rx = LLVMBuildZExt (ctx->builder, rhs, LLVMInt128Type (), ""); LLVMValueRef mulx = LLVMBuildMul (ctx->builder, lx, rx, ""); if (!only_high) { LLVMValueRef addr = convert (ctx, arg3, LLVMPointerType (is_64 ? i8_t : i4_t, 0)); LLVMValueRef lowx = LLVMBuildTrunc (ctx->builder, mulx, is_64 ? LLVMInt64Type () : LLVMInt32Type (), ""); LLVMBuildStore (ctx->builder, lowx, addr); } LLVMValueRef shift = LLVMConstInt (LLVMInt128Type (), is_64 ? 64 : 32, FALSE); LLVMValueRef highx = LLVMBuildLShr (ctx->builder, mulx, shift, ""); values [ins->dreg] = LLVMBuildTrunc (ctx->builder, highx, is_64 ? LLVMInt64Type () : LLVMInt32Type (), ""); break; } case OP_PEXT32: case OP_PEXT64: { LLVMValueRef args [2]; args [0] = lhs; args [1] = rhs; values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_PEXT32 ? INTRINS_PEXT_I32 : INTRINS_PEXT_I64, args, ""); break; } case OP_PDEP32: case OP_PDEP64: { LLVMValueRef args [2]; args [0] = lhs; args [1] = rhs; values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_PDEP32 ? INTRINS_PDEP_I32 : INTRINS_PDEP_I64, args, ""); break; }
#endif /* defined(TARGET_X86) || defined(TARGET_AMD64) */
// Shared between ARM64 and X86
#if defined(TARGET_ARM64) || defined(TARGET_X86) || defined(TARGET_AMD64)
case OP_LZCNT32: case OP_LZCNT64: { IntrinsicId iid = ins->opcode == OP_LZCNT32 ? INTRINS_CTLZ_I32 : INTRINS_CTLZ_I64; LLVMValueRef args [] = { lhs, const_int1 (FALSE) }; values [ins->dreg] = call_intrins (ctx, iid, args, ""); break; }
#endif
#if defined(TARGET_ARM64) || defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_WASM)
case OP_XEQUAL: { LLVMTypeRef t; LLVMValueRef cmp, mask [MAX_VECTOR_ELEMS], shuffle; int nelems;
#if defined(TARGET_WASM)
/* The wasm code generator doesn't understand the shuffle/and code sequence below */ LLVMValueRef val; if (LLVMIsNull (lhs) || LLVMIsNull (rhs)) { val = LLVMIsNull (lhs) ? 
rhs : lhs; nelems = LLVMGetVectorSize (LLVMTypeOf (lhs)); IntrinsicId intrins = (IntrinsicId)0; switch (nelems) { case 16: intrins = INTRINS_WASM_ANYTRUE_V16; break; case 8: intrins = INTRINS_WASM_ANYTRUE_V8; break; case 4: intrins = INTRINS_WASM_ANYTRUE_V4; break; case 2: intrins = INTRINS_WASM_ANYTRUE_V2; break; default: g_assert_not_reached (); } /* res = !wasm.anytrue (val) */ values [ins->dreg] = call_intrins (ctx, intrins, &val, ""); values [ins->dreg] = LLVMBuildZExt (builder, LLVMBuildICmp (builder, LLVMIntEQ, values [ins->dreg], LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""), LLVMInt32Type (), dname); break; }
#endif
LLVMTypeRef srcelemt = LLVMGetElementType (LLVMTypeOf (lhs)); // %c = icmp eq <16 x i8> %a0, %a1 (fcmp oeq for float element types)
if (srcelemt == LLVMDoubleType () || srcelemt == LLVMFloatType ()) cmp = LLVMBuildFCmp (builder, LLVMRealOEQ, lhs, rhs, ""); else cmp = LLVMBuildICmp (builder, LLVMIntEQ, lhs, rhs, ""); nelems = LLVMGetVectorSize (LLVMTypeOf (cmp)); LLVMTypeRef elemt; if (srcelemt == LLVMDoubleType ()) elemt = LLVMInt64Type (); else if (srcelemt == LLVMFloatType ()) elemt = LLVMInt32Type (); else elemt = srcelemt; t = LLVMVectorType (elemt, nelems); cmp = LLVMBuildSExt (builder, cmp, t, ""); // cmp is a <nelems x elemt> vector, each element is either 0xff... or 0
int half = nelems / 2; while (half >= 1) { // AND the top and bottom halves into the bottom half
for (int i = 0; i < half; ++i) mask [i] = LLVMConstInt (LLVMInt32Type (), half + i, FALSE); for (int i = half; i < nelems; ++i) mask [i] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); shuffle = LLVMBuildShuffleVector (builder, cmp, LLVMGetUndef (t), LLVMConstVector (mask, LLVMGetVectorSize (t)), ""); cmp = LLVMBuildAnd (builder, cmp, shuffle, ""); half = half / 2; } // Extract [0]
LLVMValueRef first_elem = LLVMBuildExtractElement (builder, cmp, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); // convert to 0/1
LLVMValueRef cmp_zero = LLVMBuildICmp (builder, LLVMIntNE, first_elem, LLVMConstInt (elemt, 0, FALSE), ""); values [ins->dreg] = LLVMBuildZExt (builder, cmp_zero, LLVMInt8Type (), ""); break; }
#endif
#if defined(TARGET_ARM64)
case OP_XOP_I4_I4: case OP_XOP_I8_I8: { IntrinsicId id = (IntrinsicId)ins->inst_c0; values [ins->dreg] = call_intrins (ctx, id, &lhs, ""); break; } case OP_XOP_X_X_X: case OP_XOP_I4_I4_I4: case OP_XOP_I4_I4_I8: { IntrinsicId id = (IntrinsicId)ins->inst_c0; gboolean zext_last = FALSE, bitcast_result = FALSE, getElement = FALSE; int element_idx = -1; switch (id) { case INTRINS_AARCH64_PMULL64: getElement = TRUE; bitcast_result = TRUE; element_idx = ins->inst_c1; break; case INTRINS_AARCH64_CRC32B: case INTRINS_AARCH64_CRC32H: case INTRINS_AARCH64_CRC32W: case INTRINS_AARCH64_CRC32CB: case INTRINS_AARCH64_CRC32CH: case INTRINS_AARCH64_CRC32CW: zext_last = TRUE; break; default: break; } LLVMValueRef arg1 = rhs; if (zext_last) arg1 = LLVMBuildZExt (ctx->builder, arg1, LLVMInt32Type (), ""); LLVMValueRef args [] = { lhs, arg1 }; if (getElement) { args [0] = LLVMBuildExtractElement (ctx->builder, args [0], const_int32 (element_idx), ""); args [1] = LLVMBuildExtractElement (ctx->builder, args [1], const_int32 (element_idx), ""); } values [ins->dreg] = call_intrins (ctx, id, args, ""); if (bitcast_result) values [ins->dreg] = convert (ctx, values [ins->dreg], LLVMVectorType (LLVMInt64Type (), 2)); break; } case OP_XOP_X_X_X_X: { IntrinsicId id = (IntrinsicId)ins->inst_c0; gboolean getLowerElement = FALSE; int arg_idx = -1; switch (id) { case INTRINS_AARCH64_SHA1C: case INTRINS_AARCH64_SHA1M: case INTRINS_AARCH64_SHA1P: 
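/* The sha1c/sha1m/sha1p intrinsics take the rotated hash element as a scalar i32,
 * so the corresponding vector argument is narrowed to its lowest element below. */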
getLowerElement = TRUE; arg_idx = 1; break; default: break; } LLVMValueRef args [] = { lhs, rhs, arg3 }; if (getLowerElement) args [arg_idx] = LLVMBuildExtractElement (ctx->builder, args [arg_idx], const_int32 (0), ""); values [ins->dreg] = call_intrins (ctx, id, args, ""); break; } case OP_XOP_X_X: { IntrinsicId id = (IntrinsicId)ins->inst_c0; LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); gboolean getLowerElement = FALSE; switch (id) { case INTRINS_AARCH64_SHA1H: getLowerElement = TRUE; break; default: break; } LLVMValueRef arg0 = lhs; if (getLowerElement) arg0 = LLVMBuildExtractElement (ctx->builder, arg0, const_int32 (0), ""); LLVMValueRef result = call_intrins (ctx, id, &arg0, ""); if (getLowerElement) result = vector_from_scalar (ctx, ret_t, result); values [ins->dreg] = result; break; } case OP_XCOMPARE_FP_SCALAR: case OP_XCOMPARE_FP: { g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs)); gboolean scalar = ins->opcode == OP_XCOMPARE_FP_SCALAR; LLVMRealPredicate pred = fpcond_to_llvm_cond [ins->inst_c0]; LLVMTypeRef ret_t = LLVMTypeOf (lhs); LLVMTypeRef reti_t = to_integral_vector_type (ret_t); LLVMValueRef args [] = { lhs, rhs }; if (scalar) for (int i = 0; i < 2; ++i) args [i] = scalar_from_vector (ctx, args [i]); LLVMValueRef result = LLVMBuildFCmp (builder, pred, args [0], args [1], "xcompare_fp"); if (scalar) result = vector_from_scalar (ctx, LLVMVectorType (LLVMIntType (1), LLVMGetVectorSize (reti_t)), result); result = LLVMBuildSExt (builder, result, reti_t, ""); result = LLVMBuildBitCast (builder, result, ret_t, ""); values [ins->dreg] = result; break; } case OP_XCOMPARE_SCALAR: case OP_XCOMPARE: { g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs)); gboolean scalar = ins->opcode == OP_XCOMPARE_SCALAR; LLVMIntPredicate pred = cond_to_llvm_cond [ins->inst_c0]; LLVMTypeRef ret_t = LLVMTypeOf (lhs); LLVMValueRef args [] = { lhs, rhs }; if (scalar) for (int i = 0; i < 2; ++i) args [i] = scalar_from_vector (ctx, args [i]); LLVMValueRef result = LLVMBuildICmp (builder, pred, args [0], args [1], "xcompare"); if (scalar) result = vector_from_scalar (ctx, LLVMVectorType (LLVMIntType (1), LLVMGetVectorSize (ret_t)), result); values [ins->dreg] = LLVMBuildSExt (builder, result, ret_t, ""); break; } case OP_ARM64_EXT: { LLVMTypeRef ret_t = LLVMTypeOf (lhs); unsigned int elems = LLVMGetVectorSize (ret_t); g_assert (elems <= ARM64_MAX_VECTOR_ELEMS); LLVMValueRef index = arg3; LLVMValueRef default_value = lhs; ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, elems, index, ret_t, "arm64_ext"); int i = 0; while (immediate_unroll_next (&ictx, &i)) { LLVMValueRef mask = create_const_vector_i32 (&mask_0_incr_1 [i], elems); LLVMValueRef result = LLVMBuildShuffleVector (builder, lhs, rhs, mask, "arm64_ext"); immediate_unroll_commit (&ictx, i, result); } immediate_unroll_default (&ictx); immediate_unroll_commit_default (&ictx, default_value); values [ins->dreg] = immediate_unroll_end (&ictx, &cbb); break; } case OP_ARM64_MVN: { LLVMTypeRef ret_t = LLVMTypeOf (lhs); LLVMValueRef result = bitcast_to_integral (ctx, lhs); result = LLVMBuildNot (builder, result, "arm64_mvn"); result = convert (ctx, result, ret_t); values [ins->dreg] = result; break; } case OP_ARM64_BIC: { LLVMTypeRef ret_t = LLVMTypeOf (lhs); LLVMValueRef result = bitcast_to_integral (ctx, lhs); LLVMValueRef mask = bitcast_to_integral (ctx, rhs); mask = LLVMBuildNot (builder, mask, ""); result = LLVMBuildAnd (builder, mask, result, "arm64_bic"); result = convert (ctx, result, ret_t); values [ins->dreg] = result; break; 
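/* (bic is the and-not identity, lhs & ~rhs, so plain IR not/and suffice here.) */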
} case OP_ARM64_BSL: { LLVMTypeRef ret_t = LLVMTypeOf (rhs); LLVMValueRef select = bitcast_to_integral (ctx, lhs); LLVMValueRef left = bitcast_to_integral (ctx, rhs); LLVMValueRef right = bitcast_to_integral (ctx, arg3); LLVMValueRef result1 = LLVMBuildAnd (builder, select, left, "arm64_bsl"); LLVMValueRef result2 = LLVMBuildAnd (builder, LLVMBuildNot (builder, select, ""), right, ""); LLVMValueRef result = LLVMBuildOr (builder, result1, result2, ""); result = convert (ctx, result, ret_t); values [ins->dreg] = result; break; } case OP_ARM64_CMTST: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMValueRef l = bitcast_to_integral (ctx, lhs); LLVMValueRef r = bitcast_to_integral (ctx, rhs); LLVMValueRef result = LLVMBuildAnd (builder, l, r, "arm64_cmtst"); LLVMTypeRef t = LLVMTypeOf (l); result = LLVMBuildICmp (builder, LLVMIntNE, result, LLVMConstNull (t), ""); result = LLVMBuildSExt (builder, result, t, ""); result = convert (ctx, result, ret_t); values [ins->dreg] = result; break; } case OP_ARM64_FCVTL: case OP_ARM64_FCVTL2: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); gboolean high = ins->opcode == OP_ARM64_FCVTL2; LLVMValueRef result = lhs; if (high) result = extract_high_elements (ctx, result); result = LLVMBuildFPExt (builder, result, ret_t, "arm64_fcvtl"); values [ins->dreg] = result; break; } case OP_ARM64_FCVTXN: case OP_ARM64_FCVTXN2: case OP_ARM64_FCVTN: case OP_ARM64_FCVTN2: { gboolean high = FALSE; int iid = 0; switch (ins->opcode) { case OP_ARM64_FCVTXN2: high = TRUE; case OP_ARM64_FCVTXN: iid = INTRINS_AARCH64_ADV_SIMD_FCVTXN; break; case OP_ARM64_FCVTN2: high = TRUE; break; } LLVMValueRef result = lhs; if (high) result = rhs; if (iid) result = call_intrins (ctx, iid, &result, ""); else result = LLVMBuildFPTrunc (builder, result, v64_r4_t, ""); if (high) result = concatenate_vectors (ctx, lhs, result); values [ins->dreg] = result; break; } case OP_ARM64_UCVTF: case OP_ARM64_SCVTF: case OP_ARM64_UCVTF_SCALAR: case OP_ARM64_SCVTF_SCALAR: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); gboolean scalar = FALSE; gboolean is_unsigned = FALSE; switch (ins->opcode) { case OP_ARM64_UCVTF_SCALAR: scalar = TRUE; case OP_ARM64_UCVTF: is_unsigned = TRUE; break; case OP_ARM64_SCVTF_SCALAR: scalar = TRUE; break; } LLVMValueRef result = lhs; LLVMTypeRef cvt_t = ret_t; if (scalar) { result = scalar_from_vector (ctx, result); cvt_t = LLVMGetElementType (ret_t); } if (is_unsigned) result = LLVMBuildUIToFP (builder, result, cvt_t, "arm64_ucvtf"); else result = LLVMBuildSIToFP (builder, result, cvt_t, "arm64_scvtf"); if (scalar) result = vector_from_scalar (ctx, ret_t, result); values [ins->dreg] = result; break; } case OP_ARM64_FCVTZS: case OP_ARM64_FCVTZS_SCALAR: case OP_ARM64_FCVTZU: case OP_ARM64_FCVTZU_SCALAR: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); gboolean scalar = FALSE; gboolean is_unsigned = FALSE; switch (ins->opcode) { case OP_ARM64_FCVTZU_SCALAR: scalar = TRUE; case OP_ARM64_FCVTZU: is_unsigned = TRUE; break; case OP_ARM64_FCVTZS_SCALAR: scalar = TRUE; break; } LLVMValueRef result = lhs; LLVMTypeRef cvt_t = ret_t; if (scalar) { result = scalar_from_vector (ctx, result); cvt_t = LLVMGetElementType (ret_t); } if (is_unsigned) result = LLVMBuildFPToUI (builder, result, cvt_t, "arm64_fcvtzu"); else result = LLVMBuildFPToSI (builder, result, cvt_t, "arm64_fcvtzs"); if (scalar) result = vector_from_scalar (ctx, ret_t, result); values [ins->dreg] = result; break; } case OP_ARM64_SELECT_SCALAR: { LLVMValueRef 
result = LLVMBuildExtractElement (builder, lhs, rhs, ""); LLVMTypeRef elem_t = LLVMTypeOf (result); unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t); LLVMTypeRef t = LLVMVectorType (elem_t, 64 / elem_bits); result = vector_from_scalar (ctx, t, result); values [ins->dreg] = result; break; } case OP_ARM64_SELECT_QUAD: { LLVMTypeRef src_type = simd_class_to_llvm_type (ctx, ins->data.op [1].klass); LLVMTypeRef ret_type = simd_class_to_llvm_type (ctx, ins->klass); unsigned int src_type_bits = mono_llvm_get_prim_size_bits (src_type); unsigned int ret_type_bits = mono_llvm_get_prim_size_bits (ret_type); unsigned int src_intermediate_elems = src_type_bits / 32; unsigned int ret_intermediate_elems = ret_type_bits / 32; LLVMTypeRef intermediate_type = LLVMVectorType (i4_t, src_intermediate_elems); LLVMValueRef result = LLVMBuildBitCast (builder, lhs, intermediate_type, "arm64_select_quad"); result = LLVMBuildExtractElement (builder, result, rhs, "arm64_select_quad"); result = broadcast_element (ctx, result, ret_intermediate_elems); result = LLVMBuildBitCast (builder, result, ret_type, "arm64_select_quad"); values [ins->dreg] = result; break; } case OP_LSCNT32: case OP_LSCNT64: {
// %shr = ashr i32 %x, 31
// %xor = xor i32 %shr, %x
// %mul = shl i32 %xor, 1
// %add = or i32 %mul, 1
// %0 = tail call i32 @llvm.ctlz.i32(i32 %add, i1 false)
LLVMValueRef shr = LLVMBuildAShr (builder, lhs, ins->opcode == OP_LSCNT32 ? LLVMConstInt (LLVMInt32Type (), 31, FALSE) : LLVMConstInt (LLVMInt64Type (), 63, FALSE), ""); LLVMValueRef one = ins->opcode == OP_LSCNT32 ? LLVMConstInt (LLVMInt32Type (), 1, FALSE) : LLVMConstInt (LLVMInt64Type (), 1, FALSE); LLVMValueRef xor = LLVMBuildXor (builder, shr, lhs, ""); LLVMValueRef mul = LLVMBuildShl (builder, xor, one, ""); LLVMValueRef add = LLVMBuildOr (builder, mul, one, ""); LLVMValueRef args [2]; args [0] = add; args [1] = LLVMConstInt (LLVMInt1Type (), 0, FALSE); values [ins->dreg] = LLVMBuildCall (builder, get_intrins (ctx, ins->opcode == OP_LSCNT32 ? INTRINS_CTLZ_I32 : INTRINS_CTLZ_I64), args, 2, ""); break; } case OP_ARM64_SQRDMLAH: case OP_ARM64_SQRDMLAH_BYSCALAR: case OP_ARM64_SQRDMLAH_SCALAR: case OP_ARM64_SQRDMLSH: case OP_ARM64_SQRDMLSH_BYSCALAR: case OP_ARM64_SQRDMLSH_SCALAR: { gboolean byscalar = FALSE; gboolean scalar = FALSE; gboolean subtract = FALSE; switch (ins->opcode) { case OP_ARM64_SQRDMLAH_BYSCALAR: byscalar = TRUE; break; case OP_ARM64_SQRDMLAH_SCALAR: scalar = TRUE; break; case OP_ARM64_SQRDMLSH: subtract = TRUE; break; case OP_ARM64_SQRDMLSH_BYSCALAR: subtract = TRUE; byscalar = TRUE; break; case OP_ARM64_SQRDMLSH_SCALAR: subtract = TRUE; scalar = TRUE; break; } int acc_iid = subtract ? 
INTRINS_AARCH64_ADV_SIMD_SQSUB : INTRINS_AARCH64_ADV_SIMD_SQADD; LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (ret_t); ScalarOpFromVectorOpCtx sctx = scalar_op_from_vector_op (ctx, ret_t, ins); LLVMValueRef args [] = { lhs, rhs, arg3 }; if (byscalar) { unsigned int elems = LLVMGetVectorSize (ret_t); args [2] = broadcast_element (ctx, scalar_from_vector (ctx, args [2]), elems); } if (scalar) { ovr_tag = sctx.ovr_tag; scalar_op_from_vector_op_process_args (&sctx, args, 3); } LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_SQRDMULH, ovr_tag, &args [1], "arm64_sqrdmlxh"); args [1] = result; result = call_overloaded_intrins (ctx, acc_iid, ovr_tag, &args [0], "arm64_sqrdmlxh"); if (scalar) result = scalar_op_from_vector_op_process_result (&sctx, result); values [ins->dreg] = result; break; } case OP_ARM64_SMULH: case OP_ARM64_UMULH: { LLVMValueRef op1, op2; if (ins->opcode == OP_ARM64_SMULH) { op1 = LLVMBuildSExt (builder, lhs, LLVMInt128Type (), ""); op2 = LLVMBuildSExt (builder, rhs, LLVMInt128Type (), ""); } else { op1 = LLVMBuildZExt (builder, lhs, LLVMInt128Type (), ""); op2 = LLVMBuildZExt (builder, rhs, LLVMInt128Type (), ""); } LLVMValueRef mul = LLVMBuildMul (builder, op1, op2, ""); LLVMValueRef hi64 = LLVMBuildLShr (builder, mul, LLVMConstInt (LLVMInt128Type (), 64, FALSE), ""); values [ins->dreg] = LLVMBuildTrunc (builder, hi64, LLVMInt64Type (), ""); break; } case OP_ARM64_XNARROW_SCALAR: { // Unfortunately, @llvm.aarch64.neon.scalar.sqxtun isn't available for i8 or i16.
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (ret_t); LLVMTypeRef elem_t = LLVMGetElementType (ret_t); LLVMValueRef result = NULL; int iid = ins->inst_c0; int scalar_iid = 0; switch (iid) { case INTRINS_AARCH64_ADV_SIMD_SQXTUN: scalar_iid = INTRINS_AARCH64_ADV_SIMD_SCALAR_SQXTUN; break; case INTRINS_AARCH64_ADV_SIMD_SQXTN: scalar_iid = INTRINS_AARCH64_ADV_SIMD_SCALAR_SQXTN; break; case INTRINS_AARCH64_ADV_SIMD_UQXTN: scalar_iid = INTRINS_AARCH64_ADV_SIMD_SCALAR_UQXTN; break; default: g_assert_not_reached (); } if (elem_t == i4_t) { LLVMValueRef arg = scalar_from_vector (ctx, lhs); result = call_intrins (ctx, scalar_iid, &arg, "arm64_xnarrow_scalar"); result = vector_from_scalar (ctx, ret_t, result); } else { LLVMTypeRef arg_t = LLVMTypeOf (lhs); LLVMTypeRef argelem_t = LLVMGetElementType (arg_t); unsigned int argelems = LLVMGetVectorSize (arg_t); LLVMValueRef arg = keep_lowest_element (ctx, LLVMVectorType (argelem_t, argelems * 2), lhs); result = call_overloaded_intrins (ctx, iid, ovr_tag, &arg, "arm64_xnarrow_scalar"); result = keep_lowest_element (ctx, LLVMTypeOf (result), result); } values [ins->dreg] = result; break; } case OP_ARM64_SQXTUN2: case OP_ARM64_UQXTN2: case OP_ARM64_SQXTN2: case OP_ARM64_XTN: case OP_ARM64_XTN2: { llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); gboolean high = FALSE; int iid = 0; switch (ins->opcode) { case OP_ARM64_SQXTUN2: high = TRUE; iid = INTRINS_AARCH64_ADV_SIMD_SQXTUN; break; case OP_ARM64_UQXTN2: high = TRUE; iid = INTRINS_AARCH64_ADV_SIMD_UQXTN; break; case OP_ARM64_SQXTN2: high = TRUE; iid = INTRINS_AARCH64_ADV_SIMD_SQXTN; break; case OP_ARM64_XTN2: high = TRUE; break; } LLVMValueRef result = lhs; if (high) { result = rhs; ovr_tag = ovr_tag_smaller_vector (ovr_tag); } LLVMTypeRef t = LLVMTypeOf (result); LLVMTypeRef elem_t = LLVMGetElementType (t); unsigned int elems = LLVMGetVectorSize (t); 
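/* xtn narrows each lane to half width (a plain trunc unless a saturating intrinsic
 * was chosen above); the *2 ("high") forms then append the narrowed lanes onto the
 * low half already held in lhs. */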
unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t); LLVMTypeRef result_t = LLVMVectorType (LLVMIntType (elem_bits / 2), elems); if (iid != 0) result = call_overloaded_intrins (ctx, iid, ovr_tag, &result, ""); else result = LLVMBuildTrunc (builder, result, result_t, "arm64_xtn"); if (high) result = concatenate_vectors (ctx, lhs, result); values [ins->dreg] = result; break; } case OP_ARM64_CLZ: { llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); LLVMValueRef args [] = { lhs, const_int1 (0) }; LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_CLZ, ovr_tag, args, ""); values [ins->dreg] = result; break; } case OP_ARM64_FMSUB: case OP_ARM64_FMSUB_BYSCALAR: case OP_ARM64_FMSUB_SCALAR: case OP_ARM64_FNMSUB_SCALAR: case OP_ARM64_FMADD: case OP_ARM64_FMADD_BYSCALAR: case OP_ARM64_FMADD_SCALAR: case OP_ARM64_FNMADD_SCALAR: { llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); gboolean scalar = FALSE; gboolean negate = FALSE; gboolean subtract = FALSE; gboolean byscalar = FALSE; switch (ins->opcode) { case OP_ARM64_FMSUB: subtract = TRUE; break; case OP_ARM64_FMSUB_BYSCALAR: subtract = TRUE; byscalar = TRUE; break; case OP_ARM64_FMSUB_SCALAR: subtract = TRUE; scalar = TRUE; break; case OP_ARM64_FNMSUB_SCALAR: subtract = TRUE; scalar = TRUE; negate = TRUE; break; case OP_ARM64_FMADD: break; case OP_ARM64_FMADD_BYSCALAR: byscalar = TRUE; break; case OP_ARM64_FMADD_SCALAR: scalar = TRUE; break; case OP_ARM64_FNMADD_SCALAR: scalar = TRUE; negate = TRUE; break; } // llvm.fma argument order: mulop1, mulop2, addend
LLVMValueRef args [] = { rhs, arg3, lhs }; if (byscalar) { unsigned int elems = LLVMGetVectorSize (LLVMTypeOf (args [0])); args [1] = broadcast_element (ctx, scalar_from_vector (ctx, args [1]), elems); } if (scalar) { ovr_tag = ovr_tag_force_scalar (ovr_tag); for (int i = 0; i < 3; ++i) args [i] = scalar_from_vector (ctx, args [i]); } if (subtract) args [0] = LLVMBuildFNeg (builder, args [0], "arm64_fma_sub"); if (negate) { args [0] = LLVMBuildFNeg (builder, args [0], "arm64_fma_negate"); args [2] = LLVMBuildFNeg (builder, args [2], "arm64_fma_negate"); } LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_FMA, ovr_tag, args, "arm64_fma"); if (scalar) result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result); values [ins->dreg] = result; break; } case OP_ARM64_SQDMULL: case OP_ARM64_SQDMULL_BYSCALAR: case OP_ARM64_SQDMULL2: case OP_ARM64_SQDMULL2_BYSCALAR: case OP_ARM64_SQDMLAL: case OP_ARM64_SQDMLAL_BYSCALAR: case OP_ARM64_SQDMLAL2: case OP_ARM64_SQDMLAL2_BYSCALAR: case OP_ARM64_SQDMLSL: case OP_ARM64_SQDMLSL_BYSCALAR: case OP_ARM64_SQDMLSL2: case OP_ARM64_SQDMLSL2_BYSCALAR: { llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); gboolean scalar = FALSE; gboolean add = FALSE; gboolean subtract = FALSE; gboolean high = FALSE; switch (ins->opcode) { case OP_ARM64_SQDMULL_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMULL: break; case OP_ARM64_SQDMULL2_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMULL2: high = TRUE; break; case OP_ARM64_SQDMLAL_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLAL: add = TRUE; break; case OP_ARM64_SQDMLAL2_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLAL2: high = TRUE; add = TRUE; break; case OP_ARM64_SQDMLSL_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLSL: subtract = TRUE; break; case OP_ARM64_SQDMLSL2_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLSL2: high = TRUE; subtract = TRUE; break; } int iid = 0; if (add) iid = INTRINS_AARCH64_ADV_SIMD_SQADD; else if 
(subtract) iid = INTRINS_AARCH64_ADV_SIMD_SQSUB; LLVMValueRef mul1 = lhs; LLVMValueRef mul2 = rhs; if (iid != 0) { mul1 = rhs; mul2 = arg3; } if (scalar) { LLVMTypeRef t = LLVMTypeOf (mul1); unsigned int elems = LLVMGetVectorSize (t); mul2 = broadcast_element (ctx, scalar_from_vector (ctx, mul2), elems); } LLVMValueRef args [] = { mul1, mul2 }; if (high) for (int i = 0; i < 2; ++i) args [i] = extract_high_elements (ctx, args [i]); LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_SQDMULL, ovr_tag, args, ""); LLVMValueRef args2 [] = { lhs, result }; if (iid != 0) result = call_overloaded_intrins (ctx, iid, ovr_tag, args2, ""); values [ins->dreg] = result; break; } case OP_ARM64_SQDMULL_SCALAR: case OP_ARM64_SQDMLAL_SCALAR: case OP_ARM64_SQDMLSL_SCALAR: { /* * define dso_local i32 @__vqdmlslh_lane_s16(i32, i16, <4 x i16>, i32) local_unnamed_addr #0 { * %5 = insertelement <4 x i16> undef, i16 %1, i64 0 * %6 = shufflevector <4 x i16> %2, <4 x i16> undef, <4 x i32> <i32 3, i32 undef, i32 undef, i32 undef> * %7 = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %5, <4 x i16> %6) * %8 = extractelement <4 x i32> %7, i64 0 * %9 = tail call i32 @llvm.aarch64.neon.sqsub.i32(i32 %0, i32 %8) * ret i32 %9 * } * * define dso_local i64 @__vqdmlals_s32(i64, i32, i32) local_unnamed_addr #0 { * %4 = tail call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %1, i32 %2) #2 * %5 = tail call i64 @llvm.aarch64.neon.sqadd.i64(i64 %0, i64 %4) #2 * ret i64 %5 * } */ int mulid = INTRINS_AARCH64_ADV_SIMD_SQDMULL; int iid = 0; gboolean scalar_mul_result = FALSE; gboolean scalar_acc_result = FALSE; switch (ins->opcode) { case OP_ARM64_SQDMLAL_SCALAR: iid = INTRINS_AARCH64_ADV_SIMD_SQADD; break; case OP_ARM64_SQDMLSL_SCALAR: iid = INTRINS_AARCH64_ADV_SIMD_SQSUB; break; } LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMValueRef mularg = lhs; LLVMValueRef selected_scalar = rhs; if (iid != 0) { mularg = rhs; selected_scalar = arg3; } llvm_ovr_tag_t multag = ovr_tag_smaller_elements (ovr_tag_from_llvm_type (ret_t)); llvm_ovr_tag_t iidtag = ovr_tag_force_scalar (ovr_tag_from_llvm_type (ret_t)); LLVMTypeRef mularg_t = ovr_tag_to_llvm_type (multag); if (multag & INTRIN_int32) { /* The (i32, i32) -> i64 variant of aarch64_neon_sqdmull has * a unique, non-overloaded name. */ mulid = INTRINS_AARCH64_ADV_SIMD_SQDMULL_SCALAR; multag = 0; iidtag = INTRIN_int64 | INTRIN_scalar; scalar_mul_result = TRUE; scalar_acc_result = TRUE; } else if (multag & INTRIN_int16) { /* We were passed a (<4 x i16>, <4 x i16>) but the * widening multiplication intrinsic will yield a <4 x i32>. 
*/ multag = INTRIN_int32 | INTRIN_vector128; } else g_assert_not_reached (); if (scalar_mul_result) { mularg = scalar_from_vector (ctx, mularg); selected_scalar = scalar_from_vector (ctx, selected_scalar); } else { mularg = keep_lowest_element (ctx, mularg_t, mularg); selected_scalar = keep_lowest_element (ctx, mularg_t, selected_scalar); } LLVMValueRef mulargs [] = { mularg, selected_scalar }; LLVMValueRef result = call_overloaded_intrins (ctx, mulid, multag, mulargs, "arm64_sqdmull_scalar"); if (iid != 0) { LLVMValueRef acc = scalar_from_vector (ctx, lhs); if (!scalar_mul_result) result = scalar_from_vector (ctx, result); LLVMValueRef subargs [] = { acc, result }; result = call_overloaded_intrins (ctx, iid, iidtag, subargs, "arm64_sqdmlxl_scalar"); scalar_acc_result = TRUE; } if (scalar_acc_result) result = vector_from_scalar (ctx, ret_t, result); else result = keep_lowest_element (ctx, ret_t, result); values [ins->dreg] = result; break; } case OP_ARM64_FMUL_SEL: { LLVMValueRef mul2 = LLVMBuildExtractElement (builder, rhs, arg3, ""); LLVMValueRef mul1 = scalar_from_vector (ctx, lhs); LLVMValueRef result = LLVMBuildFMul (builder, mul1, mul2, "arm64_fmul_sel"); result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result); values [ins->dreg] = result; break; } case OP_ARM64_MLA: case OP_ARM64_MLA_SCALAR: case OP_ARM64_MLS: case OP_ARM64_MLS_SCALAR: { gboolean scalar = FALSE; gboolean add = FALSE; switch (ins->opcode) { case OP_ARM64_MLA_SCALAR: scalar = TRUE; case OP_ARM64_MLA: add = TRUE; break; case OP_ARM64_MLS_SCALAR: scalar = TRUE; case OP_ARM64_MLS: break; } LLVMTypeRef mul_t = LLVMTypeOf (rhs); unsigned int elems = LLVMGetVectorSize (mul_t); LLVMValueRef mul2 = arg3; if (scalar) mul2 = broadcast_element (ctx, scalar_from_vector (ctx, mul2), elems); LLVMValueRef result = LLVMBuildMul (builder, rhs, mul2, ""); if (add) result = LLVMBuildAdd (builder, lhs, result, ""); else result = LLVMBuildSub (builder, lhs, result, ""); values [ins->dreg] = result; break; } case OP_ARM64_SMULL: case OP_ARM64_SMULL_SCALAR: case OP_ARM64_SMULL2: case OP_ARM64_SMULL2_SCALAR: case OP_ARM64_UMULL: case OP_ARM64_UMULL_SCALAR: case OP_ARM64_UMULL2: case OP_ARM64_UMULL2_SCALAR: case OP_ARM64_SMLAL: case OP_ARM64_SMLAL_SCALAR: case OP_ARM64_SMLAL2: case OP_ARM64_SMLAL2_SCALAR: case OP_ARM64_UMLAL: case OP_ARM64_UMLAL_SCALAR: case OP_ARM64_UMLAL2: case OP_ARM64_UMLAL2_SCALAR: case OP_ARM64_SMLSL: case OP_ARM64_SMLSL_SCALAR: case OP_ARM64_SMLSL2: case OP_ARM64_SMLSL2_SCALAR: case OP_ARM64_UMLSL: case OP_ARM64_UMLSL_SCALAR: case OP_ARM64_UMLSL2: case OP_ARM64_UMLSL2_SCALAR: { llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); gboolean is_unsigned = FALSE; gboolean high = FALSE; gboolean add = FALSE; gboolean subtract = FALSE; gboolean scalar = FALSE; int opcode = ins->opcode; switch (opcode) { case OP_ARM64_SMULL_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMULL; break; case OP_ARM64_UMULL_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMULL; break; case OP_ARM64_SMLAL_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLAL; break; case OP_ARM64_UMLAL_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMLAL; break; case OP_ARM64_SMLSL_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLSL; break; case OP_ARM64_UMLSL_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMLSL; break; case OP_ARM64_SMULL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMULL2; break; case OP_ARM64_UMULL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMULL2; break; case OP_ARM64_SMLAL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLAL2; break; case OP_ARM64_UMLAL2_SCALAR: 
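/* This first switch folds the _SCALAR variants onto their vector opcodes; the
 * second switch below then derives the signedness/high-half/accumulate flags. */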
scalar = TRUE; opcode = OP_ARM64_UMLAL2; break; case OP_ARM64_SMLSL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLSL2; break; case OP_ARM64_UMLSL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMLSL2; break; } switch (opcode) { case OP_ARM64_SMULL2: high = TRUE; case OP_ARM64_SMULL: break; case OP_ARM64_UMULL2: high = TRUE; case OP_ARM64_UMULL: is_unsigned = TRUE; break; case OP_ARM64_SMLAL2: high = TRUE; case OP_ARM64_SMLAL: add = TRUE; break; case OP_ARM64_UMLAL2: high = TRUE; case OP_ARM64_UMLAL: add = TRUE; is_unsigned = TRUE; break; case OP_ARM64_SMLSL2: high = TRUE; case OP_ARM64_SMLSL: subtract = TRUE; break; case OP_ARM64_UMLSL2: high = TRUE; case OP_ARM64_UMLSL: subtract = TRUE; is_unsigned = TRUE; break; } int iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UMULL : INTRINS_AARCH64_ADV_SIMD_SMULL; LLVMValueRef intrin_args [] = { lhs, rhs }; if (add || subtract) { intrin_args [0] = rhs; intrin_args [1] = arg3; } if (scalar) { LLVMValueRef sarg = intrin_args [1]; LLVMTypeRef t = LLVMTypeOf (intrin_args [0]); unsigned int elems = LLVMGetVectorSize (t); sarg = broadcast_element (ctx, scalar_from_vector (ctx, sarg), elems); intrin_args [1] = sarg; } if (high) for (int i = 0; i < 2; ++i) intrin_args [i] = extract_high_elements (ctx, intrin_args [i]); LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, ""); if (add) result = LLVMBuildAdd (builder, lhs, result, ""); if (subtract) result = LLVMBuildSub (builder, lhs, result, ""); values [ins->dreg] = result; break; } case OP_ARM64_XNEG: case OP_ARM64_XNEG_SCALAR: { gboolean scalar = ins->opcode == OP_ARM64_XNEG_SCALAR; gboolean is_float = FALSE; switch (inst_c1_type (ins)) { case MONO_TYPE_R4: case MONO_TYPE_R8: is_float = TRUE; } LLVMValueRef result = lhs; if (scalar) result = scalar_from_vector (ctx, result); if (is_float) result = LLVMBuildFNeg (builder, result, "arm64_xneg"); else result = LLVMBuildNeg (builder, result, "arm64_xneg"); if (scalar) result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result); values [ins->dreg] = result; break; } case OP_ARM64_PMULL: case OP_ARM64_PMULL2: { gboolean high = ins->opcode == OP_ARM64_PMULL2; LLVMValueRef args [] = { lhs, rhs }; if (high) for (int i = 0; i < 2; ++i) args [i] = extract_high_elements (ctx, args [i]); LLVMValueRef result = call_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_PMULL, args, "arm64_pmull"); values [ins->dreg] = result; break; } case OP_ARM64_REVN: { LLVMTypeRef t = LLVMTypeOf (lhs); LLVMTypeRef elem_t = LLVMGetElementType (t); unsigned int group_bits = mono_llvm_get_prim_size_bits (elem_t); unsigned int vec_bits = mono_llvm_get_prim_size_bits (t); unsigned int tmp_bits = ins->inst_c0; unsigned int tmp_elements = vec_bits / tmp_bits; const int cycle8 [] = { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 }; const int cycle4 [] = { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }; const int cycle2 [] = { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 }; const int *cycle = NULL; switch (group_bits / tmp_bits) { case 2: cycle = cycle2; break; case 4: cycle = cycle4; break; case 8: cycle = cycle8; break; default: g_assert_not_reached (); } g_assert (tmp_elements <= ARM64_MAX_VECTOR_ELEMS); LLVMTypeRef tmp_t = LLVMVectorType (LLVMIntType (tmp_bits), tmp_elements); LLVMValueRef tmp = LLVMBuildBitCast (builder, lhs, tmp_t, "arm64_revn"); LLVMValueRef result = LLVMBuildShuffleVector (builder, tmp, LLVMGetUndef (tmp_t), create_const_vector_i32 (cycle, tmp_elements), ""); result = LLVMBuildBitCast (builder, result, t, ""); values [ins->dreg] = result; 
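/* In effect: a widening smull/umull over the (optionally high) halves, with an
 * ordinary vector add/sub folding the product into lhs for the mlal/mlsl forms. */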
break; } case OP_ARM64_SHL: case OP_ARM64_SSHR: case OP_ARM64_SSRA: case OP_ARM64_USHR: case OP_ARM64_USRA: { gboolean right = FALSE; gboolean add = FALSE; gboolean arith = FALSE; switch (ins->opcode) { case OP_ARM64_USHR: right = TRUE; break; case OP_ARM64_USRA: right = TRUE; add = TRUE; break; case OP_ARM64_SSHR: arith = TRUE; break; case OP_ARM64_SSRA: arith = TRUE; add = TRUE; break; } LLVMValueRef shiftarg = lhs; LLVMValueRef shift = rhs; if (add) { shiftarg = rhs; shift = arg3; } shift = create_shift_vector (ctx, shiftarg, shift); LLVMValueRef result = NULL; if (right) result = LLVMBuildLShr (builder, shiftarg, shift, ""); else if (arith) result = LLVMBuildAShr (builder, shiftarg, shift, ""); else result = LLVMBuildShl (builder, shiftarg, shift, ""); if (add) result = LLVMBuildAdd (builder, lhs, result, "arm64_usra"); values [ins->dreg] = result; break; } case OP_ARM64_SHRN: case OP_ARM64_SHRN2: { LLVMValueRef shiftarg = lhs; LLVMValueRef shift = rhs; gboolean high = ins->opcode == OP_ARM64_SHRN2; if (high) { shiftarg = rhs; shift = arg3; } LLVMTypeRef arg_t = LLVMTypeOf (shiftarg); LLVMTypeRef elem_t = LLVMGetElementType (arg_t); unsigned int elems = LLVMGetVectorSize (arg_t); unsigned int bits = mono_llvm_get_prim_size_bits (elem_t); LLVMTypeRef trunc_t = LLVMVectorType (LLVMIntType (bits / 2), elems); shift = create_shift_vector (ctx, shiftarg, shift); LLVMValueRef result = LLVMBuildLShr (builder, shiftarg, shift, "shrn"); result = LLVMBuildTrunc (builder, result, trunc_t, ""); if (high) { result = concatenate_vectors (ctx, lhs, result); } values [ins->dreg] = result; break; } case OP_ARM64_SRSHR: case OP_ARM64_SRSRA: case OP_ARM64_URSHR: case OP_ARM64_URSRA: { llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); LLVMValueRef shiftarg = lhs; LLVMValueRef shift = rhs; gboolean right = FALSE; gboolean add = FALSE; switch (ins->opcode) { case OP_ARM64_URSRA: add = TRUE; case OP_ARM64_URSHR: right = TRUE; break; case OP_ARM64_SRSRA: add = TRUE; case OP_ARM64_SRSHR: right = TRUE; break; } int iid = 0; switch (ins->opcode) { case OP_ARM64_URSRA: case OP_ARM64_URSHR: iid = INTRINS_AARCH64_ADV_SIMD_URSHL; break; case OP_ARM64_SRSRA: case OP_ARM64_SRSHR: iid = INTRINS_AARCH64_ADV_SIMD_SRSHL; break; } if (add) { shiftarg = rhs; shift = arg3; } if (right) shift = LLVMBuildNeg (builder, shift, ""); shift = create_shift_vector (ctx, shiftarg, shift); LLVMValueRef args [] = { shiftarg, shift }; LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); if (add) result = LLVMBuildAdd (builder, result, lhs, ""); values [ins->dreg] = result; break; } case OP_ARM64_XNSHIFT_SCALAR: case OP_ARM64_XNSHIFT: case OP_ARM64_XNSHIFT2: { LLVMTypeRef intrin_result_t = simd_class_to_llvm_type (ctx, ins->klass); llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (intrin_result_t); LLVMValueRef shift_arg = lhs; LLVMValueRef shift_amount = rhs; gboolean high = FALSE; gboolean scalar = FALSE; int iid = ins->inst_c0; switch (ins->opcode) { case OP_ARM64_XNSHIFT_SCALAR: scalar = TRUE; break; case OP_ARM64_XNSHIFT2: high = TRUE; break; } if (high) { shift_arg = rhs; shift_amount = arg3; ovr_tag = ovr_tag_smaller_vector (ovr_tag); intrin_result_t = ovr_tag_to_llvm_type (ovr_tag); } LLVMTypeRef shift_arg_t = LLVMTypeOf (shift_arg); LLVMTypeRef shift_arg_elem_t = LLVMGetElementType (shift_arg_t); unsigned int element_bits = mono_llvm_get_prim_size_bits (shift_arg_elem_t); int range_min = 1; int range_max = element_bits / 2; if (scalar) { unsigned int elems = LLVMGetVectorSize 
(shift_arg_t); LLVMValueRef lo = scalar_from_vector (ctx, shift_arg); shift_arg = vector_from_scalar (ctx, LLVMVectorType (shift_arg_elem_t, elems * 2), lo); } int max_index = range_max - range_min + 1; ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, max_index, shift_amount, intrin_result_t, "arm64_xnshift"); int i = 0; while (immediate_unroll_next (&ictx, &i)) { int shift_const = i + range_min; LLVMValueRef intrin_args [] = { shift_arg, const_int32 (shift_const) }; LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, ""); immediate_unroll_commit (&ictx, shift_const, result); } { immediate_unroll_default (&ictx); LLVMValueRef intrin_args [] = { shift_arg, const_int32 (range_max) }; LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, ""); immediate_unroll_commit_default (&ictx, result); } LLVMValueRef result = immediate_unroll_end (&ictx, &cbb); if (high) result = concatenate_vectors (ctx, lhs, result); if (scalar) result = keep_lowest_element (ctx, LLVMTypeOf (result), result); values [ins->dreg] = result; break; } case OP_ARM64_SQSHLU: case OP_ARM64_SQSHLU_SCALAR: { gboolean scalar = ins->opcode == OP_ARM64_SQSHLU_SCALAR; LLVMTypeRef intrin_result_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMTypeRef elem_t = LLVMGetElementType (intrin_result_t); unsigned int element_bits = mono_llvm_get_prim_size_bits (elem_t); llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (intrin_result_t); int max_index = element_bits; ScalarOpFromVectorOpCtx sctx = scalar_op_from_vector_op (ctx, intrin_result_t, ins); intrin_result_t = scalar ? sctx.intermediate_type : intrin_result_t; ovr_tag = scalar ? sctx.ovr_tag : ovr_tag; ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, max_index, rhs, intrin_result_t, "arm64_sqshlu"); int i = 0; while (immediate_unroll_next (&ictx, &i)) { int shift_const = i; LLVMValueRef args [2] = { lhs, create_shift_vector (ctx, lhs, const_int32 (shift_const)) }; if (scalar) scalar_op_from_vector_op_process_args (&sctx, args, 2); LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_SQSHLU, ovr_tag, args, ""); immediate_unroll_commit (&ictx, shift_const, result); } { immediate_unroll_default (&ictx); LLVMValueRef srcarg = lhs; if (scalar) scalar_op_from_vector_op_process_args (&sctx, &srcarg, 1); immediate_unroll_commit_default (&ictx, srcarg); } LLVMValueRef result = immediate_unroll_end (&ictx, &cbb); if (scalar) result = scalar_op_from_vector_op_process_result (&sctx, result); values [ins->dreg] = result; break; } case OP_ARM64_SSHLL: case OP_ARM64_SSHLL2: case OP_ARM64_USHLL: case OP_ARM64_USHLL2: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); gboolean high = FALSE; gboolean is_unsigned = FALSE; switch (ins->opcode) { case OP_ARM64_SSHLL2: high = TRUE; break; case OP_ARM64_USHLL2: high = TRUE; case OP_ARM64_USHLL: is_unsigned = TRUE; break; } LLVMValueRef result = lhs; if (high) result = extract_high_elements (ctx, result); if (is_unsigned) result = LLVMBuildZExt (builder, result, ret_t, "arm64_ushll"); else result = LLVMBuildSExt (builder, result, ret_t, "arm64_ushll"); result = LLVMBuildShl (builder, result, create_shift_vector (ctx, result, rhs), ""); values [ins->dreg] = result; break; } case OP_ARM64_SLI: case OP_ARM64_SRI: { LLVMTypeRef intrin_result_t = simd_class_to_llvm_type (ctx, ins->klass); llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (intrin_result_t); unsigned int element_bits = mono_llvm_get_prim_size_bits (LLVMGetElementType (intrin_result_t)); int 
range_min = 0; int range_max = element_bits - 1; if (ins->opcode == OP_ARM64_SRI) { ++range_min; ++range_max; } int iid = ins->opcode == OP_ARM64_SRI ? INTRINS_AARCH64_ADV_SIMD_SRI : INTRINS_AARCH64_ADV_SIMD_SLI; int max_index = range_max - range_min + 1; ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, max_index, arg3, intrin_result_t, "arm64_ext"); LLVMValueRef intrin_args [3] = { lhs, rhs, arg3 }; int i = 0; while (immediate_unroll_next (&ictx, &i)) { int shift_const = i + range_min; intrin_args [2] = const_int32 (shift_const); LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, ""); immediate_unroll_commit (&ictx, shift_const, result); } immediate_unroll_default (&ictx); immediate_unroll_commit_default (&ictx, lhs); LLVMValueRef result = immediate_unroll_end (&ictx, &cbb); values [ins->dreg] = result; break; } case OP_ARM64_SQRT_SCALAR: { int iid = ins->inst_c0 == MONO_TYPE_R8 ? INTRINS_SQRT : INTRINS_SQRTF; LLVMTypeRef t = LLVMTypeOf (lhs); LLVMValueRef scalar = LLVMBuildExtractElement (builder, lhs, const_int32 (0), ""); LLVMValueRef result = call_intrins (ctx, iid, &scalar, "arm64_sqrt_scalar"); values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMGetUndef (t), result, const_int32 (0), ""); break; } case OP_ARM64_STP: case OP_ARM64_STP_SCALAR: case OP_ARM64_STNP: case OP_ARM64_STNP_SCALAR: { gboolean nontemporal = FALSE; gboolean scalar = FALSE; switch (ins->opcode) { case OP_ARM64_STNP: nontemporal = TRUE; break; case OP_ARM64_STNP_SCALAR: nontemporal = TRUE; scalar = TRUE; break; case OP_ARM64_STP_SCALAR: scalar = TRUE; break; } LLVMTypeRef rhs_t = LLVMTypeOf (rhs); LLVMValueRef val = NULL; LLVMTypeRef dst_t = LLVMPointerType (rhs_t, 0); if (scalar) val = LLVMBuildShuffleVector (builder, rhs, arg3, create_const_vector_2_i32 (0, 2), ""); else { unsigned int rhs_elems = LLVMGetVectorSize (rhs_t); LLVMTypeRef rhs_elt_t = LLVMGetElementType (rhs_t); dst_t = LLVMPointerType (LLVMVectorType (rhs_elt_t, rhs_elems * 2), 0); val = concatenate_vectors (ctx, rhs, arg3); } LLVMValueRef address = convert (ctx, lhs, dst_t); LLVMValueRef store = mono_llvm_build_store (builder, val, address, FALSE, LLVM_BARRIER_NONE); if (nontemporal) set_nontemporal_flag (store); break; } case OP_ARM64_LD1_INSERT: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMTypeRef elem_t = LLVMGetElementType (ret_t); LLVMValueRef address = convert (ctx, arg3, LLVMPointerType (elem_t, 0)); unsigned int alignment = mono_llvm_get_prim_size_bits (ret_t) / 8; LLVMValueRef result = mono_llvm_build_aligned_load (builder, address, "arm64_ld1_insert", FALSE, alignment); result = LLVMBuildInsertElement (builder, lhs, result, rhs, "arm64_ld1_insert"); values [ins->dreg] = result; break; } case OP_ARM64_LD1R: case OP_ARM64_LD1: { gboolean replicate = ins->opcode == OP_ARM64_LD1R; LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); unsigned int alignment = mono_llvm_get_prim_size_bits (ret_t) / 8; LLVMValueRef address = lhs; LLVMTypeRef address_t = LLVMPointerType (ret_t, 0); if (replicate) { LLVMTypeRef elem_t = LLVMGetElementType (ret_t); address_t = LLVMPointerType (elem_t, 0); } address = convert (ctx, address, address_t); LLVMValueRef result = mono_llvm_build_aligned_load (builder, address, "arm64_ld1", FALSE, alignment); if (replicate) { unsigned int elems = LLVMGetVectorSize (ret_t); result = broadcast_element (ctx, result, elems); } values [ins->dreg] = result; break; } case OP_ARM64_LDNP: case OP_ARM64_LDNP_SCALAR: case OP_ARM64_LDP: case 
OP_ARM64_LDP_SCALAR: { const char *oname = NULL; gboolean nontemporal = FALSE; gboolean scalar = FALSE; switch (ins->opcode) { case OP_ARM64_LDNP: oname = "arm64_ldnp"; nontemporal = TRUE; break; case OP_ARM64_LDNP_SCALAR: oname = "arm64_ldnp_scalar"; nontemporal = TRUE; scalar = TRUE; break; case OP_ARM64_LDP: oname = "arm64_ldp"; break; case OP_ARM64_LDP_SCALAR: oname = "arm64_ldp_scalar"; scalar = TRUE; break; } if (!addresses [ins->dreg]) addresses [ins->dreg] = build_named_alloca (ctx, m_class_get_byval_arg (ins->klass), oname); LLVMTypeRef ret_t = simd_valuetuple_to_llvm_type (ctx, ins->klass); LLVMTypeRef vec_t = LLVMGetElementType (ret_t); LLVMValueRef ix = const_int32 (1); LLVMTypeRef src_t = LLVMPointerType (scalar ? LLVMGetElementType (vec_t) : vec_t, 0); LLVMValueRef src0 = convert (ctx, lhs, src_t); LLVMValueRef src1 = LLVMBuildGEP (builder, src0, &ix, 1, oname); LLVMValueRef vals [] = { src0, src1 }; for (int i = 0; i < 2; ++i) { vals [i] = LLVMBuildLoad (builder, vals [i], oname); if (nontemporal) set_nontemporal_flag (vals [i]); } unsigned int vec_sz = mono_llvm_get_prim_size_bits (vec_t); if (scalar) { g_assert (vec_sz == 64); LLVMValueRef undef = LLVMGetUndef (vec_t); for (int i = 0; i < 2; ++i) vals [i] = LLVMBuildInsertElement (builder, undef, vals [i], const_int32 (0), oname); } LLVMValueRef val = LLVMGetUndef (ret_t); for (int i = 0; i < 2; ++i) val = LLVMBuildInsertValue (builder, val, vals [i], i, oname); LLVMTypeRef retptr_t = LLVMPointerType (ret_t, 0); LLVMValueRef dst = convert (ctx, addresses [ins->dreg], retptr_t); LLVMBuildStore (builder, val, dst); values [ins->dreg] = vec_sz == 64 ? val : NULL; break; } case OP_ARM64_ST1: { LLVMTypeRef t = LLVMTypeOf (rhs); LLVMValueRef address = convert (ctx, lhs, LLVMPointerType (t, 0)); unsigned int alignment = mono_llvm_get_prim_size_bits (t) / 8; mono_llvm_build_aligned_store (builder, rhs, address, FALSE, alignment); break; } case OP_ARM64_ST1_SCALAR: { LLVMTypeRef t = LLVMGetElementType (LLVMTypeOf (rhs)); LLVMValueRef val = LLVMBuildExtractElement (builder, rhs, arg3, "arm64_st1_scalar"); LLVMValueRef address = convert (ctx, lhs, LLVMPointerType (t, 0)); unsigned int alignment = mono_llvm_get_prim_size_bits (t) / 8; mono_llvm_build_aligned_store (builder, val, address, FALSE, alignment); break; } case OP_ARM64_ADDHN: case OP_ARM64_ADDHN2: case OP_ARM64_SUBHN: case OP_ARM64_SUBHN2: case OP_ARM64_RADDHN: case OP_ARM64_RADDHN2: case OP_ARM64_RSUBHN: case OP_ARM64_RSUBHN2: { LLVMValueRef args [2] = { lhs, rhs }; gboolean high = FALSE; gboolean subtract = FALSE; int iid = 0; switch (ins->opcode) { case OP_ARM64_ADDHN2: high = TRUE; case OP_ARM64_ADDHN: break; case OP_ARM64_SUBHN2: high = TRUE; case OP_ARM64_SUBHN: subtract = TRUE; break; case OP_ARM64_RSUBHN2: high = TRUE; case OP_ARM64_RSUBHN: iid = INTRINS_AARCH64_ADV_SIMD_RSUBHN; break; case OP_ARM64_RADDHN2: high = TRUE; case OP_ARM64_RADDHN: iid = INTRINS_AARCH64_ADV_SIMD_RADDHN; break; } llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); if (high) { args [0] = rhs; args [1] = arg3; ovr_tag = ovr_tag_smaller_vector (ovr_tag); } LLVMValueRef result = NULL; if (iid != 0) result = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); else { LLVMTypeRef t = LLVMTypeOf (args [0]); LLVMTypeRef elt_t = LLVMGetElementType (t); unsigned int elems = LLVMGetVectorSize (t); unsigned int elem_bits = mono_llvm_get_prim_size_bits (elt_t); if (subtract) result = LLVMBuildSub (builder, args [0], args [1], ""); else result = LLVMBuildAdd (builder, args [0], args 
[1], ""); result = LLVMBuildLShr (builder, result, broadcast_constant (elem_bits / 2, elt_t, elems), ""); result = LLVMBuildTrunc (builder, result, LLVMVectorType (LLVMIntType (elem_bits / 2), elems), ""); } if (high) result = concatenate_vectors (ctx, lhs, result); values [ins->dreg] = result; break; } case OP_ARM64_SADD: case OP_ARM64_UADD: case OP_ARM64_SADD2: case OP_ARM64_UADD2: case OP_ARM64_SSUB: case OP_ARM64_USUB: case OP_ARM64_SSUB2: case OP_ARM64_USUB2: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); gboolean is_unsigned = FALSE; gboolean high = FALSE; gboolean subtract = FALSE; switch (ins->opcode) { case OP_ARM64_SADD2: high = TRUE; case OP_ARM64_SADD: break; case OP_ARM64_UADD2: high = TRUE; case OP_ARM64_UADD: is_unsigned = TRUE; break; case OP_ARM64_SSUB2: high = TRUE; case OP_ARM64_SSUB: subtract = TRUE; break; case OP_ARM64_USUB2: high = TRUE; case OP_ARM64_USUB: subtract = TRUE; is_unsigned = TRUE; break; } LLVMValueRef args [] = { lhs, rhs }; for (int i = 0; i < 2; ++i) { LLVMValueRef arg = args [i]; LLVMTypeRef arg_t = LLVMTypeOf (arg); if (high && arg_t != ret_t) arg = extract_high_elements (ctx, arg); if (is_unsigned) arg = LLVMBuildZExt (builder, arg, ret_t, ""); else arg = LLVMBuildSExt (builder, arg, ret_t, ""); args [i] = arg; } LLVMValueRef result = NULL; if (subtract) result = LLVMBuildSub (builder, args [0], args [1], "arm64_sub"); else result = LLVMBuildAdd (builder, args [0], args [1], "arm64_add"); values [ins->dreg] = result; break; } case OP_ARM64_SABAL: case OP_ARM64_SABAL2: case OP_ARM64_UABAL: case OP_ARM64_UABAL2: case OP_ARM64_SABDL: case OP_ARM64_SABDL2: case OP_ARM64_UABDL: case OP_ARM64_UABDL2: case OP_ARM64_SABA: case OP_ARM64_UABA: case OP_ARM64_SABD: case OP_ARM64_UABD: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); gboolean is_unsigned = FALSE; gboolean high = FALSE; gboolean add = FALSE; gboolean widen = FALSE; switch (ins->opcode) { case OP_ARM64_SABAL2: high = TRUE; case OP_ARM64_SABAL: widen = TRUE; add = TRUE; break; case OP_ARM64_UABAL2: high = TRUE; case OP_ARM64_UABAL: widen = TRUE; add = TRUE; is_unsigned = TRUE; break; case OP_ARM64_SABDL2: high = TRUE; case OP_ARM64_SABDL: widen = TRUE; break; case OP_ARM64_UABDL2: high = TRUE; case OP_ARM64_UABDL: widen = TRUE; is_unsigned = TRUE; break; case OP_ARM64_SABA: add = TRUE; break; case OP_ARM64_UABA: add = TRUE; is_unsigned = TRUE; break; case OP_ARM64_UABD: is_unsigned = TRUE; break; } LLVMValueRef args [] = { lhs, rhs }; if (add) { args [0] = rhs; args [1] = arg3; } if (high) for (int i = 0; i < 2; ++i) args [i] = extract_high_elements (ctx, args [i]); int iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UABD : INTRINS_AARCH64_ADV_SIMD_SABD; llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (LLVMTypeOf (args [0])); LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); if (widen) result = LLVMBuildZExt (builder, result, ret_t, ""); if (add) result = LLVMBuildAdd (builder, result, lhs, ""); values [ins->dreg] = result; break; } case OP_ARM64_XHORIZ: { gboolean truncate = FALSE; LLVMTypeRef arg_t = LLVMTypeOf (lhs); LLVMTypeRef elem_t = LLVMGetElementType (arg_t); LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (arg_t); if (elem_t == i1_t || elem_t == i2_t) truncate = TRUE; LLVMValueRef result = call_overloaded_intrins (ctx, ins->inst_c0, ovr_tag, &lhs, ""); if (truncate) { // @llvm.aarch64.neon.saddv.i32.v8i16 ought to return an i16, but doesn't in LLVM 9. 
result = LLVMBuildTrunc (builder, result, elem_t, ""); } result = vector_from_scalar (ctx, ret_t, result); values [ins->dreg] = result; break; } case OP_ARM64_SADDLV: case OP_ARM64_UADDLV: { LLVMTypeRef arg_t = LLVMTypeOf (lhs); LLVMTypeRef elem_t = LLVMGetElementType (arg_t); LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (arg_t); gboolean truncate = elem_t == i1_t; int iid = ins->opcode == OP_ARM64_UADDLV ? INTRINS_AARCH64_ADV_SIMD_UADDLV : INTRINS_AARCH64_ADV_SIMD_SADDLV; LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, &lhs, ""); if (truncate) { // @llvm.aarch64.neon.saddlv.i32.v16i8 ought to return an i16, but doesn't in LLVM 9. result = LLVMBuildTrunc (builder, result, i2_t, ""); } result = vector_from_scalar (ctx, ret_t, result); values [ins->dreg] = result; break; } case OP_ARM64_UADALP: case OP_ARM64_SADALP: { llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); int iid = ins->opcode == OP_ARM64_UADALP ? INTRINS_AARCH64_ADV_SIMD_UADDLP : INTRINS_AARCH64_ADV_SIMD_SADDLP; LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, &rhs, ""); result = LLVMBuildAdd (builder, result, lhs, ""); values [ins->dreg] = result; break; } case OP_ARM64_ADDP_SCALAR: { llvm_ovr_tag_t ovr_tag = INTRIN_vector128 | INTRIN_int64; LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_UADDV, ovr_tag, &lhs, "arm64_addp_scalar"); result = LLVMBuildInsertElement (builder, LLVMConstNull (v64_i8_t), result, const_int32 (0), ""); values [ins->dreg] = result; break; } case OP_ARM64_FADDP_SCALAR: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMValueRef hi = LLVMBuildExtractElement (builder, lhs, const_int32 (0), ""); LLVMValueRef lo = LLVMBuildExtractElement (builder, lhs, const_int32 (1), ""); LLVMValueRef result = LLVMBuildFAdd (builder, hi, lo, "arm64_faddp_scalar"); result = LLVMBuildInsertElement (builder, LLVMConstNull (ret_t), result, const_int32 (0), ""); values [ins->dreg] = result; break; } case OP_ARM64_SXTL: case OP_ARM64_SXTL2: case OP_ARM64_UXTL: case OP_ARM64_UXTL2: { gboolean high = FALSE; gboolean is_unsigned = FALSE; switch (ins->opcode) { case OP_ARM64_SXTL2: high = TRUE; break; case OP_ARM64_UXTL2: high = TRUE; case OP_ARM64_UXTL: is_unsigned = TRUE; break; } LLVMTypeRef t = LLVMTypeOf (lhs); unsigned int elem_bits = LLVMGetIntTypeWidth (LLVMGetElementType (t)); unsigned int src_elems = LLVMGetVectorSize (t); unsigned int dst_elems = src_elems; LLVMValueRef arg = lhs; if (high) { arg = extract_high_elements (ctx, lhs); dst_elems = LLVMGetVectorSize (LLVMTypeOf (arg)); } LLVMTypeRef result_t = LLVMVectorType (LLVMIntType (elem_bits * 2), dst_elems); LLVMValueRef result = NULL; if (is_unsigned) result = LLVMBuildZExt (builder, arg, result_t, "arm64_uxtl"); else result = LLVMBuildSExt (builder, arg, result_t, "arm64_sxtl"); values [ins->dreg] = result; break; } case OP_ARM64_TRN1: case OP_ARM64_TRN2: { gboolean high = ins->opcode == OP_ARM64_TRN2; LLVMTypeRef t = LLVMTypeOf (lhs); unsigned int src_elems = LLVMGetVectorSize (t); int mask [MAX_VECTOR_ELEMS] = { 0 }; int laneix = high ? 
1 : 0; for (unsigned int i = 0; i < src_elems; i += 2) { mask [i] = laneix; mask [i + 1] = laneix + src_elems; laneix += 2; } values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_i32 (mask, src_elems), "arm64_uzp"); break; } case OP_ARM64_UZP1: case OP_ARM64_UZP2: { gboolean high = ins->opcode == OP_ARM64_UZP2; LLVMTypeRef t = LLVMTypeOf (lhs); unsigned int src_elems = LLVMGetVectorSize (t); int mask [MAX_VECTOR_ELEMS] = { 0 }; int laneix = high ? 1 : 0; for (unsigned int i = 0; i < src_elems; ++i) { mask [i] = laneix; laneix += 2; } values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_i32 (mask, src_elems), "arm64_uzp"); break; } case OP_ARM64_ZIP1: case OP_ARM64_ZIP2: { gboolean high = ins->opcode == OP_ARM64_ZIP2; LLVMTypeRef t = LLVMTypeOf (lhs); unsigned int src_elems = LLVMGetVectorSize (t); int mask [MAX_VECTOR_ELEMS] = { 0 }; int laneix = high ? src_elems / 2 : 0; for (unsigned int i = 0; i < src_elems; i += 2) { mask [i] = laneix; mask [i + 1] = laneix + src_elems; ++laneix; } values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_i32 (mask, src_elems), "arm64_zip"); break; } case OP_ARM64_ABSCOMPARE: { IntrinsicId iid = (IntrinsicId) ins->inst_c0; gboolean scalar = ins->inst_c1; LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMTypeRef elem_t = LLVMGetElementType (ret_t); llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); ovr_tag = ovr_tag_corresponding_integer (ovr_tag); LLVMValueRef args [] = { lhs, rhs }; LLVMTypeRef result_t = ret_t; if (scalar) { ovr_tag = ovr_tag_force_scalar (ovr_tag); result_t = elem_t; for (int i = 0; i < 2; ++i) args [i] = scalar_from_vector (ctx, args [i]); } LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); result = LLVMBuildBitCast (builder, result, result_t, ""); if (scalar) result = vector_from_scalar (ctx, ret_t, result); values [ins->dreg] = result; break; } case OP_XOP_OVR_X_X: { IntrinsicId iid = (IntrinsicId) ins->inst_c0; llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, &lhs, ""); break; } case OP_XOP_OVR_X_X_X: { IntrinsicId iid = (IntrinsicId) ins->inst_c0; llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); LLVMValueRef args [] = { lhs, rhs }; values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); break; } case OP_XOP_OVR_X_X_X_X: { IntrinsicId iid = (IntrinsicId) ins->inst_c0; llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); LLVMValueRef args [] = { lhs, rhs, arg3 }; values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); break; } case OP_XOP_OVR_BYSCALAR_X_X_X: { IntrinsicId iid = (IntrinsicId) ins->inst_c0; llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); LLVMTypeRef t = LLVMTypeOf (lhs); unsigned int elems = LLVMGetVectorSize (t); LLVMValueRef arg2 = broadcast_element (ctx, scalar_from_vector (ctx, rhs), elems); LLVMValueRef args [] = { lhs, arg2 }; values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); break; } case OP_XOP_OVR_SCALAR_X_X: case OP_XOP_OVR_SCALAR_X_X_X: case OP_XOP_OVR_SCALAR_X_X_X_X: { int num_args = 0; IntrinsicId iid = (IntrinsicId) ins->inst_c0; LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); switch (ins->opcode) { case OP_XOP_OVR_SCALAR_X_X: num_args = 1; break; case OP_XOP_OVR_SCALAR_X_X_X: num_args = 2; break; case OP_XOP_OVR_SCALAR_X_X_X_X: 
num_args = 3; break; } /* LLVM 9 NEON intrinsic functions have scalar overloads. Unfortunately * only overloads for 32 and 64-bit integers and floating point types are * supported. 8 and 16-bit integers are unsupported, and will fail during * instruction selection. This is worked around by using a vector * operation and then explicitly clearing the upper bits of the register. */ ScalarOpFromVectorOpCtx sctx = scalar_op_from_vector_op (ctx, ret_t, ins); LLVMValueRef args [3] = { lhs, rhs, arg3 }; scalar_op_from_vector_op_process_args (&sctx, args, num_args); LLVMValueRef result = call_overloaded_intrins (ctx, iid, sctx.ovr_tag, args, ""); result = scalar_op_from_vector_op_process_result (&sctx, result); values [ins->dreg] = result; break; } #endif case OP_DUMMY_USE: break; /* * EXCEPTION HANDLING */ case OP_IMPLICIT_EXCEPTION: /* This marks a place where an implicit exception can happen */ if (bb->region != -1) set_failure (ctx, "implicit-exception"); break; case OP_THROW: case OP_RETHROW: { gboolean rethrow = (ins->opcode == OP_RETHROW); if (ctx->llvm_only) { emit_llvmonly_throw (ctx, bb, rethrow, lhs); has_terminator = TRUE; ctx->unreachable [bb->block_num] = TRUE; } else { emit_throw (ctx, bb, rethrow, lhs); builder = ctx->builder; } break; } case OP_CALL_HANDLER: { /* * We don't 'call' handlers, but instead simply branch to them. * The code generated by ENDFINALLY will branch back to us. */ LLVMBasicBlockRef noex_bb; GSList *bb_list; BBInfo *info = &bblocks [ins->inst_target_bb->block_num]; bb_list = info->call_handler_return_bbs; /* * Set the indicator variable for the finally clause. */ lhs = info->finally_ind; g_assert (lhs); LLVMBuildStore (builder, LLVMConstInt (LLVMInt32Type (), g_slist_length (bb_list) + 1, FALSE), lhs); /* Branch to the finally clause */ LLVMBuildBr (builder, info->call_handler_target_bb); noex_bb = gen_bb (ctx, "CALL_HANDLER_CONT_BB"); info->call_handler_return_bbs = g_slist_append_mempool (cfg->mempool, info->call_handler_return_bbs, noex_bb); builder = ctx->builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, noex_bb); bblocks [bb->block_num].end_bblock = noex_bb; break; } case OP_START_HANDLER: { break; } case OP_ENDFINALLY: { LLVMBasicBlockRef resume_bb; MonoBasicBlock *handler_bb; LLVMValueRef val, switch_ins, callee; GSList *bb_list; BBInfo *info; gboolean is_fault = MONO_REGION_FLAGS (bb->region) == MONO_EXCEPTION_CLAUSE_FAULT; /* * Fault clauses are like finally clauses, but they are only called if an exception is thrown. */ if (!is_fault) { handler_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->region_to_handler, GUINT_TO_POINTER (mono_get_block_region_notry (cfg, bb->region))); g_assert (handler_bb); info = &bblocks [handler_bb->block_num]; lhs = info->finally_ind; g_assert (lhs); bb_list = info->call_handler_return_bbs; resume_bb = gen_bb (ctx, "ENDFINALLY_RESUME_BB"); /* Load the finally variable */ val = LLVMBuildLoad (builder, lhs, ""); /* Reset the variable */ LLVMBuildStore (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), lhs); /* Branch to either resume_bb, or to the bblocks in bb_list */ switch_ins = LLVMBuildSwitch (builder, val, resume_bb, g_slist_length (bb_list)); /* * The other targets are added at the end to handle OP_CALL_HANDLER * opcodes processed later. 
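 * As a sketch (value names here are illustrative, not the generated ones),
 * the dispatch built here ends up as:
 *   switch i32 %finally_ind, label %ENDFINALLY_RESUME_BB [
 *     i32 1, label %CALL_HANDLER_CONT_BB.1
 *     i32 2, label %CALL_HANDLER_CONT_BB.2 ]
 * with one case per OP_CALL_HANDLER continuation bblock, wired up by the
 * ENDFINALLY fixup loop in emit_method_inner.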
*/ info->endfinally_switch_ins_list = g_slist_append_mempool (cfg->mempool, info->endfinally_switch_ins_list, switch_ins); builder = ctx->builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, resume_bb); } if (ctx->llvm_only) { if (!cfg->deopt) { emit_resume_eh (ctx, bb); } else { /* Not needed */ LLVMBuildUnreachable (builder); } } else { LLVMTypeRef icall_sig = LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE); if (ctx->cfg->compile_aot) { callee = get_callee (ctx, icall_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_llvm_resume_unwind_trampoline)); } else { callee = get_jit_callee (ctx, "llvm_resume_unwind_trampoline", icall_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_llvm_resume_unwind_trampoline)); } LLVMBuildCall (builder, callee, NULL, 0, ""); LLVMBuildUnreachable (builder); } has_terminator = TRUE; break; } case OP_ENDFILTER: { g_assert (cfg->llvm_only && cfg->deopt); LLVMBuildUnreachable (builder); has_terminator = TRUE; break; } case OP_IL_SEQ_POINT: break; default: { char reason [128]; sprintf (reason, "opcode %s", mono_inst_name (ins->opcode)); set_failure (ctx, reason); break; } } if (!ctx_ok (ctx)) break; /* Convert the value to the type required by phi nodes */ if (spec [MONO_INST_DEST] != ' ' && !MONO_IS_STORE_MEMBASE (ins) && ctx->vreg_types [ins->dreg]) { if (ctx->is_vphi [ins->dreg]) /* vtypes */ values [ins->dreg] = addresses [ins->dreg]; else values [ins->dreg] = convert (ctx, values [ins->dreg], ctx->vreg_types [ins->dreg]); } /* Add stores for volatile/ref variables */ if (spec [MONO_INST_DEST] != ' ' && spec [MONO_INST_DEST] != 'v' && !MONO_IS_STORE_MEMBASE (ins)) { if (!skip_volatile_store) emit_volatile_store (ctx, ins->dreg); #ifdef TARGET_WASM if (vreg_is_ref (cfg, ins->dreg) && ctx->values [ins->dreg]) emit_gc_pin (ctx, builder, ins->dreg); #endif } } if (!ctx_ok (ctx)) return; if (!has_terminator && bb->next_bb && (bb == cfg->bb_entry || bb->in_count > 0)) { LLVMBuildBr (builder, get_bb (ctx, bb->next_bb)); } if (bb == cfg->bb_exit && sig->ret->type == MONO_TYPE_VOID) { emit_dbg_loc (ctx, builder, cfg->header->code + cfg->header->code_size - 1); LLVMBuildRetVoid (builder); } if (bb == cfg->bb_entry) ctx->last_alloca = LLVMGetLastInstruction (get_bb (ctx, cfg->bb_entry)); } /* * mono_llvm_check_method_supported: * * Do some quick checks to decide whether cfg->method can be compiled by LLVM, to avoid * compiling a method twice. */ void mono_llvm_check_method_supported (MonoCompile *cfg) { int i, j; #ifdef TARGET_WASM if (mono_method_signature_internal (cfg->method)->call_convention == MONO_CALL_VARARG) { cfg->exception_message = g_strdup ("vararg callconv"); cfg->disable_llvm = TRUE; return; } #endif if (cfg->llvm_only) return; if (cfg->method->save_lmf) { cfg->exception_message = g_strdup ("lmf"); cfg->disable_llvm = TRUE; } if (cfg->disable_llvm) return; /* * Nested clauses where one of the clauses is a finally clause are * not supported, because LLVM can't figure out the control flow, * probably because we resume exception handling by calling our * own function instead of using the 'resume' llvm instruction. */ for (i = 0; i < cfg->header->num_clauses; ++i) { for (j = 0; j < cfg->header->num_clauses; ++j) { MonoExceptionClause *clause1 = &cfg->header->clauses [i]; MonoExceptionClause *clause2 = &cfg->header->clauses [j]; // FIXME: Nested try clauses fail in some cases too, e.g.
#37273 if (i != j && clause1->try_offset >= clause2->try_offset && clause1->handler_offset <= clause2->handler_offset) { //(clause1->flags == MONO_EXCEPTION_CLAUSE_FINALLY || clause2->flags == MONO_EXCEPTION_CLAUSE_FINALLY)) { cfg->exception_message = g_strdup ("nested clauses"); cfg->disable_llvm = TRUE; break; } } } if (cfg->disable_llvm) return; /* FIXME: */ if (cfg->method->dynamic) { cfg->exception_message = g_strdup ("dynamic."); cfg->disable_llvm = TRUE; } if (cfg->disable_llvm) return; } static LLVMCallInfo* get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig) { LLVMCallInfo *linfo; int i; if (cfg->gsharedvt && cfg->llvm_only && mini_is_gsharedvt_variable_signature (sig)) { int i, n, pindex; /* * Gsharedvt methods have the following calling convention: * - all arguments are passed by ref, even non generic ones * - the return value is returned by ref too, using a vret * argument passed after 'this'. */ n = sig->param_count + sig->hasthis; linfo = (LLVMCallInfo*)mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n)); pindex = 0; if (sig->hasthis) linfo->args [pindex ++].storage = LLVMArgNormal; if (sig->ret->type != MONO_TYPE_VOID) { if (mini_is_gsharedvt_variable_type (sig->ret)) linfo->ret.storage = LLVMArgGsharedvtVariable; else if (mini_type_is_vtype (sig->ret)) linfo->ret.storage = LLVMArgGsharedvtFixedVtype; else linfo->ret.storage = LLVMArgGsharedvtFixed; linfo->vret_arg_index = pindex; } else { linfo->ret.storage = LLVMArgNone; } for (i = 0; i < sig->param_count; ++i) { if (m_type_is_byref (sig->params [i])) linfo->args [pindex].storage = LLVMArgNormal; else if (mini_is_gsharedvt_variable_type (sig->params [i])) linfo->args [pindex].storage = LLVMArgGsharedvtVariable; else if (mini_type_is_vtype (sig->params [i])) linfo->args [pindex].storage = LLVMArgGsharedvtFixedVtype; else linfo->args [pindex].storage = LLVMArgGsharedvtFixed; linfo->args [pindex].type = sig->params [i]; pindex ++; } return linfo; } linfo = mono_arch_get_llvm_call_info (cfg, sig); linfo->dummy_arg_pindex = -1; for (i = 0; i < sig->param_count; ++i) linfo->args [i + sig->hasthis].type = sig->params [i]; return linfo; } static void emit_method_inner (EmitContext *ctx); static void free_ctx (EmitContext *ctx) { GSList *l; g_free (ctx->values); g_free (ctx->addresses); g_free (ctx->vreg_types); g_free (ctx->is_vphi); g_free (ctx->vreg_cli_types); g_free (ctx->is_dead); g_free (ctx->unreachable); g_free (ctx->gc_var_indexes); g_ptr_array_free (ctx->phi_values, TRUE); g_free (ctx->bblocks); g_hash_table_destroy (ctx->region_to_handler); g_hash_table_destroy (ctx->clause_to_handler); g_hash_table_destroy (ctx->jit_callees); g_ptr_array_free (ctx->callsite_list, TRUE); g_free (ctx->method_name); g_ptr_array_free (ctx->bblock_list, TRUE); for (l = ctx->builders; l; l = l->next) { LLVMBuilderRef builder = (LLVMBuilderRef)l->data; LLVMDisposeBuilder (builder); } g_free (ctx); } static gboolean is_linkonce_method (MonoMethod *method) { #ifdef TARGET_WASM /* * Under wasm, linkonce works, so use it instead of the dedup pass for wrappers at least. * FIXME: Use for everything, i.e. can_dedup (). * FIXME: Fails System.Core tests * -> amodule->sorted_methods contains duplicates, screwing up jit tables. 
*/ // FIXME: This works, but the aot data for the methods is still kept, so size still increases #if 0 if (method->wrapper_type == MONO_WRAPPER_OTHER) { WrapperInfo *info = mono_marshal_get_wrapper_info (method); if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG) return TRUE; } #endif #endif return FALSE; } /* * mono_llvm_emit_method: * * Emit LLVM IR from the mono IL, and compile it to native code using LLVM. */ void mono_llvm_emit_method (MonoCompile *cfg) { EmitContext *ctx; char *method_name; gboolean is_linkonce = FALSE; int i; if (cfg->skip) return; /* The code below might acquire the loader lock, so use it for global locking */ mono_loader_lock (); ctx = g_new0 (EmitContext, 1); ctx->cfg = cfg; ctx->mempool = cfg->mempool; /* * This maps vregs to the LLVM instruction defining them */ ctx->values = g_new0 (LLVMValueRef, cfg->next_vreg); /* * This maps vregs for volatile variables to the LLVM instruction defining their * address. */ ctx->addresses = g_new0 (LLVMValueRef, cfg->next_vreg); ctx->vreg_types = g_new0 (LLVMTypeRef, cfg->next_vreg); ctx->is_vphi = g_new0 (gboolean, cfg->next_vreg); ctx->vreg_cli_types = g_new0 (MonoType*, cfg->next_vreg); ctx->phi_values = g_ptr_array_sized_new (256); /* * This signals whether the vreg was defined by a phi node with no input vars * (i.e. all its input bblocks end with NOT_REACHABLE). */ ctx->is_dead = g_new0 (gboolean, cfg->next_vreg); /* Whether the bblock is unreachable */ ctx->unreachable = g_new0 (gboolean, cfg->max_block_num); ctx->bblock_list = g_ptr_array_sized_new (256); ctx->region_to_handler = g_hash_table_new (NULL, NULL); ctx->clause_to_handler = g_hash_table_new (NULL, NULL); ctx->callsite_list = g_ptr_array_new (); ctx->jit_callees = g_hash_table_new (NULL, NULL); if (cfg->compile_aot) { ctx->module = &aot_module; /* * Allow the linker to discard duplicate copies of wrappers, generic instances etc. by using the 'linkonce' * linkage for them. This requires the following: * - the method needs to have a unique mangled name * - llvmonly mode, since the code in aot-runtime.c would initialize got slots in the wrong aot image etc.
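 * As a sketch of the effect (names illustrative), a linkonce method is
 * emitted roughly as:
 *   define linkonce void @<mangled_name>(...) { ... }
 * so the native linker keeps a single copy of identical instantiations;
 * the actual linkage is LLVMLinkOnceAnyLinkage, set in emit_method_inner.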
*/ if (ctx->module->llvm_only && ctx->module->static_link && is_linkonce_method (cfg->method)) is_linkonce = TRUE; if (is_linkonce || mono_aot_is_externally_callable (cfg->method)) method_name = mono_aot_get_mangled_method_name (cfg->method); else method_name = mono_aot_get_method_name (cfg); cfg->llvm_method_name = g_strdup (method_name); } else { ctx->module = init_jit_module (); method_name = mono_method_full_name (cfg->method, TRUE); } ctx->method_name = method_name; ctx->is_linkonce = is_linkonce; if (cfg->compile_aot) { ctx->lmodule = ctx->module->lmodule; } else { ctx->lmodule = LLVMModuleCreateWithName (g_strdup_printf ("jit-module-%s", cfg->method->name)); } ctx->llvm_only = ctx->module->llvm_only; #ifdef TARGET_WASM ctx->emit_dummy_arg = TRUE; #endif emit_method_inner (ctx); if (!ctx_ok (ctx)) { if (ctx->lmethod) { /* Need to add unused phi nodes as they can be referenced by other values */ LLVMBasicBlockRef phi_bb = LLVMAppendBasicBlock (ctx->lmethod, "PHI_BB"); LLVMBuilderRef builder; builder = create_builder (ctx); LLVMPositionBuilderAtEnd (builder, phi_bb); for (i = 0; i < ctx->phi_values->len; ++i) { LLVMValueRef v = (LLVMValueRef)g_ptr_array_index (ctx->phi_values, i); if (LLVMGetInstructionParent (v) == NULL) LLVMInsertIntoBuilder (builder, v); } if (ctx->module->llvm_only && ctx->module->static_link && cfg->interp) { /* The caller will retry compilation */ LLVMDeleteFunction (ctx->lmethod); } else if (ctx->module->llvm_only && ctx->module->static_link) { // Keep a stub for the function since it might be called directly int nbbs = LLVMCountBasicBlocks (ctx->lmethod); LLVMBasicBlockRef *bblocks = g_new0 (LLVMBasicBlockRef, nbbs); LLVMGetBasicBlocks (ctx->lmethod, bblocks); for (int i = 0; i < nbbs; ++i) LLVMRemoveBasicBlockFromParent (bblocks [i]); LLVMBasicBlockRef entry_bb = LLVMAppendBasicBlock (ctx->lmethod, "ENTRY"); builder = create_builder (ctx); LLVMPositionBuilderAtEnd (builder, entry_bb); ctx->builder = builder; LLVMTypeRef sig = LLVMFunctionType0 (LLVMVoidType (), FALSE); LLVMValueRef callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_nullref_exception)); LLVMBuildCall (builder, callee, NULL, 0, ""); LLVMBuildUnreachable (builder); /* Clean references to instructions inside the method */ for (int i = 0; i < ctx->callsite_list->len; ++i) { CallSite *callsite = (CallSite*)g_ptr_array_index (ctx->callsite_list, i); if (callsite->lmethod == ctx->lmethod) callsite->load = NULL; } } else { LLVMDeleteFunction (ctx->lmethod); } } } free_ctx (ctx); mono_loader_unlock (); } static void emit_method_inner (EmitContext *ctx) { MonoCompile *cfg = ctx->cfg; MonoMethodSignature *sig; MonoBasicBlock *bb; LLVMTypeRef method_type; LLVMValueRef method = NULL; LLVMValueRef *values = ctx->values; int i, max_block_num, bb_index; gboolean llvmonly_fail = FALSE; LLVMCallInfo *linfo; LLVMModuleRef lmodule = ctx->lmodule; BBInfo *bblocks; GPtrArray *bblock_list = ctx->bblock_list; MonoMethodHeader *header; MonoExceptionClause *clause; char **names; LLVMBuilderRef entry_builder = NULL; LLVMBasicBlockRef entry_bb = NULL; if (cfg->gsharedvt && !cfg->llvm_only) { set_failure (ctx, "gsharedvt"); return; } #if 0 { static int count = 0; count ++; char *llvm_count_str = g_getenv ("LLVM_COUNT"); if (llvm_count_str) { int lcount = atoi (llvm_count_str); g_free (llvm_count_str); if (count == lcount) { printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE)); fflush (stdout); } if (count > lcount) { set_failure (ctx, "count"); 
return; } } } #endif // If we come upon one of the init_method wrappers, we need to find // the method that we have already emitted and tell LLVM that the // managed method info for the wrapper is associated with the method // we constructed ourselves from LLVM IR. // // This is necessary to unwind through the init_method, in the case that // it has to run a static cctor that throws an exception if (cfg->method->wrapper_type == MONO_WRAPPER_OTHER) { WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method); if (info->subtype == WRAPPER_SUBTYPE_AOT_INIT) { method = get_init_func (ctx->module, info->d.aot_init.subtype); ctx->lmethod = method; ctx->module->max_method_idx = MAX (ctx->module->max_method_idx, cfg->method_index); const char *init_name = mono_marshal_get_aot_init_wrapper_name (info->d.aot_init.subtype); ctx->method_name = g_strdup_printf ("%s_%s", ctx->module->global_prefix, init_name); ctx->cfg->asm_symbol = g_strdup (ctx->method_name); if (!cfg->llvm_only && ctx->module->external_symbols) { LLVMSetLinkage (method, LLVMExternalLinkage); LLVMSetVisibility (method, LLVMHiddenVisibility); } /* Not looked up at runtime */ g_hash_table_insert (ctx->module->no_method_table_lmethods, method, method); goto after_codegen; } else if (info->subtype == WRAPPER_SUBTYPE_LLVM_FUNC) { g_assert (info->d.llvm_func.subtype == LLVM_FUNC_WRAPPER_GC_POLL); if (cfg->compile_aot) { method = ctx->module->gc_poll_cold_wrapper; g_assert (method); } else { method = emit_icall_cold_wrapper (ctx->module, lmodule, MONO_JIT_ICALL_mono_threads_state_poll, FALSE); } ctx->lmethod = method; ctx->module->max_method_idx = MAX (ctx->module->max_method_idx, cfg->method_index); ctx->method_name = g_strdup (LLVMGetValueName (method)); //g_strdup_printf ("%s_%s", ctx->module->global_prefix, LLVMGetValueName (method)); ctx->cfg->asm_symbol = g_strdup (ctx->method_name); if (!cfg->llvm_only && ctx->module->external_symbols) { LLVMSetLinkage (method, LLVMExternalLinkage); LLVMSetVisibility (method, LLVMHiddenVisibility); } goto after_codegen; } } sig = mono_method_signature_internal (cfg->method); ctx->sig = sig; linfo = get_llvm_call_info (cfg, sig); ctx->linfo = linfo; if (!ctx_ok (ctx)) return; if (cfg->rgctx_var) linfo->rgctx_arg = TRUE; else if (needs_extra_arg (ctx, cfg->method)) linfo->dummy_arg = TRUE; ctx->method_type = method_type = sig_to_llvm_sig_full (ctx, sig, linfo); if (!ctx_ok (ctx)) return; method = LLVMAddFunction (lmodule, ctx->method_name, method_type); ctx->lmethod = method; if (!cfg->llvm_only) LLVMSetFunctionCallConv (method, LLVMMono1CallConv); /* if the method contains * (1) no calls (so it's a leaf method) and * (2) no loops * we can skip the GC safepoint on method entry.
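 * For example, a straight-line getter with no calls, no loop backedge and
 * no exception handler bblock compiles without the entry poll, while any
 * method with cfg->has_calls set or a loop_body_start bblock keeps it so
 * the GC can suspend the thread at a known point.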
*/ gboolean requires_safepoint; requires_safepoint = cfg->has_calls; if (!requires_safepoint) { for (bb = cfg->bb_entry->next_bb; bb; bb = bb->next_bb) { if (bb->loop_body_start || (bb->flags & BB_EXCEPTION_HANDLER)) { requires_safepoint = TRUE; } } } if (cfg->method->wrapper_type) { if (cfg->method->wrapper_type == MONO_WRAPPER_ALLOC || cfg->method->wrapper_type == MONO_WRAPPER_WRITE_BARRIER) { requires_safepoint = FALSE; } else { WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method); switch (info->subtype) { case WRAPPER_SUBTYPE_GSHAREDVT_IN: case WRAPPER_SUBTYPE_GSHAREDVT_OUT: case WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG: case WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG: /* Arguments are not used after the call */ requires_safepoint = FALSE; break; } } } ctx->has_safepoints = requires_safepoint; if (!cfg->llvm_only && mono_threads_are_safepoints_enabled () && requires_safepoint) { if (!cfg->compile_aot) { LLVMSetGC (method, "coreclr"); emit_gc_safepoint_poll (ctx->module, ctx->lmodule, cfg); } else { LLVMSetGC (method, "coreclr"); } } LLVMSetLinkage (method, LLVMPrivateLinkage); mono_llvm_add_func_attr (method, LLVM_ATTR_UW_TABLE); if (cfg->disable_omit_fp) mono_llvm_add_func_attr_nv (method, "frame-pointer", "all"); if (cfg->compile_aot) { if (mono_aot_is_externally_callable (cfg->method)) { LLVMSetLinkage (method, LLVMExternalLinkage); } else { LLVMSetLinkage (method, LLVMInternalLinkage); //all methods have internal visibility when doing llvm_only if (!cfg->llvm_only && ctx->module->external_symbols) { LLVMSetLinkage (method, LLVMExternalLinkage); LLVMSetVisibility (method, LLVMHiddenVisibility); } } if (ctx->is_linkonce) { LLVMSetLinkage (method, LLVMLinkOnceAnyLinkage); LLVMSetVisibility (method, LLVMDefaultVisibility); } } else { LLVMSetLinkage (method, LLVMExternalLinkage); } if (cfg->method->save_lmf && !cfg->llvm_only) { set_failure (ctx, "lmf"); return; } if (sig->pinvoke && cfg->method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE && !cfg->llvm_only) { set_failure (ctx, "pinvoke signature"); return; } #ifdef TARGET_WASM if (ctx->module->interp && cfg->header->code_size > 100000 && !cfg->interp_entry_only) { /* Large methods slow down llvm too much */ set_failure (ctx, "il code too large."); return; } #endif header = cfg->header; for (i = 0; i < header->num_clauses; ++i) { clause = &header->clauses [i]; if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT && clause->flags != MONO_EXCEPTION_CLAUSE_NONE) { if (cfg->llvm_only) { if (!cfg->deopt && !cfg->interp_entry_only) llvmonly_fail = TRUE; } else { set_failure (ctx, "non-finally/catch/fault clause."); return; } } } if (header->num_clauses || (cfg->method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) || cfg->no_inline) /* We can't handle inlined methods with clauses */ mono_llvm_add_func_attr (method, LLVM_ATTR_NO_INLINE); for (int i = 0; i < cfg->header->num_clauses; i++) { MonoExceptionClause *clause = &cfg->header->clauses [i]; if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE || clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) ctx->has_catch = TRUE; } if (linfo->rgctx_arg) { ctx->rgctx_arg = LLVMGetParam (method, linfo->rgctx_arg_pindex); ctx->rgctx_arg_pindex = linfo->rgctx_arg_pindex; /* * We mark the rgctx parameter with the inreg attribute, which is mapped to * MONO_ARCH_RGCTX_REG in the Mono calling convention in llvm, i.e. * CC_X86_64_Mono in X86CallingConv.td. 
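 * As a sketch, the resulting declaration looks like:
 *   define void @foo(i8* inreg %rgctx, ...)
 * (illustrative IR), and the Mono calling convention then assigns the
 * inreg argument to MONO_ARCH_RGCTX_REG rather than an ordinary
 * parameter register.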
*/ if (!ctx->llvm_only) mono_llvm_add_param_attr (ctx->rgctx_arg, LLVM_ATTR_IN_REG); LLVMSetValueName (ctx->rgctx_arg, "rgctx"); } else { ctx->rgctx_arg_pindex = -1; } if (cfg->vret_addr) { values [cfg->vret_addr->dreg] = LLVMGetParam (method, linfo->vret_arg_pindex); LLVMSetValueName (values [cfg->vret_addr->dreg], "vret"); if (linfo->ret.storage == LLVMArgVtypeByRef) { mono_llvm_add_param_attr (LLVMGetParam (method, linfo->vret_arg_pindex), LLVM_ATTR_STRUCT_RET); mono_llvm_add_param_attr (LLVMGetParam (method, linfo->vret_arg_pindex), LLVM_ATTR_NO_ALIAS); } } if (sig->hasthis) { ctx->this_arg_pindex = linfo->this_arg_pindex; ctx->this_arg = LLVMGetParam (method, linfo->this_arg_pindex); values [cfg->args [0]->dreg] = ctx->this_arg; LLVMSetValueName (values [cfg->args [0]->dreg], "this"); } if (linfo->dummy_arg) LLVMSetValueName (LLVMGetParam (method, linfo->dummy_arg_pindex), "dummy_arg"); names = g_new (char *, sig->param_count); mono_method_get_param_names (cfg->method, (const char **) names); /* Set parameter names/attributes */ for (i = 0; i < sig->param_count; ++i) { LLVMArgInfo *ainfo = &linfo->args [i + sig->hasthis]; char *name; int pindex = ainfo->pindex + ainfo->ndummy_fpargs; int j; for (j = 0; j < ainfo->ndummy_fpargs; ++j) { name = g_strdup_printf ("dummy_%d_%d", i, j); LLVMSetValueName (LLVMGetParam (method, ainfo->pindex + j), name); g_free (name); } if (ainfo->storage == LLVMArgVtypeInReg && ainfo->pair_storage [0] == LLVMArgNone && ainfo->pair_storage [1] == LLVMArgNone) continue; values [cfg->args [i + sig->hasthis]->dreg] = LLVMGetParam (method, pindex); if (ainfo->storage == LLVMArgGsharedvtFixed || ainfo->storage == LLVMArgGsharedvtFixedVtype) { if (names [i] && names [i][0] != '\0') name = g_strdup_printf ("p_arg_%s", names [i]); else name = g_strdup_printf ("p_arg_%d", i); } else { if (names [i] && names [i][0] != '\0') name = g_strdup_printf ("arg_%s", names [i]); else name = g_strdup_printf ("arg_%d", i); } LLVMSetValueName (LLVMGetParam (method, pindex), name); g_free (name); if (ainfo->storage == LLVMArgVtypeByVal) mono_llvm_add_param_attr (LLVMGetParam (method, pindex), LLVM_ATTR_BY_VAL); if (ainfo->storage == LLVMArgVtypeByRef || ainfo->storage == LLVMArgVtypeAddr) { /* For OP_LDADDR */ cfg->args [i + sig->hasthis]->opcode = OP_VTARG_ADDR; } #ifdef TARGET_WASM if (ainfo->storage == LLVMArgVtypeByRef) { /* This causes llvm to make a copy of the value which is what we need */ mono_llvm_add_param_byval_attr (LLVMGetParam (method, pindex), LLVMGetElementType (LLVMTypeOf (LLVMGetParam (method, pindex)))); } #endif } g_free (names); if (ctx->module->emit_dwarf && cfg->compile_aot && mono_debug_enabled ()) { ctx->minfo = mono_debug_lookup_method (cfg->method); ctx->dbg_md = emit_dbg_subprogram (ctx, cfg, method, ctx->method_name); } max_block_num = 0; for (bb = cfg->bb_entry; bb; bb = bb->next_bb) max_block_num = MAX (max_block_num, bb->block_num); ctx->bblocks = bblocks = g_new0 (BBInfo, max_block_num + 1); /* Add branches between non-consecutive bblocks */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) && bb->next_bb != bb->last_ins->inst_false_bb) { MonoInst *inst = (MonoInst*)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst)); inst->opcode = OP_BR; inst->inst_target_bb = bb->last_ins->inst_false_bb; mono_bblock_add_inst (bb, inst); } } /* * Make a first pass over the code to precreate PHI nodes/set INDIRECT flags. 
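 * e.g. an i32 vreg 10 merged from two predecessors is precreated as
 *   %t10 = phi i32
 * with no incoming [value, bblock] pairs yet; those are attached later
 * via LLVMAddIncoming, once every bblock has been emitted.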
*/ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins; LLVMBuilderRef builder; char *dname; char dname_buf[128]; builder = create_builder (ctx); for (ins = bb->code; ins; ins = ins->next) { switch (ins->opcode) { case OP_PHI: case OP_FPHI: case OP_VPHI: case OP_XPHI: { LLVMTypeRef phi_type = llvm_type_to_stack_type (cfg, type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass))); if (!ctx_ok (ctx)) return; if (cfg->interp_entry_only) break; if (ins->opcode == OP_VPHI) { /* Treat valuetype PHI nodes as operating on the address itself */ g_assert (ins->klass); phi_type = LLVMPointerType (type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass)), 0); } /* * Have to precreate these, as they can be referenced by * earlier instructions. */ sprintf (dname_buf, "t%d", ins->dreg); dname = dname_buf; values [ins->dreg] = LLVMBuildPhi (builder, phi_type, dname); if (ins->opcode == OP_VPHI) ctx->addresses [ins->dreg] = values [ins->dreg]; g_ptr_array_add (ctx->phi_values, values [ins->dreg]); /* * Set the expected type of the incoming arguments since these have * to have the same type. */ for (i = 0; i < ins->inst_phi_args [0]; i++) { int sreg1 = ins->inst_phi_args [i + 1]; if (sreg1 != -1) { if (ins->opcode == OP_VPHI) ctx->is_vphi [sreg1] = TRUE; ctx->vreg_types [sreg1] = phi_type; } } break; } case OP_LDADDR: ((MonoInst*)ins->inst_p0)->flags |= MONO_INST_INDIRECT; break; default: break; } } } /* * Create an ordering for bblocks, use the depth first order first, then * put the exception handling bblocks last. */ for (bb_index = 0; bb_index < cfg->num_bblocks; ++bb_index) { bb = cfg->bblocks [bb_index]; if (!(bb->region != -1 && !MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_TRY))) { g_ptr_array_add (bblock_list, bb); bblocks [bb->block_num].added = TRUE; } } for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { if (!bblocks [bb->block_num].added) g_ptr_array_add (bblock_list, bb); } /* * Second pass: generate code. */ // Emit entry point entry_builder = create_builder (ctx); entry_bb = get_bb (ctx, cfg->bb_entry); LLVMPositionBuilderAtEnd (entry_builder, entry_bb); emit_entry_bb (ctx, entry_builder); if (llvmonly_fail) /* * In llvmonly mode, we want to emit an llvm method for every method even if it fails to compile, * so direct calls can be made from outside the assembly. */ goto after_codegen_1; for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { int clause_index; char name [128]; if (ctx->cfg->interp_entry_only || !(bb->region != -1 && (bb->flags & BB_EXCEPTION_HANDLER))) continue; if (ctx->cfg->deopt && MONO_REGION_FLAGS (bb->region) == MONO_EXCEPTION_CLAUSE_FILTER) continue; clause_index = MONO_REGION_CLAUSE_INDEX (bb->region); g_hash_table_insert (ctx->region_to_handler, GUINT_TO_POINTER (mono_get_block_region_notry (cfg, bb->region)), bb); g_hash_table_insert (ctx->clause_to_handler, GINT_TO_POINTER (clause_index), bb); /* * Create a new bblock which CALL_HANDLER/landing pads can branch to, because branching to the * LLVM bblock containing a landing pad causes problems for the * LLVM optimizer passes. 
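 * i.e. OP_CALL_HANDLER branches to the plain BB<n>_CALL_HANDLER_TARGET
 * bblock created below, and only that bblock transfers control into the
 * clause, so the landing pad itself has no ordinary predecessors.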
*/ sprintf (name, "BB%d_CALL_HANDLER_TARGET", bb->block_num); ctx->bblocks [bb->block_num].call_handler_target_bb = LLVMAppendBasicBlock (ctx->lmethod, name); } // Make landing pads first ctx->exc_meta = g_hash_table_new_full (NULL, NULL, NULL, NULL); if (ctx->llvm_only && !ctx->cfg->interp_entry_only) { size_t group_index = 0; while (group_index < cfg->header->num_clauses) { if (cfg->clause_is_dead [group_index]) { group_index ++; continue; } int count = 0; size_t cursor = group_index; while (cursor < cfg->header->num_clauses && CLAUSE_START (&cfg->header->clauses [cursor]) == CLAUSE_START (&cfg->header->clauses [group_index]) && CLAUSE_END (&cfg->header->clauses [cursor]) == CLAUSE_END (&cfg->header->clauses [group_index])) { count++; cursor++; } LLVMBasicBlockRef lpad_bb = emit_landing_pad (ctx, group_index, count); intptr_t key = CLAUSE_END (&cfg->header->clauses [group_index]); g_hash_table_insert (ctx->exc_meta, (gpointer)key, lpad_bb); group_index = cursor; } } for (bb_index = 0; bb_index < bblock_list->len; ++bb_index) { bb = (MonoBasicBlock*)g_ptr_array_index (bblock_list, bb_index); // Prune unreachable mono BBs. if (!(bb == cfg->bb_entry || bb->in_count > 0)) continue; process_bb (ctx, bb); if (!ctx_ok (ctx)) return; } g_hash_table_destroy (ctx->exc_meta); mono_memory_barrier (); /* Add incoming phi values */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { GSList *l, *ins_list; ins_list = bblocks [bb->block_num].phi_nodes; for (l = ins_list; l; l = l->next) { PhiNode *node = (PhiNode*)l->data; MonoInst *phi = node->phi; int sreg1 = node->sreg; LLVMBasicBlockRef in_bb; if (sreg1 == -1) continue; in_bb = get_end_bb (ctx, node->in_bb); if (ctx->unreachable [node->in_bb->block_num]) continue; if (phi->opcode == OP_VPHI) { g_assert (LLVMTypeOf (ctx->addresses [sreg1]) == LLVMTypeOf (values [phi->dreg])); LLVMAddIncoming (values [phi->dreg], &ctx->addresses [sreg1], &in_bb, 1); } else { if (!values [sreg1]) { /* Can happen with values in EH clauses */ set_failure (ctx, "incoming phi sreg1"); return; } if (LLVMTypeOf (values [sreg1]) != LLVMTypeOf (values [phi->dreg])) { set_failure (ctx, "incoming phi arg type mismatch"); return; } g_assert (LLVMTypeOf (values [sreg1]) == LLVMTypeOf (values [phi->dreg])); LLVMAddIncoming (values [phi->dreg], &values [sreg1], &in_bb, 1); } } } /* Nullify empty phi instructions */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { GSList *l, *ins_list; ins_list = bblocks [bb->block_num].phi_nodes; for (l = ins_list; l; l = l->next) { PhiNode *node = (PhiNode*)l->data; MonoInst *phi = node->phi; LLVMValueRef phi_ins = values [phi->dreg]; if (!phi_ins) /* Already removed */ continue; if (LLVMCountIncoming (phi_ins) == 0) { mono_llvm_replace_uses_of (phi_ins, LLVMConstNull (LLVMTypeOf (phi_ins))); LLVMInstructionEraseFromParent (phi_ins); values [phi->dreg] = NULL; } } } /* Create the SWITCH statements for ENDFINALLY instructions */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { BBInfo *info = &bblocks [bb->block_num]; GSList *l; for (l = info->endfinally_switch_ins_list; l; l = l->next) { LLVMValueRef switch_ins = (LLVMValueRef)l->data; GSList *bb_list = info->call_handler_return_bbs; GSList *bb_list_iter; i = 0; for (bb_list_iter = bb_list; bb_list_iter; bb_list_iter = g_slist_next (bb_list_iter)) { LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i + 1, FALSE), (LLVMBasicBlockRef)bb_list_iter->data); i ++; } } } ctx->module->max_method_idx = MAX (ctx->module->max_method_idx, cfg->method_index); after_codegen_1: if (llvmonly_fail) { /* * FIXME: 
Maybe fallback to interpreter */ static LLVMTypeRef sig; ctx->builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, ctx->inited_bb); char *name = mono_method_get_full_name (cfg->method); int len = strlen (name); LLVMTypeRef type = LLVMArrayType (LLVMInt8Type (), len + 1); LLVMValueRef name_var = LLVMAddGlobal (ctx->lmodule, type, "missing_method_name"); LLVMSetVisibility (name_var, LLVMHiddenVisibility); LLVMSetLinkage (name_var, LLVMInternalLinkage); LLVMSetInitializer (name_var, mono_llvm_create_constant_data_array ((guint8*)name, len + 1)); mono_llvm_set_is_constant (name_var); g_free (name); if (!sig) sig = LLVMFunctionType1 (LLVMVoidType (), ctx->module->ptr_type, FALSE); LLVMValueRef callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_aot_failed_exception)); LLVMValueRef args [] = { convert (ctx, name_var, ctx->module->ptr_type) }; LLVMBuildCall (ctx->builder, callee, args, 1, ""); LLVMBuildUnreachable (ctx->builder); } /* Initialize the method if needed */ if (cfg->compile_aot) { // FIXME: Add more shared got entries ctx->builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, ctx->init_bb); // FIXME: beforefieldinit /* * NATIVE_TO_MANAGED methods might be called on a thread not attached to the runtime, so they are initialized when loaded * in load_method (). */ gboolean needs_init = ctx->cfg->got_access_count > 0; MonoMethod *cctor = NULL; if (!needs_init && (cctor = mono_class_get_cctor (cfg->method->klass))) { /* Needs init to run the cctor */ if (cfg->method->flags & METHOD_ATTRIBUTE_STATIC) needs_init = TRUE; if (cctor == cfg->method) needs_init = FALSE; // If we are a constructor, we need to init so the static // constructor gets called. if (!strcmp (cfg->method->name, ".ctor")) needs_init = TRUE; } if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) needs_init = FALSE; if (needs_init) emit_method_init (ctx); else LLVMBuildBr (ctx->builder, ctx->inited_bb); // Was observing LLVM moving field accesses into the caller's method // body before the init call (the inlined one), leading to NULL derefs // after the init_method returns (GOT is filled out though) if (needs_init) mono_llvm_add_func_attr (method, LLVM_ATTR_NO_INLINE); } if (mini_get_debug_options ()->llvm_disable_inlining) mono_llvm_add_func_attr (method, LLVM_ATTR_NO_INLINE); after_codegen: if (cfg->compile_aot) g_ptr_array_add (ctx->module->cfgs, cfg); if (cfg->llvm_only) { /* * Add the contents of ctx->callsite_list to module->callsite_list. * We can't do this earlier, as it contains llvm instructions which can be * freed if compilation fails. * FIXME: Get rid of this when all methods can be llvm compiled. 
*/ for (int i = 0; i < ctx->callsite_list->len; ++i) g_ptr_array_add (ctx->module->callsite_list, g_ptr_array_index (ctx->callsite_list, i)); } if (cfg->verbose_level > 1) { g_print ("\n*** Unoptimized LLVM IR for %s ***\n", mono_method_full_name (cfg->method, TRUE)); if (cfg->compile_aot) { mono_llvm_dump_value (method); } else { mono_llvm_dump_module (ctx->lmodule); } g_print ("***\n\n"); } if (cfg->compile_aot && !cfg->llvm_only) mark_as_used (ctx->module, method); if (!cfg->llvm_only) { LLVMValueRef md_args [16]; LLVMValueRef md_node; int method_index; if (cfg->compile_aot) method_index = mono_aot_get_method_index (cfg->orig_method); else method_index = 1; md_args [0] = LLVMMDString (ctx->method_name, strlen (ctx->method_name)); md_args [1] = LLVMConstInt (LLVMInt32Type (), method_index, FALSE); md_node = LLVMMDNode (md_args, 2); LLVMAddNamedMetadataOperand (lmodule, "mono.function_indexes", md_node); //LLVMSetMetadata (method, md_kind, LLVMMDNode (&md_arg, 1)); } if (cfg->compile_aot) { /* Don't generate native code, keep the LLVM IR */ if (cfg->verbose_level) { char *name = mono_method_get_full_name (cfg->method); printf ("%s emitted as %s\n", name, ctx->method_name); g_free (name); } #if 0 int err = LLVMVerifyFunction (ctx->lmethod, LLVMPrintMessageAction); if (err != 0) LLVMDumpValue (ctx->lmethod); g_assert (err == 0); #endif } else { //LLVMVerifyFunction (method, 0); llvm_jit_finalize_method (ctx); } if (ctx->module->method_to_lmethod) g_hash_table_insert (ctx->module->method_to_lmethod, cfg->method, ctx->lmethod); if (ctx->module->idx_to_lmethod) g_hash_table_insert (ctx->module->idx_to_lmethod, GINT_TO_POINTER (cfg->method_index), ctx->lmethod); if (ctx->llvm_only && m_class_is_valuetype (cfg->orig_method->klass) && !(cfg->orig_method->flags & METHOD_ATTRIBUTE_STATIC)) emit_unbox_tramp (ctx, ctx->method_name, ctx->method_type, ctx->lmethod, cfg->method_index); } /* * mono_llvm_create_vars: * * Same as mono_arch_create_vars () for LLVM. */ void mono_llvm_create_vars (MonoCompile *cfg) { MonoMethodSignature *sig; sig = mono_method_signature_internal (cfg->method); if (cfg->gsharedvt && cfg->llvm_only) { gboolean vretaddr = FALSE; if (mini_is_gsharedvt_variable_signature (sig) && sig->ret->type != MONO_TYPE_VOID) { vretaddr = TRUE; } else { MonoMethodSignature *sig = mono_method_signature_internal (cfg->method); LLVMCallInfo *linfo; linfo = get_llvm_call_info (cfg, sig); vretaddr = (linfo->ret.storage == LLVMArgVtypeRetAddr || linfo->ret.storage == LLVMArgVtypeByRef || linfo->ret.storage == LLVMArgGsharedvtFixed || linfo->ret.storage == LLVMArgGsharedvtVariable || linfo->ret.storage == LLVMArgGsharedvtFixedVtype); } if (vretaddr) { /* * Creating vret_addr forces CEE_SETRET to store the result into it, * so we don't have to generate any code in our OP_SETRET case. */ cfg->vret_addr = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_get_intptr_class ()), OP_ARG); if (G_UNLIKELY (cfg->verbose_level > 1)) { printf ("vret_addr = "); mono_print_ins (cfg->vret_addr); } } } else { mono_arch_create_vars (cfg); } cfg->lmf_ir = TRUE; } /* * mono_llvm_emit_call: * * Same as mono_arch_emit_call () for LLVM. 
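 * Roughly: scalar arguments are copied with OP_MOVE/OP_FMOVE/OP_LMOVE/
 * OP_RMOVE into fresh vregs, while vtype and gsharedvt arguments become
 * OP_LLVM_OUTARG_VT instructions carrying a copy of their LLVMArgInfo,
 * which the LLVM backend marshals when the call itself is emitted.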
*/ void mono_llvm_emit_call (MonoCompile *cfg, MonoCallInst *call) { MonoInst *in; MonoMethodSignature *sig; int i, n; LLVMArgInfo *ainfo; sig = call->signature; n = sig->param_count + sig->hasthis; if (sig->call_convention == MONO_CALL_VARARG) { cfg->exception_message = g_strdup ("varargs"); cfg->disable_llvm = TRUE; return; } call->cinfo = get_llvm_call_info (cfg, sig); if (cfg->disable_llvm) return; for (i = 0; i < n; ++i) { MonoInst *ins; ainfo = call->cinfo->args + i; in = call->args [i]; /* Simply remember the arguments */ switch (ainfo->storage) { case LLVMArgNormal: { MonoType *t = (sig->hasthis && i == 0) ? m_class_get_byval_arg (mono_get_intptr_class ()) : ainfo->type; int opcode; opcode = mono_type_to_regmove (cfg, t); if (opcode == OP_FMOVE) { MONO_INST_NEW (cfg, ins, OP_FMOVE); ins->dreg = mono_alloc_freg (cfg); } else if (opcode == OP_LMOVE) { MONO_INST_NEW (cfg, ins, OP_LMOVE); ins->dreg = mono_alloc_lreg (cfg); } else if (opcode == OP_RMOVE) { MONO_INST_NEW (cfg, ins, OP_RMOVE); ins->dreg = mono_alloc_freg (cfg); } else { MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); } ins->sreg1 = in->dreg; break; } case LLVMArgVtypeByVal: case LLVMArgVtypeByRef: case LLVMArgVtypeInReg: case LLVMArgVtypeAddr: case LLVMArgVtypeAsScalar: case LLVMArgAsIArgs: case LLVMArgAsFpArgs: case LLVMArgGsharedvtVariable: case LLVMArgGsharedvtFixed: case LLVMArgGsharedvtFixedVtype: case LLVMArgWasmVtypeAsScalar: MONO_INST_NEW (cfg, ins, OP_LLVM_OUTARG_VT); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = in->dreg; ins->inst_p0 = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMArgInfo)); memcpy (ins->inst_p0, ainfo, sizeof (LLVMArgInfo)); ins->inst_vtype = ainfo->type; ins->klass = mono_class_from_mono_type_internal (ainfo->type); break; default: cfg->exception_message = g_strdup ("ainfo->storage"); cfg->disable_llvm = TRUE; return; } if (!cfg->disable_llvm) { MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, 0, FALSE); } } } static inline void add_func (LLVMModuleRef module, const char *name, LLVMTypeRef ret_type, LLVMTypeRef *param_types, int nparams) { LLVMAddFunction (module, name, LLVMFunctionType (ret_type, param_types, nparams, FALSE)); } static LLVMValueRef add_intrins (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef *params, int nparams) { return mono_llvm_register_overloaded_intrinsic (module, id, params, nparams); } static LLVMValueRef add_intrins1 (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef param1) { return mono_llvm_register_overloaded_intrinsic (module, id, &param1, 1); } static LLVMValueRef add_intrins2 (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef param1, LLVMTypeRef param2) { LLVMTypeRef params [] = { param1, param2 }; return mono_llvm_register_overloaded_intrinsic (module, id, params, 2); } static LLVMValueRef add_intrins3 (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef param1, LLVMTypeRef param2, LLVMTypeRef param3) { LLVMTypeRef params [] = { param1, param2, param3 }; return mono_llvm_register_overloaded_intrinsic (module, id, params, 3); } static void add_intrinsic (LLVMModuleRef module, int id) { /* Register simple intrinsics */ LLVMValueRef intrins = mono_llvm_register_intrinsic (module, (IntrinsicId)id); if (intrins) { g_hash_table_insert (intrins_id_to_intrins, GINT_TO_POINTER (id), intrins); return; } if (intrin_arm64_ovr [id] != 0) { llvm_ovr_tag_t spec = intrin_arm64_ovr [id]; for (int vw = 0; vw < INTRIN_vectorwidths; ++vw) { for (int ew = 0; ew < INTRIN_elementwidths; ++ew) { llvm_ovr_tag_t vec_bit = 
INTRIN_vector128 >> ((INTRIN_vectorwidths - 1) - vw); llvm_ovr_tag_t elem_bit = INTRIN_int8 << ew; llvm_ovr_tag_t test = vec_bit | elem_bit; if ((spec & test) == test) { uint8_t kind = intrin_kind [id]; LLVMTypeRef distinguishing_type = intrin_types [vw][ew]; if (kind == INTRIN_kind_ftoi && (elem_bit & (INTRIN_int32 | INTRIN_int64))) { /* * @llvm.aarch64.neon.fcvtas.v4i32.v4f32 * @llvm.aarch64.neon.fcvtas.v2i64.v2f64 */ intrins = add_intrins2 (module, id, distinguishing_type, intrin_types [vw][ew + 2]); } else if (kind == INTRIN_kind_widen) { /* * @llvm.aarch64.neon.saddlp.v2i64.v4i32 * @llvm.aarch64.neon.saddlp.v4i16.v8i8 */ intrins = add_intrins2 (module, id, distinguishing_type, intrin_types [vw][ew - 1]); } else if (kind == INTRIN_kind_widen_across) { /* * @llvm.aarch64.neon.saddlv.i64.v4i32 * @llvm.aarch64.neon.saddlv.i32.v8i16 * @llvm.aarch64.neon.saddlv.i32.v16i8 * i8/i16 return types for NEON intrinsics will make isel fail as of LLVM 9. */ int associated_prim = MAX(ew + 1, 2); LLVMTypeRef associated_scalar_type = intrin_types [0][associated_prim]; intrins = add_intrins2 (module, id, associated_scalar_type, distinguishing_type); } else if (kind == INTRIN_kind_across) { /* * @llvm.aarch64.neon.uaddv.i64.v4i64 * @llvm.aarch64.neon.uaddv.i32.v4i32 * @llvm.aarch64.neon.uaddv.i32.v8i16 * @llvm.aarch64.neon.uaddv.i32.v16i8 * i8/i16 return types for NEON intrinsics will make isel fail as of LLVM 9. */ int associated_prim = MAX(ew, 2); LLVMTypeRef associated_scalar_type = intrin_types [0][associated_prim]; intrins = add_intrins2 (module, id, associated_scalar_type, distinguishing_type); } else if (kind == INTRIN_kind_arm64_dot_prod) { /* * @llvm.aarch64.neon.sdot.v2i32.v8i8 * @llvm.aarch64.neon.sdot.v4i32.v16i8 */ LLVMTypeRef associated_type = intrin_types [vw][0]; intrins = add_intrins2 (module, id, distinguishing_type, associated_type); } else intrins = add_intrins1 (module, id, distinguishing_type); int key = key_from_id_and_tag (id, test); g_hash_table_insert (intrins_id_to_intrins, GINT_TO_POINTER (key), intrins); } } } return; } /* Register overloaded intrinsics */ switch (id) { #define INTRINS(intrin_name, llvm_id, arch) #define INTRINS_OVR(intrin_name, llvm_id, arch, llvm_type) case INTRINS_ ## intrin_name: intrins = add_intrins1(module, id, llvm_type); break; #define INTRINS_OVR_2_ARG(intrin_name, llvm_id, arch, llvm_type1, llvm_type2) case INTRINS_ ## intrin_name: intrins = add_intrins2(module, id, llvm_type1, llvm_type2); break; #define INTRINS_OVR_3_ARG(intrin_name, llvm_id, arch, llvm_type1, llvm_type2, llvm_type3) case INTRINS_ ## intrin_name: intrins = add_intrins3(module, id, llvm_type1, llvm_type2, llvm_type3); break; #define INTRINS_OVR_TAG(...) #define INTRINS_OVR_TAG_KIND(...) #include "llvm-intrinsics.h" default: g_assert_not_reached (); break; } g_assert (intrins); g_hash_table_insert (intrins_id_to_intrins, GINT_TO_POINTER (id), intrins); } static LLVMValueRef get_intrins_from_module (LLVMModuleRef lmodule, int id) { LLVMValueRef res; res = (LLVMValueRef)g_hash_table_lookup (intrins_id_to_intrins, GINT_TO_POINTER (id)); g_assert (res); return res; } static LLVMValueRef get_intrins (EmitContext *ctx, int id) { return get_intrins_from_module (ctx->lmodule, id); } static void add_intrinsics (LLVMModuleRef module) { int i; /* Emit declarations of intrinsics */ /* * It would be nicer to emit only the intrinsics actually used, but LLVM's Module * type doesn't seem to do any locking.
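 * As a sketch of the lookup scheme: plain intrinsics are keyed in
 * intrins_id_to_intrins by their IntrinsicId alone, while the arm64
 * overloads are keyed by key_from_id_and_tag (id, tag), so e.g. the
 * v4i32 and v2i64 variants of one neon intrinsic live under distinct keys.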
*/ for (i = 0; i < INTRINS_NUM; ++i) add_intrinsic (module, i); /* EH intrinsics */ add_func (module, "mono_personality", LLVMVoidType (), NULL, 0); add_func (module, "llvm_resume_unwind_trampoline", LLVMVoidType (), NULL, 0); } static void add_types (MonoLLVMModule *module) { module->ptr_type = LLVMPointerType (TARGET_SIZEOF_VOID_P == 8 ? LLVMInt64Type () : LLVMInt32Type (), 0); } void mono_llvm_init (gboolean enable_jit) { intrin_types [0][0] = i1_t = LLVMInt8Type (); intrin_types [0][1] = i2_t = LLVMInt16Type (); intrin_types [0][2] = i4_t = LLVMInt32Type (); intrin_types [0][3] = i8_t = LLVMInt64Type (); intrin_types [0][4] = r4_t = LLVMFloatType (); intrin_types [0][5] = r8_t = LLVMDoubleType (); intrin_types [1][0] = v64_i1_t = LLVMVectorType (LLVMInt8Type (), 8); intrin_types [1][1] = v64_i2_t = LLVMVectorType (LLVMInt16Type (), 4); intrin_types [1][2] = v64_i4_t = LLVMVectorType (LLVMInt32Type (), 2); intrin_types [1][3] = v64_i8_t = LLVMVectorType (LLVMInt64Type (), 1); intrin_types [1][4] = v64_r4_t = LLVMVectorType (LLVMFloatType (), 2); intrin_types [1][5] = v64_r8_t = LLVMVectorType (LLVMDoubleType (), 1); intrin_types [2][0] = v128_i1_t = sse_i1_t = type_to_sse_type (MONO_TYPE_I1); intrin_types [2][1] = v128_i2_t = sse_i2_t = type_to_sse_type (MONO_TYPE_I2); intrin_types [2][2] = v128_i4_t = sse_i4_t = type_to_sse_type (MONO_TYPE_I4); intrin_types [2][3] = v128_i8_t = sse_i8_t = type_to_sse_type (MONO_TYPE_I8); intrin_types [2][4] = v128_r4_t = sse_r4_t = type_to_sse_type (MONO_TYPE_R4); intrin_types [2][5] = v128_r8_t = sse_r8_t = type_to_sse_type (MONO_TYPE_R8); intrins_id_to_intrins = g_hash_table_new (NULL, NULL); void_func_t = LLVMFunctionType0 (LLVMVoidType (), FALSE); if (enable_jit) mono_llvm_jit_init (); } void mono_llvm_free_mem_manager (MonoJitMemoryManager *mem_manager) { MonoLLVMModule *module = (MonoLLVMModule*)mem_manager->llvm_module; int i; if (!module) return; g_hash_table_destroy (module->llvm_types); mono_llvm_dispose_ee (module->mono_ee); if (module->bb_names) { for (i = 0; i < module->bb_names_len; ++i) g_free (module->bb_names [i]); g_free (module->bb_names); } //LLVMDisposeModule (module->module); g_free (module); mem_manager->llvm_module = NULL; } void mono_llvm_create_aot_module (MonoAssembly *assembly, const char *global_prefix, int initial_got_size, LLVMModuleFlags flags) { MonoLLVMModule *module = &aot_module; gboolean emit_dwarf = (flags & LLVM_MODULE_FLAG_DWARF) ? 1 : 0; #ifdef TARGET_WIN32_MSVC gboolean emit_codeview = (flags & LLVM_MODULE_FLAG_CODEVIEW) ? 1 : 0; #endif gboolean static_link = (flags & LLVM_MODULE_FLAG_STATIC) ? 1 : 0; gboolean llvm_only = (flags & LLVM_MODULE_FLAG_LLVM_ONLY) ? 1 : 0; gboolean interp = (flags & LLVM_MODULE_FLAG_INTERP) ? 
		1 : 0;

	/* Delete previous module */
	g_hash_table_destroy (module->plt_entries);
	if (module->lmodule)
		LLVMDisposeModule (module->lmodule);

	memset (module, 0, sizeof (aot_module));

	module->lmodule = LLVMModuleCreateWithName ("aot");
	module->assembly = assembly;
	module->global_prefix = g_strdup (global_prefix);
	module->eh_frame_symbol = g_strdup_printf ("%s_eh_frame", global_prefix);
	module->get_method_symbol = g_strdup_printf ("%s_get_method", global_prefix);
	module->get_unbox_tramp_symbol = g_strdup_printf ("%s_get_unbox_tramp", global_prefix);
	module->init_aotconst_symbol = g_strdup_printf ("%s_init_aotconst", global_prefix);
	module->external_symbols = TRUE;
	module->emit_dwarf = emit_dwarf;
	module->static_link = static_link;
	module->llvm_only = llvm_only;
	module->interp = interp;
	/* The first few entries are reserved */
	module->max_got_offset = initial_got_size;
	module->context = LLVMGetGlobalContext ();
	module->cfgs = g_ptr_array_new ();
	module->aotconst_vars = g_hash_table_new (NULL, NULL);
	module->llvm_types = g_hash_table_new (NULL, NULL);
	module->plt_entries = g_hash_table_new (g_str_hash, g_str_equal);
	module->plt_entries_ji = g_hash_table_new (NULL, NULL);
	module->direct_callables = g_hash_table_new (g_str_hash, g_str_equal);
	module->idx_to_lmethod = g_hash_table_new (NULL, NULL);
	module->method_to_lmethod = g_hash_table_new (NULL, NULL);
	module->method_to_call_info = g_hash_table_new (NULL, NULL);
	module->idx_to_unbox_tramp = g_hash_table_new (NULL, NULL);
	module->no_method_table_lmethods = g_hash_table_new (NULL, NULL);
	module->callsite_list = g_ptr_array_new ();

	if (llvm_only)
		/* clang ignores our debug info because it has an invalid version */
		module->emit_dwarf = FALSE;

	add_intrinsics (module->lmodule);
	add_types (module);

#ifdef MONO_ARCH_LLVM_TARGET_LAYOUT
	LLVMSetDataLayout (module->lmodule, MONO_ARCH_LLVM_TARGET_LAYOUT);
#else
	g_assert_not_reached ();
#endif

#ifdef MONO_ARCH_LLVM_TARGET_TRIPLE
	LLVMSetTarget (module->lmodule, MONO_ARCH_LLVM_TARGET_TRIPLE);
#endif

	if (module->emit_dwarf) {
		char *dir, *build_info, *s, *cu_name;

		module->di_builder = mono_llvm_create_di_builder (module->lmodule);

		// FIXME:
		dir = g_strdup (".");
		build_info = mono_get_runtime_build_info ();
		s = g_strdup_printf ("Mono AOT Compiler %s (LLVM)", build_info);
		cu_name = g_path_get_basename (assembly->image->name);
		module->cu = mono_llvm_di_create_compile_unit (module->di_builder, cu_name, dir, s);
		g_free (dir);
		g_free (build_info);
		g_free (s);
	}

#ifdef TARGET_WIN32_MSVC
	if (emit_codeview) {
		LLVMValueRef codeview_option_args[3];

		codeview_option_args[0] = LLVMConstInt (LLVMInt32Type (), 2, FALSE);
		codeview_option_args[1] = LLVMMDString ("CodeView", 8);
		codeview_option_args[2] = LLVMConstInt (LLVMInt32Type (), 1, FALSE);

		LLVMAddNamedMetadataOperand (module->lmodule, "llvm.module.flags", LLVMMDNode (codeview_option_args, G_N_ELEMENTS (codeview_option_args)));
	}

	if (!static_link) {
		const char linker_options[] = "Linker Options";
		const char *default_dynamic_lib_names[] = { "/DEFAULTLIB:msvcrt", "/DEFAULTLIB:ucrt.lib", "/DEFAULTLIB:vcruntime.lib" };
		LLVMValueRef default_lib_args[G_N_ELEMENTS (default_dynamic_lib_names)];
		LLVMValueRef default_lib_nodes[G_N_ELEMENTS (default_dynamic_lib_names)];

		/* (A standalone sketch of this MDString/MDNode pattern follows below.) */
		for (int i = 0; i < G_N_ELEMENTS (default_dynamic_lib_names); ++i) {
			const char *default_lib_name = default_dynamic_lib_names[i];
			default_lib_args[i] = LLVMMDString (default_lib_name, strlen (default_lib_name));
			default_lib_nodes[i] = LLVMMDNode (default_lib_args + i, 1);
		}
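		/*
		 * Aside (kept out of the build): a minimal, self-contained sketch of the
		 * MDString/MDNode pattern used here, showing how a single
		 * "llvm.linker.options" operand is assembled. The module name is invented
		 * for illustration; strlen () comes from the headers already included above.
		 */
#if 0
static void
example_emit_linker_option (void)
{
	LLVMModuleRef m = LLVMModuleCreateWithName ("example");
	const char *opt = "/DEFAULTLIB:msvcrt";

	/* Wrap the string in metadata, wrap that in a node, then attach the node. */
	LLVMValueRef md = LLVMMDString (opt, strlen (opt));
	LLVMValueRef node = LLVMMDNode (&md, 1);
	LLVMAddNamedMetadataOperand (m, "llvm.linker.options", node);

	LLVMDisposeModule (m);
}
#endif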
		LLVMAddNamedMetadataOperand (module->lmodule, "llvm.linker.options", LLVMMDNode (default_lib_args, G_N_ELEMENTS (default_lib_args)));
	}
#endif

	{
		LLVMTypeRef got_type = LLVMArrayType (module->ptr_type, 16);

		module->dummy_got_var = LLVMAddGlobal (module->lmodule, got_type, "dummy_got");
		module->got_idx_to_type = g_hash_table_new (NULL, NULL);
		LLVMSetInitializer (module->dummy_got_var, LLVMConstNull (got_type));
		LLVMSetVisibility (module->dummy_got_var, LLVMHiddenVisibility);
		LLVMSetLinkage (module->dummy_got_var, LLVMInternalLinkage);
	}

	/* Add initialization array */
	LLVMTypeRef inited_type = LLVMArrayType (LLVMInt8Type (), 0);
	module->inited_var = LLVMAddGlobal (aot_module.lmodule, inited_type, "mono_inited_tmp");
	LLVMSetInitializer (module->inited_var, LLVMConstNull (inited_type));

	create_aot_info_var (module);

	emit_gc_safepoint_poll (module, module->lmodule, NULL);

	emit_llvm_code_start (module);

	// Needs idx_to_lmethod
	emit_init_funcs (module);

	/* Add a dummy personality function */
	if (!use_mono_personality_debug) {
		LLVMValueRef personality = LLVMAddFunction (module->lmodule, default_personality_name, LLVMFunctionType (LLVMInt32Type (), NULL, 0, TRUE));
		LLVMSetLinkage (personality, LLVMExternalLinkage);
		// EMCC chokes if the personality function is referenced in the 'used' array
#ifndef TARGET_WASM
		mark_as_used (module, personality);
#endif
	}

	/* Add a reference to the c++ exception we throw/catch */
	{
		LLVMTypeRef exc = LLVMPointerType (LLVMInt8Type (), 0);
		module->sentinel_exception = LLVMAddGlobal (module->lmodule, exc, "_ZTIPi");
		LLVMSetLinkage (module->sentinel_exception, LLVMExternalLinkage);
		mono_llvm_set_is_constant (module->sentinel_exception);
	}
}

void
mono_llvm_fixup_aot_module (void)
{
	MonoLLVMModule *module = &aot_module;
	MonoMethod *method;

	/*
	 * Replace GOT entries for directly callable methods with the methods themselves.
	 * It would be easier to implement this by predefining all methods before compiling
	 * their bodies, but that couldn't handle the case when a method fails to compile
	 * with llvm.
*/ GHashTable *specializable = g_hash_table_new (NULL, NULL); GHashTable *patches_to_null = g_hash_table_new (mono_patch_info_hash, mono_patch_info_equal); for (int sindex = 0; sindex < module->callsite_list->len; ++sindex) { CallSite *site = (CallSite*)g_ptr_array_index (module->callsite_list, sindex); method = site->method; LLVMValueRef lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, method); LLVMValueRef placeholder = (LLVMValueRef)site->load; LLVMValueRef load; if (placeholder == NULL) /* Method failed LLVM compilation */ continue; gboolean can_direct_call = FALSE; /* Replace sharable instances with their shared version */ if (!lmethod && method->is_inflated) { if (mono_method_is_generic_sharable_full (method, FALSE, TRUE, FALSE)) { ERROR_DECL (error); MonoMethod *shared = mini_get_shared_method_full (method, SHARE_MODE_NONE, error); if (is_ok (error)) { lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, shared); if (lmethod) method = shared; } } } if (lmethod && !m_method_is_synchronized (method)) { can_direct_call = TRUE; } else if (m_method_is_wrapper (method) && !method->is_inflated) { WrapperInfo *info = mono_marshal_get_wrapper_info (method); /* This is a call from the synchronized wrapper to the real method */ if (info->subtype == WRAPPER_SUBTYPE_SYNCHRONIZED_INNER) { method = info->d.synchronized.method; lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, method); if (lmethod) can_direct_call = TRUE; } } if (can_direct_call) { mono_llvm_replace_uses_of (placeholder, lmethod); if (mono_aot_can_specialize (method)) g_hash_table_insert (specializable, lmethod, method); g_hash_table_insert (patches_to_null, site->ji, site->ji); } else { // FIXME: LLVMBuilderRef builder = LLVMCreateBuilder (); LLVMPositionBuilderBefore (builder, placeholder); load = get_aotconst_module (module, builder, site->ji->type, site->ji->data.target, site->type, NULL, NULL); LLVMReplaceAllUsesWith (placeholder, load); } g_free (site); } mono_llvm_propagate_nonnull_final (specializable, module); g_hash_table_destroy (specializable); for (int i = 0; i < module->cfgs->len; ++i) { /* * Nullify the patches pointing to direct calls. This is needed to * avoid allocating extra got slots, which is a perf problem and it * makes module->max_got_offset invalid. * It would be better to just store the patch_info in CallSite, but * cfg->patch_info is copied in aot-compiler.c. 
*/ MonoCompile *cfg = (MonoCompile *)g_ptr_array_index (module->cfgs, i); for (MonoJumpInfo *patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { if (patch_info->type == MONO_PATCH_INFO_METHOD) { if (g_hash_table_lookup (patches_to_null, patch_info)) { patch_info->type = MONO_PATCH_INFO_NONE; /* Nullify the call to init_method () if possible */ g_assert (cfg->got_access_count); cfg->got_access_count --; if (cfg->got_access_count == 0) { LLVMValueRef br = (LLVMValueRef)cfg->llvmonly_init_cond; if (br) LLVMSetSuccessor (br, 0, LLVMGetSuccessor (br, 1)); } } } } } g_hash_table_destroy (patches_to_null); } static LLVMValueRef llvm_array_from_uints (LLVMTypeRef el_type, guint32 *values, int nvalues) { int i; LLVMValueRef res, *vals; vals = g_new0 (LLVMValueRef, nvalues); for (i = 0; i < nvalues; ++i) vals [i] = LLVMConstInt (LLVMInt32Type (), values [i], FALSE); res = LLVMConstArray (LLVMInt32Type (), vals, nvalues); g_free (vals); return res; } static LLVMValueRef llvm_array_from_bytes (guint8 *values, int nvalues) { int i; LLVMValueRef res, *vals; vals = g_new0 (LLVMValueRef, nvalues); for (i = 0; i < nvalues; ++i) vals [i] = LLVMConstInt (LLVMInt8Type (), values [i], FALSE); res = LLVMConstArray (LLVMInt8Type (), vals, nvalues); g_free (vals); return res; } /* * mono_llvm_emit_aot_file_info: * * Emit the MonoAotFileInfo structure. * Same as emit_aot_file_info () in aot-compiler.c. */ void mono_llvm_emit_aot_file_info (MonoAotFileInfo *info, gboolean has_jitted_code) { MonoLLVMModule *module = &aot_module; /* Save these for later */ memcpy (&module->aot_info, info, sizeof (MonoAotFileInfo)); module->has_jitted_code = has_jitted_code; } /* * mono_llvm_emit_aot_data: * * Emit the binary data DATA pointed to by symbol SYMBOL. * Return the LLVM variable for the data. 
*/ gpointer mono_llvm_emit_aot_data_aligned (const char *symbol, guint8 *data, int data_len, int align) { MonoLLVMModule *module = &aot_module; LLVMTypeRef type; LLVMValueRef d; type = LLVMArrayType (LLVMInt8Type (), data_len); d = LLVMAddGlobal (module->lmodule, type, symbol); LLVMSetVisibility (d, LLVMHiddenVisibility); LLVMSetLinkage (d, LLVMInternalLinkage); LLVMSetInitializer (d, mono_llvm_create_constant_data_array (data, data_len)); if (align != 1) LLVMSetAlignment (d, align); mono_llvm_set_is_constant (d); return d; } gpointer mono_llvm_emit_aot_data (const char *symbol, guint8 *data, int data_len) { return mono_llvm_emit_aot_data_aligned (symbol, data, data_len, 8); } /* Add a reference to a global defined in JITted code */ static LLVMValueRef AddJitGlobal (MonoLLVMModule *module, LLVMTypeRef type, const char *name) { char *s; LLVMValueRef v; s = g_strdup_printf ("%s%s", module->global_prefix, name); v = LLVMAddGlobal (module->lmodule, LLVMInt8Type (), s); LLVMSetVisibility (v, LLVMHiddenVisibility); g_free (s); return v; } #define FILE_INFO_NUM_HEADER_FIELDS 2 #define FILE_INFO_NUM_SCALAR_FIELDS 23 #define FILE_INFO_NUM_ARRAY_FIELDS 5 #define FILE_INFO_NUM_AOTID_FIELDS 1 #define FILE_INFO_NFIELDS (FILE_INFO_NUM_HEADER_FIELDS + MONO_AOT_FILE_INFO_NUM_SYMBOLS + FILE_INFO_NUM_SCALAR_FIELDS + FILE_INFO_NUM_ARRAY_FIELDS + FILE_INFO_NUM_AOTID_FIELDS) static void create_aot_info_var (MonoLLVMModule *module) { LLVMTypeRef file_info_type; LLVMTypeRef *eltypes; LLVMValueRef info_var; int i, nfields, tindex; LLVMModuleRef lmodule = module->lmodule; /* Create an LLVM type to represent MonoAotFileInfo */ nfields = FILE_INFO_NFIELDS; eltypes = g_new (LLVMTypeRef, nfields); tindex = 0; eltypes [tindex ++] = LLVMInt32Type (); eltypes [tindex ++] = LLVMInt32Type (); /* Symbols */ for (i = 0; i < MONO_AOT_FILE_INFO_NUM_SYMBOLS; ++i) eltypes [tindex ++] = LLVMPointerType (LLVMInt8Type (), 0); /* Scalars */ for (i = 0; i < FILE_INFO_NUM_SCALAR_FIELDS; ++i) eltypes [tindex ++] = LLVMInt32Type (); /* Arrays */ eltypes [tindex ++] = LLVMArrayType (LLVMInt32Type (), MONO_AOT_TABLE_NUM); for (i = 0; i < FILE_INFO_NUM_ARRAY_FIELDS - 1; ++i) eltypes [tindex ++] = LLVMArrayType (LLVMInt32Type (), MONO_AOT_TRAMP_NUM); eltypes [tindex ++] = LLVMArrayType (LLVMInt8Type (), 16); g_assert (tindex == nfields); file_info_type = LLVMStructCreateNamed (module->context, "MonoAotFileInfo"); LLVMStructSetBody (file_info_type, eltypes, nfields, FALSE); info_var = LLVMAddGlobal (lmodule, file_info_type, "mono_aot_file_info"); module->info_var = info_var; module->info_var_eltypes = eltypes; } static void emit_aot_file_info (MonoLLVMModule *module) { LLVMTypeRef *eltypes, eltype; LLVMValueRef info_var; LLVMValueRef *fields; int i, nfields, tindex; MonoAotFileInfo *info; LLVMModuleRef lmodule = module->lmodule; info = &module->aot_info; info_var = module->info_var; eltypes = module->info_var_eltypes; nfields = FILE_INFO_NFIELDS; if (module->static_link) { LLVMSetVisibility (info_var, LLVMHiddenVisibility); LLVMSetLinkage (info_var, LLVMInternalLinkage); } #ifdef TARGET_WIN32 if (!module->static_link) { LLVMSetDLLStorageClass (info_var, LLVMDLLExportStorageClass); } #endif fields = g_new (LLVMValueRef, nfields); tindex = 0; fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->version, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->dummy, FALSE); /* Symbols */ /* * We use LLVMGetNamedGlobal () for symbol which are defined in LLVM code, and LLVMAddGlobal () * for symbols defined in the .s file emitted 
by the aot compiler. */ eltype = eltypes [tindex]; if (module->llvm_only) fields [tindex ++] = LLVMConstNull (eltype); else fields [tindex ++] = AddJitGlobal (module, eltype, "jit_got"); /* llc defines this directly */ if (!module->llvm_only) { fields [tindex ++] = LLVMAddGlobal (lmodule, eltype, module->eh_frame_symbol); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); } else { fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = module->get_method; fields [tindex ++] = module->get_unbox_tramp ? module->get_unbox_tramp : LLVMConstNull (eltype); } fields [tindex ++] = module->init_aotconst_func; if (module->has_jitted_code) { fields [tindex ++] = AddJitGlobal (module, eltype, "jit_code_start"); fields [tindex ++] = AddJitGlobal (module, eltype, "jit_code_end"); } else { fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); } if (!module->llvm_only) fields [tindex ++] = AddJitGlobal (module, eltype, "method_addresses"); else fields [tindex ++] = LLVMConstNull (eltype); if (module->llvm_only && module->unbox_tramp_indexes) { fields [tindex ++] = module->unbox_tramp_indexes; fields [tindex ++] = module->unbox_trampolines; } else { fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); } if (info->flags & MONO_AOT_FILE_FLAG_SEPARATE_DATA) { for (i = 0; i < MONO_AOT_TABLE_NUM; ++i) fields [tindex ++] = LLVMConstNull (eltype); } else { fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "blob"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "class_name_table"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "class_info_offsets"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "method_info_offsets"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "ex_info_offsets"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "extra_method_info_offsets"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "extra_method_table"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "got_info_offsets"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "llvm_got_info_offsets"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "image_table"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "weak_field_indexes"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "method_flags_table"); } /* Not needed (mem_end) */ fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "assembly_guid"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "runtime_version"); if (info->trampoline_size [0]) { fields [tindex ++] = AddJitGlobal (module, eltype, "specific_trampolines"); fields [tindex ++] = AddJitGlobal (module, eltype, "static_rgctx_trampolines"); fields [tindex ++] = AddJitGlobal (module, eltype, "imt_trampolines"); fields [tindex ++] = AddJitGlobal (module, eltype, "gsharedvt_arg_trampolines"); fields [tindex ++] = AddJitGlobal (module, eltype, "ftnptr_arg_trampolines"); fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_arbitrary_trampolines"); } else { fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); } if (module->static_link && !module->llvm_only) fields [tindex ++] = AddJitGlobal (module, eltype, "globals"); else fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, 
"assembly_name"); if (!module->llvm_only) { fields [tindex ++] = AddJitGlobal (module, eltype, "plt"); fields [tindex ++] = AddJitGlobal (module, eltype, "plt_end"); fields [tindex ++] = AddJitGlobal (module, eltype, "unwind_info"); fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_trampolines"); fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_trampolines_end"); fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_trampoline_addresses"); } else { fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); } for (i = 0; i < MONO_AOT_FILE_INFO_NUM_SYMBOLS; ++i) { g_assert (fields [FILE_INFO_NUM_HEADER_FIELDS + i]); fields [FILE_INFO_NUM_HEADER_FIELDS + i] = LLVMConstBitCast (fields [FILE_INFO_NUM_HEADER_FIELDS + i], eltype); } /* Scalars */ fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->plt_got_offset_base, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->plt_got_info_offset_base, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->got_size, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->llvm_got_size, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->plt_size, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->nmethods, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->nextra_methods, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->flags, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->opts, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->simd_opts, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->gc_name_index, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->num_rgctx_fetch_trampolines, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->double_align, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->long_align, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->generic_tramp_num, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->card_table_shift_bits, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->card_table_mask, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->tramp_page_size, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->call_table_entry_size, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->nshared_got_entries, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->datafile_size, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), module->unbox_tramp_num, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), module->unbox_tramp_elemsize, FALSE); /* Arrays */ fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->table_offsets, MONO_AOT_TABLE_NUM); fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->num_trampolines, MONO_AOT_TRAMP_NUM); fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->trampoline_got_offset_base, MONO_AOT_TRAMP_NUM); fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->trampoline_size, MONO_AOT_TRAMP_NUM); fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->tramp_page_code_offsets, MONO_AOT_TRAMP_NUM); fields [tindex ++] = llvm_array_from_bytes (info->aotid, 16); g_assert (tindex == nfields); 
	LLVMSetInitializer (info_var, LLVMConstNamedStruct (LLVMGetElementType (LLVMTypeOf (info_var)), fields, nfields));

	if (module->static_link) {
		char *s, *p;
		LLVMValueRef var;

		s = g_strdup_printf ("mono_aot_module_%s_info", module->assembly->aname.name);
		/* Get rid of characters which cannot occur in symbols */
		for (p = s; *p; ++p) {
			if (!(isalnum (*p) || *p == '_'))
				*p = '_';
		}
		var = LLVMAddGlobal (module->lmodule, LLVMPointerType (LLVMInt8Type (), 0), s);
		g_free (s);
		LLVMSetInitializer (var, LLVMConstBitCast (LLVMGetNamedGlobal (module->lmodule, "mono_aot_file_info"), LLVMPointerType (LLVMInt8Type (), 0)));
		LLVMSetLinkage (var, LLVMExternalLinkage);
	}
}

typedef struct {
	LLVMValueRef lmethod;
	int argument;
} NonnullPropWorkItem;

static void
mono_llvm_nonnull_state_update (EmitContext *ctx, LLVMValueRef lcall, MonoMethod *call_method, LLVMValueRef *args, int num_params)
{
	if (mono_aot_can_specialize (call_method)) {
		int num_passed = LLVMGetNumArgOperands (lcall);
		g_assert (num_params <= num_passed);

		g_assert (ctx->module->method_to_call_info);
		GArray *call_site_union = (GArray *) g_hash_table_lookup (ctx->module->method_to_call_info, call_method);

		if (!call_site_union) {
			call_site_union = g_array_sized_new (FALSE, TRUE, sizeof (gint32), num_params);
			int zero = 0;
			for (int i = 0; i < num_params; i++)
				g_array_insert_val (call_site_union, i, zero);
		}

		for (int i = 0; i < num_params; i++) {
			if (mono_llvm_is_nonnull (args [i])) {
				g_assert (i < LLVMGetNumArgOperands (lcall));
				mono_llvm_set_call_nonnull_arg (lcall, i);
			} else {
				gint32 *nullable_count = &g_array_index (call_site_union, gint32, i);
				*nullable_count = *nullable_count + 1;
			}
		}

		g_hash_table_insert (ctx->module->method_to_call_info, call_method, call_site_union);
	}
}

static void
mono_llvm_propagate_nonnull_final (GHashTable *all_specializable, MonoLLVMModule *module)
{
	// When we first traverse the mini IL, we mark the things that are
	// nonnull (the roots). Then, for all of the methods that can be specialized, we
	// see if their call sites have nonnull attributes.
	// If so, we mark the function's param. That parameter's uses are then visited
	// to propagate the attribute further. This propagation can trigger a need to
	// mark more attributes non-null, and so on and so forth.
	GSList *queue = NULL;

	GHashTableIter iter;
	LLVMValueRef lmethod;
	MonoMethod *method;
	g_hash_table_iter_init (&iter, all_specializable);
	while (g_hash_table_iter_next (&iter, (void**)&lmethod, (void**)&method)) {
		GArray *call_site_union = (GArray *) g_hash_table_lookup (module->method_to_call_info, method);

		// Basic sanity checking
		if (call_site_union)
			g_assert (call_site_union->len == LLVMCountParams (lmethod));

		// Add root to work queue
		for (int i = 0; call_site_union && i < call_site_union->len; i++) {
			if (g_array_index (call_site_union, gint32, i) == 0) {
				NonnullPropWorkItem *item = g_malloc (sizeof (NonnullPropWorkItem));
				item->lmethod = lmethod;
				item->argument = i;
				queue = g_slist_prepend (queue, item);
			}
		}
	}

	// This is essentially reference counting, and we are propagating
	// the refcount decrement here. We have less work to do than we may otherwise
	// because we are only working with a set of subgraphs of specializable functions.
	//
	// We rely on being able to see all of the references in the graph.
	// This is ensured by the function mono_aot_can_specialize. Everything in
	// all_specializable is a function that can be specialized, and is the resulting
	// node in the graph after all of the substitutions are done.
	// (An illustrative standalone sketch of this worklist scheme follows below.)
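	/*
	 * Illustrative sketch (kept out of the build): the worklist scheme below,
	 * reduced to plain C over a hypothetical call graph. All names here
	 * (Param, WorkItem, propagate_nonnull) are invented for illustration;
	 * only the shape of the algorithm mirrors the real code.
	 */
#if 0
#include <stdlib.h>

typedef struct Param {
	int nullable_count;        /* call sites that may pass NULL */
	struct Param **flows_into; /* parameters this one is forwarded to */
	int nflows;
	int nonnull;               /* result: proven non-null */
} Param;

typedef struct WorkItem {
	Param *p;
	struct WorkItem *next;
} WorkItem;

static void
propagate_nonnull (WorkItem *queue)
{
	/* The caller queues the roots: params whose nullable_count is already 0. */
	while (queue) {
		WorkItem *current = queue;
		queue = queue->next;

		current->p->nonnull = 1;

		/* Each forwarding edge loses one potentially-NULL source. */
		for (int i = 0; i < current->p->nflows; ++i) {
			Param *callee = current->p->flows_into [i];
			if (--callee->nullable_count == 0) {
				WorkItem *item = malloc (sizeof (WorkItem));
				item->p = callee;
				item->next = queue;
				queue = item;
			}
		}
		free (current);
	}
}
#endif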
// // Anything disrupting the direct calls made with self-init will break this optimization. while (queue) { // Update the queue state. // Our only other per-iteration responsibility is now to free current NonnullPropWorkItem *current = (NonnullPropWorkItem *) queue->data; queue = queue->next; g_assert (current->argument < LLVMCountParams (current->lmethod)); // Does the actual leaf-node work here // Mark the function argument as nonnull for LLVM mono_llvm_set_func_nonnull_arg (current->lmethod, current->argument); // The rest of this is for propagating forward nullability changes // to calls that use the argument that is now nullable. // Get the actual LLVM value of the argument, so we can see which call instructions // used that argument LLVMValueRef caller_argument = LLVMGetParam (current->lmethod, current->argument); // Iterate over the calls using the newly-non-nullable argument GSList *calls = mono_llvm_calls_using (caller_argument); for (GSList *cursor = calls; cursor != NULL; cursor = cursor->next) { LLVMValueRef lcall = (LLVMValueRef) cursor->data; LLVMValueRef callee_lmethod = LLVMGetCalledValue (lcall); // If this wasn't a direct call for which mono_aot_can_specialize is true, // this lookup won't find a MonoMethod. MonoMethod *callee_method = (MonoMethod *) g_hash_table_lookup (all_specializable, callee_lmethod); if (!callee_method) continue; // Decrement number of nullable refs at that func's arg offset GArray *call_site_union = (GArray *) g_hash_table_lookup (module->method_to_call_info, callee_method); // It has module-local callers and is specializable, should have seen this call site // and inited this g_assert (call_site_union); // The function *definition* parameter arity should always be consistent int max_params = LLVMCountParams (callee_lmethod); if (call_site_union->len != max_params) { mono_llvm_dump_value (callee_lmethod); g_assert_not_reached (); } // Get the values that correspond to the parameters passed to the call // that used our argument LLVMValueRef *operands = mono_llvm_call_args (lcall); for (int call_argument = 0; call_argument < max_params; call_argument++) { // Every time we used the newly-non-nullable argument, decrement the nullable // refcount for that function. if (caller_argument == operands [call_argument]) { gint32 *nullable_count = &g_array_index (call_site_union, gint32, call_argument); g_assert (*nullable_count > 0); *nullable_count = *nullable_count - 1; // If we caused that callee's parameter to become newly nullable, add to work queue if (*nullable_count == 0) { NonnullPropWorkItem *item = g_malloc (sizeof (NonnullPropWorkItem)); item->lmethod = callee_lmethod; item->argument = call_argument; queue = g_slist_prepend (queue, item); } } } g_free (operands); // Update nullability refcount information for the callee now g_hash_table_insert (module->method_to_call_info, callee_method, call_site_union); } g_slist_free (calls); g_free (current); } } /* * Emit the aot module into the LLVM bitcode file FILENAME. */ void mono_llvm_emit_aot_module (const char *filename, const char *cu_name) { LLVMTypeRef inited_type; LLVMValueRef real_inited; MonoLLVMModule *module = &aot_module; emit_llvm_code_end (module); /* * Create the real init_var and replace all uses of the dummy variable with * the real one. 
*/ inited_type = LLVMArrayType (LLVMInt8Type (), module->max_inited_idx + 1); real_inited = LLVMAddGlobal (module->lmodule, inited_type, "mono_inited"); LLVMSetInitializer (real_inited, LLVMConstNull (inited_type)); LLVMSetLinkage (real_inited, LLVMInternalLinkage); mono_llvm_replace_uses_of (module->inited_var, real_inited); LLVMDeleteGlobal (module->inited_var); /* Replace the dummy info_ variables with the real ones */ for (int i = 0; i < module->cfgs->len; ++i) { MonoCompile *cfg = (MonoCompile *)g_ptr_array_index (module->cfgs, i); // FIXME: Eliminate unused vars // FIXME: Speed this up if (cfg->llvm_dummy_info_var) { if (cfg->llvm_info_var) { mono_llvm_replace_uses_of (cfg->llvm_dummy_info_var, cfg->llvm_info_var); LLVMDeleteGlobal (cfg->llvm_dummy_info_var); } else { // FIXME: How can this happen ? LLVMSetInitializer (cfg->llvm_dummy_info_var, mono_llvm_create_constant_data_array (NULL, 0)); } } } if (module->llvm_only) { emit_get_method (&aot_module); emit_get_unbox_tramp (&aot_module); } emit_init_aotconst (module); emit_llvm_used (&aot_module); emit_dbg_info (&aot_module, filename, cu_name); emit_aot_file_info (&aot_module); /* Replace PLT entries for directly callable methods with the methods themselves */ { GHashTableIter iter; MonoJumpInfo *ji; LLVMValueRef callee; GHashTable *specializable = g_hash_table_new (NULL, NULL); g_hash_table_iter_init (&iter, module->plt_entries_ji); while (g_hash_table_iter_next (&iter, (void**)&ji, (void**)&callee)) { if (mono_aot_is_direct_callable (ji)) { LLVMValueRef lmethod; lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, ji->data.method); /* The types might not match because the caller might pass an rgctx */ if (lmethod && LLVMTypeOf (callee) == LLVMTypeOf (lmethod)) { mono_llvm_replace_uses_of (callee, lmethod); if (mono_aot_can_specialize (ji->data.method)) g_hash_table_insert (specializable, lmethod, ji->data.method); mono_aot_mark_unused_llvm_plt_entry (ji); } } } mono_llvm_propagate_nonnull_final (specializable, module); g_hash_table_destroy (specializable); } #if 0 { char *verifier_err; if (LLVMVerifyModule (module->lmodule, LLVMReturnStatusAction, &verifier_err)) { printf ("%s\n", verifier_err); g_assert_not_reached (); } } #endif /* Note: You can still dump an invalid bitcode file by running `llvm-dis` * in a debugger, set a breakpoint on `LLVMVerifyModule` and fake its * result to 0 (indicating success). */ LLVMWriteBitcodeToFile (module->lmodule, filename); } static LLVMValueRef md_string (const char *s) { return LLVMMDString (s, strlen (s)); } /* Debugging support */ static void emit_dbg_info (MonoLLVMModule *module, const char *filename, const char *cu_name) { LLVMModuleRef lmodule = module->lmodule; LLVMValueRef args [16], ver; /* * This can only be enabled when LLVM code is emitted into a separate object * file, since the AOT compiler also emits dwarf info, * and the abbrev indexes will not be correct since llvm has added its own * abbrevs. 
*/ if (!module->emit_dwarf) return; mono_llvm_di_builder_finalize (module->di_builder); args [0] = LLVMConstInt (LLVMInt32Type (), 2, FALSE); args [1] = LLVMMDString ("Dwarf Version", strlen ("Dwarf Version")); args [2] = LLVMConstInt (LLVMInt32Type (), 2, FALSE); ver = LLVMMDNode (args, 3); LLVMAddNamedMetadataOperand (lmodule, "llvm.module.flags", ver); args [0] = LLVMConstInt (LLVMInt32Type (), 2, FALSE); args [1] = LLVMMDString ("Debug Info Version", strlen ("Debug Info Version")); args [2] = LLVMConstInt (LLVMInt64Type (), 3, FALSE); ver = LLVMMDNode (args, 3); LLVMAddNamedMetadataOperand (lmodule, "llvm.module.flags", ver); } static LLVMValueRef emit_dbg_subprogram (EmitContext *ctx, MonoCompile *cfg, LLVMValueRef method, const char *name) { MonoLLVMModule *module = ctx->module; MonoDebugMethodInfo *minfo = ctx->minfo; char *source_file, *dir, *filename; MonoSymSeqPoint *sym_seq_points; int n_seq_points; if (!minfo) return NULL; mono_debug_get_seq_points (minfo, &source_file, NULL, NULL, &sym_seq_points, &n_seq_points); if (!source_file) source_file = g_strdup ("<unknown>"); dir = g_path_get_dirname (source_file); filename = g_path_get_basename (source_file); g_free (source_file); return (LLVMValueRef)mono_llvm_di_create_function (module->di_builder, module->cu, method, cfg->method->name, name, dir, filename, n_seq_points ? sym_seq_points [0].line : 1); } static void emit_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder, const unsigned char *cil_code) { MonoCompile *cfg = ctx->cfg; if (ctx->minfo && cil_code && cil_code >= cfg->header->code && cil_code < cfg->header->code + cfg->header->code_size) { MonoDebugSourceLocation *loc; LLVMValueRef loc_md; loc = mono_debug_method_lookup_location (ctx->minfo, cil_code - cfg->header->code); if (loc) { loc_md = (LLVMValueRef)mono_llvm_di_create_location (ctx->module->di_builder, ctx->dbg_md, loc->row, loc->column); mono_llvm_di_set_location (builder, loc_md); mono_debug_free_source_location (loc); } } } static void emit_default_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder) { if (ctx->minfo) { LLVMValueRef loc_md; loc_md = (LLVMValueRef)mono_llvm_di_create_location (ctx->module->di_builder, ctx->dbg_md, 0, 0); mono_llvm_di_set_location (builder, loc_md); } } /* DESIGN: - Emit LLVM IR from the mono IR using the LLVM C API. - The original arch specific code remains, so we can fall back to it if we run into something we can't handle. */ /* A partial list of issues: - Handling of opcodes which can throw exceptions. In the mono JIT, these are implemented using code like this: method: <compare> throw_pos: b<cond> ex_label <rest of code> ex_label: push throw_pos - method call <exception trampoline> The problematic part is push throw_pos - method, which cannot be represented in the LLVM IR, since it does not support label values. -> this can be implemented in AOT mode using inline asm + labels, but cannot be implemented in JIT mode ? -> a possible but slower implementation would use the normal exception throwing code but it would need to control the placement of the throw code (it needs to be exactly after the compare+branch). -> perhaps add a PC offset intrinsics ? - efficient implementation of .ovf opcodes. These are currently implemented as: <ins which sets the condition codes> b<cond> ex_label Some overflow opcodes are now supported by LLVM SVN. - exception handling, unwinding. - SSA is disabled for methods with exception handlers - How to obtain unwind info for LLVM compiled methods ? 
   -> this is now solved by converting the unwind info generated by LLVM
      into our format.
   - LLVM uses the c++ exception handling framework, while we use our home grown
     code, and couldn't use the c++ one:
     - it's not supported under VC++ and other exotic platforms.
     - it might be impossible to support filter clauses with it.
 - trampolines.
   The trampolines need a predictable call sequence, since they need to disasm
   the calling code to obtain register numbers / offsets.
   LLVM currently generates this code in non-JIT mode:
	   mov    -0x98(%rax),%eax
	   callq  *%rax
   Here, the vtable pointer is lost.
   -> solution: use one vtable trampoline per class.
 - passing/receiving the IMT pointer/RGCTX.
   -> solution: pass them as normal arguments ?
 - argument passing.
   LLVM does not allow the specification of argument registers etc. This means
   that all calls are made according to the platform ABI.
 - passing/receiving vtypes.
   Vtypes passed/received in registers are handled by the front end by using
   a signature with scalar arguments, and loading the parts of the vtype into those
   arguments.
   Vtypes passed on the stack are handled using the 'byval' attribute.
 - ldaddr.
   Supported through alloca; we need to emit the load/store code.
 - types.
   The mono JIT uses pointer sized iregs/double fregs, while LLVM uses precisely
   typed registers, so we have to keep track of the precise LLVM type of each vreg.
   This is made easier because the IR is already in SSA form.
   An additional problem is that our IR is not consistent with types, i.e. i32/i64
   types are frequently used incorrectly.
*/

/*
  AOT SUPPORT:
  Emit LLVM bytecode into a .bc file, compile it using llc into a .s file, then link
  it with the file containing the methods emitted by the JIT and the AOT data
  structures.
*/

/* FIXME: Normalize some aspects of the mono IR to allow easier translation, like:
 * - each bblock should end with a branch
 * - setting the return value, making cfg->ret non-volatile
 * - avoid some transformations in the JIT which make it harder for us to generate
 *   code.
 * - use pointer types to help optimizations.
 */

#else /* DISABLE_JIT */

void
mono_llvm_cleanup (void)
{
}

void
mono_llvm_free_mem_manager (MonoJitMemoryManager *mem_manager)
{
}

void
mono_llvm_init (gboolean enable_jit)
{
}

#endif /* DISABLE_JIT */

#if !defined(DISABLE_JIT) && !defined(MONO_CROSS_COMPILE)

/* LLVM JIT support */

/*
 * decode_llvm_eh_info:
 *
 *   Decode the EH table emitted by llvm in jit mode, and store
 * the result into cfg.
 */
static void
decode_llvm_eh_info (EmitContext *ctx, gpointer eh_frame)
{
	MonoCompile *cfg = ctx->cfg;
	guint8 *cie, *fde;
	int fde_len;
	MonoLLVMFDEInfo info;
	MonoJitExceptionInfo *ei;
	guint8 *p = (guint8*)eh_frame;
	int version, fde_count, fde_offset;
	guint32 ei_len, i, nested_len;
	gpointer *type_info;
	gint32 *table;
	guint8 *unw_info;

	/*
	 * Decode the one element EH table emitted by the MonoException class
	 * in llvm.
*/ /* Similar to decode_llvm_mono_eh_frame () in aot-runtime.c */ version = *p; g_assert (version == 3); p ++; p ++; p = (guint8 *)ALIGN_PTR_TO (p, 4); fde_count = *(guint32*)p; p += 4; table = (gint32*)p; g_assert (fde_count <= 2); /* The first entry is the real method */ g_assert (table [0] == 1); fde_offset = table [1]; table += fde_count * 2; /* Extra entry */ cfg->code_len = table [0]; fde_len = table [1] - fde_offset; table += 2; fde = (guint8*)eh_frame + fde_offset; cie = (guint8*)table; /* Compute lengths */ mono_unwind_decode_llvm_mono_fde (fde, fde_len, cie, cfg->native_code, &info, NULL, NULL, NULL); ei = (MonoJitExceptionInfo *)g_malloc0 (info.ex_info_len * sizeof (MonoJitExceptionInfo)); type_info = (gpointer *)g_malloc0 (info.ex_info_len * sizeof (gpointer)); unw_info = (guint8*)g_malloc0 (info.unw_info_len); mono_unwind_decode_llvm_mono_fde (fde, fde_len, cie, cfg->native_code, &info, ei, type_info, unw_info); cfg->encoded_unwind_ops = unw_info; cfg->encoded_unwind_ops_len = info.unw_info_len; if (cfg->verbose_level > 1) mono_print_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len); if (info.this_reg != -1) { cfg->llvm_this_reg = info.this_reg; cfg->llvm_this_offset = info.this_offset; } ei_len = info.ex_info_len; // Nested clauses are currently disabled nested_len = 0; cfg->llvm_ex_info = (MonoJitExceptionInfo*)mono_mempool_alloc0 (cfg->mempool, (ei_len + nested_len) * sizeof (MonoJitExceptionInfo)); cfg->llvm_ex_info_len = ei_len + nested_len; memcpy (cfg->llvm_ex_info, ei, ei_len * sizeof (MonoJitExceptionInfo)); /* Fill the rest of the information from the type info */ for (i = 0; i < ei_len; ++i) { gint32 clause_index = *(gint32*)type_info [i]; MonoExceptionClause *clause = &cfg->header->clauses [clause_index]; cfg->llvm_ex_info [i].flags = clause->flags; cfg->llvm_ex_info [i].data.catch_class = clause->data.catch_class; cfg->llvm_ex_info [i].clause_index = clause_index; } } static MonoLLVMModule* init_jit_module (void) { MonoJitMemoryManager *jit_mm; MonoLLVMModule *module; // FIXME: jit_mm = get_default_jit_mm (); if (jit_mm->llvm_module) return (MonoLLVMModule*)jit_mm->llvm_module; mono_loader_lock (); if (jit_mm->llvm_module) { mono_loader_unlock (); return (MonoLLVMModule*)jit_mm->llvm_module; } module = g_new0 (MonoLLVMModule, 1); module->context = LLVMGetGlobalContext (); module->mono_ee = (MonoEERef*)mono_llvm_create_ee (&module->ee); // This contains just the intrinsics module->lmodule = LLVMModuleCreateWithName ("jit-global-module"); add_intrinsics (module->lmodule); add_types (module); module->llvm_types = g_hash_table_new (NULL, NULL); mono_memory_barrier (); jit_mm->llvm_module = module; mono_loader_unlock (); return (MonoLLVMModule*)jit_mm->llvm_module; } static void llvm_jit_finalize_method (EmitContext *ctx) { MonoCompile *cfg = ctx->cfg; int nvars = g_hash_table_size (ctx->jit_callees); LLVMValueRef *callee_vars = g_new0 (LLVMValueRef, nvars); gpointer *callee_addrs = g_new0 (gpointer, nvars); GHashTableIter iter; LLVMValueRef var; MonoMethod *callee; gpointer eh_frame; int i; /* * Compute the addresses of the LLVM globals pointing to the * methods called by the current method. Pass it to the trampoline * code so it can update them after their corresponding method was * compiled. 
*/ g_hash_table_iter_init (&iter, ctx->jit_callees); i = 0; while (g_hash_table_iter_next (&iter, NULL, (void**)&var)) callee_vars [i ++] = var; mono_llvm_optimize_method (ctx->lmethod); if (cfg->verbose_level > 1) { g_print ("\n*** Optimized LLVM IR for %s ***\n", mono_method_full_name (cfg->method, TRUE)); if (cfg->compile_aot) { mono_llvm_dump_value (ctx->lmethod); } else { mono_llvm_dump_module (ctx->lmodule); } g_print ("***\n\n"); } mono_codeman_enable_write (); cfg->native_code = (guint8*)mono_llvm_compile_method (ctx->module->mono_ee, cfg, ctx->lmethod, nvars, callee_vars, callee_addrs, &eh_frame); mono_llvm_remove_gc_safepoint_poll (ctx->lmodule); mono_codeman_disable_write (); decode_llvm_eh_info (ctx, eh_frame); // FIXME: MonoJitMemoryManager *jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); if (!jit_mm->llvm_jit_callees) jit_mm->llvm_jit_callees = g_hash_table_new (NULL, NULL); g_hash_table_iter_init (&iter, ctx->jit_callees); i = 0; while (g_hash_table_iter_next (&iter, (void**)&callee, (void**)&var)) { GSList *addrs = (GSList*)g_hash_table_lookup (jit_mm->llvm_jit_callees, callee); addrs = g_slist_prepend (addrs, callee_addrs [i]); g_hash_table_insert (jit_mm->llvm_jit_callees, callee, addrs); i ++; } jit_mm_unlock (jit_mm); } #else static MonoLLVMModule* init_jit_module (void) { g_assert_not_reached (); } static void llvm_jit_finalize_method (EmitContext *ctx) { g_assert_not_reached (); } #endif static MonoCPUFeatures cpu_features; MonoCPUFeatures mono_llvm_get_cpu_features (void) { static const CpuFeatureAliasFlag flags_map [] = { #if defined(TARGET_X86) || defined(TARGET_AMD64) { "sse", MONO_CPU_X86_SSE }, { "sse2", MONO_CPU_X86_SSE2 }, { "pclmul", MONO_CPU_X86_PCLMUL }, { "aes", MONO_CPU_X86_AES }, { "sse2", MONO_CPU_X86_SSE2 }, { "sse3", MONO_CPU_X86_SSE3 }, { "ssse3", MONO_CPU_X86_SSSE3 }, { "sse4.1", MONO_CPU_X86_SSE41 }, { "sse4.2", MONO_CPU_X86_SSE42 }, { "popcnt", MONO_CPU_X86_POPCNT }, { "avx", MONO_CPU_X86_AVX }, { "avx2", MONO_CPU_X86_AVX2 }, { "fma", MONO_CPU_X86_FMA }, { "lzcnt", MONO_CPU_X86_LZCNT }, { "bmi", MONO_CPU_X86_BMI1 }, { "bmi2", MONO_CPU_X86_BMI2 }, #endif #if defined(TARGET_ARM64) { "crc", MONO_CPU_ARM64_CRC }, { "crypto", MONO_CPU_ARM64_CRYPTO }, { "neon", MONO_CPU_ARM64_NEON }, { "rdm", MONO_CPU_ARM64_RDM }, { "dotprod", MONO_CPU_ARM64_DP }, #endif #if defined(TARGET_WASM) { "simd", MONO_CPU_WASM_SIMD }, #endif // flags_map cannot be zero length in MSVC, so add useless dummy entry for arm32 #if defined(TARGET_ARM) && defined(HOST_WIN32) { "inited", MONO_CPU_INITED}, #endif }; if (!cpu_features) cpu_features = MONO_CPU_INITED | (MonoCPUFeatures)mono_llvm_check_cpu_features (flags_map, G_N_ELEMENTS (flags_map)); return cpu_features; }
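/*
 * Aside (kept out of the build): the flags_map lookup in
 * mono_llvm_get_cpu_features () reduces to the shape below.
 * host_has_feature () is a hypothetical stand-in for the LLVM host-feature
 * query that mono_llvm_check_cpu_features () performs on the C++ side.
 */
#if 0
typedef struct {
	const char *name;
	int flag;
} ExampleFeatureAlias;

static int
example_check_cpu_features (const ExampleFeatureAlias *map, int len, int (*host_has_feature) (const char *name))
{
	int flags = 0;

	/* OR together the flag of every alias the host CPU reports. */
	for (int i = 0; i < len; ++i) {
		if (host_has_feature (map [i].name))
			flags |= map [i].flag;
	}
	return flags;
}
#endif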
/**
 * \file
 * llvm "Backend" for the mono JIT
 *
 * Copyright 2009-2011 Novell Inc (http://www.novell.com)
 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include "config.h"

#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/debug-internals.h>
#include <mono/metadata/mempool-internals.h>
#include <mono/metadata/environment.h>
#include <mono/metadata/object-internals.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/tokentype.h>
#include <mono/utils/mono-tls.h>
#include <mono/utils/mono-dl.h>
#include <mono/utils/mono-time.h>
#include <mono/utils/freebsd-dwarf.h>

#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif

#include "llvm-c/BitWriter.h"
#include "llvm-c/Analysis.h"

#include "mini-llvm-cpp.h"
#include "llvm-jit.h"
#include "aot-compiler.h"
#include "mini-llvm.h"
#include "mini-runtime.h"
#include <mono/utils/mono-math.h>

#ifndef DISABLE_JIT

#if defined(TARGET_AMD64) && defined(TARGET_WIN32) && defined(HOST_WIN32) && defined(_MSC_VER)
#define TARGET_X86_64_WIN32_MSVC
#endif

#if defined(TARGET_X86_64_WIN32_MSVC)
#define TARGET_WIN32_MSVC
#endif

#if LLVM_API_VERSION < 900
#error "The version of the mono llvm repository is too old."
#endif

/*
 * Information associated by mono with LLVM modules.
 */
typedef struct {
	LLVMModuleRef lmodule;
	LLVMValueRef throw_icall, rethrow, throw_corlib_exception;
	GHashTable *llvm_types;
	LLVMValueRef dummy_got_var;
	const char *get_method_symbol;
	const char *get_unbox_tramp_symbol;
	const char *init_aotconst_symbol;
	GHashTable *plt_entries;
	GHashTable *plt_entries_ji;
	GHashTable *method_to_lmethod;
	GHashTable *method_to_call_info;
	GHashTable *lvalue_to_lcalls;
	GHashTable *direct_callables;
	/* Maps got slot index -> LLVMValueRef */
	GHashTable *aotconst_vars;
	char **bb_names;
	int bb_names_len;
	GPtrArray *used;
	LLVMTypeRef ptr_type;
	GPtrArray *subprogram_mds;
	MonoEERef *mono_ee;
	LLVMExecutionEngineRef ee;
	gboolean external_symbols;
	gboolean emit_dwarf;
	int max_got_offset;
	LLVMValueRef personality;
	gpointer gc_poll_cold_wrapper_compiled;

	/* For AOT */
	MonoAssembly *assembly;
	char *global_prefix;
	MonoAotFileInfo aot_info;
	const char *eh_frame_symbol;
	LLVMValueRef get_method, get_unbox_tramp, init_aotconst_func;
	LLVMValueRef init_methods [AOT_INIT_METHOD_NUM];
	LLVMValueRef code_start, code_end;
	LLVMValueRef inited_var;
	LLVMValueRef unbox_tramp_indexes;
	LLVMValueRef unbox_trampolines;
	LLVMValueRef gc_poll_cold_wrapper;
	LLVMValueRef info_var;
	LLVMTypeRef *info_var_eltypes;
	int max_inited_idx, max_method_idx;
	gboolean has_jitted_code;
	gboolean static_link;
	gboolean llvm_only;
	gboolean interp;
	GHashTable *idx_to_lmethod;
	GHashTable *idx_to_unbox_tramp;
	GPtrArray *callsite_list;
	LLVMContextRef context;
	LLVMValueRef sentinel_exception;
	LLVMValueRef gc_safe_point_flag_var;
	LLVMValueRef interrupt_flag_var;
	void *di_builder, *cu;
	GHashTable *objc_selector_to_var;
	GPtrArray *cfgs;
	int unbox_tramp_num, unbox_tramp_elemsize;
	GHashTable *got_idx_to_type;
	GHashTable *no_method_table_lmethods;
} MonoLLVMModule;

/*
 * Information associated by the backend with mono basic blocks.
 */
typedef struct {
	LLVMBasicBlockRef bblock, end_bblock;
	LLVMValueRef finally_ind;
	gboolean added, invoke_target;
	/*
	 * If this bblock is the start of a finally clause, this is a list of bblocks it
	 * needs to branch to in ENDFINALLY.
*/ GSList *call_handler_return_bbs; /* * If this bblock is the start of a finally clause, this is the bblock that * CALL_HANDLER needs to branch to. */ LLVMBasicBlockRef call_handler_target_bb; /* The list of switch statements generated by ENDFINALLY instructions */ GSList *endfinally_switch_ins_list; GSList *phi_nodes; } BBInfo; /* * Structure containing emit state */ typedef struct { MonoMemPool *mempool; /* Maps method names to the corresponding LLVMValueRef */ GHashTable *emitted_method_decls; MonoCompile *cfg; LLVMValueRef lmethod; MonoLLVMModule *module; LLVMModuleRef lmodule; BBInfo *bblocks; int sindex, default_index, ex_index; LLVMBuilderRef builder; LLVMValueRef *values, *addresses; MonoType **vreg_cli_types; LLVMCallInfo *linfo; MonoMethodSignature *sig; GSList *builders; GHashTable *region_to_handler; GHashTable *clause_to_handler; LLVMBuilderRef alloca_builder; LLVMValueRef last_alloca; LLVMValueRef rgctx_arg; LLVMValueRef this_arg; LLVMTypeRef *vreg_types; gboolean *is_vphi; LLVMTypeRef method_type; LLVMBasicBlockRef init_bb, inited_bb; gboolean *is_dead; gboolean *unreachable; gboolean llvm_only; gboolean has_got_access; gboolean is_linkonce; gboolean emit_dummy_arg; gboolean has_safepoints; gboolean has_catch; int this_arg_pindex, rgctx_arg_pindex; LLVMValueRef imt_rgctx_loc; GHashTable *llvm_types; LLVMValueRef dbg_md; MonoDebugMethodInfo *minfo; /* For every clause, the clauses it is nested in */ GSList **nested_in; LLVMValueRef ex_var; GHashTable *exc_meta; GPtrArray *callsite_list; GPtrArray *phi_values; GPtrArray *bblock_list; char *method_name; GHashTable *jit_callees; LLVMValueRef long_bb_break_var; int *gc_var_indexes; LLVMValueRef gc_pin_area; LLVMValueRef il_state; LLVMValueRef il_state_ret; } EmitContext; typedef struct { MonoBasicBlock *bb; MonoInst *phi; MonoBasicBlock *in_bb; int sreg; } PhiNode; /* * Instruction metadata * This is the same as ins_info, but LREG != IREG. 
*/ #ifdef MINI_OP #undef MINI_OP #endif #ifdef MINI_OP3 #undef MINI_OP3 #endif #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ', #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3, #define NONE ' ' #define IREG 'i' #define FREG 'f' #define VREG 'v' #define XREG 'x' #define LREG 'l' /* keep in sync with the enum in mini.h */ const char mini_llvm_ins_info[] = { #include "mini-ops.h" }; #undef MINI_OP #undef MINI_OP3 #if TARGET_SIZEOF_VOID_P == 4 #define GET_LONG_IMM(ins) ((ins)->inst_l) #else #define GET_LONG_IMM(ins) ((ins)->inst_imm) #endif #define LLVM_INS_INFO(opcode) (&mini_llvm_ins_info [((opcode) - OP_START - 1) * 4]) #if 0 #define TRACE_FAILURE(msg) do { printf ("%s\n", msg); } while (0) #else #define TRACE_FAILURE(msg) #endif #ifdef TARGET_X86 #define IS_TARGET_X86 1 #else #define IS_TARGET_X86 0 #endif #ifdef TARGET_AMD64 #define IS_TARGET_AMD64 1 #else #define IS_TARGET_AMD64 0 #endif #define ctx_ok(ctx) (!(ctx)->cfg->disable_llvm) enum { MAX_VECTOR_ELEMS = 32, // 2 vectors * 128 bits per vector / 8 bits per element ARM64_MAX_VECTOR_ELEMS = 16, }; const int mask_0_incr_1 [] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, }; static LLVMIntPredicate cond_to_llvm_cond [] = { LLVMIntEQ, LLVMIntNE, LLVMIntSLE, LLVMIntSGE, LLVMIntSLT, LLVMIntSGT, LLVMIntULE, LLVMIntUGE, LLVMIntULT, LLVMIntUGT, }; static LLVMRealPredicate fpcond_to_llvm_cond [] = { LLVMRealOEQ, LLVMRealUNE, LLVMRealOLE, LLVMRealOGE, LLVMRealOLT, LLVMRealOGT, LLVMRealULE, LLVMRealUGE, LLVMRealULT, LLVMRealUGT, LLVMRealORD, LLVMRealUNO }; /* See Table 3-1 ("Comparison Predicate for CMPPD and CMPPS Instructions") in * Vol. 2A of the Intel SDM. */ enum { SSE_eq_ord_nosignal = 0, SSE_lt_ord_signal = 1, SSE_le_ord_signal = 2, SSE_unord_nosignal = 3, SSE_neq_unord_nosignal = 4, SSE_nlt_unord_signal = 5, SSE_nle_unord_signal = 6, SSE_ord_nosignal = 7, }; static MonoLLVMModule aot_module; static GHashTable *intrins_id_to_intrins; static LLVMTypeRef i1_t, i2_t, i4_t, i8_t, r4_t, r8_t; static LLVMTypeRef sse_i1_t, sse_i2_t, sse_i4_t, sse_i8_t, sse_r4_t, sse_r8_t; static LLVMTypeRef v64_i1_t, v64_i2_t, v64_i4_t, v64_i8_t, v64_r4_t, v64_r8_t; static LLVMTypeRef v128_i1_t, v128_i2_t, v128_i4_t, v128_i8_t, v128_r4_t, v128_r8_t; static LLVMTypeRef void_func_t; static MonoLLVMModule *init_jit_module (void); static void emit_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder, const unsigned char *cil_code); static void emit_default_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder); static LLVMValueRef emit_dbg_subprogram (EmitContext *ctx, MonoCompile *cfg, LLVMValueRef method, const char *name); static void emit_dbg_info (MonoLLVMModule *module, const char *filename, const char *cu_name); static void emit_cond_system_exception (EmitContext *ctx, MonoBasicBlock *bb, const char *exc_type, LLVMValueRef cmp, gboolean force_explicit); static LLVMValueRef get_intrins (EmitContext *ctx, int id); static LLVMValueRef get_intrins_from_module (LLVMModuleRef lmodule, int id); static void llvm_jit_finalize_method (EmitContext *ctx); static void mono_llvm_nonnull_state_update (EmitContext *ctx, LLVMValueRef lcall, MonoMethod *call_method, LLVMValueRef *args, int num_params); static void mono_llvm_propagate_nonnull_final (GHashTable *all_specializable, MonoLLVMModule *module); static void create_aot_info_var (MonoLLVMModule *module); static void set_invariant_load_flag (LLVMValueRef v); static void set_nonnull_load_flag (LLVMValueRef v); enum { 
INTRIN_scalar = 1 << 0, INTRIN_vector64 = 1 << 1, INTRIN_vector128 = 1 << 2, INTRIN_vectorwidths = 3, INTRIN_vectormask = 0x7, INTRIN_int8 = 1 << 3, INTRIN_int16 = 1 << 4, INTRIN_int32 = 1 << 5, INTRIN_int64 = 1 << 6, INTRIN_float32 = 1 << 7, INTRIN_float64 = 1 << 8, INTRIN_elementwidths = 6, }; typedef uint16_t llvm_ovr_tag_t; static LLVMTypeRef intrin_types [INTRIN_vectorwidths][INTRIN_elementwidths]; static const llvm_ovr_tag_t intrin_arm64_ovr [] = { #define INTRINS(sym, ...) 0, #define INTRINS_OVR(sym, ...) 0, #define INTRINS_OVR_2_ARG(sym, ...) 0, #define INTRINS_OVR_3_ARG(sym, ...) 0, #define INTRINS_OVR_TAG(sym, _, arch, spec) spec, #define INTRINS_OVR_TAG_KIND(sym, _, kind, arch, spec) spec, #include "llvm-intrinsics.h" }; enum { INTRIN_kind_ftoi = 1, INTRIN_kind_widen, INTRIN_kind_widen_across, INTRIN_kind_across, INTRIN_kind_arm64_dot_prod, }; static const uint8_t intrin_kind [] = { #define INTRINS(sym, ...) 0, #define INTRINS_OVR(sym, ...) 0, #define INTRINS_OVR_2_ARG(sym, ...) 0, #define INTRINS_OVR_3_ARG(sym, ...) 0, #define INTRINS_OVR_TAG(sym, _, arch, spec) 0, #define INTRINS_OVR_TAG_KIND(sym, _, arch, kind, spec) kind, #include "llvm-intrinsics.h" }; static inline llvm_ovr_tag_t ovr_tag_force_scalar (llvm_ovr_tag_t tag) { return (tag & ~INTRIN_vectormask) | INTRIN_scalar; } static inline llvm_ovr_tag_t ovr_tag_smaller_vector (llvm_ovr_tag_t tag) { return (tag & ~INTRIN_vectormask) | ((tag & INTRIN_vectormask) >> 1); } static inline llvm_ovr_tag_t ovr_tag_smaller_elements (llvm_ovr_tag_t tag) { return ((tag & ~INTRIN_vectormask) >> 1) | (tag & INTRIN_vectormask); } static inline llvm_ovr_tag_t ovr_tag_corresponding_integer (llvm_ovr_tag_t tag) { return ((tag & ~INTRIN_vectormask) >> 2) | (tag & INTRIN_vectormask); } static LLVMTypeRef ovr_tag_to_llvm_type (llvm_ovr_tag_t tag) { int vw = 0; int ew = 0; if (tag & INTRIN_vector64) vw = 1; else if (tag & INTRIN_vector128) vw = 2; if (tag & INTRIN_int16) ew = 1; else if (tag & INTRIN_int32) ew = 2; else if (tag & INTRIN_int64) ew = 3; else if (tag & INTRIN_float32) ew = 4; else if (tag & INTRIN_float64) ew = 5; return intrin_types [vw][ew]; } static int key_from_id_and_tag (int id, llvm_ovr_tag_t ovr_tag) { return (((int) ovr_tag) << 23) | id; } static llvm_ovr_tag_t ovr_tag_from_mono_vector_class (MonoClass *klass) { int size = mono_class_value_size (klass, NULL); llvm_ovr_tag_t ret = 0; switch (size) { case 8: ret |= INTRIN_vector64; break; case 16: ret |= INTRIN_vector128; break; } MonoType *etype = mono_class_get_context (klass)->class_inst->type_argv [0]; switch (etype->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: ret |= INTRIN_int8; break; case MONO_TYPE_I2: case MONO_TYPE_U2: ret |= INTRIN_int16; break; case MONO_TYPE_I4: case MONO_TYPE_U4: ret |= INTRIN_int32; break; case MONO_TYPE_I8: case MONO_TYPE_U8: ret |= INTRIN_int64; break; case MONO_TYPE_R4: ret |= INTRIN_float32; break; case MONO_TYPE_R8: ret |= INTRIN_float64; break; } return ret; } static llvm_ovr_tag_t ovr_tag_from_llvm_type (LLVMTypeRef type) { llvm_ovr_tag_t ret = 0; LLVMTypeKind kind = LLVMGetTypeKind (type); LLVMTypeRef elem_t = NULL; switch (kind) { case LLVMVectorTypeKind: { elem_t = LLVMGetElementType (type); unsigned int bits = mono_llvm_get_prim_size_bits (type); switch (bits) { case 64: ret |= INTRIN_vector64; break; case 128: ret |= INTRIN_vector128; break; default: g_assert_not_reached (); } break; } default: g_assert_not_reached (); } if (elem_t == i1_t) ret |= INTRIN_int8; if (elem_t == i2_t) ret |= INTRIN_int16; if (elem_t == i4_t) ret |= 
INTRIN_int32;
	if (elem_t == i8_t) ret |= INTRIN_int64;
	if (elem_t == r4_t) ret |= INTRIN_float32;
	if (elem_t == r8_t) ret |= INTRIN_float64;
	return ret;
}

static inline void
set_failure (EmitContext *ctx, const char *message)
{
	TRACE_FAILURE (message);
	ctx->cfg->exception_message = g_strdup (message);
	ctx->cfg->disable_llvm = TRUE;
}

static LLVMValueRef
const_int1 (int v)
{
	return LLVMConstInt (LLVMInt1Type (), v ? 1 : 0, FALSE);
}

static LLVMValueRef
const_int8 (int v)
{
	return LLVMConstInt (LLVMInt8Type (), v, FALSE);
}

static LLVMValueRef
const_int32 (int v)
{
	return LLVMConstInt (LLVMInt32Type (), v, FALSE);
}

static LLVMValueRef
const_int64 (int64_t v)
{
	return LLVMConstInt (LLVMInt64Type (), v, FALSE);
}

/*
 * IntPtrType:
 *
 *   The LLVM type with width == TARGET_SIZEOF_VOID_P
 */
static LLVMTypeRef
IntPtrType (void)
{
	return TARGET_SIZEOF_VOID_P == 8 ? LLVMInt64Type () : LLVMInt32Type ();
}

static LLVMTypeRef
ObjRefType (void)
{
	return TARGET_SIZEOF_VOID_P == 8 ? LLVMPointerType (LLVMInt64Type (), 0) : LLVMPointerType (LLVMInt32Type (), 0);
}

static LLVMTypeRef
ThisType (void)
{
	return TARGET_SIZEOF_VOID_P == 8 ? LLVMPointerType (LLVMInt64Type (), 0) : LLVMPointerType (LLVMInt32Type (), 0);
}

typedef struct {
	int32_t size;
	uint32_t align;
} MonoSizeAlign;

/*
 * get_vtype_size_align:
 *
 *   Return the size and alignment of the LLVM representation of the vtype T.
 */
static MonoSizeAlign
get_vtype_size_align (MonoType *t)
{
	uint32_t align = 0;
	int32_t size = mono_class_value_size (mono_class_from_mono_type_internal (t), &align);

	/* LLVMArgAsIArgs depends on this since it stores whole words */
	while (size < 2 * TARGET_SIZEOF_VOID_P && mono_is_power_of_two (size) == -1)
		size ++;

	MonoSizeAlign ret = { size, align };
	return ret;
}

/*
 * simd_class_to_llvm_type:
 *
 *   Return the LLVM type corresponding to the Mono.SIMD class KLASS
 */
static LLVMTypeRef
simd_class_to_llvm_type (EmitContext *ctx, MonoClass *klass)
{
	const char *klass_name = m_class_get_name (klass);

	if (!strcmp (klass_name, "Vector2d")) {
		return LLVMVectorType (LLVMDoubleType (), 2);
	} else if (!strcmp (klass_name, "Vector2l")) {
		return LLVMVectorType (LLVMInt64Type (), 2);
	} else if (!strcmp (klass_name, "Vector2ul")) {
		return LLVMVectorType (LLVMInt64Type (), 2);
	} else if (!strcmp (klass_name, "Vector4i")) {
		return LLVMVectorType (LLVMInt32Type (), 4);
	} else if (!strcmp (klass_name, "Vector4ui")) {
		return LLVMVectorType (LLVMInt32Type (), 4);
	} else if (!strcmp (klass_name, "Vector4f")) {
		return LLVMVectorType (LLVMFloatType (), 4);
	} else if (!strcmp (klass_name, "Vector8s")) {
		return LLVMVectorType (LLVMInt16Type (), 8);
	} else if (!strcmp (klass_name, "Vector8us")) {
		return LLVMVectorType (LLVMInt16Type (), 8);
	} else if (!strcmp (klass_name, "Vector16sb")) {
		return LLVMVectorType (LLVMInt8Type (), 16);
	} else if (!strcmp (klass_name, "Vector16b")) {
		return LLVMVectorType (LLVMInt8Type (), 16);
	} else if (!strcmp (klass_name, "Vector2")) { /* System.Numerics */
		return LLVMVectorType (LLVMFloatType (), 4);
	} else if (!strcmp (klass_name, "Vector3")) {
		return LLVMVectorType (LLVMFloatType (), 4);
	} else if (!strcmp (klass_name, "Vector4")) {
		return LLVMVectorType (LLVMFloatType (), 4);
	} else if (!strcmp (klass_name, "Vector`1") || !strcmp (klass_name, "Vector64`1") || !strcmp (klass_name, "Vector128`1") || !strcmp (klass_name, "Vector256`1")) {
		MonoType *etype = mono_class_get_generic_class (klass)->context.class_inst->type_argv [0];
		int size = mono_class_value_size (klass, NULL);
		switch (etype->type) {
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			return
LLVMVectorType (LLVMInt8Type (), size); case MONO_TYPE_I2: case MONO_TYPE_U2: return LLVMVectorType (LLVMInt16Type (), size / 2); case MONO_TYPE_I4: case MONO_TYPE_U4: return LLVMVectorType (LLVMInt32Type (), size / 4); case MONO_TYPE_I8: case MONO_TYPE_U8: return LLVMVectorType (LLVMInt64Type (), size / 8); case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return LLVMVectorType (LLVMInt64Type (), size / 8); #else return LLVMVectorType (LLVMInt32Type (), size / 4); #endif case MONO_TYPE_R4: return LLVMVectorType (LLVMFloatType (), size / 4); case MONO_TYPE_R8: return LLVMVectorType (LLVMDoubleType (), size / 8); default: g_assert_not_reached (); return NULL; } } else { printf ("%s\n", klass_name); NOT_IMPLEMENTED; return NULL; } } static LLVMTypeRef simd_valuetuple_to_llvm_type (EmitContext *ctx, MonoClass *klass) { const char *klass_name = m_class_get_name (klass); if (!strcmp (klass_name, "ValueTuple`2")) { MonoType *etype = mono_class_get_generic_class (klass)->context.class_inst->type_argv [0]; if (etype->type != MONO_TYPE_GENERICINST) g_assert_not_reached (); MonoClass *eklass = etype->data.generic_class->cached_class; LLVMTypeRef ltype = simd_class_to_llvm_type (ctx, eklass); return LLVMArrayType (ltype, 2); } g_assert_not_reached (); } /* Return the 128 bit SIMD type corresponding to the mono type TYPE */ static inline G_GNUC_UNUSED LLVMTypeRef type_to_sse_type (int type) { switch (type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return LLVMVectorType (LLVMInt8Type (), 16); case MONO_TYPE_U2: case MONO_TYPE_I2: return LLVMVectorType (LLVMInt16Type (), 8); case MONO_TYPE_U4: case MONO_TYPE_I4: return LLVMVectorType (LLVMInt32Type (), 4); case MONO_TYPE_U8: case MONO_TYPE_I8: return LLVMVectorType (LLVMInt64Type (), 2); case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return LLVMVectorType (LLVMInt64Type (), 2); #else return LLVMVectorType (LLVMInt32Type (), 4); #endif case MONO_TYPE_R8: return LLVMVectorType (LLVMDoubleType (), 2); case MONO_TYPE_R4: return LLVMVectorType (LLVMFloatType (), 4); default: g_assert_not_reached (); return NULL; } } static LLVMTypeRef create_llvm_type_for_type (MonoLLVMModule *module, MonoClass *klass) { int i, size, nfields, esize; LLVMTypeRef *eltypes; char *name; MonoType *t; LLVMTypeRef ltype; t = m_class_get_byval_arg (klass); if (mini_type_is_hfa (t, &nfields, &esize)) { /* * This is needed on arm64 where HFAs are returned in * registers. */ /* SIMD types have size 16 in mono_class_value_size () */ if (m_class_is_simd_type (klass)) nfields = 16/ esize; size = nfields; eltypes = g_new (LLVMTypeRef, size); for (i = 0; i < size; ++i) eltypes [i] = esize == 4 ? LLVMFloatType () : LLVMDoubleType (); } else { MonoSizeAlign size_align = get_vtype_size_align (t); eltypes = g_new (LLVMTypeRef, size_align.size); size = 0; uint32_t bytes = 0; uint32_t chunk = size_align.align < TARGET_SIZEOF_VOID_P ? 
size_align.align : TARGET_SIZEOF_VOID_P; for (; chunk > 0; chunk = chunk >> 1) { for (; (bytes + chunk) <= size_align.size; bytes += chunk) { eltypes [size] = LLVMIntType (chunk * 8); ++size; } } } name = mono_type_full_name (m_class_get_byval_arg (klass)); ltype = LLVMStructCreateNamed (module->context, name); LLVMStructSetBody (ltype, eltypes, size, FALSE); g_free (eltypes); g_free (name); return ltype; } static LLVMTypeRef primitive_type_to_llvm_type (MonoTypeEnum type) { switch (type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return LLVMInt8Type (); case MONO_TYPE_I2: case MONO_TYPE_U2: return LLVMInt16Type (); case MONO_TYPE_I4: case MONO_TYPE_U4: return LLVMInt32Type (); case MONO_TYPE_I8: case MONO_TYPE_U8: return LLVMInt64Type (); case MONO_TYPE_R4: return LLVMFloatType (); case MONO_TYPE_R8: return LLVMDoubleType (); case MONO_TYPE_I: case MONO_TYPE_U: return IntPtrType (); default: return NULL; } } static MonoTypeEnum inst_c1_type (const MonoInst *ins) { return (MonoTypeEnum)ins->inst_c1; } /* * type_to_llvm_type: * * Return the LLVM type corresponding to T. */ static LLVMTypeRef type_to_llvm_type (EmitContext *ctx, MonoType *t) { if (m_type_is_byref (t)) return ThisType (); t = mini_get_underlying_type (t); LLVMTypeRef prim_llvm_type = primitive_type_to_llvm_type (t->type); if (prim_llvm_type != NULL) return prim_llvm_type; switch (t->type) { case MONO_TYPE_VOID: return LLVMVoidType (); case MONO_TYPE_OBJECT: return ObjRefType (); case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: { MonoClass *klass = mono_class_from_mono_type_internal (t); MonoClass *ptr_klass = m_class_get_element_class (klass); MonoType *ptr_type = m_class_get_byval_arg (ptr_klass); /* Handle primitive pointers */ switch (ptr_type->type) { case MONO_TYPE_I1: case MONO_TYPE_I2: case MONO_TYPE_I4: case MONO_TYPE_U1: case MONO_TYPE_U2: case MONO_TYPE_U4: return LLVMPointerType (type_to_llvm_type (ctx, ptr_type), 0); } return ObjRefType (); } case MONO_TYPE_VAR: case MONO_TYPE_MVAR: /* Because of generic sharing */ return ObjRefType (); case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (t)) return ObjRefType (); /* Fall through */ case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: { MonoClass *klass; LLVMTypeRef ltype; klass = mono_class_from_mono_type_internal (t); if (MONO_CLASS_IS_SIMD (ctx->cfg, klass)) return simd_class_to_llvm_type (ctx, klass); if (m_class_is_enumtype (klass)) return type_to_llvm_type (ctx, mono_class_enum_basetype_internal (klass)); ltype = (LLVMTypeRef)g_hash_table_lookup (ctx->module->llvm_types, klass); if (!ltype) { ltype = create_llvm_type_for_type (ctx->module, klass); g_hash_table_insert (ctx->module->llvm_types, klass, ltype); } return ltype; } default: printf ("X: %d\n", t->type); ctx->cfg->exception_message = g_strdup_printf ("type %s", mono_type_full_name (t)); ctx->cfg->disable_llvm = TRUE; return NULL; } } static gboolean primitive_type_is_unsigned (MonoTypeEnum t) { switch (t) { case MONO_TYPE_U1: case MONO_TYPE_U2: case MONO_TYPE_CHAR: case MONO_TYPE_U4: case MONO_TYPE_U8: case MONO_TYPE_U: return TRUE; default: return FALSE; } } /* * type_is_unsigned: * * Return whenever T is an unsigned int type. */ static gboolean type_is_unsigned (EmitContext *ctx, MonoType *t) { t = mini_get_underlying_type (t); if (m_type_is_byref (t)) return FALSE; return primitive_type_is_unsigned (t->type); } /* * type_to_llvm_arg_type: * * Same as type_to_llvm_type, but treat i8/i16 as i32. 
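* * For example (illustrative), a MONO_TYPE_U1 parameter maps to i8 as a plain value, but as an argument it is widened to i32 on most targets (not on arm64, and not in llvm_only mode, see below), so JITted callers see a fully defined register.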
*/ static LLVMTypeRef type_to_llvm_arg_type (EmitContext *ctx, MonoType *t) { LLVMTypeRef ptype = type_to_llvm_type (ctx, t); if (ctx->cfg->llvm_only) return ptype; /* * This works on all abis except arm64/ios which passes multiple * arguments in one stack slot. */ #ifndef TARGET_ARM64 if (ptype == LLVMInt8Type () || ptype == LLVMInt16Type ()) { /* * LLVM generates code which only sets the lower bits, while JITted * code expects all the bits to be set. */ ptype = LLVMInt32Type (); } #endif return ptype; } /* * llvm_type_to_stack_type: * * Return the LLVM type which needs to be used when a value of type TYPE is pushed * on the IL stack. */ static G_GNUC_UNUSED LLVMTypeRef llvm_type_to_stack_type (MonoCompile *cfg, LLVMTypeRef type) { if (type == NULL) return NULL; if (type == LLVMInt8Type ()) return LLVMInt32Type (); else if (type == LLVMInt16Type ()) return LLVMInt32Type (); else if (!cfg->r4fp && type == LLVMFloatType ()) return LLVMDoubleType (); else return type; } /* * regtype_to_llvm_type: * * Return the LLVM type corresponding to the regtype C used in instruction * descriptions. */ static LLVMTypeRef regtype_to_llvm_type (char c) { switch (c) { case 'i': return LLVMInt32Type (); case 'l': return LLVMInt64Type (); case 'f': return LLVMDoubleType (); default: return NULL; } } /* * op_to_llvm_type: * * Return the LLVM type corresponding to the unary/binary opcode OPCODE. */ static LLVMTypeRef op_to_llvm_type (int opcode) { switch (opcode) { case OP_ICONV_TO_I1: case OP_LCONV_TO_I1: return LLVMInt8Type (); case OP_ICONV_TO_U1: case OP_LCONV_TO_U1: return LLVMInt8Type (); case OP_ICONV_TO_I2: case OP_LCONV_TO_I2: return LLVMInt16Type (); case OP_ICONV_TO_U2: case OP_LCONV_TO_U2: return LLVMInt16Type (); case OP_ICONV_TO_I4: case OP_LCONV_TO_I4: return LLVMInt32Type (); case OP_ICONV_TO_U4: case OP_LCONV_TO_U4: return LLVMInt32Type (); case OP_ICONV_TO_I8: return LLVMInt64Type (); case OP_ICONV_TO_R4: return LLVMFloatType (); case OP_ICONV_TO_R8: return LLVMDoubleType (); case OP_ICONV_TO_U8: return LLVMInt64Type (); case OP_FCONV_TO_I4: return LLVMInt32Type (); case OP_FCONV_TO_I8: return LLVMInt64Type (); case OP_FCONV_TO_I1: case OP_FCONV_TO_U1: case OP_RCONV_TO_I1: case OP_RCONV_TO_U1: return LLVMInt8Type (); case OP_FCONV_TO_I2: case OP_FCONV_TO_U2: case OP_RCONV_TO_I2: case OP_RCONV_TO_U2: return LLVMInt16Type (); case OP_FCONV_TO_U4: case OP_RCONV_TO_U4: return LLVMInt32Type (); case OP_FCONV_TO_U8: case OP_RCONV_TO_U8: return LLVMInt64Type (); case OP_IADD_OVF: case OP_IADD_OVF_UN: case OP_ISUB_OVF: case OP_ISUB_OVF_UN: case OP_IMUL_OVF: case OP_IMUL_OVF_UN: return LLVMInt32Type (); case OP_LADD_OVF: case OP_LADD_OVF_UN: case OP_LSUB_OVF: case OP_LSUB_OVF_UN: case OP_LMUL_OVF: case OP_LMUL_OVF_UN: return LLVMInt64Type (); default: printf ("%s\n", mono_inst_name (opcode)); g_assert_not_reached (); return NULL; } } #define CLAUSE_START(clause) ((clause)->try_offset) #define CLAUSE_END(clause) (((clause))->try_offset + ((clause))->try_len) /* * load_store_to_llvm_type: * * Return the size/sign/zero extension corresponding to the load/store opcode * OPCODE. 
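* * For example, OP_LOADI2_MEMBASE yields *size = 2, *sext = TRUE and an i16 type, while OP_LOADU2_MEMBASE yields *size = 2, *zext = TRUE with the same i16 type; the caller then extends the loaded value accordingly.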
*/ static LLVMTypeRef load_store_to_llvm_type (int opcode, int *size, gboolean *sext, gboolean *zext) { *sext = FALSE; *zext = FALSE; switch (opcode) { case OP_LOADI1_MEMBASE: case OP_STOREI1_MEMBASE_REG: case OP_STOREI1_MEMBASE_IMM: case OP_ATOMIC_LOAD_I1: case OP_ATOMIC_STORE_I1: *size = 1; *sext = TRUE; return LLVMInt8Type (); case OP_LOADU1_MEMBASE: case OP_LOADU1_MEM: case OP_ATOMIC_LOAD_U1: case OP_ATOMIC_STORE_U1: *size = 1; *zext = TRUE; return LLVMInt8Type (); case OP_LOADI2_MEMBASE: case OP_STOREI2_MEMBASE_REG: case OP_STOREI2_MEMBASE_IMM: case OP_ATOMIC_LOAD_I2: case OP_ATOMIC_STORE_I2: *size = 2; *sext = TRUE; return LLVMInt16Type (); case OP_LOADU2_MEMBASE: case OP_LOADU2_MEM: case OP_ATOMIC_LOAD_U2: case OP_ATOMIC_STORE_U2: *size = 2; *zext = TRUE; return LLVMInt16Type (); case OP_LOADI4_MEMBASE: case OP_LOADU4_MEMBASE: case OP_LOADI4_MEM: case OP_LOADU4_MEM: case OP_STOREI4_MEMBASE_REG: case OP_STOREI4_MEMBASE_IMM: case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_LOAD_U4: case OP_ATOMIC_STORE_U4: *size = 4; return LLVMInt32Type (); case OP_LOADI8_MEMBASE: case OP_LOADI8_MEM: case OP_STOREI8_MEMBASE_REG: case OP_STOREI8_MEMBASE_IMM: case OP_ATOMIC_LOAD_I8: case OP_ATOMIC_STORE_I8: case OP_ATOMIC_LOAD_U8: case OP_ATOMIC_STORE_U8: *size = 8; return LLVMInt64Type (); case OP_LOADR4_MEMBASE: case OP_STORER4_MEMBASE_REG: case OP_ATOMIC_LOAD_R4: case OP_ATOMIC_STORE_R4: *size = 4; return LLVMFloatType (); case OP_LOADR8_MEMBASE: case OP_STORER8_MEMBASE_REG: case OP_ATOMIC_LOAD_R8: case OP_ATOMIC_STORE_R8: *size = 8; return LLVMDoubleType (); case OP_LOAD_MEMBASE: case OP_LOAD_MEM: case OP_STORE_MEMBASE_REG: case OP_STORE_MEMBASE_IMM: *size = TARGET_SIZEOF_VOID_P; return IntPtrType (); default: g_assert_not_reached (); return NULL; } } /* * ovf_op_to_intrins: * * Return the LLVM intrinsics corresponding to the overflow opcode OPCODE. 
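* * For example, OP_IADD_OVF maps to INTRINS_SADD_OVF_I32 and OP_LMUL_OVF_UN to INTRINS_UMUL_OVF_I64; these are expected to resolve to the corresponding llvm.*.with.overflow.* intrinsics, which return the result together with an overflow flag.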
*/ static IntrinsicId ovf_op_to_intrins (int opcode) { switch (opcode) { case OP_IADD_OVF: return INTRINS_SADD_OVF_I32; case OP_IADD_OVF_UN: return INTRINS_UADD_OVF_I32; case OP_ISUB_OVF: return INTRINS_SSUB_OVF_I32; case OP_ISUB_OVF_UN: return INTRINS_USUB_OVF_I32; case OP_IMUL_OVF: return INTRINS_SMUL_OVF_I32; case OP_IMUL_OVF_UN: return INTRINS_UMUL_OVF_I32; case OP_LADD_OVF: return INTRINS_SADD_OVF_I64; case OP_LADD_OVF_UN: return INTRINS_UADD_OVF_I64; case OP_LSUB_OVF: return INTRINS_SSUB_OVF_I64; case OP_LSUB_OVF_UN: return INTRINS_USUB_OVF_I64; case OP_LMUL_OVF: return INTRINS_SMUL_OVF_I64; case OP_LMUL_OVF_UN: return INTRINS_UMUL_OVF_I64; default: g_assert_not_reached (); return (IntrinsicId)0; } } static IntrinsicId simd_ins_to_intrins (int opcode) { switch (opcode) { #if defined(TARGET_X86) || defined(TARGET_AMD64) case OP_CVTPD2DQ: return INTRINS_SSE_CVTPD2DQ; case OP_CVTPS2DQ: return INTRINS_SSE_CVTPS2DQ; case OP_CVTPD2PS: return INTRINS_SSE_CVTPD2PS; case OP_CVTTPD2DQ: return INTRINS_SSE_CVTTPD2DQ; case OP_CVTTPS2DQ: return INTRINS_SSE_CVTTPS2DQ; case OP_SSE_SQRTSS: return INTRINS_SSE_SQRT_SS; case OP_SSE2_SQRTSD: return INTRINS_SSE_SQRT_SD; #endif default: g_assert_not_reached (); return (IntrinsicId)0; } } static LLVMTypeRef simd_op_to_llvm_type (int opcode) { #if defined(TARGET_X86) || defined(TARGET_AMD64) switch (opcode) { case OP_EXTRACT_R8: case OP_EXPAND_R8: return sse_r8_t; case OP_EXTRACT_I8: case OP_EXPAND_I8: return sse_i8_t; case OP_EXTRACT_I4: case OP_EXPAND_I4: return sse_i4_t; case OP_EXTRACT_I2: case OP_EXTRACTX_U2: case OP_EXPAND_I2: return sse_i2_t; case OP_EXTRACT_I1: case OP_EXPAND_I1: return sse_i1_t; case OP_EXTRACT_R4: case OP_EXPAND_R4: return sse_r4_t; case OP_CVTPD2DQ: case OP_CVTPD2PS: case OP_CVTTPD2DQ: return sse_r8_t; case OP_CVTPS2DQ: case OP_CVTTPS2DQ: return sse_r4_t; case OP_SQRTPS: case OP_RSQRTPS: case OP_DUPPS_LOW: case OP_DUPPS_HIGH: return sse_r4_t; case OP_SQRTPD: case OP_DUPPD: return sse_r8_t; default: g_assert_not_reached (); return NULL; } #else return NULL; #endif } static void set_cold_cconv (LLVMValueRef func) { /* * xcode10 (watchOS) and ARM/ARM64 doesn't seem to support preserveall, it fails with: * fatal error: error in backend: Unsupported calling convention */ #if !defined(TARGET_WATCHOS) && !defined(TARGET_ARM) && !defined(TARGET_ARM64) LLVMSetFunctionCallConv (func, LLVMColdCallConv); #endif } static void set_call_cold_cconv (LLVMValueRef func) { #if !defined(TARGET_WATCHOS) && !defined(TARGET_ARM) && !defined(TARGET_ARM64) LLVMSetInstructionCallConv (func, LLVMColdCallConv); #endif } /* * get_bb: * * Return the LLVM basic block corresponding to BB. 
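* * Blocks are created lazily and cached in ctx->bblocks [bb->block_num]; exception handler blocks are named EH_CLAUSE<i>_BB<n>, all others BB<n>.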
*/ static LLVMBasicBlockRef get_bb (EmitContext *ctx, MonoBasicBlock *bb) { char bb_name_buf [128]; char *bb_name; if (ctx->bblocks [bb->block_num].bblock == NULL) { if (bb->flags & BB_EXCEPTION_HANDLER) { int clause_index = (mono_get_block_region_notry (ctx->cfg, bb->region) >> 8) - 1; sprintf (bb_name_buf, "EH_CLAUSE%d_BB%d", clause_index, bb->block_num); bb_name = bb_name_buf; } else if (bb->block_num < 256) { if (!ctx->module->bb_names) { ctx->module->bb_names_len = 256; ctx->module->bb_names = g_new0 (char*, ctx->module->bb_names_len); } if (!ctx->module->bb_names [bb->block_num]) { char *n; n = g_strdup_printf ("BB%d", bb->block_num); mono_memory_barrier (); ctx->module->bb_names [bb->block_num] = n; } bb_name = ctx->module->bb_names [bb->block_num]; } else { sprintf (bb_name_buf, "BB%d", bb->block_num); bb_name = bb_name_buf; } ctx->bblocks [bb->block_num].bblock = LLVMAppendBasicBlock (ctx->lmethod, bb_name); ctx->bblocks [bb->block_num].end_bblock = ctx->bblocks [bb->block_num].bblock; } return ctx->bblocks [bb->block_num].bblock; } /* * get_end_bb: * * Return the last LLVM bblock corresponding to BB. * This might not be equal to the bb returned by get_bb () since we need to generate * multiple LLVM bblocks for a mono bblock to handle throwing exceptions. */ static LLVMBasicBlockRef get_end_bb (EmitContext *ctx, MonoBasicBlock *bb) { get_bb (ctx, bb); return ctx->bblocks [bb->block_num].end_bblock; } static LLVMBasicBlockRef gen_bb (EmitContext *ctx, const char *prefix) { char bb_name [128]; sprintf (bb_name, "%s%d", prefix, ++ ctx->ex_index); return LLVMAppendBasicBlock (ctx->lmethod, bb_name); } /* * resolve_patch: * * Return the target of the patch identified by TYPE and TARGET. */ static gpointer resolve_patch (MonoCompile *cfg, MonoJumpInfoType type, gconstpointer target) { MonoJumpInfo ji; ERROR_DECL (error); gpointer res; memset (&ji, 0, sizeof (ji)); ji.type = type; ji.data.target = target; res = mono_resolve_patch_target (cfg->method, NULL, &ji, FALSE, error); mono_error_assert_ok (error); return res; } /* * convert_full: * * Emit code to convert the LLVM value V to DTYPE. */ static LLVMValueRef convert_full (EmitContext *ctx, LLVMValueRef v, LLVMTypeRef dtype, gboolean is_unsigned) { LLVMTypeRef stype = LLVMTypeOf (v); if (stype != dtype) { gboolean ext = FALSE; /* Extend */ if (dtype == LLVMInt64Type () && (stype == LLVMInt32Type () || stype == LLVMInt16Type () || stype == LLVMInt8Type ())) ext = TRUE; else if (dtype == LLVMInt32Type () && (stype == LLVMInt16Type () || stype == LLVMInt8Type ())) ext = TRUE; else if (dtype == LLVMInt16Type () && (stype == LLVMInt8Type ())) ext = TRUE; if (ext) return is_unsigned ? 
LLVMBuildZExt (ctx->builder, v, dtype, "") : LLVMBuildSExt (ctx->builder, v, dtype, ""); if (dtype == LLVMDoubleType () && stype == LLVMFloatType ()) return LLVMBuildFPExt (ctx->builder, v, dtype, ""); /* Trunc */ if (stype == LLVMInt64Type () && (dtype == LLVMInt32Type () || dtype == LLVMInt16Type () || dtype == LLVMInt8Type ())) return LLVMBuildTrunc (ctx->builder, v, dtype, ""); if (stype == LLVMInt32Type () && (dtype == LLVMInt16Type () || dtype == LLVMInt8Type ())) return LLVMBuildTrunc (ctx->builder, v, dtype, ""); if (stype == LLVMInt16Type () && dtype == LLVMInt8Type ()) return LLVMBuildTrunc (ctx->builder, v, dtype, ""); if (stype == LLVMDoubleType () && dtype == LLVMFloatType ()) return LLVMBuildFPTrunc (ctx->builder, v, dtype, ""); if (LLVMGetTypeKind (stype) == LLVMPointerTypeKind && LLVMGetTypeKind (dtype) == LLVMPointerTypeKind) return LLVMBuildBitCast (ctx->builder, v, dtype, ""); if (LLVMGetTypeKind (dtype) == LLVMPointerTypeKind) return LLVMBuildIntToPtr (ctx->builder, v, dtype, ""); if (LLVMGetTypeKind (stype) == LLVMPointerTypeKind) return LLVMBuildPtrToInt (ctx->builder, v, dtype, ""); if (mono_arch_is_soft_float ()) { if (stype == LLVMInt32Type () && dtype == LLVMFloatType ()) return LLVMBuildBitCast (ctx->builder, v, dtype, ""); if (stype == LLVMInt32Type () && dtype == LLVMDoubleType ()) return LLVMBuildBitCast (ctx->builder, LLVMBuildZExt (ctx->builder, v, LLVMInt64Type (), ""), dtype, ""); } if (LLVMGetTypeKind (stype) == LLVMVectorTypeKind && LLVMGetTypeKind (dtype) == LLVMVectorTypeKind) { if (mono_llvm_get_prim_size_bits (stype) == mono_llvm_get_prim_size_bits (dtype)) return LLVMBuildBitCast (ctx->builder, v, dtype, ""); } mono_llvm_dump_value (v); mono_llvm_dump_type (dtype); printf ("\n"); g_assert_not_reached (); return NULL; } else { return v; } } static LLVMValueRef convert (EmitContext *ctx, LLVMValueRef v, LLVMTypeRef dtype) { return convert_full (ctx, v, dtype, FALSE); } static void emit_memset (EmitContext *ctx, LLVMBuilderRef builder, LLVMValueRef v, LLVMValueRef size, int alignment) { LLVMValueRef args [5]; int aindex = 0; args [aindex ++] = v; args [aindex ++] = LLVMConstInt (LLVMInt8Type (), 0, FALSE); args [aindex ++] = size; args [aindex ++] = LLVMConstInt (LLVMInt1Type (), 0, FALSE); LLVMBuildCall (builder, get_intrins (ctx, INTRINS_MEMSET), args, aindex, ""); } /* * emit_volatile_load: * * If vreg is volatile, emit a load from its address. */ static LLVMValueRef emit_volatile_load (EmitContext *ctx, int vreg) { MonoType *t; LLVMValueRef v; // On arm64, we pass the rgctx in a callee saved // register (x15), and llvm might keep the value in that register // even though the register is marked as 'reserved' inside llvm. v = mono_llvm_build_load (ctx->builder, ctx->addresses [vreg], "", TRUE); t = ctx->vreg_cli_types [vreg]; if (t && !m_type_is_byref (t)) { /* * Might have to zero extend since llvm doesn't have * unsigned types. */ if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_U2 || t->type == MONO_TYPE_CHAR || t->type == MONO_TYPE_BOOLEAN) v = LLVMBuildZExt (ctx->builder, v, LLVMInt32Type (), ""); else if (t->type == MONO_TYPE_I1 || t->type == MONO_TYPE_I2) v = LLVMBuildSExt (ctx->builder, v, LLVMInt32Type (), ""); else if (t->type == MONO_TYPE_U8) v = LLVMBuildZExt (ctx->builder, v, LLVMInt64Type (), ""); } return v; } /* * emit_volatile_store: * * If VREG is volatile, emit a store from its value to its address.
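* * On WASM the store is emitted as an LLVM volatile store so the optimizer cannot move it; other targets use a plain store (see the TARGET_WASM ifdef below).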
*/ static void emit_volatile_store (EmitContext *ctx, int vreg) { MonoInst *var = get_vreg_to_inst (ctx->cfg, vreg); if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) { g_assert (ctx->addresses [vreg]); #ifdef TARGET_WASM /* Need volatile stores otherwise the compiler might move them */ mono_llvm_build_store (ctx->builder, convert (ctx, ctx->values [vreg], type_to_llvm_type (ctx, var->inst_vtype)), ctx->addresses [vreg], TRUE, LLVM_BARRIER_NONE); #else LLVMBuildStore (ctx->builder, convert (ctx, ctx->values [vreg], type_to_llvm_type (ctx, var->inst_vtype)), ctx->addresses [vreg]); #endif } } static LLVMTypeRef sig_to_llvm_sig_no_cinfo (EmitContext *ctx, MonoMethodSignature *sig) { LLVMTypeRef ret_type; LLVMTypeRef *param_types = NULL; LLVMTypeRef res; int i, pindex; ret_type = type_to_llvm_type (ctx, sig->ret); if (!ctx_ok (ctx)) return NULL; param_types = g_new0 (LLVMTypeRef, (sig->param_count * 8) + 3); pindex = 0; if (sig->hasthis) param_types [pindex ++] = ThisType (); for (i = 0; i < sig->param_count; ++i) param_types [pindex ++] = type_to_llvm_arg_type (ctx, sig->params [i]); if (!ctx_ok (ctx)) { g_free (param_types); return NULL; } res = LLVMFunctionType (ret_type, param_types, pindex, FALSE); g_free (param_types); return res; } /* * sig_to_llvm_sig_full: * * Return the LLVM signature corresponding to the mono signature SIG using the * calling convention information in CINFO. Fill out the parameter mapping information in CINFO. */ static LLVMTypeRef sig_to_llvm_sig_full (EmitContext *ctx, MonoMethodSignature *sig, LLVMCallInfo *cinfo) { LLVMTypeRef ret_type; LLVMTypeRef *param_types = NULL; LLVMTypeRef res; int i, j, pindex, vret_arg_pindex = 0; gboolean vretaddr = FALSE; MonoType *rtype; if (!cinfo) return sig_to_llvm_sig_no_cinfo (ctx, sig); ret_type = type_to_llvm_type (ctx, sig->ret); if (!ctx_ok (ctx)) return NULL; rtype = mini_get_underlying_type (sig->ret); switch (cinfo->ret.storage) { case LLVMArgVtypeInReg: /* LLVM models this by returning an aggregate value */ if (cinfo->ret.pair_storage [0] == LLVMArgInIReg && cinfo->ret.pair_storage [1] == LLVMArgNone) { LLVMTypeRef members [2]; members [0] = IntPtrType (); ret_type = LLVMStructType (members, 1, FALSE); } else if (cinfo->ret.pair_storage [0] == LLVMArgNone && cinfo->ret.pair_storage [1] == LLVMArgNone) { /* Empty struct */ ret_type = LLVMVoidType (); } else if (cinfo->ret.pair_storage [0] == LLVMArgInIReg && cinfo->ret.pair_storage [1] == LLVMArgInIReg) { LLVMTypeRef members [2]; members [0] = IntPtrType (); members [1] = IntPtrType (); ret_type = LLVMStructType (members, 2, FALSE); } else { g_assert_not_reached (); } break; case LLVMArgVtypeByVal: /* Vtype returned normally by val */ break; case LLVMArgVtypeAsScalar: { int size = mono_class_value_size (mono_class_from_mono_type_internal (rtype), NULL); /* LLVM models this by returning an int */ if (size < TARGET_SIZEOF_VOID_P) { g_assert (cinfo->ret.nslots == 1); ret_type = LLVMIntType (size * 8); } else { g_assert (cinfo->ret.nslots == 1 || cinfo->ret.nslots == 2); ret_type = LLVMIntType (cinfo->ret.nslots * sizeof (target_mgreg_t) * 8); } break; } case LLVMArgAsIArgs: ret_type = LLVMArrayType (IntPtrType (), cinfo->ret.nslots); break; case LLVMArgFpStruct: { /* Vtype returned as a fp struct */ LLVMTypeRef members [16]; /* Have to create our own structure since we don't map fp structures to LLVM fp structures yet */ for (i = 0; i < cinfo->ret.nslots; ++i) members [i] = cinfo->ret.esize == 8 ? 
LLVMDoubleType () : LLVMFloatType (); ret_type = LLVMStructType (members, cinfo->ret.nslots, FALSE); break; } case LLVMArgVtypeByRef: /* Vtype returned using a hidden argument */ ret_type = LLVMVoidType (); break; case LLVMArgVtypeRetAddr: case LLVMArgGsharedvtFixed: case LLVMArgGsharedvtFixedVtype: case LLVMArgGsharedvtVariable: vretaddr = TRUE; ret_type = LLVMVoidType (); break; case LLVMArgWasmVtypeAsScalar: g_assert (cinfo->ret.esize); ret_type = LLVMIntType (cinfo->ret.esize * 8); break; default: break; } param_types = g_new0 (LLVMTypeRef, (sig->param_count * 8) + 3); pindex = 0; if (cinfo->ret.storage == LLVMArgVtypeByRef) { /* * Has to be the first argument because of the sret argument attribute * FIXME: This might conflict with passing 'this' as the first argument, but * this is only used on arm64 which has a dedicated struct return register. */ cinfo->vret_arg_pindex = pindex; param_types [pindex] = type_to_llvm_arg_type (ctx, sig->ret); if (!ctx_ok (ctx)) { g_free (param_types); return NULL; } param_types [pindex] = LLVMPointerType (param_types [pindex], 0); pindex ++; } if (!ctx->llvm_only && cinfo->rgctx_arg) { cinfo->rgctx_arg_pindex = pindex; param_types [pindex] = ctx->module->ptr_type; pindex ++; } if (cinfo->imt_arg) { cinfo->imt_arg_pindex = pindex; param_types [pindex] = ctx->module->ptr_type; pindex ++; } if (vretaddr) { /* Compute the index in the LLVM signature where the vret arg needs to be passed */ vret_arg_pindex = pindex; if (cinfo->vret_arg_index == 1) { /* Add the slots consumed by the first argument */ LLVMArgInfo *ainfo = &cinfo->args [0]; switch (ainfo->storage) { case LLVMArgVtypeInReg: for (j = 0; j < 2; ++j) { if (ainfo->pair_storage [j] == LLVMArgInIReg) vret_arg_pindex ++; } break; default: vret_arg_pindex ++; } } cinfo->vret_arg_pindex = vret_arg_pindex; } if (vretaddr && vret_arg_pindex == pindex) param_types [pindex ++] = IntPtrType (); if (sig->hasthis) { cinfo->this_arg_pindex = pindex; param_types [pindex ++] = ThisType (); cinfo->args [0].pindex = cinfo->this_arg_pindex; } if (vretaddr && vret_arg_pindex == pindex) param_types [pindex ++] = IntPtrType (); for (i = 0; i < sig->param_count; ++i) { LLVMArgInfo *ainfo = &cinfo->args [i + sig->hasthis]; if (vretaddr && vret_arg_pindex == pindex) param_types [pindex ++] = IntPtrType (); ainfo->pindex = pindex; switch (ainfo->storage) { case LLVMArgVtypeInReg: for (j = 0; j < 2; ++j) { switch (ainfo->pair_storage [j]) { case LLVMArgInIReg: param_types [pindex ++] = LLVMIntType (TARGET_SIZEOF_VOID_P * 8); break; case LLVMArgNone: break; default: g_assert_not_reached (); } } break; case LLVMArgVtypeByVal: param_types [pindex] = type_to_llvm_arg_type (ctx, ainfo->type); if (!ctx_ok (ctx)) break; param_types [pindex] = LLVMPointerType (param_types [pindex], 0); pindex ++; break; case LLVMArgAsIArgs: if (ainfo->esize == 8) param_types [pindex] = LLVMArrayType (LLVMInt64Type (), ainfo->nslots); else param_types [pindex] = LLVMArrayType (IntPtrType (), ainfo->nslots); pindex ++; break; case LLVMArgVtypeAddr: case LLVMArgVtypeByRef: param_types [pindex] = type_to_llvm_arg_type (ctx, ainfo->type); if (!ctx_ok (ctx)) break; param_types [pindex] = LLVMPointerType (param_types [pindex], 0); pindex ++; break; case LLVMArgAsFpArgs: { int j; /* Emit dummy fp arguments if needed so the rest is passed on the stack */ for (j = 0; j < ainfo->ndummy_fpargs; ++j) param_types [pindex ++] = LLVMDoubleType (); for (j = 0; j < ainfo->nslots; ++j) param_types [pindex ++] = ainfo->esize == 8 ? 
LLVMDoubleType () : LLVMFloatType (); break; } case LLVMArgVtypeAsScalar: g_assert_not_reached (); break; case LLVMArgWasmVtypeAsScalar: g_assert (ainfo->esize); param_types [pindex ++] = LLVMIntType (ainfo->esize * 8); break; case LLVMArgGsharedvtFixed: case LLVMArgGsharedvtFixedVtype: param_types [pindex ++] = LLVMPointerType (type_to_llvm_arg_type (ctx, ainfo->type), 0); break; case LLVMArgGsharedvtVariable: param_types [pindex ++] = LLVMPointerType (IntPtrType (), 0); break; default: param_types [pindex ++] = type_to_llvm_arg_type (ctx, ainfo->type); break; } } if (!ctx_ok (ctx)) { g_free (param_types); return NULL; } if (vretaddr && vret_arg_pindex == pindex) param_types [pindex ++] = IntPtrType (); if (ctx->llvm_only && cinfo->rgctx_arg) { /* Pass the rgctx as the last argument */ cinfo->rgctx_arg_pindex = pindex; param_types [pindex] = ctx->module->ptr_type; pindex ++; } else if (ctx->llvm_only && cinfo->dummy_arg) { /* Pass a dummy arg last */ cinfo->dummy_arg_pindex = pindex; param_types [pindex] = ctx->module->ptr_type; pindex ++; } res = LLVMFunctionType (ret_type, param_types, pindex, FALSE); g_free (param_types); return res; } static LLVMTypeRef sig_to_llvm_sig (EmitContext *ctx, MonoMethodSignature *sig) { return sig_to_llvm_sig_full (ctx, sig, NULL); } /* * LLVMFunctionType0: * * Create an LLVM function type from the arguments. */ static G_GNUC_UNUSED LLVMTypeRef LLVMFunctionType0 (LLVMTypeRef ReturnType, int IsVarArg) { return LLVMFunctionType (ReturnType, NULL, 0, IsVarArg); } /* * LLVMFunctionType1: * * Create an LLVM function type from the arguments. */ static G_GNUC_UNUSED LLVMTypeRef LLVMFunctionType1 (LLVMTypeRef ReturnType, LLVMTypeRef ParamType1, int IsVarArg) { LLVMTypeRef param_types [1]; param_types [0] = ParamType1; return LLVMFunctionType (ReturnType, param_types, 1, IsVarArg); } /* * LLVMFunctionType2: * * Create an LLVM function type from the arguments. */ static G_GNUC_UNUSED LLVMTypeRef LLVMFunctionType2 (LLVMTypeRef ReturnType, LLVMTypeRef ParamType1, LLVMTypeRef ParamType2, int IsVarArg) { LLVMTypeRef param_types [2]; param_types [0] = ParamType1; param_types [1] = ParamType2; return LLVMFunctionType (ReturnType, param_types, 2, IsVarArg); } /* * LLVMFunctionType3: * * Create an LLVM function type from the arguments.
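* * For example (illustrative only), LLVMFunctionType3 (LLVMVoidType (), IntPtrType (), IntPtrType (), LLVMInt32Type (), FALSE) corresponds to the LLVM signature void (iN, iN, i32), where N is the pointer width.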
*/ static G_GNUC_UNUSED LLVMTypeRef LLVMFunctionType3 (LLVMTypeRef ReturnType, LLVMTypeRef ParamType1, LLVMTypeRef ParamType2, LLVMTypeRef ParamType3, int IsVarArg) { LLVMTypeRef param_types [3]; param_types [0] = ParamType1; param_types [1] = ParamType2; param_types [2] = ParamType3; return LLVMFunctionType (ReturnType, param_types, 3, IsVarArg); } static G_GNUC_UNUSED LLVMTypeRef LLVMFunctionType4 (LLVMTypeRef ReturnType, LLVMTypeRef ParamType1, LLVMTypeRef ParamType2, LLVMTypeRef ParamType3, LLVMTypeRef ParamType4, int IsVarArg) { LLVMTypeRef param_types [4]; param_types [0] = ParamType1; param_types [1] = ParamType2; param_types [2] = ParamType3; param_types [3] = ParamType4; return LLVMFunctionType (ReturnType, param_types, 4, IsVarArg); } static G_GNUC_UNUSED LLVMTypeRef LLVMFunctionType5 (LLVMTypeRef ReturnType, LLVMTypeRef ParamType1, LLVMTypeRef ParamType2, LLVMTypeRef ParamType3, LLVMTypeRef ParamType4, LLVMTypeRef ParamType5, int IsVarArg) { LLVMTypeRef param_types [5]; param_types [0] = ParamType1; param_types [1] = ParamType2; param_types [2] = ParamType3; param_types [3] = ParamType4; param_types [4] = ParamType5; return LLVMFunctionType (ReturnType, param_types, 5, IsVarArg); } /* * create_builder: * * Create an LLVM builder and remember it so it can be freed later. */ static LLVMBuilderRef create_builder (EmitContext *ctx) { LLVMBuilderRef builder = LLVMCreateBuilder (); if (mono_use_fast_math) mono_llvm_set_fast_math (builder); ctx->builders = g_slist_prepend_mempool (ctx->cfg->mempool, ctx->builders, builder); emit_default_dbg_loc (ctx, builder); return builder; } static char* get_aotconst_name (MonoJumpInfoType type, gconstpointer data, int got_offset) { char *name; int len; switch (type) { case MONO_PATCH_INFO_JIT_ICALL_ID: name = g_strdup_printf ("jit_icall_%s", mono_find_jit_icall_info ((MonoJitICallId)(gsize)data)->name); break; case MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL: name = g_strdup_printf ("jit_icall_addr_nocall_%s", mono_find_jit_icall_info ((MonoJitICallId)(gsize)data)->name); break; case MONO_PATCH_INFO_RGCTX_SLOT_INDEX: { MonoJumpInfoRgctxEntry *entry = (MonoJumpInfoRgctxEntry*)data; name = g_strdup_printf ("rgctx_slot_index_%s", mono_rgctx_info_type_to_str (entry->info_type)); break; } case MONO_PATCH_INFO_AOT_MODULE: case MONO_PATCH_INFO_GC_SAFE_POINT_FLAG: case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR: case MONO_PATCH_INFO_GC_NURSERY_START: case MONO_PATCH_INFO_GC_NURSERY_BITS: case MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG: name = g_strdup_printf ("%s", mono_ji_type_to_string (type)); len = strlen (name); for (int i = 0; i < len; ++i) name [i] = tolower (name [i]); break; default: name = g_strdup_printf ("%s_%d", mono_ji_type_to_string (type), got_offset); len = strlen (name); for (int i = 0; i < len; ++i) name [i] = tolower (name [i]); break; } return name; } static int compute_aot_got_offset (MonoLLVMModule *module, MonoJumpInfo *ji, LLVMTypeRef llvm_type) { guint32 got_offset = mono_aot_get_got_offset (ji); LLVMTypeRef lookup_type = (LLVMTypeRef) g_hash_table_lookup (module->got_idx_to_type, GINT_TO_POINTER (got_offset)); if (!lookup_type) { lookup_type = llvm_type; } else if (llvm_type != lookup_type) { lookup_type = module->ptr_type; } else { return got_offset; } g_hash_table_insert (module->got_idx_to_type, GINT_TO_POINTER (got_offset), lookup_type); return got_offset; } /* Allocate a GOT slot for TYPE/DATA, and emit IR to load it */ static LLVMValueRef get_aotconst_module (MonoLLVMModule *module, LLVMBuilderRef builder, MonoJumpInfoType type, 
gconstpointer data, LLVMTypeRef llvm_type, guint32 *out_got_offset, MonoJumpInfo **out_ji) { guint32 got_offset; LLVMValueRef load; MonoJumpInfo tmp_ji; tmp_ji.type = type; tmp_ji.data.target = data; MonoJumpInfo *ji = mono_aot_patch_info_dup (&tmp_ji); if (out_ji) *out_ji = ji; got_offset = compute_aot_got_offset (module, ji, llvm_type); module->max_got_offset = MAX (module->max_got_offset, got_offset); if (out_got_offset) *out_got_offset = got_offset; if (module->static_link && type == MONO_PATCH_INFO_GC_SAFE_POINT_FLAG) { if (!module->gc_safe_point_flag_var) { const char *symbol = "mono_polling_required"; module->gc_safe_point_flag_var = LLVMAddGlobal (module->lmodule, llvm_type, symbol); LLVMSetLinkage (module->gc_safe_point_flag_var, LLVMExternalLinkage); } return module->gc_safe_point_flag_var; } if (module->static_link && type == MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG) { if (!module->interrupt_flag_var) { const char *symbol = "mono_thread_interruption_request_flag"; module->interrupt_flag_var = LLVMAddGlobal (module->lmodule, llvm_type, symbol); LLVMSetLinkage (module->interrupt_flag_var, LLVMExternalLinkage); } return module->interrupt_flag_var; } LLVMValueRef const_var = g_hash_table_lookup (module->aotconst_vars, GINT_TO_POINTER (got_offset)); if (!const_var) { LLVMTypeRef type = llvm_type; // FIXME: char *name = get_aotconst_name (ji->type, ji->data.target, got_offset); char *symbol = g_strdup_printf ("aotconst_%s", name); g_free (name); LLVMValueRef v = LLVMAddGlobal (module->lmodule, type, symbol); LLVMSetVisibility (v, LLVMHiddenVisibility); LLVMSetLinkage (v, LLVMInternalLinkage); LLVMSetInitializer (v, LLVMConstNull (type)); // FIXME: LLVMSetAlignment (v, 8); g_hash_table_insert (module->aotconst_vars, GINT_TO_POINTER (got_offset), v); const_var = v; } load = LLVMBuildLoad (builder, const_var, ""); if (mono_aot_is_shared_got_offset (got_offset)) set_invariant_load_flag (load); if (type == MONO_PATCH_INFO_LDSTR) set_nonnull_load_flag (load); load = LLVMBuildBitCast (builder, load, llvm_type, ""); return load; } static LLVMValueRef get_aotconst (EmitContext *ctx, MonoJumpInfoType type, gconstpointer data, LLVMTypeRef llvm_type) { MonoCompile *cfg; guint32 got_offset; MonoJumpInfo *ji; LLVMValueRef load; cfg = ctx->cfg; load = get_aotconst_module (ctx->module, ctx->builder, type, data, llvm_type, &got_offset, &ji); ji->next = cfg->patch_info; cfg->patch_info = ji; /* * If the got slot is shared, it means it's initialized when the aot image is loaded, so we don't need to * explicitly initialize it.
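* Non-shared slots are counted in cfg->got_access_count below, presumably so the AOT compiler knows to emit initialization code for them in the method's init sequence.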
*/ if (!mono_aot_is_shared_got_offset (got_offset)) { //mono_print_ji (ji); //printf ("\n"); ctx->cfg->got_access_count ++; } return load; } static LLVMValueRef get_dummy_aotconst (EmitContext *ctx, LLVMTypeRef llvm_type) { LLVMValueRef indexes [2]; LLVMValueRef got_entry_addr, load; LLVMBuilderRef builder = ctx->builder; indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); got_entry_addr = LLVMBuildGEP (builder, ctx->module->dummy_got_var, indexes, 2, ""); load = LLVMBuildLoad (builder, got_entry_addr, ""); load = convert (ctx, load, llvm_type); return load; } typedef struct { MonoJumpInfo *ji; MonoMethod *method; LLVMValueRef load; LLVMTypeRef type; LLVMValueRef lmethod; } CallSite; static LLVMValueRef get_callee_llvmonly (EmitContext *ctx, LLVMTypeRef llvm_sig, MonoJumpInfoType type, gconstpointer data) { LLVMValueRef callee; char *callee_name = NULL; if (ctx->module->static_link && ctx->module->assembly->image != mono_get_corlib ()) { if (type == MONO_PATCH_INFO_JIT_ICALL_ID) { MonoJitICallInfo * const info = mono_find_jit_icall_info ((MonoJitICallId)(gsize)data); g_assert (info); if (info->func != info->wrapper) { type = MONO_PATCH_INFO_METHOD; data = mono_icall_get_wrapper_method (info); callee_name = mono_aot_get_mangled_method_name ((MonoMethod*)data); } } else if (type == MONO_PATCH_INFO_METHOD) { MonoMethod *method = (MonoMethod*)data; if (m_class_get_image (method->klass) != ctx->module->assembly->image && mono_aot_is_externally_callable (method)) callee_name = mono_aot_get_mangled_method_name (method); } } if (!callee_name) callee_name = mono_aot_get_direct_call_symbol (type, data); if (callee_name) { /* Directly callable */ // FIXME: Locking callee = (LLVMValueRef)g_hash_table_lookup (ctx->module->direct_callables, callee_name); if (!callee) { callee = LLVMAddFunction (ctx->lmodule, callee_name, llvm_sig); LLVMSetVisibility (callee, LLVMHiddenVisibility); g_hash_table_insert (ctx->module->direct_callables, (char*)callee_name, callee); } else { /* LLVMTypeRef's are uniqued */ if (LLVMGetElementType (LLVMTypeOf (callee)) != llvm_sig) return LLVMConstBitCast (callee, LLVMPointerType (llvm_sig, 0)); g_free (callee_name); } return callee; } /* * Change references to icalls/pinvokes/jit icalls to their wrappers when in corlib, so * they can be called directly. */ if (ctx->module->assembly->image == mono_get_corlib () && type == MONO_PATCH_INFO_JIT_ICALL_ID) { MonoJitICallInfo * const info = mono_find_jit_icall_info ((MonoJitICallId)(gsize)data); if (info->func != info->wrapper) { type = MONO_PATCH_INFO_METHOD; data = mono_icall_get_wrapper_method (info); } } if (ctx->module->assembly->image == mono_get_corlib () && type == MONO_PATCH_INFO_METHOD) { MonoMethod *method = (MonoMethod*)data; if (m_method_is_icall (method) || m_method_is_pinvoke (method)) data = mono_marshal_get_native_wrapper (method, TRUE, TRUE); } /* * Instead of emitting an indirect call through a got slot, emit a placeholder, and * replace it with a direct call or an indirect call in mono_llvm_fixup_aot_module () * after all methods have been emitted. 
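* Each placeholder is recorded in ctx->callsite_list as a CallSite holding the target method, the patch info and the dummy load, so the fixup pass can rewrite it once the callee's llvm method is known.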
*/ if (type == MONO_PATCH_INFO_METHOD) { MonoMethod *method = (MonoMethod*)data; if (m_class_get_image (method->klass)->assembly == ctx->module->assembly) { MonoJumpInfo tmp_ji; tmp_ji.type = type; tmp_ji.data.target = method; MonoJumpInfo *ji = mono_aot_patch_info_dup (&tmp_ji); ji->next = ctx->cfg->patch_info; ctx->cfg->patch_info = ji; LLVMTypeRef llvm_type = LLVMPointerType (llvm_sig, 0); ctx->cfg->got_access_count ++; CallSite *info = g_new0 (CallSite, 1); info->method = method; info->ji = ji; info->type = llvm_type; /* * Emit a dummy load to represent the callee, and either replace it with * a reference to the llvm method for the callee, or from a load from the * GOT. */ LLVMValueRef load = get_dummy_aotconst (ctx, llvm_type); info->load = load; info->lmethod = ctx->lmethod; g_ptr_array_add (ctx->callsite_list, info); return load; } } /* * All other calls are made through the GOT. */ callee = get_aotconst (ctx, type, data, LLVMPointerType (llvm_sig, 0)); return callee; } /* * get_callee: * * Return an llvm value representing the callee given by the arguments. */ static LLVMValueRef get_callee (EmitContext *ctx, LLVMTypeRef llvm_sig, MonoJumpInfoType type, gconstpointer data) { LLVMValueRef callee; char *callee_name; MonoJumpInfo *ji = NULL; if (ctx->llvm_only) return get_callee_llvmonly (ctx, llvm_sig, type, data); callee_name = NULL; /* Cross-assembly direct calls */ if (type == MONO_PATCH_INFO_METHOD) { MonoMethod *cmethod = (MonoMethod*)data; if (m_class_get_image (cmethod->klass) != ctx->module->assembly->image) { MonoJumpInfo tmp_ji; memset (&tmp_ji, 0, sizeof (MonoJumpInfo)); tmp_ji.type = type; tmp_ji.data.target = data; if (mono_aot_is_direct_callable (&tmp_ji)) { /* * This will add a reference to cmethod's image so it will * be loaded when the current AOT image is loaded, so * the GOT slots used by the init method code are initialized. 
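* In other words, the MONO_PATCH_INFO_IMAGE patch below exists only for this side effect; the call itself goes directly to the mangled symbol of the target method.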
*/ tmp_ji.type = MONO_PATCH_INFO_IMAGE; tmp_ji.data.image = m_class_get_image (cmethod->klass); ji = mono_aot_patch_info_dup (&tmp_ji); mono_aot_get_got_offset (ji); callee_name = mono_aot_get_mangled_method_name (cmethod); callee = (LLVMValueRef)g_hash_table_lookup (ctx->module->direct_callables, callee_name); if (!callee) { callee = LLVMAddFunction (ctx->lmodule, callee_name, llvm_sig); LLVMSetLinkage (callee, LLVMExternalLinkage); g_hash_table_insert (ctx->module->direct_callables, callee_name, callee); } else { /* LLVMTypeRef's are uniqued */ if (LLVMGetElementType (LLVMTypeOf (callee)) != llvm_sig) callee = LLVMConstBitCast (callee, LLVMPointerType (llvm_sig, 0)); g_free (callee_name); } return callee; } } } callee_name = mono_aot_get_plt_symbol (type, data); if (!callee_name) return NULL; if (ctx->cfg->compile_aot) /* Add a patch so referenced wrappers can be compiled in full aot mode */ mono_add_patch_info (ctx->cfg, 0, type, data); // FIXME: Locking callee = (LLVMValueRef)g_hash_table_lookup (ctx->module->plt_entries, callee_name); if (!callee) { callee = LLVMAddFunction (ctx->lmodule, callee_name, llvm_sig); LLVMSetVisibility (callee, LLVMHiddenVisibility); g_hash_table_insert (ctx->module->plt_entries, (char*)callee_name, callee); } if (ctx->cfg->compile_aot) { ji = g_new0 (MonoJumpInfo, 1); ji->type = type; ji->data.target = data; g_hash_table_insert (ctx->module->plt_entries_ji, ji, callee); } return callee; } static LLVMValueRef get_jit_callee (EmitContext *ctx, const char *name, LLVMTypeRef llvm_sig, MonoJumpInfoType type, gconstpointer data) { gpointer target; // This won't be patched so compile the wrapper immediately if (type == MONO_PATCH_INFO_JIT_ICALL_ID) { MonoJitICallInfo * const info = mono_find_jit_icall_info ((MonoJitICallId)(gsize)data); target = (gpointer)mono_icall_get_wrapper_full (info, TRUE); } else { target = resolve_patch (ctx->cfg, type, data); } LLVMValueRef tramp_var = LLVMAddGlobal (ctx->lmodule, LLVMPointerType (llvm_sig, 0), name); LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (LLVMConstInt (LLVMInt64Type (), (guint64)(size_t)target, FALSE), LLVMPointerType (llvm_sig, 0))); LLVMSetLinkage (tramp_var, LLVMExternalLinkage); LLVMValueRef callee = LLVMBuildLoad (ctx->builder, tramp_var, ""); return callee; } static int get_handler_clause (MonoCompile *cfg, MonoBasicBlock *bb) { MonoMethodHeader *header = cfg->header; MonoExceptionClause *clause; int i; /* Directly */ if (bb->region != -1 && MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_TRY)) return (bb->region >> 8) - 1; /* Indirectly */ for (i = 0; i < header->num_clauses; ++i) { clause = &header->clauses [i]; if (MONO_OFFSET_IN_CLAUSE (clause, bb->real_offset) && clause->flags == MONO_EXCEPTION_CLAUSE_NONE) return i; } return -1; } static MonoExceptionClause * get_most_deep_clause (MonoCompile *cfg, EmitContext *ctx, MonoBasicBlock *bb) { if (bb == cfg->bb_init) return NULL; // Since they're sorted by nesting we just need // the first one that the bb is a member of for (int i = 0; i < cfg->header->num_clauses; i++) { MonoExceptionClause *curr = &cfg->header->clauses [i]; if (MONO_OFFSET_IN_CLAUSE (curr, bb->real_offset)) return curr; } return NULL; } static void set_metadata_flag (LLVMValueRef v, const char *flag_name) { LLVMValueRef md_arg; int md_kind; md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name)); md_arg = LLVMMDString ("mono", 4); LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1)); } static void set_nonnull_load_flag (LLVMValueRef v) { LLVMValueRef md_arg; int md_kind; const char 
*flag_name; flag_name = "nonnull"; md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name)); md_arg = LLVMMDString ("<index>", strlen ("<index>")); LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1)); } static void set_nontemporal_flag (LLVMValueRef v) { LLVMValueRef md_arg; int md_kind; const char *flag_name; // FIXME: Cache this flag_name = "nontemporal"; md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name)); md_arg = const_int32 (1); LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1)); } static void set_invariant_load_flag (LLVMValueRef v) { LLVMValueRef md_arg; int md_kind; const char *flag_name; // FIXME: Cache this flag_name = "invariant.load"; md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name)); md_arg = LLVMMDString ("<index>", strlen ("<index>")); LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1)); } /* * emit_call: * * Emit an LLVM call or invoke instruction depending on whenever the call is inside * a try region. */ static LLVMValueRef emit_call (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, LLVMValueRef callee, LLVMValueRef *args, int pindex) { MonoCompile *cfg = ctx->cfg; LLVMValueRef lcall = NULL; LLVMBuilderRef builder = *builder_ref; MonoExceptionClause *clause; if (ctx->llvm_only) { clause = bb ? get_most_deep_clause (cfg, ctx, bb) : NULL; // FIXME: Use an invoke only for calls inside try-catch blocks if (clause && (!cfg->deopt || ctx->has_catch)) { /* * Have to use an invoke instead of a call, branching to the * handler bblock of the clause containing this bblock. */ intptr_t key = CLAUSE_END (clause); LLVMBasicBlockRef lpad_bb = (LLVMBasicBlockRef)g_hash_table_lookup (ctx->exc_meta, (gconstpointer)key); // FIXME: Find the one that has the lowest end bound for the right start address // FIXME: Finally + nesting if (lpad_bb) { LLVMBasicBlockRef noex_bb = gen_bb (ctx, "CALL_NOEX_BB"); /* Use an invoke */ lcall = LLVMBuildInvoke (builder, callee, args, pindex, noex_bb, lpad_bb, ""); builder = ctx->builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, noex_bb); ctx->bblocks [bb->block_num].end_bblock = noex_bb; } } } else { int clause_index = get_handler_clause (cfg, bb); if (clause_index != -1) { MonoMethodHeader *header = cfg->header; MonoExceptionClause *ec = &header->clauses [clause_index]; MonoBasicBlock *tblock; LLVMBasicBlockRef ex_bb, noex_bb; /* * Have to use an invoke instead of a call, branching to the * handler bblock of the clause containing this bblock. 
*/ g_assert (ec->flags == MONO_EXCEPTION_CLAUSE_NONE || ec->flags == MONO_EXCEPTION_CLAUSE_FINALLY || ec->flags == MONO_EXCEPTION_CLAUSE_FAULT); tblock = cfg->cil_offset_to_bb [ec->handler_offset]; g_assert (tblock); ctx->bblocks [tblock->block_num].invoke_target = TRUE; ex_bb = get_bb (ctx, tblock); noex_bb = gen_bb (ctx, "NOEX_BB"); /* Use an invoke */ lcall = LLVMBuildInvoke (builder, callee, args, pindex, noex_bb, ex_bb, ""); builder = ctx->builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, noex_bb); ctx->bblocks [bb->block_num].end_bblock = noex_bb; } } if (!lcall) { lcall = LLVMBuildCall (builder, callee, args, pindex, ""); ctx->builder = builder; } if (builder_ref) *builder_ref = ctx->builder; return lcall; } static LLVMValueRef emit_load (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef addr, LLVMValueRef base, const char *name, gboolean is_faulting, gboolean is_volatile, BarrierKind barrier) { LLVMValueRef res; /* * We emit volatile loads for loads which can fault, because otherwise * LLVM will generate invalid code when encountering a load from a * NULL address. */ if (barrier != LLVM_BARRIER_NONE) res = mono_llvm_build_atomic_load (*builder_ref, addr, name, is_volatile, size, barrier); else res = mono_llvm_build_load (*builder_ref, addr, name, is_volatile); return res; } static void emit_store_general (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef value, LLVMValueRef addr, LLVMValueRef base, gboolean is_faulting, gboolean is_volatile, BarrierKind barrier) { if (barrier != LLVM_BARRIER_NONE) mono_llvm_build_aligned_store (*builder_ref, value, addr, barrier, size); else mono_llvm_build_store (*builder_ref, value, addr, is_volatile, barrier); } static void emit_store (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef value, LLVMValueRef addr, LLVMValueRef base, gboolean is_faulting, gboolean is_volatile) { emit_store_general (ctx, bb, builder_ref, size, value, addr, base, is_faulting, is_volatile, LLVM_BARRIER_NONE); } /* * emit_cond_system_exception: * * Emit code to throw the exception EXC_TYPE if the condition CMP is false. * Might set the ctx exception. 
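* Note that the conditional branch below transfers control to the throw path when CMP is true, i.e. callers pass the failure condition in CMP.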
*/ static void emit_cond_system_exception (EmitContext *ctx, MonoBasicBlock *bb, const char *exc_type, LLVMValueRef cmp, gboolean force_explicit) { LLVMBasicBlockRef ex_bb, ex2_bb = NULL, noex_bb; LLVMBuilderRef builder; MonoClass *exc_class; LLVMValueRef args [2]; LLVMValueRef callee; gboolean no_pc = FALSE; static MonoClass *exc_classes [MONO_EXC_INTRINS_NUM]; if (IS_TARGET_AMD64) /* Some platforms don't require the pc argument */ no_pc = TRUE; int exc_id = mini_exception_id_by_name (exc_type); if (!exc_classes [exc_id]) exc_classes [exc_id] = mono_class_load_from_name (mono_get_corlib (), "System", exc_type); exc_class = exc_classes [exc_id]; ex_bb = gen_bb (ctx, "EX_BB"); if (ctx->llvm_only) ex2_bb = gen_bb (ctx, "EX2_BB"); noex_bb = gen_bb (ctx, "NOEX_BB"); LLVMValueRef branch = LLVMBuildCondBr (ctx->builder, cmp, ex_bb, noex_bb); if (exc_id == MONO_EXC_NULL_REF && !ctx->cfg->disable_llvm_implicit_null_checks && !force_explicit) { mono_llvm_set_implicit_branch (ctx->builder, branch); } /* Emit exception throwing code */ ctx->builder = builder = create_builder (ctx); LLVMPositionBuilderAtEnd (builder, ex_bb); if (ctx->cfg->llvm_only) { LLVMBuildBr (builder, ex2_bb); ctx->builder = builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, ex2_bb); if (exc_id == MONO_EXC_NULL_REF) { static LLVMTypeRef sig; if (!sig) sig = LLVMFunctionType0 (LLVMVoidType (), FALSE); /* Can't cache this */ callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_nullref_exception)); emit_call (ctx, bb, &builder, callee, NULL, 0); } else { static LLVMTypeRef sig; if (!sig) sig = LLVMFunctionType1 (LLVMVoidType (), LLVMInt32Type (), FALSE); callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_corlib_exception)); args [0] = LLVMConstInt (LLVMInt32Type (), m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF, FALSE); emit_call (ctx, bb, &builder, callee, args, 1); } LLVMBuildUnreachable (builder); ctx->builder = builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, noex_bb); ctx->bblocks [bb->block_num].end_bblock = noex_bb; ctx->ex_index ++; return; } callee = ctx->module->throw_corlib_exception; if (!callee) { LLVMTypeRef sig; if (no_pc) sig = LLVMFunctionType1 (LLVMVoidType (), LLVMInt32Type (), FALSE); else sig = LLVMFunctionType2 (LLVMVoidType (), LLVMInt32Type (), LLVMPointerType (LLVMInt8Type (), 0), FALSE); const MonoJitICallId icall_id = MONO_JIT_ICALL_mono_llvm_throw_corlib_exception_abs_trampoline; if (ctx->cfg->compile_aot) { callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); } else { /* * Differences between the LLVM/non-LLVM throw corlib exception trampoline: * - On x86, LLVM generated code doesn't push the arguments * - The trampoline takes the throw address as an argument, not a pc offset. */ callee = get_jit_callee (ctx, "llvm_throw_corlib_exception_trampoline", sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); /* * Make sure that ex_bb starts with the invoke, so the block address points to it, and not to the load * added by get_jit_callee ().
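* This matters because the block address of ex_bb is passed to the trampoline as the throw site (see the LLVMBlockAddress call below).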
*/ ex2_bb = gen_bb (ctx, "EX2_BB"); LLVMBuildBr (builder, ex2_bb); ex_bb = ex2_bb; ctx->builder = builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, ex2_bb); } } args [0] = LLVMConstInt (LLVMInt32Type (), m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF, FALSE); /* * The LLVM mono branch contains changes so a block address can be passed as an * argument to a call. */ if (no_pc) { emit_call (ctx, bb, &builder, callee, args, 1); } else { args [1] = LLVMBlockAddress (ctx->lmethod, ex_bb); emit_call (ctx, bb, &builder, callee, args, 2); } LLVMBuildUnreachable (builder); ctx->builder = builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, noex_bb); ctx->bblocks [bb->block_num].end_bblock = noex_bb; ctx->ex_index ++; return; } /* * emit_args_to_vtype: * * Emit code to store the vtype in the arguments args to the address ADDRESS. */ static void emit_args_to_vtype (EmitContext *ctx, LLVMBuilderRef builder, MonoType *t, LLVMValueRef address, LLVMArgInfo *ainfo, LLVMValueRef *args) { int j, size, nslots; MonoClass *klass; t = mini_get_underlying_type (t); klass = mono_class_from_mono_type_internal (t); size = mono_class_value_size (klass, NULL); if (MONO_CLASS_IS_SIMD (ctx->cfg, klass)) address = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (LLVMInt8Type (), 0), ""); if (ainfo->storage == LLVMArgAsFpArgs) nslots = ainfo->nslots; else nslots = 2; for (j = 0; j < nslots; ++j) { LLVMValueRef index [2], addr, daddr; int part_size = size > TARGET_SIZEOF_VOID_P ? TARGET_SIZEOF_VOID_P : size; LLVMTypeRef part_type; while (part_size != 1 && part_size != 2 && part_size != 4 && part_size < 8) part_size ++; if (ainfo->pair_storage [j] == LLVMArgNone) continue; switch (ainfo->pair_storage [j]) { case LLVMArgInIReg: { part_type = LLVMIntType (part_size * 8); if (MONO_CLASS_IS_SIMD (ctx->cfg, klass)) { index [0] = LLVMConstInt (LLVMInt32Type (), j * TARGET_SIZEOF_VOID_P, FALSE); addr = LLVMBuildGEP (builder, address, index, 1, ""); } else { daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (IntPtrType (), 0), ""); index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE); addr = LLVMBuildGEP (builder, daddr, index, 1, ""); } LLVMBuildStore (builder, convert (ctx, args [j], part_type), LLVMBuildBitCast (ctx->builder, addr, LLVMPointerType (part_type, 0), "")); break; } case LLVMArgInFPReg: { LLVMTypeRef arg_type; if (ainfo->esize == 8) arg_type = LLVMDoubleType (); else arg_type = LLVMFloatType (); index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE); daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (arg_type, 0), ""); addr = LLVMBuildGEP (builder, daddr, index, 1, ""); LLVMBuildStore (builder, args [j], addr); break; } case LLVMArgNone: break; default: g_assert_not_reached (); } size -= TARGET_SIZEOF_VOID_P; } } /* * emit_vtype_to_args: * * Emit code to load a vtype at address ADDRESS into scalar arguments. Store the arguments * into ARGS, and the number of arguments into NARGS. 
*/ static void emit_vtype_to_args (EmitContext *ctx, LLVMBuilderRef builder, MonoType *t, LLVMValueRef address, LLVMArgInfo *ainfo, LLVMValueRef *args, guint32 *nargs) { int pindex = 0; int j, nslots; LLVMTypeRef arg_type; t = mini_get_underlying_type (t); int32_t size = get_vtype_size_align (t).size; if (MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (t))) address = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (LLVMInt8Type (), 0), ""); if (ainfo->storage == LLVMArgAsFpArgs) nslots = ainfo->nslots; else nslots = 2; for (j = 0; j < nslots; ++j) { LLVMValueRef index [2], addr, daddr; int partsize = size > TARGET_SIZEOF_VOID_P ? TARGET_SIZEOF_VOID_P : size; if (ainfo->pair_storage [j] == LLVMArgNone) continue; switch (ainfo->pair_storage [j]) { case LLVMArgInIReg: if (MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (t))) { index [0] = LLVMConstInt (LLVMInt32Type (), j * TARGET_SIZEOF_VOID_P, FALSE); addr = LLVMBuildGEP (builder, address, index, 1, ""); } else { daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (IntPtrType (), 0), ""); index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE); addr = LLVMBuildGEP (builder, daddr, index, 1, ""); } args [pindex ++] = convert (ctx, LLVMBuildLoad (builder, LLVMBuildBitCast (ctx->builder, addr, LLVMPointerType (LLVMIntType (partsize * 8), 0), ""), ""), IntPtrType ()); break; case LLVMArgInFPReg: if (ainfo->esize == 8) arg_type = LLVMDoubleType (); else arg_type = LLVMFloatType (); daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (arg_type, 0), ""); index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE); addr = LLVMBuildGEP (builder, daddr, index, 1, ""); args [pindex ++] = LLVMBuildLoad (builder, addr, ""); break; case LLVMArgNone: break; default: g_assert_not_reached (); } size -= TARGET_SIZEOF_VOID_P; } *nargs = pindex; } static LLVMValueRef build_alloca_llvm_type_name (EmitContext *ctx, LLVMTypeRef t, int align, const char *name) { /* * Have to place all alloca's at the end of the entry bb, since otherwise they would * get executed every time control reaches them. */ LLVMPositionBuilder (ctx->alloca_builder, get_bb (ctx, ctx->cfg->bb_entry), ctx->last_alloca); ctx->last_alloca = mono_llvm_build_alloca (ctx->alloca_builder, t, NULL, align, name); return ctx->last_alloca; } static LLVMValueRef build_alloca_llvm_type (EmitContext *ctx, LLVMTypeRef t, int align) { return build_alloca_llvm_type_name (ctx, t, align, ""); } static LLVMValueRef build_named_alloca (EmitContext *ctx, MonoType *t, char const *name) { MonoClass *k = mono_class_from_mono_type_internal (t); int align; g_assert (!mini_is_gsharedvt_variable_type (t)); if (MONO_CLASS_IS_SIMD (ctx->cfg, k)) align = mono_class_value_size (k, NULL); else align = mono_class_min_align (k); /* Sometimes align is not a power of 2 */ while (mono_is_power_of_two (align) == -1) align ++; return build_alloca_llvm_type_name (ctx, type_to_llvm_type (ctx, t), align, name); } static LLVMValueRef build_alloca (EmitContext *ctx, MonoType *t) { return build_named_alloca (ctx, t, ""); } static LLVMValueRef emit_gsharedvt_ldaddr (EmitContext *ctx, int vreg) { /* * gsharedvt local. * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx]. 
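* * Roughly (illustrative): offset = *(gint32*)(info + MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + idx * TARGET_SIZEOF_VOID_P); addr = locals + offset;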
static LLVMValueRef
emit_gsharedvt_ldaddr (EmitContext *ctx, int vreg)
{
	/*
	 * gsharedvt local.
	 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
	 */
	MonoCompile *cfg = ctx->cfg;
	LLVMBuilderRef builder = ctx->builder;
	LLVMValueRef offset, offset_var;
	LLVMValueRef info_var = ctx->values [cfg->gsharedvt_info_var->dreg];
	LLVMValueRef locals_var = ctx->values [cfg->gsharedvt_locals_var->dreg];
	LLVMValueRef ptr;
	char *name;

	g_assert (info_var);
	g_assert (locals_var);

	int idx = cfg->gsharedvt_vreg_to_idx [vreg] - 1;

	offset = LLVMConstInt (LLVMInt32Type (), MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P), FALSE);
	ptr = LLVMBuildAdd (builder, convert (ctx, info_var, IntPtrType ()), convert (ctx, offset, IntPtrType ()), "");

	name = g_strdup_printf ("gsharedvt_local_%d_offset", vreg);
	offset_var = LLVMBuildLoad (builder, convert (ctx, ptr, LLVMPointerType (LLVMInt32Type (), 0)), name);

	return LLVMBuildAdd (builder, convert (ctx, locals_var, IntPtrType ()), convert (ctx, offset_var, IntPtrType ()), "");
}

/*
 * Put the global into the 'llvm.used' array to prevent it from being optimized away.
 */
static void
mark_as_used (MonoLLVMModule *module, LLVMValueRef global)
{
	if (!module->used)
		module->used = g_ptr_array_sized_new (16);
	g_ptr_array_add (module->used, global);
}

static void
emit_llvm_used (MonoLLVMModule *module)
{
	LLVMModuleRef lmodule = module->lmodule;
	LLVMTypeRef used_type;
	LLVMValueRef used, *used_elem;
	int i;

	if (!module->used)
		return;

	used_type = LLVMArrayType (LLVMPointerType (LLVMInt8Type (), 0), module->used->len);
	used = LLVMAddGlobal (lmodule, used_type, "llvm.used");
	used_elem = g_new0 (LLVMValueRef, module->used->len);
	for (i = 0; i < module->used->len; ++i)
		used_elem [i] = LLVMConstBitCast ((LLVMValueRef)g_ptr_array_index (module->used, i), LLVMPointerType (LLVMInt8Type (), 0));
	LLVMSetInitializer (used, LLVMConstArray (LLVMPointerType (LLVMInt8Type (), 0), used_elem, module->used->len));
	LLVMSetLinkage (used, LLVMAppendingLinkage);
	LLVMSetSection (used, "llvm.metadata");
}
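/*
 * Illustrative sketch (not from the original source): the global built by
 * emit_llvm_used () takes the well-known llvm.used form, roughly
 *
 *   @llvm.used = appending global [2 x i8*]
 *       [i8* bitcast (void ()* @llvm_code_start to i8*),
 *        i8* bitcast (void ()* @llvm_code_end to i8*)], section "llvm.metadata"
 *
 * which tells the optimizer that these otherwise-unreferenced globals must be
 * kept alive.
 */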
/*
 * emit_get_method:
 *
 *   Emit a function mapping method indexes to their code
 */
static void
emit_get_method (MonoLLVMModule *module)
{
	LLVMModuleRef lmodule = module->lmodule;
	LLVMValueRef func, switch_ins, m;
	LLVMBasicBlockRef entry_bb, fail_bb, bb, code_start_bb, code_end_bb, main_bb;
	LLVMBasicBlockRef *bbs = NULL;
	LLVMTypeRef rtype;
	LLVMBuilderRef builder = LLVMCreateBuilder ();
	LLVMValueRef table = NULL;
	char *name;
	int i;
	gboolean emit_table = FALSE;

#ifdef TARGET_WASM
	/*
	 * Emit a table of functions instead of a switch statement,
	 * it's very efficient on wasm. This might be usable on
	 * other platforms too.
	 */
	emit_table = TRUE;
#endif

	rtype = LLVMPointerType (LLVMInt8Type (), 0);

	int table_len = module->max_method_idx + 1;

	if (emit_table) {
		LLVMTypeRef table_type;
		LLVMValueRef *table_elems;
		char *table_name;

		table_type = LLVMArrayType (rtype, table_len);
		table_name = g_strdup_printf ("%s_method_table", module->global_prefix);
		table = LLVMAddGlobal (lmodule, table_type, table_name);
		table_elems = g_new0 (LLVMValueRef, table_len);
		for (i = 0; i < table_len; ++i) {
			m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_lmethod, GINT_TO_POINTER (i));
			if (m && !g_hash_table_lookup (module->no_method_table_lmethods, m))
				table_elems [i] = LLVMBuildBitCast (builder, m, rtype, "");
			else
				table_elems [i] = LLVMConstNull (rtype);
		}
		LLVMSetInitializer (table, LLVMConstArray (LLVMPointerType (LLVMInt8Type (), 0), table_elems, table_len));
	}

	/*
	 * Emit a switch statement. Emitting a table of function addresses is smaller/faster,
	 * but generating code seems safer.
	 */
	func = LLVMAddFunction (lmodule, module->get_method_symbol, LLVMFunctionType1 (rtype, LLVMInt32Type (), FALSE));
	LLVMSetLinkage (func, LLVMExternalLinkage);
	LLVMSetVisibility (func, LLVMHiddenVisibility);
	mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
	module->get_method = func;

	entry_bb = LLVMAppendBasicBlock (func, "ENTRY");

	/*
	 * Return llvm_code_start/llvm_code_end when called with -1/-2.
	 * Hopefully, the toolchain doesn't reorder these functions. If it does,
	 * then we will have to find another solution.
	 */

	name = g_strdup_printf ("BB_CODE_START");
	code_start_bb = LLVMAppendBasicBlock (func, name);
	g_free (name);
	LLVMPositionBuilderAtEnd (builder, code_start_bb);
	LLVMBuildRet (builder, LLVMBuildBitCast (builder, module->code_start, rtype, ""));

	name = g_strdup_printf ("BB_CODE_END");
	code_end_bb = LLVMAppendBasicBlock (func, name);
	g_free (name);
	LLVMPositionBuilderAtEnd (builder, code_end_bb);
	LLVMBuildRet (builder, LLVMBuildBitCast (builder, module->code_end, rtype, ""));

	if (emit_table) {
		/*
		 * Because table_len is computed using the method indexes available for us, it
		 * might not include methods which are not compiled because of AOT profiles.
		 * So table_len can be smaller than info->nmethods. Add a bounds check because
		 * of that.
		 * switch (index) {
		 * case -1: return code_start;
		 * case -2: return code_end;
		 * default: return index < table_len ? method_table [index] : 0;
		 * }
		 */
		fail_bb = LLVMAppendBasicBlock (func, "FAIL");
		LLVMPositionBuilderAtEnd (builder, fail_bb);
		LLVMBuildRet (builder, LLVMBuildIntToPtr (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), rtype, ""));

		main_bb = LLVMAppendBasicBlock (func, "MAIN");
		LLVMPositionBuilderAtEnd (builder, main_bb);
		LLVMValueRef base = table;
		LLVMValueRef indexes [2];
		indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
		indexes [1] = LLVMGetParam (func, 0);
		LLVMValueRef addr = LLVMBuildGEP (builder, base, indexes, 2, "");
		LLVMValueRef res = mono_llvm_build_load (builder, addr, "", FALSE);
		LLVMBuildRet (builder, res);

		LLVMBasicBlockRef default_bb = LLVMAppendBasicBlock (func, "DEFAULT");
		LLVMPositionBuilderAtEnd (builder, default_bb);
		LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSGE, LLVMGetParam (func, 0), LLVMConstInt (LLVMInt32Type (), table_len, FALSE), "");
		LLVMBuildCondBr (builder, cmp, fail_bb, main_bb);

		LLVMPositionBuilderAtEnd (builder, entry_bb);

		switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), default_bb, 0);
		LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), -1, FALSE), code_start_bb);
		LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), -2, FALSE), code_end_bb);
	} else {
		bbs = g_new0 (LLVMBasicBlockRef, module->max_method_idx + 1);
		for (i = 0; i < module->max_method_idx + 1; ++i) {
			name = g_strdup_printf ("BB_%d", i);
			bb = LLVMAppendBasicBlock (func, name);
			g_free (name);
			bbs [i] = bb;
			LLVMPositionBuilderAtEnd (builder, bb);

			m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_lmethod, GINT_TO_POINTER (i));
			if (m && !g_hash_table_lookup (module->no_method_table_lmethods, m))
				LLVMBuildRet (builder, LLVMBuildBitCast (builder, m, rtype, ""));
			else
				LLVMBuildRet (builder, LLVMConstNull (rtype));
		}

		fail_bb = LLVMAppendBasicBlock (func, "FAIL");
		LLVMPositionBuilderAtEnd (builder, fail_bb);
		LLVMBuildRet (builder, LLVMConstNull (rtype));

		LLVMPositionBuilderAtEnd (builder, entry_bb);

		switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), fail_bb, 0);
		LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), -1, FALSE), code_start_bb);
		LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), -2, FALSE), code_end_bb);
		for (i = 0; i < module->max_method_idx + 1; ++i) {
			LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]);
		}
	}

	mark_as_used (module, func);

	LLVMDisposeBuilder (builder);
}
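/*
 * Hypothetical runtime-side lookup, added for illustration only: since
 * unbox_tramp_indexes (emitted by emit_get_unbox_tramp () below) is sorted by
 * method index, a consumer can binary-search it and reuse the position to
 * index unbox_trampolines. All names here are assumptions, not the actual
 * runtime code.
 */
static gpointer
lookup_unbox_tramp_sketch (const guint16 *indexes, gpointer *tramps, int num, int method_idx)
{
	int lo = 0, hi = num;

	while (lo < hi) {
		int mid = lo + ((hi - lo) >> 1);
		if (indexes [mid] < method_idx)
			lo = mid + 1;
		else
			hi = mid;
	}
	/* Only ~10% of methods have an unbox trampoline */
	if (lo == num || indexes [lo] != method_idx)
		return NULL;
	return tramps [lo];
}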
/*
 * emit_get_unbox_tramp:
 *
 *   Emit a function mapping method indexes to their unbox trampoline
 */
static void
emit_get_unbox_tramp (MonoLLVMModule *module)
{
	LLVMModuleRef lmodule = module->lmodule;
	LLVMValueRef func, switch_ins, m;
	LLVMBasicBlockRef entry_bb, fail_bb, bb;
	LLVMBasicBlockRef *bbs;
	LLVMTypeRef rtype;
	LLVMBuilderRef builder = LLVMCreateBuilder ();
	char *name;
	int i;
	gboolean emit_table = FALSE;

	/* Similar to emit_get_method () */
#ifndef TARGET_WATCHOS
	emit_table = TRUE;
#endif

	rtype = LLVMPointerType (LLVMInt8Type (), 0);

	if (emit_table) {
		// About 10% of methods have an unbox tramp, so emit a table of indexes for them
		// that the runtime can search using a binary search
		int len = 0;
		for (i = 0; i < module->max_method_idx + 1; ++i) {
			m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i));
			if (m)
				len ++;
		}

		LLVMTypeRef table_type, elemtype;
		LLVMValueRef *table_elems;
		LLVMValueRef table;
		char *table_name;
		int table_len;
		int elemsize;

		table_len = len;
		elemsize = module->max_method_idx < 65000 ? 2 : 4;

		// The index table
		elemtype = elemsize == 2 ? LLVMInt16Type () : LLVMInt32Type ();
		table_type = LLVMArrayType (elemtype, table_len);
		table_name = g_strdup_printf ("%s_unbox_tramp_indexes", module->global_prefix);
		table = LLVMAddGlobal (lmodule, table_type, table_name);
		table_elems = g_new0 (LLVMValueRef, table_len);
		int idx = 0;
		for (i = 0; i < module->max_method_idx + 1; ++i) {
			m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i));
			if (m)
				table_elems [idx ++] = LLVMConstInt (elemtype, i, FALSE);
		}
		LLVMSetInitializer (table, LLVMConstArray (elemtype, table_elems, table_len));
		module->unbox_tramp_indexes = table;

		// The trampoline table
		elemtype = rtype;
		table_type = LLVMArrayType (elemtype, table_len);
		table_name = g_strdup_printf ("%s_unbox_trampolines", module->global_prefix);
		table = LLVMAddGlobal (lmodule, table_type, table_name);
		table_elems = g_new0 (LLVMValueRef, table_len);
		idx = 0;
		for (i = 0; i < module->max_method_idx + 1; ++i) {
			m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i));
			if (m)
				table_elems [idx ++] = LLVMBuildBitCast (builder, m, rtype, "");
		}
		LLVMSetInitializer (table, LLVMConstArray (elemtype, table_elems, table_len));
		module->unbox_trampolines = table;

		module->unbox_tramp_num = table_len;
		module->unbox_tramp_elemsize = elemsize;
		return;
	}

	func = LLVMAddFunction (lmodule, module->get_unbox_tramp_symbol, LLVMFunctionType1 (rtype, LLVMInt32Type (), FALSE));
	LLVMSetLinkage (func, LLVMExternalLinkage);
	LLVMSetVisibility (func, LLVMHiddenVisibility);
	mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
	module->get_unbox_tramp = func;

	entry_bb = LLVMAppendBasicBlock (func, "ENTRY");

	bbs = g_new0 (LLVMBasicBlockRef, module->max_method_idx + 1);
	for (i = 0; i < module->max_method_idx + 1; ++i) {
		m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i));
		if (!m)
			continue;

		name = g_strdup_printf ("BB_%d", i);
		bb = LLVMAppendBasicBlock (func, name);
		g_free (name);
		bbs [i] = bb;
		LLVMPositionBuilderAtEnd (builder, bb);

		LLVMBuildRet (builder, LLVMBuildBitCast (builder, m, rtype, ""));
	}

	fail_bb = LLVMAppendBasicBlock (func, "FAIL");
	LLVMPositionBuilderAtEnd (builder, fail_bb);
	LLVMBuildRet (builder, LLVMConstNull (rtype));

	LLVMPositionBuilderAtEnd (builder, entry_bb);

	switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), fail_bb, 0);
	for (i = 0; i < module->max_method_idx + 1; ++i) {
		m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i));
		if (!m)
			continue;

		LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]);
	}

	mark_as_used (module, func);

	LLVMDisposeBuilder (builder);
}

/*
 * emit_init_aotconst:
 *
 *   Emit a function to initialize the aotconst_ variables. Called by the runtime.
 */
static void
emit_init_aotconst (MonoLLVMModule *module)
{
	LLVMModuleRef lmodule = module->lmodule;
	LLVMValueRef func;
	LLVMBasicBlockRef entry_bb;
	LLVMBuilderRef builder = LLVMCreateBuilder ();

	func = LLVMAddFunction (lmodule, module->init_aotconst_symbol, LLVMFunctionType2 (LLVMVoidType (), LLVMInt32Type (), IntPtrType (), FALSE));
	LLVMSetLinkage (func, LLVMExternalLinkage);
	LLVMSetVisibility (func, LLVMHiddenVisibility);
	mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
	module->init_aotconst_func = func;

	entry_bb = LLVMAppendBasicBlock (func, "ENTRY");

	LLVMPositionBuilderAtEnd (builder, entry_bb);

#ifdef TARGET_WASM
	/* Emit a table of aotconst addresses instead of a switch statement to save space */
	LLVMValueRef aotconsts;
	LLVMTypeRef aotconst_addr_type = LLVMPointerType (module->ptr_type, 0);
	int table_size = module->max_got_offset + 1;
	LLVMTypeRef aotconst_arr_type = LLVMArrayType (aotconst_addr_type, table_size);

	LLVMValueRef aotconst_dummy = LLVMAddGlobal (module->lmodule, module->ptr_type, "aotconst_dummy");
	LLVMSetInitializer (aotconst_dummy, LLVMConstNull (module->ptr_type));
	LLVMSetVisibility (aotconst_dummy, LLVMHiddenVisibility);
	LLVMSetLinkage (aotconst_dummy, LLVMInternalLinkage);

	aotconsts = LLVMAddGlobal (module->lmodule, aotconst_arr_type, "aotconsts");
	LLVMValueRef *aotconst_init = g_new0 (LLVMValueRef, table_size);
	for (int i = 0; i < table_size; ++i) {
		LLVMValueRef aotconst = (LLVMValueRef)g_hash_table_lookup (module->aotconst_vars, GINT_TO_POINTER (i));
		if (aotconst)
			aotconst_init [i] = LLVMConstBitCast (aotconst, aotconst_addr_type);
		else
			aotconst_init [i] = LLVMConstBitCast (aotconst_dummy, aotconst_addr_type);
	}
	LLVMSetInitializer (aotconsts, LLVMConstArray (aotconst_addr_type, aotconst_init, table_size));
	LLVMSetVisibility (aotconsts, LLVMHiddenVisibility);
	LLVMSetLinkage (aotconsts, LLVMInternalLinkage);

	LLVMBasicBlockRef exit_bb = LLVMAppendBasicBlock (func, "EXIT_BB");
	LLVMBasicBlockRef main_bb = LLVMAppendBasicBlock (func, "BB");

	LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSGE, LLVMGetParam (func, 0), LLVMConstInt (LLVMInt32Type (), table_size, FALSE), "");
	LLVMBuildCondBr (builder, cmp, exit_bb, main_bb);

	LLVMPositionBuilderAtEnd (builder, main_bb);
	LLVMValueRef indexes [2];
	indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
	indexes [1] = LLVMGetParam (func, 0);
	LLVMValueRef aotconst_addr = LLVMBuildLoad (builder, LLVMBuildGEP (builder, aotconsts, indexes, 2, ""), "");
	LLVMBuildStore (builder, LLVMBuildIntToPtr (builder, LLVMGetParam (func, 1), module->ptr_type, ""), aotconst_addr);
	LLVMBuildBr (builder, exit_bb);

	LLVMPositionBuilderAtEnd (builder, exit_bb);
	LLVMBuildRetVoid (builder);
#else
	LLVMValueRef switch_ins;
	LLVMBasicBlockRef fail_bb, bb;
	LLVMBasicBlockRef *bbs = NULL;
	char *name;

	bbs = g_new0 (LLVMBasicBlockRef, module->max_got_offset + 1);
	for (int i = 0; i < module->max_got_offset + 1; ++i) {
		name = g_strdup_printf ("BB_%d", i);
		bb = LLVMAppendBasicBlock (func, name);
		g_free (name);
		bbs [i] = bb;
		LLVMPositionBuilderAtEnd (builder, bb);

		LLVMValueRef var = g_hash_table_lookup (module->aotconst_vars, GINT_TO_POINTER (i));
		if (var) {
			LLVMValueRef addr = LLVMBuildBitCast (builder, var, LLVMPointerType (IntPtrType (), 0), "");
			LLVMBuildStore (builder, LLVMGetParam (func, 1), addr);
		}
		LLVMBuildRetVoid (builder);
	}

	fail_bb = LLVMAppendBasicBlock (func, "FAIL");
	LLVMPositionBuilderAtEnd (builder, fail_bb);
	LLVMBuildRetVoid (builder);

	LLVMPositionBuilderAtEnd (builder, entry_bb);

	switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), fail_bb, 0);
	for (int i = 0; i < module->max_got_offset + 1; ++i)
		LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]);
#endif

	LLVMDisposeBuilder (builder);
}

/* Add a function to mark the beginning of LLVM code */
static void
emit_llvm_code_start (MonoLLVMModule *module)
{
	LLVMModuleRef lmodule = module->lmodule;
	LLVMValueRef func;
	LLVMBasicBlockRef entry_bb;
	LLVMBuilderRef builder;

	func = LLVMAddFunction (lmodule, "llvm_code_start", LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE));
	LLVMSetLinkage (func, LLVMInternalLinkage);
	mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
	module->code_start = func;
	entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
	builder = LLVMCreateBuilder ();
	LLVMPositionBuilderAtEnd (builder, entry_bb);
	LLVMBuildRetVoid (builder);
	LLVMDisposeBuilder (builder);
}

/*
 * emit_init_func:
 *
 *   Emit functions to initialize LLVM methods.
 * These are wrappers around the mini_llvm_init_method () JIT icall.
 * The wrappers handle adding the 'amodule' argument, loading the vtable from different locations, and they have
 * a cold calling convention.
 */
static LLVMValueRef
emit_init_func (MonoLLVMModule *module, MonoAotInitSubtype subtype)
{
	LLVMModuleRef lmodule = module->lmodule;
	LLVMValueRef func, indexes [2], args [16], callee, info_var, index_var, inited_var, cmp;
	LLVMBasicBlockRef entry_bb, inited_bb, notinited_bb;
	LLVMBuilderRef builder;
	LLVMTypeRef icall_sig;
	const char *wrapper_name = mono_marshal_get_aot_init_wrapper_name (subtype);
	LLVMTypeRef func_type = NULL;
	LLVMTypeRef arg_type = module->ptr_type;

	char *name = g_strdup_printf ("%s_%s", module->global_prefix, wrapper_name);

	switch (subtype) {
	case AOT_INIT_METHOD:
		func_type = LLVMFunctionType1 (LLVMVoidType (), arg_type, FALSE);
		break;
	case AOT_INIT_METHOD_GSHARED_MRGCTX:
	case AOT_INIT_METHOD_GSHARED_VTABLE:
		func_type = LLVMFunctionType2 (LLVMVoidType (), arg_type, IntPtrType (), FALSE);
		break;
	case AOT_INIT_METHOD_GSHARED_THIS:
		func_type = LLVMFunctionType2 (LLVMVoidType (), arg_type, ObjRefType (), FALSE);
		break;
	default:
		g_assert_not_reached ();
	}

	func = LLVMAddFunction (lmodule, name, func_type);

	info_var = LLVMGetParam (func, 0);

	LLVMSetLinkage (func, LLVMInternalLinkage);

	mono_llvm_add_func_attr (func, LLVM_ATTR_NO_INLINE);

	set_cold_cconv (func);

	entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
	builder = LLVMCreateBuilder ();
	LLVMPositionBuilderAtEnd (builder, entry_bb);

	/* Load method_index which is emitted at the start of the method info */
	indexes [0] = const_int32 (0);
	indexes [1] = const_int32 (0);
	// FIXME: Make sure its aligned
	index_var = LLVMBuildLoad (builder, LLVMBuildGEP (builder, LLVMBuildBitCast (builder, info_var, LLVMPointerType (LLVMInt32Type (), 0), ""), indexes, 1, ""), "method_index");

	/* Check for is_inited here as well, since this can be called from JITted code which might not check it */
	indexes [0] = const_int32 (0);
	indexes [1] = index_var;
	inited_var = LLVMBuildLoad (builder, LLVMBuildGEP (builder, module->inited_var, indexes, 2, ""), "is_inited");

	cmp = LLVMBuildICmp (builder, LLVMIntEQ, inited_var, LLVMConstInt (LLVMTypeOf (inited_var), 0, FALSE), "");

	inited_bb = LLVMAppendBasicBlock (func, "INITED");
	notinited_bb = LLVMAppendBasicBlock (func, "NOT_INITED");

	LLVMBuildCondBr (builder, cmp, notinited_bb, inited_bb);

	LLVMPositionBuilderAtEnd (builder, notinited_bb);

	LLVMValueRef amodule_var = get_aotconst_module (module, builder, MONO_PATCH_INFO_AOT_MODULE, NULL, LLVMPointerType (IntPtrType (), 0), NULL, NULL);

	args [0] = LLVMBuildPtrToInt (builder, module->info_var, IntPtrType (), "");
	args [1] = LLVMBuildPtrToInt (builder, amodule_var, IntPtrType (), "");
	args [2] = info_var;

	switch (subtype) {
	case AOT_INIT_METHOD:
		args [3] = LLVMConstNull (IntPtrType ());
		break;
	case AOT_INIT_METHOD_GSHARED_VTABLE:
		args [3] = LLVMGetParam (func, 1);
		break;
	case AOT_INIT_METHOD_GSHARED_THIS:
		/* Load this->vtable */
		args [3] = LLVMBuildBitCast (builder, LLVMGetParam (func, 1), LLVMPointerType (IntPtrType (), 0), "");
		indexes [0] = const_int32 (MONO_STRUCT_OFFSET (MonoObject, vtable) / SIZEOF_VOID_P);
		args [3] = LLVMBuildLoad (builder, LLVMBuildGEP (builder, args [3], indexes, 1, ""), "vtable");
		break;
	case AOT_INIT_METHOD_GSHARED_MRGCTX:
		/* Load mrgctx->vtable */
		args [3] = LLVMBuildIntToPtr (builder, LLVMGetParam (func, 1), LLVMPointerType (IntPtrType (), 0), "");
		indexes [0] = const_int32 (MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable) / SIZEOF_VOID_P);
		args [3] = LLVMBuildLoad (builder, LLVMBuildGEP (builder, args [3], indexes, 1, ""), "vtable");
		break;
	default:
		g_assert_not_reached ();
		break;
	}

	/* Call the mini_llvm_init_method JIT icall */
	icall_sig = LLVMFunctionType4 (LLVMVoidType (), IntPtrType (), IntPtrType (), arg_type, IntPtrType (), FALSE);
	callee = get_aotconst_module (module, builder, MONO_PATCH_INFO_JIT_ICALL_ID, GINT_TO_POINTER (MONO_JIT_ICALL_mini_llvm_init_method), LLVMPointerType (icall_sig, 0), NULL, NULL);
	LLVMBuildCall (builder, callee, args, LLVMCountParamTypes (icall_sig), "");

	/*
	 * Set the inited flag
	 * This is already done by the LLVM methods themselves, but it's needed by JITted methods.
	 */
	indexes [0] = const_int32 (0);
	indexes [1] = index_var;
	LLVMBuildStore (builder, LLVMConstInt (LLVMInt8Type (), 1, FALSE), LLVMBuildGEP (builder, module->inited_var, indexes, 2, ""));

	LLVMBuildBr (builder, inited_bb);

	LLVMPositionBuilderAtEnd (builder, inited_bb);
	LLVMBuildRetVoid (builder);

	LLVMVerifyFunction (func, LLVMAbortProcessAction);
	LLVMDisposeBuilder (builder);
	g_free (name);

	return func;
}

/* Emit a wrapper around the parameterless JIT icall ICALL_ID with a cold calling convention */
static LLVMValueRef
emit_icall_cold_wrapper (MonoLLVMModule *module, LLVMModuleRef lmodule, MonoJitICallId icall_id, gboolean aot)
{
	LLVMValueRef func, callee;
	LLVMBasicBlockRef entry_bb;
	LLVMBuilderRef builder;
	LLVMTypeRef sig;
	char *name;

	name = g_strdup_printf ("%s_icall_cold_wrapper_%d", module->global_prefix, icall_id);

	func = LLVMAddFunction (lmodule, name, LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE));
	sig = LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE);
	LLVMSetLinkage (func, LLVMInternalLinkage);
	mono_llvm_add_func_attr (func, LLVM_ATTR_NO_INLINE);
	set_cold_cconv (func);

	entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
	builder = LLVMCreateBuilder ();
	LLVMPositionBuilderAtEnd (builder, entry_bb);

	if (aot) {
		callee = get_aotconst_module (module, builder, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id), LLVMPointerType (sig, 0), NULL, NULL);
	} else {
		MonoJitICallInfo * const info = mono_find_jit_icall_info (icall_id);
		gpointer target = (gpointer)mono_icall_get_wrapper_full (info, TRUE);

		LLVMValueRef tramp_var = LLVMAddGlobal (lmodule, LLVMPointerType (sig, 0), name);
		LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (LLVMConstInt (LLVMInt64Type (), (guint64)(size_t)target, FALSE), LLVMPointerType (sig, 0)));
		LLVMSetLinkage (tramp_var, LLVMExternalLinkage);
		callee = LLVMBuildLoad (builder, tramp_var, "");
	}
	LLVMBuildCall (builder, callee, NULL, 0, "");

	LLVMBuildRetVoid (builder);

	LLVMVerifyFunction (func, LLVMAbortProcessAction);
	LLVMDisposeBuilder (builder);
	return func;
}

/*
 * Emit wrappers around the C icalls used to initialize llvm methods, to
 * make the calling code smaller and to enable usage of the llvm
 * cold calling convention.
 */
static void
emit_init_funcs (MonoLLVMModule *module)
{
	for (int i = 0; i < AOT_INIT_METHOD_NUM; ++i)
		module->init_methods [i] = emit_init_func (module, i);
}

static LLVMValueRef
get_init_func (MonoLLVMModule *module, MonoAotInitSubtype subtype)
{
	return module->init_methods [subtype];
}
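/*
 * Illustrative sketch (not from the original source) of the control flow each
 * init wrapper above emits, in C-like pseudocode:
 *
 *   if (!inited [method_index]) {
 *       mini_llvm_init_method (info, amodule, method_info, vtable);  // cold call
 *       inited [method_index] = 1;
 *   }
 *   // fall through: the method is known to be initialized from here on
 */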
static void
emit_gc_safepoint_poll (MonoLLVMModule *module, LLVMModuleRef lmodule, MonoCompile *cfg)
{
	gboolean is_aot = cfg == NULL || cfg->compile_aot;
	LLVMValueRef func = mono_llvm_get_or_insert_gc_safepoint_poll (lmodule);
	mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
	if (is_aot) {
#if TARGET_WIN32
		if (module->static_link) {
			LLVMSetLinkage (func, LLVMInternalLinkage);
			/* Prevent it from being optimized away, leading to asserts inside 'opt' */
			mark_as_used (module, func);
		} else {
			LLVMSetLinkage (func, LLVMWeakODRLinkage);
		}
#else
		LLVMSetLinkage (func, LLVMWeakODRLinkage);
#endif
	} else {
		mono_llvm_add_func_attr (func, LLVM_ATTR_OPTIMIZE_NONE); // no need to waste time here, the function is already optimized and will be inlined.
		mono_llvm_add_func_attr (func, LLVM_ATTR_NO_INLINE); // optnone attribute requires noinline (but it will be inlined anyway)
		if (!module->gc_poll_cold_wrapper_compiled) {
			ERROR_DECL (error);
			/* Compiling a method here is a bit ugly, but it works */
			MonoMethod *wrapper = mono_marshal_get_llvm_func_wrapper (LLVM_FUNC_WRAPPER_GC_POLL);
			module->gc_poll_cold_wrapper_compiled = mono_jit_compile_method (wrapper, error);
			mono_error_assert_ok (error);
		}
	}
	LLVMBasicBlockRef entry_bb = LLVMAppendBasicBlock (func, "gc.safepoint_poll.entry");
	LLVMBasicBlockRef poll_bb = LLVMAppendBasicBlock (func, "gc.safepoint_poll.poll");
	LLVMBasicBlockRef exit_bb = LLVMAppendBasicBlock (func, "gc.safepoint_poll.exit");
	LLVMTypeRef ptr_type = LLVMPointerType (IntPtrType (), 0);
	LLVMBuilderRef builder = LLVMCreateBuilder ();

	/* entry: */
	LLVMPositionBuilderAtEnd (builder, entry_bb);
	LLVMValueRef poll_val_ptr;
	if (is_aot) {
		poll_val_ptr = get_aotconst_module (module, builder, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, NULL, ptr_type, NULL, NULL);
	} else {
		LLVMValueRef poll_val_int = LLVMConstInt (IntPtrType (), (guint64) &mono_polling_required, FALSE);
		poll_val_ptr = LLVMBuildIntToPtr (builder, poll_val_int, ptr_type, "");
	}
	LLVMValueRef poll_val_ptr_load = LLVMBuildLoad (builder, poll_val_ptr, ""); // probably needs to be volatile
	LLVMValueRef poll_val = LLVMBuildPtrToInt (builder, poll_val_ptr_load, IntPtrType (), "");
	LLVMValueRef poll_val_zero = LLVMConstNull (LLVMTypeOf (poll_val));
	LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntEQ, poll_val, poll_val_zero, "");
	mono_llvm_build_weighted_branch (builder, cmp, exit_bb, poll_bb, 1000 /* weight for exit_bb */, 1 /* weight for poll_bb */);

	/* poll: */
	LLVMPositionBuilderAtEnd (builder, poll_bb);
	LLVMValueRef call;
	if (is_aot) {
		LLVMValueRef icall_wrapper = emit_icall_cold_wrapper (module, lmodule, MONO_JIT_ICALL_mono_threads_state_poll, TRUE);
		module->gc_poll_cold_wrapper = icall_wrapper;
		call = LLVMBuildCall (builder, icall_wrapper, NULL, 0, "");
	} else {
		// in JIT mode we have to emit @gc.safepoint_poll function for each method (module)
		// this function calls gc_poll_cold_wrapper_compiled via a global variable.
		// @gc.safepoint_poll will be inlined and can be deleted after -place-safepoints pass.
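		/*
		 * Illustrative IR sketch (an assumption about the generated shape, using
		 * the block names above):
		 *
		 *   gc.safepoint_poll.entry:
		 *     %flag = load i64, i64* @gc_safe_point_flag
		 *     %cmp = icmp eq i64 %flag, 0
		 *     br i1 %cmp, label %exit, label %poll      ; weighted 1000:1 toward exit
		 *   gc.safepoint_poll.poll:
		 *     call coldcc void @mono_threads_state_poll()
		 *     br label %exit
		 */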
		LLVMTypeRef poll_sig = LLVMFunctionType0 (LLVMVoidType (), FALSE);
		LLVMTypeRef poll_sig_ptr = LLVMPointerType (poll_sig, 0);
		gpointer target = resolve_patch (cfg, MONO_PATCH_INFO_ABS, module->gc_poll_cold_wrapper_compiled);
		LLVMValueRef tramp_var = LLVMAddGlobal (lmodule, poll_sig_ptr, "mono_threads_state_poll");
		LLVMValueRef target_val = LLVMConstInt (LLVMInt64Type (), (guint64) target, FALSE);
		LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (target_val, poll_sig_ptr));
		LLVMSetLinkage (tramp_var, LLVMExternalLinkage);
		LLVMValueRef callee = LLVMBuildLoad (builder, tramp_var, "");
		call = LLVMBuildCall (builder, callee, NULL, 0, "");
	}
	set_call_cold_cconv (call);
	LLVMBuildBr (builder, exit_bb);

	/* exit: */
	LLVMPositionBuilderAtEnd (builder, exit_bb);
	LLVMBuildRetVoid (builder);
	LLVMDisposeBuilder (builder);
}

static void
emit_llvm_code_end (MonoLLVMModule *module)
{
	LLVMModuleRef lmodule = module->lmodule;
	LLVMValueRef func;
	LLVMBasicBlockRef entry_bb;
	LLVMBuilderRef builder;

	func = LLVMAddFunction (lmodule, "llvm_code_end", LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE));
	LLVMSetLinkage (func, LLVMInternalLinkage);
	mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
	module->code_end = func;
	entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
	builder = LLVMCreateBuilder ();
	LLVMPositionBuilderAtEnd (builder, entry_bb);
	LLVMBuildRetVoid (builder);
	LLVMDisposeBuilder (builder);
}

static void
emit_div_check (EmitContext *ctx, LLVMBuilderRef builder, MonoBasicBlock *bb, MonoInst *ins, LLVMValueRef lhs, LLVMValueRef rhs)
{
	gboolean need_div_check = ctx->cfg->backend->need_div_check;

	if (bb->region)
		/* LLVM doesn't know that these can throw an exception since they are not called through an intrinsic */
		need_div_check = TRUE;

	if (!need_div_check)
		return;

	switch (ins->opcode) {
	case OP_IDIV:
	case OP_LDIV:
	case OP_IREM:
	case OP_LREM:
	case OP_IDIV_UN:
	case OP_LDIV_UN:
	case OP_IREM_UN:
	case OP_LREM_UN:
	case OP_IDIV_IMM:
	case OP_LDIV_IMM:
	case OP_IREM_IMM:
	case OP_LREM_IMM:
	case OP_IDIV_UN_IMM:
	case OP_LDIV_UN_IMM:
	case OP_IREM_UN_IMM:
	case OP_LREM_UN_IMM: {
		LLVMValueRef cmp;
		gboolean is_signed = (ins->opcode == OP_IDIV || ins->opcode == OP_LDIV || ins->opcode == OP_IREM || ins->opcode == OP_LREM ||
							  ins->opcode == OP_IDIV_IMM || ins->opcode == OP_LDIV_IMM || ins->opcode == OP_IREM_IMM || ins->opcode == OP_LREM_IMM);

		cmp = LLVMBuildICmp (builder, LLVMIntEQ, rhs, LLVMConstInt (LLVMTypeOf (rhs), 0, FALSE), "");
		emit_cond_system_exception (ctx, bb, "DivideByZeroException", cmp, FALSE);
		if (!ctx_ok (ctx))
			break;
		builder = ctx->builder;

		/* b == -1 && a == 0x80000000 */
		if (is_signed) {
			LLVMValueRef c = (LLVMTypeOf (lhs) == LLVMInt32Type ()) ? LLVMConstInt (LLVMTypeOf (lhs), 0x80000000, FALSE) : LLVMConstInt (LLVMTypeOf (lhs), 0x8000000000000000LL, FALSE);
			LLVMValueRef cond1 = LLVMBuildICmp (builder, LLVMIntEQ, rhs, LLVMConstInt (LLVMTypeOf (rhs), -1, FALSE), "");
			LLVMValueRef cond2 = LLVMBuildICmp (builder, LLVMIntEQ, lhs, c, "");
			cmp = LLVMBuildICmp (builder, LLVMIntEQ, LLVMBuildAnd (builder, cond1, cond2, ""), LLVMConstInt (LLVMInt1Type (), 1, FALSE), "");
			emit_cond_system_exception (ctx, bb, "OverflowException", cmp, FALSE);
			if (!ctx_ok (ctx))
				break;
			builder = ctx->builder;
		}
		break;
	}
	default:
		break;
	}
}
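/*
 * A plain-C model of the two conditions emit_div_check () guards against,
 * added for exposition only (the emitter builds equivalent IR and raises
 * managed exceptions instead of returning codes):
 */
static int
checked_idiv_sketch (gint32 a, gint32 b, gint32 *res)
{
	if (b == 0)
		return 1; /* would raise DivideByZeroException */
	if (a == G_MININT32 && b == -1)
		return 2; /* would raise OverflowException: the quotient is unrepresentable */
	*res = a / b;
	return 0;
}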
/*
 * emit_method_init:
 *
 *   Emit code to initialize the GOT slots used by the method.
 */
static void
emit_method_init (EmitContext *ctx)
{
	LLVMValueRef indexes [16], args [16];
	LLVMValueRef inited_var, cmp, call;
	LLVMBasicBlockRef inited_bb, notinited_bb;
	LLVMBuilderRef builder = ctx->builder;
	MonoCompile *cfg = ctx->cfg;
	MonoAotInitSubtype subtype;

	ctx->module->max_inited_idx = MAX (ctx->module->max_inited_idx, cfg->method_index);

	indexes [0] = const_int32 (0);
	indexes [1] = const_int32 (cfg->method_index);
	inited_var = LLVMBuildLoad (builder, LLVMBuildGEP (builder, ctx->module->inited_var, indexes, 2, ""), "is_inited");

	args [0] = inited_var;
	args [1] = LLVMConstInt (LLVMInt8Type (), 1, FALSE);
	inited_var = LLVMBuildCall (ctx->builder, get_intrins (ctx, INTRINS_EXPECT_I8), args, 2, "");

	cmp = LLVMBuildICmp (builder, LLVMIntEQ, inited_var, LLVMConstInt (LLVMTypeOf (inited_var), 0, FALSE), "");

	inited_bb = ctx->inited_bb;
	notinited_bb = gen_bb (ctx, "NOTINITED_BB");

	ctx->cfg->llvmonly_init_cond = LLVMBuildCondBr (ctx->builder, cmp, notinited_bb, inited_bb);

	builder = ctx->builder = create_builder (ctx);
	LLVMPositionBuilderAtEnd (ctx->builder, notinited_bb);

	LLVMTypeRef type = LLVMArrayType (LLVMInt8Type (), 0);
	char *symbol = g_strdup_printf ("info_dummy_%s", cfg->llvm_method_name);
	LLVMValueRef info_var = LLVMAddGlobal (ctx->lmodule, type, symbol);
	g_free (symbol);
	cfg->llvm_dummy_info_var = info_var;

	int nargs = 0;
	args [nargs ++] = convert (ctx, info_var, ctx->module->ptr_type);

	switch (cfg->rgctx_access) {
	case MONO_RGCTX_ACCESS_MRGCTX:
		if (ctx->rgctx_arg) {
			args [nargs ++] = convert (ctx, ctx->rgctx_arg, IntPtrType ());
			subtype = AOT_INIT_METHOD_GSHARED_MRGCTX;
		} else {
			g_assert (ctx->this_arg);
			args [nargs ++] = convert (ctx, ctx->this_arg, ObjRefType ());
			subtype = AOT_INIT_METHOD_GSHARED_THIS;
		}
		break;
	case MONO_RGCTX_ACCESS_VTABLE:
		args [nargs ++] = convert (ctx, ctx->rgctx_arg, IntPtrType ());
		subtype = AOT_INIT_METHOD_GSHARED_VTABLE;
		break;
	case MONO_RGCTX_ACCESS_THIS:
		args [nargs ++] = convert (ctx, ctx->this_arg, ObjRefType ());
		subtype = AOT_INIT_METHOD_GSHARED_THIS;
		break;
	case MONO_RGCTX_ACCESS_NONE:
		subtype = AOT_INIT_METHOD;
		break;
	default:
		g_assert_not_reached ();
	}

	call = LLVMBuildCall (builder, ctx->module->init_methods [subtype], args, nargs, "");

	/*
	 * This enables llvm to keep arguments in their original registers/
	 * scratch registers, since the call will not clobber them.
	 */
	set_call_cold_cconv (call);

	// Set the inited flag
	indexes [0] = const_int32 (0);
	indexes [1] = const_int32 (cfg->method_index);
	LLVMBuildStore (builder, LLVMConstInt (LLVMInt8Type (), 1, FALSE), LLVMBuildGEP (builder, ctx->module->inited_var, indexes, 2, ""));

	LLVMBuildBr (builder, inited_bb);
	ctx->bblocks [cfg->bb_entry->block_num].end_bblock = inited_bb;

	builder = ctx->builder = create_builder (ctx);
	LLVMPositionBuilderAtEnd (ctx->builder, inited_bb);
}

static void
emit_unbox_tramp (EmitContext *ctx, const char *method_name, LLVMTypeRef method_type, LLVMValueRef method, int method_index)
{
	/*
	 * Emit unbox trampoline using a tailcall
	 */
	LLVMValueRef tramp, call, *args;
	LLVMBuilderRef builder;
	LLVMBasicBlockRef lbb;
	LLVMCallInfo *linfo;
	char *tramp_name;
	int i, nargs;

	tramp_name = g_strdup_printf ("ut_%s", method_name);
	tramp = LLVMAddFunction (ctx->module->lmodule, tramp_name, method_type);
	LLVMSetLinkage (tramp, LLVMInternalLinkage);

	mono_llvm_add_func_attr (tramp, LLVM_ATTR_OPTIMIZE_FOR_SIZE);
	//mono_llvm_add_func_attr (tramp, LLVM_ATTR_NO_UNWIND);
	linfo = ctx->linfo;
	// FIXME: Reduce code duplication with mono_llvm_compile_method () etc.
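	/*
	 * Illustrative note (not from the original source): in the argument loop
	 * further below, the 'this' parameter is advanced past the object header,
	 * conceptually
	 *
	 *   unboxed_this = (char *)boxed_this + MONO_ABI_SIZEOF (MonoObject);
	 *
	 * so the wrapped method receives a pointer to the raw value-type payload.
	 */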
	if (!ctx->llvm_only && ctx->rgctx_arg_pindex != -1)
		mono_llvm_add_param_attr (LLVMGetParam (tramp, ctx->rgctx_arg_pindex), LLVM_ATTR_IN_REG);
	if (ctx->cfg->vret_addr) {
		LLVMSetValueName (LLVMGetParam (tramp, linfo->vret_arg_pindex), "vret");
		if (linfo->ret.storage == LLVMArgVtypeByRef) {
			mono_llvm_add_param_attr (LLVMGetParam (tramp, linfo->vret_arg_pindex), LLVM_ATTR_STRUCT_RET);
			mono_llvm_add_param_attr (LLVMGetParam (tramp, linfo->vret_arg_pindex), LLVM_ATTR_NO_ALIAS);
		}
	}

	lbb = LLVMAppendBasicBlock (tramp, "");
	builder = LLVMCreateBuilder ();
	LLVMPositionBuilderAtEnd (builder, lbb);

	nargs = LLVMCountParamTypes (method_type);
	args = g_new0 (LLVMValueRef, nargs);
	for (i = 0; i < nargs; ++i) {
		args [i] = LLVMGetParam (tramp, i);
		if (i == ctx->this_arg_pindex) {
			LLVMTypeRef arg_type = LLVMTypeOf (args [i]);

			args [i] = LLVMBuildPtrToInt (builder, args [i], IntPtrType (), "");
			args [i] = LLVMBuildAdd (builder, args [i], LLVMConstInt (IntPtrType (), MONO_ABI_SIZEOF (MonoObject), FALSE), "");
			args [i] = LLVMBuildIntToPtr (builder, args [i], arg_type, "");
		}
	}
	call = LLVMBuildCall (builder, method, args, nargs, "");
	if (!ctx->llvm_only && ctx->rgctx_arg_pindex != -1)
		mono_llvm_add_instr_attr (call, 1 + ctx->rgctx_arg_pindex, LLVM_ATTR_IN_REG);
	if (linfo->ret.storage == LLVMArgVtypeByRef)
		mono_llvm_add_instr_attr (call, 1 + linfo->vret_arg_pindex, LLVM_ATTR_STRUCT_RET);

	// FIXME: This causes assertions in clang
	//mono_llvm_set_must_tailcall (call);
	if (LLVMGetReturnType (method_type) == LLVMVoidType ())
		LLVMBuildRetVoid (builder);
	else
		LLVMBuildRet (builder, call);

	g_hash_table_insert (ctx->module->idx_to_unbox_tramp, GINT_TO_POINTER (method_index), tramp);

	LLVMDisposeBuilder (builder);
}

#ifdef TARGET_WASM
static void
emit_gc_pin (EmitContext *ctx, LLVMBuilderRef builder, int vreg)
{
	LLVMValueRef index0 = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
	LLVMValueRef index1 = LLVMConstInt (LLVMInt32Type (), ctx->gc_var_indexes [vreg] - 1, FALSE);
	LLVMValueRef indexes [] = { index0, index1 };
	LLVMValueRef addr = LLVMBuildGEP (builder, ctx->gc_pin_area, indexes, 2, "");
	mono_llvm_build_store (builder, convert (ctx, ctx->values [vreg], IntPtrType ()), addr, TRUE, LLVM_BARRIER_NONE);
}
#endif

/*
 * emit_entry_bb:
 *
 *   Emit code to load/convert arguments.
 */
static void
emit_entry_bb (EmitContext *ctx, LLVMBuilderRef builder)
{
	int i, j, pindex;
	MonoCompile *cfg = ctx->cfg;
	MonoMethodSignature *sig = ctx->sig;
	LLVMCallInfo *linfo = ctx->linfo;
	MonoBasicBlock *bb;
	char **names;

	LLVMBuilderRef old_builder = ctx->builder;
	ctx->builder = builder;

	ctx->alloca_builder = create_builder (ctx);

#ifdef TARGET_WASM
	/*
	 * For GC stack scanning to work, allocate an area on the stack and store
	 * every ref vreg into it after it's written. Because the stack is scanned
	 * conservatively, the objects will be pinned, so the vregs can directly
	 * reference the objects, there is no need to load them from the stack
	 * on every access.
	 */
	ctx->gc_var_indexes = g_new0 (int, cfg->next_vreg);
	int ngc_vars = 0;
	for (i = 0; i < cfg->next_vreg; ++i) {
		if (vreg_is_ref (cfg, i)) {
			ctx->gc_var_indexes [i] = ngc_vars + 1;
			ngc_vars ++;
		}
	}

	// FIXME: Count only live vregs
	ctx->gc_pin_area = build_alloca_llvm_type_name (ctx, LLVMArrayType (IntPtrType (), ngc_vars), 0, "gc_pin");
#endif

	/*
	 * Handle indirect/volatile variables by allocating memory for them
	 * using 'alloca', and storing their address in a temporary.
*/ for (i = 0; i < cfg->num_varinfo; ++i) { MonoInst *var = cfg->varinfo [i]; if ((var->opcode == OP_GSHAREDVT_LOCAL || var->opcode == OP_GSHAREDVT_ARG_REGOFFSET)) continue; if (var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (mini_type_is_vtype (var->inst_vtype) && !MONO_CLASS_IS_SIMD (ctx->cfg, var->klass))) { if (!ctx_ok (ctx)) return; /* Could be already created by an OP_VPHI */ if (!ctx->addresses [var->dreg]) { if (var->flags & MONO_INST_LMF) { // FIXME: Allocate a smaller struct in the deopt case int size = cfg->deopt ? MONO_ABI_SIZEOF (MonoLMFExt) : MONO_ABI_SIZEOF (MonoLMF); ctx->addresses [var->dreg] = build_alloca_llvm_type_name (ctx, LLVMArrayType (LLVMInt8Type (), size), sizeof (target_mgreg_t), "lmf"); } else { char *name = g_strdup_printf ("vreg_loc_%d", var->dreg); ctx->addresses [var->dreg] = build_named_alloca (ctx, var->inst_vtype, name); g_free (name); } } ctx->vreg_cli_types [var->dreg] = var->inst_vtype; } } names = g_new (char *, sig->param_count); mono_method_get_param_names (cfg->method, (const char **) names); for (i = 0; i < sig->param_count; ++i) { LLVMArgInfo *ainfo = &linfo->args [i + sig->hasthis]; int reg = cfg->args [i + sig->hasthis]->dreg; char *name; pindex = ainfo->pindex; LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex); switch (ainfo->storage) { case LLVMArgVtypeInReg: case LLVMArgAsFpArgs: { LLVMValueRef args [8]; int j; pindex += ainfo->ndummy_fpargs; /* The argument is received as a set of int/fp arguments, store them into the real argument */ memset (args, 0, sizeof (args)); if (ainfo->storage == LLVMArgVtypeInReg) { args [0] = LLVMGetParam (ctx->lmethod, pindex); if (ainfo->pair_storage [1] != LLVMArgNone) args [1] = LLVMGetParam (ctx->lmethod, pindex + 1); } else { g_assert (ainfo->nslots <= 8); for (j = 0; j < ainfo->nslots; ++j) args [j] = LLVMGetParam (ctx->lmethod, pindex + j); } ctx->addresses [reg] = build_alloca (ctx, ainfo->type); emit_args_to_vtype (ctx, builder, ainfo->type, ctx->addresses [reg], ainfo, args); break; } case LLVMArgVtypeByVal: { ctx->addresses [reg] = LLVMGetParam (ctx->lmethod, pindex); break; } case LLVMArgVtypeAddr: case LLVMArgVtypeByRef: { /* The argument is passed by ref */ ctx->addresses [reg] = LLVMGetParam (ctx->lmethod, pindex); break; } case LLVMArgAsIArgs: { LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex); int size; MonoType *t = mini_get_underlying_type (ainfo->type); /* The argument is received as an array of ints, store it into the real argument */ ctx->addresses [reg] = build_alloca (ctx, t); size = mono_class_value_size (mono_class_from_mono_type_internal (t), NULL); if (size == 0) { } else if (size < TARGET_SIZEOF_VOID_P) { /* The upper bits of the registers might not be valid */ LLVMValueRef val = LLVMBuildExtractValue (builder, arg, 0, ""); LLVMValueRef dest = convert (ctx, ctx->addresses [reg], LLVMPointerType (LLVMIntType (size * 8), 0)); LLVMBuildStore (ctx->builder, LLVMBuildTrunc (builder, val, LLVMIntType (size * 8), ""), dest); } else { LLVMBuildStore (ctx->builder, arg, convert (ctx, ctx->addresses [reg], LLVMPointerType (LLVMTypeOf (arg), 0))); } break; } case LLVMArgVtypeAsScalar: g_assert_not_reached (); break; case LLVMArgWasmVtypeAsScalar: { MonoType *t = mini_get_underlying_type (ainfo->type); /* The argument is received as a scalar */ ctx->addresses [reg] = build_alloca (ctx, t); LLVMValueRef dest = convert (ctx, ctx->addresses [reg], LLVMPointerType (LLVMIntType (ainfo->esize * 8), 0)); LLVMBuildStore (ctx->builder, arg, dest); break; } case LLVMArgGsharedvtFixed: 
{ /* These are non-gsharedvt arguments passed by ref, the rest of the IR treats them as scalars */ LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex); if (names [i]) name = g_strdup_printf ("arg_%s", names [i]); else name = g_strdup_printf ("arg_%d", i); ctx->values [reg] = LLVMBuildLoad (builder, convert (ctx, arg, LLVMPointerType (type_to_llvm_type (ctx, ainfo->type), 0)), name); break; } case LLVMArgGsharedvtFixedVtype: { LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex); if (names [i]) name = g_strdup_printf ("vtype_arg_%s", names [i]); else name = g_strdup_printf ("vtype_arg_%d", i); /* Non-gsharedvt vtype argument passed by ref, the rest of the IR treats it as a vtype */ g_assert (ctx->addresses [reg]); LLVMSetValueName (ctx->addresses [reg], name); LLVMBuildStore (builder, LLVMBuildLoad (builder, convert (ctx, arg, LLVMPointerType (type_to_llvm_type (ctx, ainfo->type), 0)), ""), ctx->addresses [reg]); break; } case LLVMArgGsharedvtVariable: /* The IR treats these as variables with addresses */ if (!ctx->addresses [reg]) ctx->addresses [reg] = LLVMGetParam (ctx->lmethod, pindex); break; default: { LLVMTypeRef t; /* Needed to avoid phi argument mismatch errors since operations on pointers produce i32/i64 */ if (m_type_is_byref (ainfo->type)) t = IntPtrType (); else t = type_to_llvm_type (ctx, ainfo->type); ctx->values [reg] = convert_full (ctx, ctx->values [reg], llvm_type_to_stack_type (cfg, t), type_is_unsigned (ctx, ainfo->type)); break; } } switch (ainfo->storage) { case LLVMArgVtypeInReg: case LLVMArgVtypeByVal: case LLVMArgAsIArgs: // FIXME: Enabling this fails on windows case LLVMArgVtypeAddr: case LLVMArgVtypeByRef: { if (MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (ainfo->type))) /* Treat these as normal values */ ctx->values [reg] = LLVMBuildLoad (builder, ctx->addresses [reg], "simd_vtype"); break; } default: break; } } g_free (names); if (sig->hasthis) { /* Handle this arguments as inputs to phi nodes */ int reg = cfg->args [0]->dreg; if (ctx->vreg_types [reg]) ctx->values [reg] = convert (ctx, ctx->values [reg], ctx->vreg_types [reg]); } if (cfg->vret_addr) emit_volatile_store (ctx, cfg->vret_addr->dreg); if (sig->hasthis) emit_volatile_store (ctx, cfg->args [0]->dreg); for (i = 0; i < sig->param_count; ++i) if (!mini_type_is_vtype (sig->params [i])) emit_volatile_store (ctx, cfg->args [i + sig->hasthis]->dreg); if (sig->hasthis && !cfg->rgctx_var && cfg->gshared && !cfg->llvm_only) { LLVMValueRef this_alloc; /* * The exception handling code needs the location where the this argument was * stored for gshared methods. We create a separate alloca to hold it, and mark it * with the "mono.this" custom metadata to tell llvm that it needs to save its * location into the LSDA. */ this_alloc = mono_llvm_build_alloca (builder, ThisType (), LLVMConstInt (LLVMInt32Type (), 1, FALSE), 0, ""); /* This volatile store will keep the alloca alive */ mono_llvm_build_store (builder, ctx->values [cfg->args [0]->dreg], this_alloc, TRUE, LLVM_BARRIER_NONE); set_metadata_flag (this_alloc, "mono.this"); } if (cfg->rgctx_var) { if (!(cfg->rgctx_var->flags & MONO_INST_VOLATILE)) { /* FIXME: This could be volatile even in llvmonly mode if used inside a clause etc. */ g_assert (!ctx->addresses [cfg->rgctx_var->dreg]); ctx->values [cfg->rgctx_var->dreg] = ctx->rgctx_arg; } else { LLVMValueRef rgctx_alloc, store; /* * We handle the rgctx arg similarly to the this pointer. 
*/ g_assert (ctx->addresses [cfg->rgctx_var->dreg]); rgctx_alloc = ctx->addresses [cfg->rgctx_var->dreg]; /* This volatile store will keep the alloca alive */ store = mono_llvm_build_store (builder, convert (ctx, ctx->rgctx_arg, IntPtrType ()), rgctx_alloc, TRUE, LLVM_BARRIER_NONE); (void)store; /* unused */ set_metadata_flag (rgctx_alloc, "mono.this"); } } #ifdef TARGET_WASM /* * Store ref arguments to the pin area. * FIXME: This might not be needed, since the caller already does it ? */ for (i = 0; i < cfg->num_varinfo; ++i) { MonoInst *var = cfg->varinfo [i]; if (var->opcode == OP_ARG && vreg_is_ref (cfg, var->dreg) && ctx->values [var->dreg]) emit_gc_pin (ctx, builder, var->dreg); } #endif if (cfg->deopt) { LLVMValueRef addr, index [2]; MonoMethodHeader *header = cfg->header; int nfields = (sig->ret->type != MONO_TYPE_VOID ? 1 : 0) + sig->hasthis + sig->param_count + header->num_locals + 2; LLVMTypeRef *types = g_alloca (nfields * sizeof (LLVMTypeRef)); int findex = 0; /* method */ types [findex ++] = IntPtrType (); /* il_offset */ types [findex ++] = LLVMInt32Type (); int data_start = findex; /* data */ if (sig->ret->type != MONO_TYPE_VOID) types [findex ++] = IntPtrType (); if (sig->hasthis) types [findex ++] = IntPtrType (); for (int i = 0; i < sig->param_count; ++i) types [findex ++] = LLVMPointerType (type_to_llvm_type (ctx, sig->params [i]), 0); for (int i = 0; i < header->num_locals; ++i) types [findex ++] = LLVMPointerType (type_to_llvm_type (ctx, header->locals [i]), 0); g_assert (findex == nfields); char *name = g_strdup_printf ("%s_il_state", ctx->method_name); LLVMTypeRef il_state_type = LLVMStructCreateNamed (ctx->module->context, name); LLVMStructSetBody (il_state_type, types, nfields, FALSE); g_free (name); ctx->il_state = build_alloca_llvm_type_name (ctx, il_state_type, 0, "il_state"); g_assert (cfg->il_state_var); ctx->addresses [cfg->il_state_var->dreg] = ctx->il_state; /* Set il_state->il_offset = -1 */ index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); index [1] = LLVMConstInt (LLVMInt32Type (), 1, FALSE); addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, ""); LLVMBuildStore (ctx->builder, LLVMConstInt (types [1], -1, FALSE), addr); /* * Set il_state->data [i] to either the address of the arg/local, or NULL. * Because of mono_liveness_handle_exception_clauses (), all locals used/reachable from * clauses are supposed to be volatile, so they have an address. 
*/ findex = data_start; if (sig->ret->type != MONO_TYPE_VOID) { LLVMTypeRef ret_type = type_to_llvm_type (ctx, sig->ret); ctx->il_state_ret = build_alloca_llvm_type_name (ctx, ret_type, 0, "il_state_ret"); index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); index [1] = LLVMConstInt (LLVMInt32Type (), findex, FALSE); addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, ""); LLVMBuildStore (ctx->builder, ctx->il_state_ret, convert (ctx, addr, LLVMPointerType (LLVMTypeOf (ctx->il_state_ret), 0))); findex ++; } for (int i = 0; i < sig->hasthis + sig->param_count; ++i) { LLVMValueRef var_addr = ctx->addresses [cfg->args [i]->dreg]; index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); index [1] = LLVMConstInt (LLVMInt32Type (), findex, FALSE); addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, ""); if (var_addr) LLVMBuildStore (ctx->builder, var_addr, convert (ctx, addr, LLVMPointerType (LLVMTypeOf (var_addr), 0))); else LLVMBuildStore (ctx->builder, LLVMConstNull (types [findex]), addr); findex ++; } for (int i = 0; i < header->num_locals; ++i) { LLVMValueRef var_addr = ctx->addresses [cfg->locals [i]->dreg]; index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); index [1] = LLVMConstInt (LLVMInt32Type (), findex, FALSE); addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, ""); if (var_addr) LLVMBuildStore (ctx->builder, LLVMBuildBitCast (builder, var_addr, types [findex], ""), addr); else LLVMBuildStore (ctx->builder, LLVMConstNull (types [findex]), addr); findex ++; } } /* Initialize the method if needed */ if (cfg->compile_aot) { /* Emit a location for the initialization code */ ctx->init_bb = gen_bb (ctx, "INIT_BB"); ctx->inited_bb = gen_bb (ctx, "INITED_BB"); LLVMBuildBr (ctx->builder, ctx->init_bb); builder = ctx->builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, ctx->inited_bb); ctx->bblocks [cfg->bb_entry->block_num].end_bblock = ctx->inited_bb; } /* Compute nesting between clauses */ ctx->nested_in = (GSList**)mono_mempool_alloc0 (cfg->mempool, sizeof (GSList*) * cfg->header->num_clauses); for (i = 0; i < cfg->header->num_clauses; ++i) { for (j = 0; j < cfg->header->num_clauses; ++j) { MonoExceptionClause *clause1 = &cfg->header->clauses [i]; MonoExceptionClause *clause2 = &cfg->header->clauses [j]; if (i != j && clause1->try_offset >= clause2->try_offset && clause1->handler_offset <= clause2->handler_offset) ctx->nested_in [i] = g_slist_prepend_mempool (cfg->mempool, ctx->nested_in [i], GINT_TO_POINTER (j)); } } /* * For finally clauses, create an indicator variable telling OP_ENDFINALLY whenever * it needs to continue normally, or return back to the exception handling system. */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { char name [128]; if (!(bb->region != -1 && (bb->flags & BB_EXCEPTION_HANDLER))) continue; if (bb->in_scount == 0) { LLVMValueRef val; sprintf (name, "finally_ind_bb%d", bb->block_num); val = LLVMBuildAlloca (builder, LLVMInt32Type (), name); LLVMBuildStore (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), val); ctx->bblocks [bb->block_num].finally_ind = val; } else { /* Create a variable to hold the exception var */ if (!ctx->ex_var) ctx->ex_var = LLVMBuildAlloca (builder, ObjRefType (), "exvar"); } } ctx->builder = old_builder; } static gboolean needs_extra_arg (EmitContext *ctx, MonoMethod *method) { WrapperInfo *info = NULL; /* * When targeting wasm, the caller and callee signature has to match exactly. 
This means * that every method which can be called indirectly need an extra arg since the caller * will call it through an ftnptr and will pass an extra arg. */ if (!ctx->cfg->llvm_only || !ctx->emit_dummy_arg) return FALSE; if (method->wrapper_type) info = mono_marshal_get_wrapper_info (method); switch (method->wrapper_type) { case MONO_WRAPPER_OTHER: if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG) /* Already have an explicit extra arg */ return FALSE; break; case MONO_WRAPPER_MANAGED_TO_NATIVE: if (strstr (method->name, "icall_wrapper")) /* These are JIT icall wrappers which are only called from JITted code directly */ return FALSE; /* Normal icalls can be virtual methods which need an extra arg */ break; case MONO_WRAPPER_RUNTIME_INVOKE: case MONO_WRAPPER_ALLOC: case MONO_WRAPPER_CASTCLASS: case MONO_WRAPPER_WRITE_BARRIER: case MONO_WRAPPER_NATIVE_TO_MANAGED: return FALSE; case MONO_WRAPPER_STELEMREF: if (info->subtype != WRAPPER_SUBTYPE_VIRTUAL_STELEMREF) return FALSE; break; case MONO_WRAPPER_MANAGED_TO_MANAGED: if (info->subtype == WRAPPER_SUBTYPE_STRING_CTOR) return FALSE; break; default: break; } if (method->string_ctor) return FALSE; /* These are called from gsharedvt code with an indirect call which doesn't pass an extra arg */ if (method->klass == mono_get_string_class () && (strstr (method->name, "memcpy") || strstr (method->name, "bzero"))) return FALSE; return TRUE; } static inline gboolean is_supported_callconv (EmitContext *ctx, MonoCallInst *call) { #if defined(TARGET_WIN32) && defined(TARGET_AMD64) gboolean result = (call->signature->call_convention == MONO_CALL_DEFAULT) || (call->signature->call_convention == MONO_CALL_C) || (call->signature->call_convention == MONO_CALL_STDCALL); #else gboolean result = (call->signature->call_convention == MONO_CALL_DEFAULT) || ((call->signature->call_convention == MONO_CALL_C) && ctx->llvm_only); #endif return result; } static void process_call (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, MonoInst *ins) { MonoCompile *cfg = ctx->cfg; LLVMValueRef *values = ctx->values; LLVMValueRef *addresses = ctx->addresses; MonoCallInst *call = (MonoCallInst*)ins; MonoMethodSignature *sig = call->signature; LLVMValueRef callee = NULL, lcall; LLVMValueRef *args; LLVMCallInfo *cinfo; GSList *l; int i, len, nargs; gboolean vretaddr; LLVMTypeRef llvm_sig; gpointer target; gboolean is_virtual, calli; LLVMBuilderRef builder = *builder_ref; /* If both imt and rgctx arg are required, only pass the imt arg, the rgctx trampoline will pass the rgctx */ if (call->imt_arg_reg) call->rgctx_arg_reg = 0; if (!is_supported_callconv (ctx, call)) { set_failure (ctx, "non-default callconv"); return; } cinfo = call->cinfo; g_assert (cinfo); if (call->rgctx_arg_reg) cinfo->rgctx_arg = TRUE; if (call->imt_arg_reg) cinfo->imt_arg = TRUE; if (!call->rgctx_arg_reg && call->method && needs_extra_arg (ctx, call->method)) cinfo->dummy_arg = TRUE; vretaddr = (cinfo->ret.storage == LLVMArgVtypeRetAddr || cinfo->ret.storage == LLVMArgVtypeByRef || cinfo->ret.storage == LLVMArgGsharedvtFixed || cinfo->ret.storage == LLVMArgGsharedvtVariable || cinfo->ret.storage == LLVMArgGsharedvtFixedVtype); llvm_sig = sig_to_llvm_sig_full (ctx, sig, cinfo); if (!ctx_ok (ctx)) return; int const opcode = ins->opcode; is_virtual = opcode == OP_VOIDCALL_MEMBASE || opcode == OP_CALL_MEMBASE || opcode == OP_VCALL_MEMBASE || opcode == OP_LCALL_MEMBASE || opcode == OP_FCALL_MEMBASE || opcode == OP_RCALL_MEMBASE || opcode 
== OP_TAILCALL_MEMBASE; calli = !call->fptr_is_patch && (opcode == OP_VOIDCALL_REG || opcode == OP_CALL_REG || opcode == OP_VCALL_REG || opcode == OP_LCALL_REG || opcode == OP_FCALL_REG || opcode == OP_RCALL_REG || opcode == OP_TAILCALL_REG); /* FIXME: Avoid creating duplicate methods */ if (ins->flags & MONO_INST_HAS_METHOD) { if (is_virtual) { callee = NULL; } else { if (cfg->compile_aot) { callee = get_callee (ctx, llvm_sig, MONO_PATCH_INFO_METHOD, call->method); if (!callee) { set_failure (ctx, "can't encode patch"); return; } } else if (cfg->method == call->method) { callee = ctx->lmethod; } else { ERROR_DECL (error); static int tramp_index; char *name; name = g_strdup_printf ("[tramp_%d] %s", tramp_index, mono_method_full_name (call->method, TRUE)); tramp_index ++; /* * Use our trampoline infrastructure for lazy compilation instead of llvm's. * Make all calls through a global. The address of the global will be saved in * MonoJitDomainInfo.llvm_jit_callees and updated when the method it refers to is * compiled. */ LLVMValueRef tramp_var = (LLVMValueRef)g_hash_table_lookup (ctx->jit_callees, call->method); if (!tramp_var) { target = mono_create_jit_trampoline (call->method, error); if (!is_ok (error)) { set_failure (ctx, mono_error_get_message (error)); mono_error_cleanup (error); return; } tramp_var = LLVMAddGlobal (ctx->lmodule, LLVMPointerType (llvm_sig, 0), name); LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (LLVMConstInt (LLVMInt64Type (), (guint64)(size_t)target, FALSE), LLVMPointerType (llvm_sig, 0))); LLVMSetLinkage (tramp_var, LLVMExternalLinkage); g_hash_table_insert (ctx->jit_callees, call->method, tramp_var); } callee = LLVMBuildLoad (builder, tramp_var, ""); } } if (!cfg->llvm_only && call->method && strstr (m_class_get_name (call->method->klass), "AsyncVoidMethodBuilder")) { /* LLVM miscompiles async methods */ set_failure (ctx, "#13734"); return; } } else if (calli) { } else { const MonoJitICallId jit_icall_id = call->jit_icall_id; if (jit_icall_id) { if (cfg->compile_aot) { callee = get_callee (ctx, llvm_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (jit_icall_id)); if (!callee) { set_failure (ctx, "can't encode patch"); return; } } else { callee = get_jit_callee (ctx, "", llvm_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (jit_icall_id)); } } else { if (cfg->compile_aot) { callee = NULL; if (cfg->abs_patches) { MonoJumpInfo *abs_ji = (MonoJumpInfo*)g_hash_table_lookup (cfg->abs_patches, call->fptr); if (abs_ji) { callee = get_callee (ctx, llvm_sig, abs_ji->type, abs_ji->data.target); if (!callee) { set_failure (ctx, "can't encode patch"); return; } } } if (!callee) { set_failure (ctx, "aot"); return; } } else { if (cfg->abs_patches) { MonoJumpInfo *abs_ji = (MonoJumpInfo*)g_hash_table_lookup (cfg->abs_patches, call->fptr); if (abs_ji) { ERROR_DECL (error); target = mono_resolve_patch_target (cfg->method, NULL, abs_ji, FALSE, error); mono_error_assert_ok (error); callee = get_jit_callee (ctx, "", llvm_sig, abs_ji->type, abs_ji->data.target); } else { g_assert_not_reached (); } } else { g_assert_not_reached (); } } } } if (is_virtual) { int size = TARGET_SIZEOF_VOID_P; LLVMValueRef index; g_assert (ins->inst_offset % size == 0); index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE); callee = convert (ctx, LLVMBuildLoad (builder, LLVMBuildGEP (builder, convert (ctx, values [ins->inst_basereg], LLVMPointerType (LLVMPointerType (IntPtrType (), 0), 0)), &index, 1, ""), ""), LLVMPointerType (llvm_sig, 0)); } else if (calli) { callee = convert 
(ctx, values [ins->sreg1], LLVMPointerType (llvm_sig, 0)); } else { if (ins->flags & MONO_INST_HAS_METHOD) { } } /* * Collect and convert arguments */ nargs = (sig->param_count * 16) + sig->hasthis + vretaddr + call->rgctx_reg + call->imt_arg_reg + call->cinfo->dummy_arg + 1; len = sizeof (LLVMValueRef) * nargs; args = g_newa (LLVMValueRef, nargs); memset (args, 0, len); l = call->out_ireg_args; if (call->rgctx_arg_reg) { g_assert (values [call->rgctx_arg_reg]); g_assert (cinfo->rgctx_arg_pindex < nargs); /* * On ARM, the imt/rgctx argument is passed in a caller save register, but some of our trampolines etc. clobber it, leading to * problems is LLVM moves the arg assignment earlier. To work around this, save the argument into a stack slot and load * it using a volatile load. */ #ifdef TARGET_ARM if (!ctx->imt_rgctx_loc) ctx->imt_rgctx_loc = build_alloca_llvm_type (ctx, ctx->module->ptr_type, TARGET_SIZEOF_VOID_P); LLVMBuildStore (builder, convert (ctx, ctx->values [call->rgctx_arg_reg], ctx->module->ptr_type), ctx->imt_rgctx_loc); args [cinfo->rgctx_arg_pindex] = mono_llvm_build_load (builder, ctx->imt_rgctx_loc, "", TRUE); #else args [cinfo->rgctx_arg_pindex] = convert (ctx, values [call->rgctx_arg_reg], ctx->module->ptr_type); #endif } if (call->imt_arg_reg) { g_assert (!ctx->llvm_only); g_assert (values [call->imt_arg_reg]); g_assert (cinfo->imt_arg_pindex < nargs); #ifdef TARGET_ARM if (!ctx->imt_rgctx_loc) ctx->imt_rgctx_loc = build_alloca_llvm_type (ctx, ctx->module->ptr_type, TARGET_SIZEOF_VOID_P); LLVMBuildStore (builder, convert (ctx, ctx->values [call->imt_arg_reg], ctx->module->ptr_type), ctx->imt_rgctx_loc); args [cinfo->imt_arg_pindex] = mono_llvm_build_load (builder, ctx->imt_rgctx_loc, "", TRUE); #else args [cinfo->imt_arg_pindex] = convert (ctx, values [call->imt_arg_reg], ctx->module->ptr_type); #endif } switch (cinfo->ret.storage) { case LLVMArgGsharedvtVariable: { MonoInst *var = get_vreg_to_inst (cfg, call->inst.dreg); if (var && var->opcode == OP_GSHAREDVT_LOCAL) { args [cinfo->vret_arg_pindex] = convert (ctx, emit_gsharedvt_ldaddr (ctx, var->dreg), IntPtrType ()); } else { g_assert (addresses [call->inst.dreg]); args [cinfo->vret_arg_pindex] = convert (ctx, addresses [call->inst.dreg], IntPtrType ()); } break; } default: if (vretaddr) { if (!addresses [call->inst.dreg]) addresses [call->inst.dreg] = build_alloca (ctx, sig->ret); g_assert (cinfo->vret_arg_pindex < nargs); if (cinfo->ret.storage == LLVMArgVtypeByRef) args [cinfo->vret_arg_pindex] = addresses [call->inst.dreg]; else args [cinfo->vret_arg_pindex] = LLVMBuildPtrToInt (builder, addresses [call->inst.dreg], IntPtrType (), ""); } break; } /* * Sometimes the same method is called with two different signatures (i.e. with and without 'this'), so * use the real callee for argument type conversion. 
*/ LLVMTypeRef callee_type = LLVMGetElementType (LLVMTypeOf (callee)); LLVMTypeRef *param_types = (LLVMTypeRef*)g_alloca (sizeof (LLVMTypeRef) * LLVMCountParamTypes (callee_type)); LLVMGetParamTypes (callee_type, param_types); for (i = 0; i < sig->param_count + sig->hasthis; ++i) { guint32 regpair; int reg, pindex; LLVMArgInfo *ainfo = &call->cinfo->args [i]; pindex = ainfo->pindex; regpair = (guint32)(gssize)(l->data); reg = regpair & 0xffffff; args [pindex] = values [reg]; switch (ainfo->storage) { case LLVMArgVtypeInReg: case LLVMArgAsFpArgs: { guint32 nargs; int j; for (j = 0; j < ainfo->ndummy_fpargs; ++j) args [pindex + j] = LLVMConstNull (LLVMDoubleType ()); pindex += ainfo->ndummy_fpargs; g_assert (addresses [reg]); emit_vtype_to_args (ctx, builder, ainfo->type, addresses [reg], ainfo, args + pindex, &nargs); pindex += nargs; // FIXME: alignment // FIXME: Get rid of the VMOVE break; } case LLVMArgVtypeByVal: g_assert (addresses [reg]); args [pindex] = addresses [reg]; break; case LLVMArgVtypeAddr : case LLVMArgVtypeByRef: { g_assert (addresses [reg]); args [pindex] = convert (ctx, addresses [reg], LLVMPointerType (type_to_llvm_arg_type (ctx, ainfo->type), 0)); break; } case LLVMArgAsIArgs: g_assert (addresses [reg]); if (ainfo->esize == 8) args [pindex] = LLVMBuildLoad (ctx->builder, convert (ctx, addresses [reg], LLVMPointerType (LLVMArrayType (LLVMInt64Type (), ainfo->nslots), 0)), ""); else args [pindex] = LLVMBuildLoad (ctx->builder, convert (ctx, addresses [reg], LLVMPointerType (LLVMArrayType (IntPtrType (), ainfo->nslots), 0)), ""); break; case LLVMArgVtypeAsScalar: g_assert_not_reached (); break; case LLVMArgWasmVtypeAsScalar: g_assert (addresses [reg]); args [pindex] = LLVMBuildLoad (ctx->builder, convert (ctx, addresses [reg], LLVMPointerType (LLVMIntType (ainfo->esize * 8), 0)), ""); break; case LLVMArgGsharedvtFixed: case LLVMArgGsharedvtFixedVtype: g_assert (addresses [reg]); args [pindex] = convert (ctx, addresses [reg], LLVMPointerType (type_to_llvm_arg_type (ctx, ainfo->type), 0)); break; case LLVMArgGsharedvtVariable: g_assert (addresses [reg]); args [pindex] = convert (ctx, addresses [reg], LLVMPointerType (IntPtrType (), 0)); break; default: g_assert (args [pindex]); if (i == 0 && sig->hasthis) args [pindex] = convert (ctx, args [pindex], param_types [pindex]); else args [pindex] = convert (ctx, args [pindex], type_to_llvm_arg_type (ctx, ainfo->type)); break; } g_assert (pindex <= nargs); l = l->next; } if (call->cinfo->dummy_arg) { g_assert (call->cinfo->dummy_arg_pindex < nargs); args [call->cinfo->dummy_arg_pindex] = LLVMConstNull (ctx->module->ptr_type); } // FIXME: Align call sites /* * Emit the call */ lcall = emit_call (ctx, bb, &builder, callee, args, LLVMCountParamTypes (llvm_sig)); mono_llvm_nonnull_state_update (ctx, lcall, call->method, args, LLVMCountParamTypes (llvm_sig)); // If we just allocated an object, it's not null. 
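/*
 * Illustrative note (my sketch, not IR the backend emits verbatim): for an
 * allocator call, the annotations added below correspond roughly to IR of
 * the shape
 *   %obj = call noalias nonnull i8* @alloc_fn (...)   ; plus align 8 on the return
 * where 'alloc_fn' is a placeholder name; the exact textual IR depends on
 * the LLVM version and target.
 */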
if (call->method && call->method->wrapper_type == MONO_WRAPPER_ALLOC) { mono_llvm_set_call_nonnull_ret (lcall); } if (ins->opcode != OP_TAILCALL && ins->opcode != OP_TAILCALL_MEMBASE && LLVMGetInstructionOpcode (lcall) == LLVMCall) mono_llvm_set_call_notailcall (lcall); // Add original method name we are currently emitting as a custom string metadata (the only way to leave comments in LLVM IR) if (mono_debug_enabled () && call && call->method) mono_llvm_add_string_metadata (lcall, "managed_name", mono_method_full_name (call->method, TRUE)); // As per the LLVM docs, a function has a noalias return value if and only if // it is an allocation function. This is an allocation function. if (call->method && call->method->wrapper_type == MONO_WRAPPER_ALLOC) { mono_llvm_set_call_noalias_ret (lcall); // All objects are expected to be 8-byte aligned (SGEN_ALLOC_ALIGN) mono_llvm_set_alignment_ret (lcall, 8); } /* * Modify cconv and parameter attributes to pass rgctx/imt correctly. */ #if defined(MONO_ARCH_IMT_REG) && defined(MONO_ARCH_RGCTX_REG) g_assert (MONO_ARCH_IMT_REG == MONO_ARCH_RGCTX_REG); #endif /* The two can't be used together, so use only one LLVM calling conv to pass them */ g_assert (!(call->rgctx_arg_reg && call->imt_arg_reg)); if (!sig->pinvoke && !cfg->llvm_only) LLVMSetInstructionCallConv (lcall, LLVMMono1CallConv); if (cinfo->ret.storage == LLVMArgVtypeByRef) mono_llvm_add_instr_attr (lcall, 1 + cinfo->vret_arg_pindex, LLVM_ATTR_STRUCT_RET); if (!ctx->llvm_only && call->rgctx_arg_reg) mono_llvm_add_instr_attr (lcall, 1 + cinfo->rgctx_arg_pindex, LLVM_ATTR_IN_REG); if (call->imt_arg_reg) mono_llvm_add_instr_attr (lcall, 1 + cinfo->imt_arg_pindex, LLVM_ATTR_IN_REG); /* Add byval attributes if needed */ for (i = 0; i < sig->param_count; ++i) { LLVMArgInfo *ainfo = &call->cinfo->args [i + sig->hasthis]; if (ainfo && ainfo->storage == LLVMArgVtypeByVal) mono_llvm_add_instr_attr (lcall, 1 + ainfo->pindex, LLVM_ATTR_BY_VAL); #ifdef TARGET_WASM if (ainfo && ainfo->storage == LLVMArgVtypeByRef) /* This causes llvm to make a copy of the value which is what we need */ mono_llvm_add_instr_byval_attr (lcall, 1 + ainfo->pindex, LLVMGetElementType (param_types [ainfo->pindex])); #endif } gboolean is_simd = MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (sig->ret)); gboolean should_promote_to_value = FALSE; const char *load_name = NULL; /* * Convert the result. Non-SIMD value types are manipulated via an * indirection. SIMD value types are represented directly as LLVM vector * values, and must have a corresponding LLVM value definition in * `values`. */ switch (cinfo->ret.storage) { case LLVMArgAsIArgs: case LLVMArgFpStruct: if (!addresses [call->inst.dreg]) addresses [call->inst.dreg] = build_alloca (ctx, sig->ret); LLVMBuildStore (builder, lcall, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (LLVMTypeOf (lcall), 0), FALSE)); break; case LLVMArgVtypeByVal: /* * Only used by amd64 and x86. Only ever used when passing * arguments; never used for return values. 
*/ g_assert_not_reached (); break; case LLVMArgVtypeInReg: { if (LLVMTypeOf (lcall) == LLVMVoidType ()) /* Empty struct */ break; if (!addresses [ins->dreg]) addresses [ins->dreg] = build_alloca (ctx, sig->ret); LLVMValueRef regs [2] = { 0 }; regs [0] = LLVMBuildExtractValue (builder, lcall, 0, ""); if (cinfo->ret.pair_storage [1] != LLVMArgNone) regs [1] = LLVMBuildExtractValue (builder, lcall, 1, ""); emit_args_to_vtype (ctx, builder, sig->ret, addresses [ins->dreg], &cinfo->ret, regs); load_name = "process_call_vtype_in_reg"; should_promote_to_value = is_simd; break; } case LLVMArgVtypeAsScalar: if (!addresses [call->inst.dreg]) addresses [call->inst.dreg] = build_alloca (ctx, sig->ret); LLVMBuildStore (builder, lcall, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (LLVMTypeOf (lcall), 0), FALSE)); load_name = "process_call_vtype_as_scalar"; should_promote_to_value = is_simd; break; case LLVMArgVtypeRetAddr: case LLVMArgVtypeByRef: load_name = "process_call_vtype_ret_addr"; should_promote_to_value = is_simd; break; case LLVMArgGsharedvtVariable: break; case LLVMArgGsharedvtFixed: case LLVMArgGsharedvtFixedVtype: values [ins->dreg] = LLVMBuildLoad (builder, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (type_to_llvm_type (ctx, sig->ret), 0), FALSE), ""); break; case LLVMArgWasmVtypeAsScalar: if (!addresses [call->inst.dreg]) addresses [call->inst.dreg] = build_alloca (ctx, sig->ret); LLVMBuildStore (builder, lcall, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (LLVMTypeOf (lcall), 0), FALSE)); break; default: if (sig->ret->type != MONO_TYPE_VOID) /* If the method returns an unsigned value, need to zext it */ values [ins->dreg] = convert_full (ctx, lcall, llvm_type_to_stack_type (cfg, type_to_llvm_type (ctx, sig->ret)), type_is_unsigned (ctx, sig->ret)); break; } if (should_promote_to_value) { g_assert (addresses [call->inst.dreg]); LLVMTypeRef addr_type = LLVMPointerType (type_to_llvm_type (ctx, sig->ret), 0); LLVMValueRef addr = convert_full (ctx, addresses [call->inst.dreg], addr_type, FALSE); values [ins->dreg] = LLVMBuildLoad (builder, addr, load_name); } *builder_ref = ctx->builder; } static void emit_llvmonly_throw (EmitContext *ctx, MonoBasicBlock *bb, gboolean rethrow, LLVMValueRef exc) { MonoJitICallId icall_id = rethrow ? MONO_JIT_ICALL_mini_llvmonly_rethrow_exception : MONO_JIT_ICALL_mini_llvmonly_throw_exception; LLVMValueRef callee = rethrow ? ctx->module->rethrow : ctx->module->throw_icall; LLVMTypeRef exc_type = type_to_llvm_type (ctx, m_class_get_byval_arg (mono_get_exception_class ())); if (!callee) { LLVMTypeRef fun_sig = LLVMFunctionType1 (LLVMVoidType (), exc_type, FALSE); g_assert (ctx->cfg->compile_aot); callee = get_callee (ctx, fun_sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (icall_id)); } LLVMValueRef args [2]; args [0] = convert (ctx, exc, exc_type); emit_call (ctx, bb, &ctx->builder, callee, args, 1); LLVMBuildUnreachable (ctx->builder); ctx->builder = create_builder (ctx); } static void emit_throw (EmitContext *ctx, MonoBasicBlock *bb, gboolean rethrow, LLVMValueRef exc) { MonoMethodSignature *throw_sig; LLVMValueRef * const pcallee = rethrow ? &ctx->module->rethrow : &ctx->module->throw_icall; LLVMValueRef callee = *pcallee; char const * const icall_name = rethrow ? "mono_arch_rethrow_exception" : "mono_arch_throw_exception"; #ifndef TARGET_X86 const #endif MonoJitICallId icall_id = rethrow ? 
MONO_JIT_ICALL_mono_arch_rethrow_exception : MONO_JIT_ICALL_mono_arch_throw_exception; if (!callee) { throw_sig = mono_metadata_signature_alloc (mono_get_corlib (), 1); throw_sig->ret = m_class_get_byval_arg (mono_get_void_class ()); throw_sig->params [0] = m_class_get_byval_arg (mono_get_object_class ()); if (ctx->cfg->compile_aot) { callee = get_callee (ctx, sig_to_llvm_sig (ctx, throw_sig), MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); } else { #ifdef TARGET_X86 /* * LLVM doesn't push the exception argument, so we need a different * trampoline. */ icall_id = rethrow ? MONO_JIT_ICALL_mono_llvm_rethrow_exception_trampoline : MONO_JIT_ICALL_mono_llvm_throw_exception_trampoline; #endif callee = get_jit_callee (ctx, icall_name, sig_to_llvm_sig (ctx, throw_sig), MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); } mono_memory_barrier (); } LLVMValueRef arg; arg = convert (ctx, exc, type_to_llvm_type (ctx, m_class_get_byval_arg (mono_get_object_class ()))); emit_call (ctx, bb, &ctx->builder, callee, &arg, 1); } static void emit_resume_eh (EmitContext *ctx, MonoBasicBlock *bb) { const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_resume_exception; LLVMValueRef callee; LLVMTypeRef fun_sig = LLVMFunctionType0 (LLVMVoidType (), FALSE); g_assert (ctx->cfg->compile_aot); callee = get_callee (ctx, fun_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); emit_call (ctx, bb, &ctx->builder, callee, NULL, 0); LLVMBuildUnreachable (ctx->builder); ctx->builder = create_builder (ctx); } static LLVMValueRef mono_llvm_emit_clear_exception_call (EmitContext *ctx, LLVMBuilderRef builder) { const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_clear_exception; LLVMTypeRef call_sig = LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE); LLVMValueRef callee = NULL; if (!callee) { callee = get_callee (ctx, call_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); } g_assert (builder && callee); return LLVMBuildCall (builder, callee, NULL, 0, ""); } static LLVMValueRef mono_llvm_emit_load_exception_call (EmitContext *ctx, LLVMBuilderRef builder) { const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_load_exception; LLVMTypeRef call_sig = LLVMFunctionType (ObjRefType (), NULL, 0, FALSE); LLVMValueRef callee = NULL; g_assert (ctx->cfg->compile_aot); if (!callee) { callee = get_callee (ctx, call_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); } g_assert (builder && callee); return LLVMBuildCall (builder, callee, NULL, 0, "load_exception"); } static LLVMValueRef mono_llvm_emit_match_exception_call (EmitContext *ctx, LLVMBuilderRef builder, gint32 region_start, gint32 region_end) { const char *icall_name = "mini_llvmonly_match_exception"; const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_match_exception; ctx->builder = builder; LLVMValueRef args[5]; const int num_args = G_N_ELEMENTS (args); args [0] = convert (ctx, get_aotconst (ctx, MONO_PATCH_INFO_AOT_JIT_INFO, GINT_TO_POINTER (ctx->cfg->method_index), LLVMPointerType (IntPtrType (), 0)), IntPtrType ()); args [1] = LLVMConstInt (LLVMInt32Type (), region_start, 0); args [2] = LLVMConstInt (LLVMInt32Type (), region_end, 0); if (ctx->cfg->rgctx_var) { if (ctx->cfg->llvm_only) { args [3] = convert (ctx, ctx->rgctx_arg, IntPtrType ()); } else { LLVMValueRef rgctx_alloc = ctx->addresses [ctx->cfg->rgctx_var->dreg]; g_assert (rgctx_alloc); args [3] = LLVMBuildLoad (builder, convert (ctx, rgctx_alloc, LLVMPointerType (IntPtrType (), 0)), ""); } } else { args [3] = LLVMConstInt 
(IntPtrType (), 0, 0); } if (ctx->this_arg) args [4] = convert (ctx, ctx->this_arg, IntPtrType ()); else args [4] = LLVMConstInt (IntPtrType (), 0, 0); LLVMTypeRef match_sig = LLVMFunctionType5 (LLVMInt32Type (), IntPtrType (), LLVMInt32Type (), LLVMInt32Type (), IntPtrType (), IntPtrType (), FALSE); LLVMValueRef callee; g_assert (ctx->cfg->compile_aot); ctx->builder = builder; // get_callee expects ctx->builder to be the emitting builder callee = get_callee (ctx, match_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); g_assert (builder && callee); g_assert (ctx->ex_var); return LLVMBuildCall (builder, callee, args, num_args, icall_name); } // FIXME: This won't work because the code-finding makes this // not a constant. /*#define MONO_PERSONALITY_DEBUG*/ #ifdef MONO_PERSONALITY_DEBUG static const gboolean use_mono_personality_debug = TRUE; static const char *default_personality_name = "mono_debug_personality"; #else static const gboolean use_mono_personality_debug = FALSE; static const char *default_personality_name = "__gxx_personality_v0"; #endif static LLVMTypeRef default_cpp_lpad_exc_signature (void) { static LLVMTypeRef sig; if (!sig) { LLVMTypeRef signature [2]; signature [0] = LLVMPointerType (LLVMInt8Type (), 0); signature [1] = LLVMInt32Type (); sig = LLVMStructType (signature, 2, FALSE); } return sig; } static LLVMValueRef get_mono_personality (EmitContext *ctx) { LLVMValueRef personality = NULL; LLVMTypeRef personality_type = LLVMFunctionType (LLVMInt32Type (), NULL, 0, TRUE); g_assert (ctx->cfg->compile_aot); if (!use_mono_personality_debug) { personality = LLVMGetNamedFunction (ctx->lmodule, default_personality_name); } else { personality = get_callee (ctx, personality_type, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_debug_personality)); } g_assert (personality); return personality; } static LLVMBasicBlockRef emit_landing_pad (EmitContext *ctx, int group_index, int group_size) { MonoCompile *cfg = ctx->cfg; LLVMBuilderRef old_builder = ctx->builder; MonoExceptionClause *group_start = cfg->header->clauses + group_index; LLVMBuilderRef lpadBuilder = create_builder (ctx); ctx->builder = lpadBuilder; MonoBasicBlock *handler_bb = cfg->cil_offset_to_bb [CLAUSE_START (group_start)]; g_assert (handler_bb); // <resultval> = landingpad <somety> personality <type> <pers_fn> <clause>+ LLVMValueRef personality = get_mono_personality (ctx); g_assert (personality); char *bb_name = g_strdup_printf ("LPAD%d_BB", group_index); LLVMBasicBlockRef lpad_bb = gen_bb (ctx, bb_name); g_free (bb_name); LLVMPositionBuilderAtEnd (lpadBuilder, lpad_bb); LLVMValueRef landing_pad = LLVMBuildLandingPad (lpadBuilder, default_cpp_lpad_exc_signature (), personality, 0, ""); g_assert (landing_pad); LLVMValueRef cast = LLVMBuildBitCast (lpadBuilder, ctx->module->sentinel_exception, LLVMPointerType (LLVMInt8Type (), 0), "int8TypeInfo"); LLVMAddClause (landing_pad, cast); if (ctx->cfg->deopt) { /* * Call mini_llvmonly_resume_exception_il_state (lmf, il_state) * * The call will execute the catch clause and the rest of the method and store the return * value into ctx->il_state_ret. 
*/ if (!ctx->has_catch) { /* Unused */ LLVMBuildUnreachable (lpadBuilder); return lpad_bb; } const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_resume_exception_il_state; LLVMValueRef callee; LLVMValueRef args [2]; LLVMTypeRef fun_sig = LLVMFunctionType2 (LLVMVoidType (), IntPtrType (), IntPtrType (), FALSE); callee = get_callee (ctx, fun_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id)); g_assert (ctx->cfg->lmf_var); g_assert (ctx->addresses [ctx->cfg->lmf_var->dreg]); args [0] = LLVMBuildPtrToInt (ctx->builder, ctx->addresses [ctx->cfg->lmf_var->dreg], IntPtrType (), ""); args [1] = LLVMBuildPtrToInt (ctx->builder, ctx->il_state, IntPtrType (), ""); emit_call (ctx, NULL, &ctx->builder, callee, args, 2); /* Return the value set in ctx->il_state_ret */ LLVMTypeRef ret_type = LLVMGetReturnType (LLVMGetElementType (LLVMTypeOf (ctx->lmethod))); LLVMBuilderRef builder = ctx->builder; LLVMValueRef addr, retval, gep, indexes [2]; switch (ctx->linfo->ret.storage) { case LLVMArgNone: LLVMBuildRetVoid (builder); break; case LLVMArgNormal: case LLVMArgWasmVtypeAsScalar: case LLVMArgVtypeInReg: { if (ctx->sig->ret->type == MONO_TYPE_VOID) { LLVMBuildRetVoid (builder); break; } addr = ctx->il_state_ret; g_assert (addr); addr = convert (ctx, ctx->il_state_ret, LLVMPointerType (ret_type, 0)); indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); gep = LLVMBuildGEP (builder, addr, indexes, 1, ""); LLVMBuildRet (builder, LLVMBuildLoad (builder, gep, "")); break; } case LLVMArgVtypeRetAddr: { LLVMValueRef ret_addr; g_assert (cfg->vret_addr); ret_addr = ctx->values [cfg->vret_addr->dreg]; addr = ctx->il_state_ret; g_assert (addr); /* The ret value is in il_state_ret, copy it to the memory pointed to by the vret arg */ ret_type = type_to_llvm_type (ctx, ctx->sig->ret); indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); gep = LLVMBuildGEP (builder, addr, indexes, 1, ""); retval = convert (ctx, LLVMBuildLoad (builder, gep, ""), ret_type); LLVMBuildStore (builder, retval, convert (ctx, ret_addr, LLVMPointerType (ret_type, 0))); LLVMBuildRetVoid (builder); break; } default: g_assert_not_reached (); break; } return lpad_bb; } LLVMBasicBlockRef resume_bb = gen_bb (ctx, "RESUME_BB"); LLVMBuilderRef resume_builder = create_builder (ctx); ctx->builder = resume_builder; LLVMPositionBuilderAtEnd (resume_builder, resume_bb); emit_resume_eh (ctx, handler_bb); // Build match ctx->builder = lpadBuilder; LLVMPositionBuilderAtEnd (lpadBuilder, lpad_bb); gboolean finally_only = TRUE; MonoExceptionClause *group_cursor = group_start; for (int i = 0; i < group_size; i ++) { if (!(group_cursor->flags & MONO_EXCEPTION_CLAUSE_FINALLY || group_cursor->flags & MONO_EXCEPTION_CLAUSE_FAULT)) finally_only = FALSE; group_cursor++; } // FIXME: // Handle landing pad inlining if (!finally_only) { // So at each level of the exception stack we will match the exception again. // During that match, we need to compare against the handler types for the current // protected region. We send the try start and end so that we can only check against // handlers for this lexical protected region. 
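// Illustrative sketch of the dispatch built below (pseudo-IR, not the exact
// output; the value names are invented for the example):
//   %sel = call i32 @mini_llvmonly_match_exception (ji, try_start, try_end, rgctx, this)
//   switch i32 %sel, label %RESUME_BB [ i32 clause_index_0, label %HANDLER_0, ... ]
// A return value of -1 (no matching handler in this protected region) resumes
// unwinding; any clause index branches to that handler's call_handler_target_bb.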
LLVMValueRef match = mono_llvm_emit_match_exception_call (ctx, lpadBuilder, group_start->try_offset, group_start->try_offset + group_start->try_len); // if returns -1, resume LLVMValueRef switch_ins = LLVMBuildSwitch (lpadBuilder, match, resume_bb, group_size); // else move to that target bb for (int i = 0; i < group_size; i++) { MonoExceptionClause *clause = group_start + i; int clause_index = clause - cfg->header->clauses; MonoBasicBlock *handler_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->clause_to_handler, GINT_TO_POINTER (clause_index)); g_assert (handler_bb); g_assert (ctx->bblocks [handler_bb->block_num].call_handler_target_bb); LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), clause_index, FALSE), ctx->bblocks [handler_bb->block_num].call_handler_target_bb); } } else { int clause_index = group_start - cfg->header->clauses; MonoBasicBlock *finally_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->clause_to_handler, GINT_TO_POINTER (clause_index)); g_assert (finally_bb); LLVMBuildBr (ctx->builder, ctx->bblocks [finally_bb->block_num].call_handler_target_bb); } ctx->builder = old_builder; return lpad_bb; } static LLVMValueRef create_const_vector (LLVMTypeRef t, const int *vals, int count) { g_assert (count <= MAX_VECTOR_ELEMS); LLVMValueRef llvm_vals [MAX_VECTOR_ELEMS]; for (int i = 0; i < count; i++) llvm_vals [i] = LLVMConstInt (t, vals [i], FALSE); return LLVMConstVector (llvm_vals, count); } static LLVMValueRef create_const_vector_i32 (const int *mask, int count) { return create_const_vector (LLVMInt32Type (), mask, count); } static LLVMValueRef create_const_vector_4_i32 (int v0, int v1, int v2, int v3) { LLVMValueRef mask [4]; mask [0] = LLVMConstInt (LLVMInt32Type (), v0, FALSE); mask [1] = LLVMConstInt (LLVMInt32Type (), v1, FALSE); mask [2] = LLVMConstInt (LLVMInt32Type (), v2, FALSE); mask [3] = LLVMConstInt (LLVMInt32Type (), v3, FALSE); return LLVMConstVector (mask, 4); } static LLVMValueRef create_const_vector_2_i32 (int v0, int v1) { LLVMValueRef mask [2]; mask [0] = LLVMConstInt (LLVMInt32Type (), v0, FALSE); mask [1] = LLVMConstInt (LLVMInt32Type (), v1, FALSE); return LLVMConstVector (mask, 2); } static LLVMValueRef broadcast_element (EmitContext *ctx, LLVMValueRef elem, int count) { LLVMTypeRef t = LLVMTypeOf (elem); LLVMTypeRef init_vec_t = LLVMVectorType (t, 1); LLVMValueRef undef = LLVMGetUndef (init_vec_t); LLVMValueRef vec = LLVMBuildInsertElement (ctx->builder, undef, elem, const_int32 (0), ""); LLVMValueRef select_zero = LLVMConstNull (LLVMVectorType (LLVMInt32Type (), count)); return LLVMBuildShuffleVector (ctx->builder, vec, undef, select_zero, "broadcast"); } static LLVMValueRef broadcast_constant (int const_val, LLVMTypeRef elem_t, int count) { int vals [MAX_VECTOR_ELEMS]; for (int i = 0; i < count; ++i) vals [i] = const_val; return create_const_vector (elem_t, vals, count); } static LLVMValueRef create_shift_vector (EmitContext *ctx, LLVMValueRef type_donor, LLVMValueRef shiftamt) { LLVMTypeRef t = LLVMTypeOf (type_donor); unsigned int elems = LLVMGetVectorSize (t); LLVMTypeRef elem_t = LLVMGetElementType (t); shiftamt = convert_full (ctx, shiftamt, elem_t, TRUE); shiftamt = broadcast_element (ctx, shiftamt, elems); return shiftamt; } static LLVMTypeRef to_integral_vector_type (LLVMTypeRef t) { unsigned int elems = LLVMGetVectorSize (t); LLVMTypeRef elem_t = LLVMGetElementType (t); unsigned int bits = mono_llvm_get_prim_size_bits (elem_t); return LLVMVectorType (LLVMIntType (bits), elems); } static LLVMValueRef bitcast_to_integral (EmitContext 
*ctx, LLVMValueRef vec) { LLVMTypeRef src_t = LLVMTypeOf (vec); LLVMTypeRef dst_t = to_integral_vector_type (src_t); if (dst_t != src_t) return LLVMBuildBitCast (ctx->builder, vec, dst_t, "bc2i"); return vec; } static LLVMValueRef extract_high_elements (EmitContext *ctx, LLVMValueRef src_vec) { LLVMTypeRef src_t = LLVMTypeOf (src_vec); unsigned int src_elems = LLVMGetVectorSize (src_t); unsigned int dst_elems = src_elems / 2; int mask [MAX_VECTOR_ELEMS] = { 0 }; for (int i = 0; i < dst_elems; ++i) mask [i] = dst_elems + i; return LLVMBuildShuffleVector (ctx->builder, src_vec, LLVMGetUndef (src_t), create_const_vector_i32 (mask, dst_elems), "extract_high"); } static LLVMValueRef keep_lowest_element (EmitContext *ctx, LLVMTypeRef dst_t, LLVMValueRef vec) { LLVMTypeRef t = LLVMTypeOf (vec); g_assert (LLVMGetElementType (dst_t) == LLVMGetElementType (t)); unsigned int elems = LLVMGetVectorSize (dst_t); unsigned int src_elems = LLVMGetVectorSize (t); int mask [MAX_VECTOR_ELEMS] = { 0 }; mask [0] = 0; for (unsigned int i = 1; i < elems; ++i) mask [i] = src_elems; return LLVMBuildShuffleVector (ctx->builder, vec, LLVMConstNull (t), create_const_vector_i32 (mask, elems), "keep_lowest"); } static LLVMValueRef concatenate_vectors (EmitContext *ctx, LLVMValueRef xs, LLVMValueRef ys) { LLVMTypeRef t = LLVMTypeOf (xs); unsigned int elems = LLVMGetVectorSize (t) * 2; int mask [MAX_VECTOR_ELEMS] = { 0 }; for (int i = 0; i < elems; ++i) mask [i] = i; return LLVMBuildShuffleVector (ctx->builder, xs, ys, create_const_vector_i32 (mask, elems), "concat_vecs"); } static LLVMValueRef scalar_from_vector (EmitContext *ctx, LLVMValueRef xs) { return LLVMBuildExtractElement (ctx->builder, xs, const_int32 (0), "v2s"); } static LLVMValueRef vector_from_scalar (EmitContext *ctx, LLVMTypeRef type, LLVMValueRef x) { return LLVMBuildInsertElement (ctx->builder, LLVMConstNull (type), x, const_int32 (0), "s2v"); } typedef struct { EmitContext *ctx; MonoBasicBlock *bb; LLVMBasicBlockRef continuation; LLVMValueRef phi; LLVMValueRef switch_ins; LLVMBasicBlockRef tmp_block; LLVMBasicBlockRef default_case; LLVMTypeRef switch_index_type; const char *name; int max_cases; int i; } ImmediateUnrollCtx; static ImmediateUnrollCtx immediate_unroll_begin ( EmitContext *ctx, MonoBasicBlock *bb, int max_cases, LLVMValueRef switch_index, LLVMTypeRef return_type, const char *name) { LLVMBasicBlockRef default_case = gen_bb (ctx, name); LLVMBasicBlockRef continuation = gen_bb (ctx, name); LLVMValueRef switch_ins = LLVMBuildSwitch (ctx->builder, switch_index, default_case, max_cases); LLVMPositionBuilderAtEnd (ctx->builder, continuation); LLVMValueRef phi = LLVMBuildPhi (ctx->builder, return_type, name); ImmediateUnrollCtx ictx = { 0 }; ictx.ctx = ctx; ictx.bb = bb; ictx.continuation = continuation; ictx.phi = phi; ictx.switch_ins = switch_ins; ictx.default_case = default_case; ictx.switch_index_type = LLVMTypeOf (switch_index); ictx.name = name; ictx.max_cases = max_cases; return ictx; } static gboolean immediate_unroll_next (ImmediateUnrollCtx *ictx, int *i) { if (ictx->i >= ictx->max_cases) return FALSE; ictx->tmp_block = gen_bb (ictx->ctx, ictx->name); LLVMPositionBuilderAtEnd (ictx->ctx->builder, ictx->tmp_block); *i = ictx->i; ++ictx->i; return TRUE; } static void immediate_unroll_commit (ImmediateUnrollCtx *ictx, int switch_const, LLVMValueRef value) { LLVMBuildBr (ictx->ctx->builder, ictx->continuation); LLVMAddCase (ictx->switch_ins, LLVMConstInt (ictx->switch_index_type, switch_const, FALSE), ictx->tmp_block); LLVMAddIncoming 
(ictx->phi, &value, &ictx->tmp_block, 1); } static void immediate_unroll_default (ImmediateUnrollCtx *ictx) { LLVMPositionBuilderAtEnd (ictx->ctx->builder, ictx->default_case); } static void immediate_unroll_commit_default (ImmediateUnrollCtx *ictx, LLVMValueRef value) { LLVMBuildBr (ictx->ctx->builder, ictx->continuation); LLVMAddIncoming (ictx->phi, &value, &ictx->default_case, 1); } static void immediate_unroll_unreachable_default (ImmediateUnrollCtx *ictx) { immediate_unroll_default (ictx); LLVMBuildUnreachable (ictx->ctx->builder); } static LLVMValueRef immediate_unroll_end (ImmediateUnrollCtx *ictx, LLVMBasicBlockRef *continuation) { EmitContext *ctx = ictx->ctx; LLVMBuilderRef builder = ctx->builder; LLVMPositionBuilderAtEnd (builder, ictx->continuation); *continuation = ictx->continuation; ctx->bblocks [ictx->bb->block_num].end_bblock = ictx->continuation; return ictx->phi; } typedef struct { EmitContext *ctx; LLVMTypeRef intermediate_type; LLVMTypeRef return_type; gboolean needs_fake_scalar_op; llvm_ovr_tag_t ovr_tag; } ScalarOpFromVectorOpCtx; static inline gboolean check_needs_fake_scalar_op (MonoTypeEnum type) { #if defined(TARGET_ARM64) switch (type) { case MONO_TYPE_U1: case MONO_TYPE_I1: case MONO_TYPE_U2: case MONO_TYPE_I2: return TRUE; } #endif return FALSE; } static ScalarOpFromVectorOpCtx scalar_op_from_vector_op (EmitContext *ctx, LLVMTypeRef return_type, MonoInst *ins) { ScalarOpFromVectorOpCtx ret = { 0 }; ret.ctx = ctx; ret.intermediate_type = return_type; ret.return_type = return_type; ret.needs_fake_scalar_op = check_needs_fake_scalar_op (inst_c1_type (ins)); ret.ovr_tag = ovr_tag_from_llvm_type (return_type); if (!ret.needs_fake_scalar_op) { ret.ovr_tag = ovr_tag_force_scalar (ret.ovr_tag); ret.intermediate_type = ovr_tag_to_llvm_type (ret.ovr_tag); } return ret; } static void scalar_op_from_vector_op_process_args (ScalarOpFromVectorOpCtx *sctx, LLVMValueRef *args, int num_args) { if (!sctx->needs_fake_scalar_op) for (int i = 0; i < num_args; ++i) args [i] = scalar_from_vector (sctx->ctx, args [i]); } static LLVMValueRef scalar_op_from_vector_op_process_result (ScalarOpFromVectorOpCtx *sctx, LLVMValueRef result) { if (sctx->needs_fake_scalar_op) return keep_lowest_element (sctx->ctx, LLVMTypeOf (result), result); return vector_from_scalar (sctx->ctx, sctx->return_type, result); } static void emit_llvmonly_handler_start (EmitContext *ctx, MonoBasicBlock *bb, LLVMBasicBlockRef cbb) { int clause_index = MONO_REGION_CLAUSE_INDEX (bb->region); MonoExceptionClause *clause = &ctx->cfg->header->clauses [clause_index]; // Make exception available to catch blocks if (!(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY || clause->flags & MONO_EXCEPTION_CLAUSE_FAULT)) { LLVMValueRef mono_exc = mono_llvm_emit_load_exception_call (ctx, ctx->builder); g_assert (ctx->ex_var); LLVMBuildStore (ctx->builder, LLVMBuildBitCast (ctx->builder, mono_exc, ObjRefType (), ""), ctx->ex_var); if (bb->in_scount == 1) { MonoInst *exvar = bb->in_stack [0]; g_assert (!ctx->values [exvar->dreg]); g_assert (ctx->ex_var); ctx->values [exvar->dreg] = LLVMBuildLoad (ctx->builder, ctx->ex_var, "save_exception"); emit_volatile_store (ctx, exvar->dreg); } mono_llvm_emit_clear_exception_call (ctx, ctx->builder); } #ifdef TARGET_WASM if (ctx->cfg->lmf_var && !ctx->cfg->deopt) { LLVMValueRef callee; LLVMValueRef args [1]; LLVMTypeRef sig = LLVMFunctionType1 (LLVMVoidType (), ctx->module->ptr_type, FALSE); /* * There might be an LMF on the stack inserted to enable stack walking, see * method_needs_stack_walk (). 
If an exception is thrown, the LMF popping code * is not executed, so do it here. */ g_assert (ctx->addresses [ctx->cfg->lmf_var->dreg]); callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_pop_lmf)); args [0] = convert (ctx, ctx->addresses [ctx->cfg->lmf_var->dreg], ctx->module->ptr_type); emit_call (ctx, bb, &ctx->builder, callee, args, 1); } #endif LLVMBuilderRef handler_builder = create_builder (ctx); LLVMBasicBlockRef target_bb = ctx->bblocks [bb->block_num].call_handler_target_bb; LLVMPositionBuilderAtEnd (handler_builder, target_bb); // Make the handler code end with a jump to cbb LLVMBuildBr (handler_builder, cbb); } static void emit_handler_start (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef builder) { MonoCompile *cfg = ctx->cfg; LLVMValueRef *values = ctx->values; LLVMModuleRef lmodule = ctx->lmodule; BBInfo *bblocks = ctx->bblocks; LLVMTypeRef i8ptr; LLVMValueRef personality; LLVMValueRef landing_pad; LLVMBasicBlockRef target_bb; MonoInst *exvar; static int ti_generator; char ti_name [128]; LLVMValueRef type_info; int clause_index; GSList *l; // <resultval> = landingpad <somety> personality <type> <pers_fn> <clause>+ if (cfg->compile_aot) { /* Use a dummy personality function */ personality = LLVMGetNamedFunction (lmodule, "mono_personality"); g_assert (personality); } else { /* Can't cache this as each method is in its own llvm module */ LLVMTypeRef personality_type = LLVMFunctionType (LLVMInt32Type (), NULL, 0, TRUE); personality = LLVMAddFunction (ctx->lmodule, "mono_personality", personality_type); mono_llvm_add_func_attr (personality, LLVM_ATTR_NO_UNWIND); LLVMBasicBlockRef entry_bb = LLVMAppendBasicBlock (personality, "ENTRY"); LLVMBuilderRef builder2 = LLVMCreateBuilder (); LLVMPositionBuilderAtEnd (builder2, entry_bb); LLVMBuildRet (builder2, LLVMConstInt (LLVMInt32Type (), 0, FALSE)); LLVMDisposeBuilder (builder2); } i8ptr = LLVMPointerType (LLVMInt8Type (), 0); clause_index = (mono_get_block_region_notry (cfg, bb->region) >> 8) - 1; /* * Create the type info */ sprintf (ti_name, "type_info_%d", ti_generator); ti_generator ++; if (cfg->compile_aot) { /* decode_eh_frame () in aot-runtime.c will decode this */ type_info = LLVMAddGlobal (lmodule, LLVMInt32Type (), ti_name); LLVMSetInitializer (type_info, LLVMConstInt (LLVMInt32Type (), clause_index, FALSE)); /* * These symbols are not really used; the clause_index is embedded into the EH tables generated by DwarfMonoException in LLVM. */ LLVMSetLinkage (type_info, LLVMInternalLinkage); } else { type_info = LLVMAddGlobal (lmodule, LLVMInt32Type (), ti_name); LLVMSetInitializer (type_info, LLVMConstInt (LLVMInt32Type (), clause_index, FALSE)); } { LLVMTypeRef members [2], ret_type; members [0] = i8ptr; members [1] = LLVMInt32Type (); ret_type = LLVMStructType (members, 2, FALSE); landing_pad = LLVMBuildLandingPad (builder, ret_type, personality, 1, ""); LLVMAddClause (landing_pad, type_info); /* Store the exception into the exvar */ if (ctx->ex_var) LLVMBuildStore (builder, convert (ctx, LLVMBuildExtractValue (builder, landing_pad, 0, "ex_obj"), ObjRefType ()), ctx->ex_var); } /* * LLVM throw sites are associated with one landing pad, and LLVM generated * code expects control to be transferred to this landing pad even in the * presence of nested clauses. 
The landing pad needs to branch to the landing * pads belonging to nested clauses based on the selector value returned by * the landing pad instruction, which is passed to the landing pad in a * register by the EH code. */ target_bb = bblocks [bb->block_num].call_handler_target_bb; g_assert (target_bb); /* * Branch to the correct landing pad */ LLVMValueRef ex_selector = LLVMBuildExtractValue (builder, landing_pad, 1, "ex_selector"); LLVMValueRef switch_ins = LLVMBuildSwitch (builder, ex_selector, target_bb, 0); for (l = ctx->nested_in [clause_index]; l; l = l->next) { int nesting_clause_index = GPOINTER_TO_INT (l->data); MonoBasicBlock *handler_bb; handler_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->clause_to_handler, GINT_TO_POINTER (nesting_clause_index)); g_assert (handler_bb); g_assert (ctx->bblocks [handler_bb->block_num].call_handler_target_bb); LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), nesting_clause_index, FALSE), ctx->bblocks [handler_bb->block_num].call_handler_target_bb); } /* Start a new bblock which CALL_HANDLER can branch to */ ctx->builder = builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, target_bb); ctx->bblocks [bb->block_num].end_bblock = target_bb; /* Store the exception into the IL level exvar */ if (bb->in_scount == 1) { g_assert (bb->in_scount == 1); exvar = bb->in_stack [0]; // FIXME: This is shared with filter clauses ? g_assert (!values [exvar->dreg]); g_assert (ctx->ex_var); values [exvar->dreg] = LLVMBuildLoad (builder, ctx->ex_var, ""); emit_volatile_store (ctx, exvar->dreg); } /* Make normal branches to the start of the clause branch to the new bblock */ bblocks [bb->block_num].bblock = target_bb; } static LLVMValueRef get_double_const (MonoCompile *cfg, double val) { //#ifdef TARGET_WASM #if 0 //Wasm requires us to canonicalize NaNs. 
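// (0x7FF8000000000000 below is the canonical quiet NaN bit pattern for a
// double; the float variant in get_float_const uses 0x7FC00000.)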
if (mono_isnan (val)) *(gint64 *)&val = 0x7FF8000000000000ll; #endif return LLVMConstReal (LLVMDoubleType (), val); } static LLVMValueRef get_float_const (MonoCompile *cfg, float val) { //#ifdef TARGET_WASM #if 0 if (mono_isnan (val)) *(int *)&val = 0x7FC00000; #endif if (cfg->r4fp) return LLVMConstReal (LLVMFloatType (), val); else return LLVMConstFPExt (LLVMConstReal (LLVMFloatType (), val), LLVMDoubleType ()); } static LLVMValueRef call_overloaded_intrins (EmitContext *ctx, int id, llvm_ovr_tag_t ovr_tag, LLVMValueRef *args, const char *name) { int key = key_from_id_and_tag (id, ovr_tag); LLVMValueRef intrins = get_intrins (ctx, key); int nargs = LLVMCountParamTypes (LLVMGetElementType (LLVMTypeOf (intrins))); for (int i = 0; i < nargs; ++i) { LLVMTypeRef t1 = LLVMTypeOf (args [i]); LLVMTypeRef t2 = LLVMTypeOf (LLVMGetParam (intrins, i)); if (t1 != t2) args [i] = convert (ctx, args [i], t2); } return LLVMBuildCall (ctx->builder, intrins, args, nargs, name); } static LLVMValueRef call_intrins (EmitContext *ctx, int id, LLVMValueRef *args, const char *name) { return call_overloaded_intrins (ctx, id, 0, args, name); } static void process_bb (EmitContext *ctx, MonoBasicBlock *bb) { MonoCompile *cfg = ctx->cfg; MonoMethodSignature *sig = ctx->sig; LLVMValueRef method = ctx->lmethod; LLVMValueRef *values = ctx->values; LLVMValueRef *addresses = ctx->addresses; LLVMCallInfo *linfo = ctx->linfo; BBInfo *bblocks = ctx->bblocks; MonoInst *ins; LLVMBasicBlockRef cbb; LLVMBuilderRef builder; gboolean has_terminator; LLVMValueRef v; LLVMValueRef lhs, rhs, arg3; int nins = 0; cbb = get_end_bb (ctx, bb); builder = create_builder (ctx); ctx->builder = builder; LLVMPositionBuilderAtEnd (builder, cbb); if (!ctx_ok (ctx)) return; if (cfg->interp_entry_only && bb != cfg->bb_init && bb != cfg->bb_entry && bb != cfg->bb_exit) { /* The interp entry code is in bb_entry, skip the rest as we might not be able to compile it */ LLVMBuildUnreachable (builder); return; } if (bb->flags & BB_EXCEPTION_HANDLER) { if (!ctx->llvm_only && !bblocks [bb->block_num].invoke_target) { set_failure (ctx, "handler without invokes"); return; } if (ctx->llvm_only) emit_llvmonly_handler_start (ctx, bb, cbb); else emit_handler_start (ctx, bb, builder); if (!ctx_ok (ctx)) return; builder = ctx->builder; } /* Handle PHI nodes first */ /* They should be grouped at the start of the bb */ for (ins = bb->code; ins; ins = ins->next) { emit_dbg_loc (ctx, builder, ins->cil_code); if (ins->opcode == OP_NOP) continue; if (!MONO_IS_PHI (ins)) break; if (cfg->interp_entry_only) break; int i; gboolean empty = TRUE; /* Check that all input bblocks really branch to us */ for (i = 0; i < bb->in_count; ++i) { if (bb->in_bb [i]->last_ins && bb->in_bb [i]->last_ins->opcode == OP_NOT_REACHED) ins->inst_phi_args [i + 1] = -1; else empty = FALSE; } if (empty) { /* LLVM doesn't like phi instructions with zero operands */ ctx->is_dead [ins->dreg] = TRUE; continue; } /* Created earlier, insert it now */ LLVMInsertIntoBuilder (builder, values [ins->dreg]); for (i = 0; i < ins->inst_phi_args [0]; i++) { int sreg1 = ins->inst_phi_args [i + 1]; int count, j; /* * Count the number of times the incoming bblock branches to us, * since llvm requires a separate entry for each. 
*/ if (bb->in_bb [i]->last_ins && bb->in_bb [i]->last_ins->opcode == OP_SWITCH) { MonoInst *switch_ins = bb->in_bb [i]->last_ins; count = 0; for (j = 0; j < GPOINTER_TO_UINT (switch_ins->klass); ++j) { if (switch_ins->inst_many_bb [j] == bb) count ++; } } else { count = 1; } /* Remember for later */ for (j = 0; j < count; ++j) { PhiNode *node = (PhiNode*)mono_mempool_alloc0 (ctx->mempool, sizeof (PhiNode)); node->bb = bb; node->phi = ins; node->in_bb = bb->in_bb [i]; node->sreg = sreg1; bblocks [bb->in_bb [i]->block_num].phi_nodes = g_slist_prepend_mempool (ctx->mempool, bblocks [bb->in_bb [i]->block_num].phi_nodes, node); } } } // Add volatile stores for PHI nodes // These need to be emitted after the PHI nodes for (ins = bb->code; ins; ins = ins->next) { const char *spec = LLVM_INS_INFO (ins->opcode); if (ins->opcode == OP_NOP) continue; if (!MONO_IS_PHI (ins)) break; if (spec [MONO_INST_DEST] != 'v') emit_volatile_store (ctx, ins->dreg); } has_terminator = FALSE; for (ins = bb->code; ins; ins = ins->next) { const char *spec = LLVM_INS_INFO (ins->opcode); char *dname = NULL; char dname_buf [128]; emit_dbg_loc (ctx, builder, ins->cil_code); nins ++; if (nins > 1000) { /* * Some steps in llc are non-linear in the size of basic blocks, see #5714. * Start a new bblock. * Prevent the bblocks from being merged by doing a volatile load + cond branch * from localloc-ed memory. */ if (!cfg->llvm_only) ;//set_failure (ctx, "basic block too long"); if (!ctx->long_bb_break_var) { ctx->long_bb_break_var = build_alloca_llvm_type_name (ctx, LLVMInt32Type (), 0, "long_bb_break"); mono_llvm_build_store (ctx->alloca_builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ctx->long_bb_break_var, TRUE, LLVM_BARRIER_NONE); } cbb = gen_bb (ctx, "CONT_LONG_BB"); LLVMBasicBlockRef dummy_bb = gen_bb (ctx, "CONT_LONG_BB_DUMMY"); LLVMValueRef load = mono_llvm_build_load (builder, ctx->long_bb_break_var, "", TRUE); /* * The long_bb_break_var is initialized to 0 in the prolog, so this branch will always go to 'cbb' * but llvm doesn't know that, so the branch is not going to be eliminated. 
*/ LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntEQ, load, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); LLVMBuildCondBr (builder, cmp, cbb, dummy_bb); /* Emit a dummy false bblock which does nothing but contains a volatile store so it cannot be eliminated */ ctx->builder = builder = create_builder (ctx); LLVMPositionBuilderAtEnd (builder, dummy_bb); mono_llvm_build_store (builder, LLVMConstInt (LLVMInt32Type (), 1, FALSE), ctx->long_bb_break_var, TRUE, LLVM_BARRIER_NONE); LLVMBuildBr (builder, cbb); ctx->builder = builder = create_builder (ctx); LLVMPositionBuilderAtEnd (builder, cbb); ctx->bblocks [bb->block_num].end_bblock = cbb; nins = 0; emit_dbg_loc (ctx, builder, ins->cil_code); } if (has_terminator) /* There could be instructions after a terminator, skip them */ break; if (spec [MONO_INST_DEST] != ' ' && !MONO_IS_STORE_MEMBASE (ins)) { sprintf (dname_buf, "t%d", ins->dreg); dname = dname_buf; } if (spec [MONO_INST_SRC1] != ' ' && spec [MONO_INST_SRC1] != 'v') { MonoInst *var = get_vreg_to_inst (cfg, ins->sreg1); if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) && var->opcode != OP_GSHAREDVT_ARG_REGOFFSET) { lhs = emit_volatile_load (ctx, ins->sreg1); } else { /* It is ok for SETRET to have an uninitialized argument */ if (!values [ins->sreg1] && ins->opcode != OP_SETRET) { set_failure (ctx, "sreg1"); return; } lhs = values [ins->sreg1]; } } else { lhs = NULL; } if (spec [MONO_INST_SRC2] != ' ' && spec [MONO_INST_SRC2] != 'v') { MonoInst *var = get_vreg_to_inst (cfg, ins->sreg2); if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) { rhs = emit_volatile_load (ctx, ins->sreg2); } else { if (!values [ins->sreg2]) { set_failure (ctx, "sreg2"); return; } rhs = values [ins->sreg2]; } } else { rhs = NULL; } if (spec [MONO_INST_SRC3] != ' ' && spec [MONO_INST_SRC3] != 'v') { MonoInst *var = get_vreg_to_inst (cfg, ins->sreg3); if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) { arg3 = emit_volatile_load (ctx, ins->sreg3); } else { if (!values [ins->sreg3]) { set_failure (ctx, "sreg3"); return; } arg3 = values [ins->sreg3]; } } else { arg3 = NULL; } //mono_print_ins (ins); gboolean skip_volatile_store = FALSE; switch (ins->opcode) { case OP_NOP: case OP_NOT_NULL: case OP_LIVERANGE_START: case OP_LIVERANGE_END: break; case OP_ICONST: values [ins->dreg] = LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE); break; case OP_I8CONST: #if TARGET_SIZEOF_VOID_P == 4 values [ins->dreg] = LLVMConstInt (LLVMInt64Type (), GET_LONG_IMM (ins), FALSE); #else values [ins->dreg] = LLVMConstInt (LLVMInt64Type (), (gint64)ins->inst_c0, FALSE); #endif break; case OP_R8CONST: values [ins->dreg] = get_double_const (cfg, *(double*)ins->inst_p0); break; case OP_R4CONST: values [ins->dreg] = get_float_const (cfg, *(float*)ins->inst_p0); break; case OP_DUMMY_ICONST: values [ins->dreg] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); break; case OP_DUMMY_I8CONST: values [ins->dreg] = LLVMConstInt (LLVMInt64Type (), 0, FALSE); break; case OP_DUMMY_R8CONST: values [ins->dreg] = LLVMConstReal (LLVMDoubleType (), 0.0f); break; case OP_BR: { LLVMBasicBlockRef target_bb = get_bb (ctx, ins->inst_target_bb); LLVMBuildBr (builder, target_bb); has_terminator = TRUE; break; } case OP_SWITCH: { int i; LLVMValueRef v; char bb_name [128]; LLVMBasicBlockRef new_bb; LLVMBuilderRef new_builder; // The default branch is already handled // FIXME: Handle it here /* Start new bblock */ sprintf (bb_name, "SWITCH_DEFAULT_BB%d", ctx->default_index ++); new_bb = LLVMAppendBasicBlock (ctx->lmethod, 
bb_name); lhs = convert (ctx, lhs, LLVMInt32Type ()); v = LLVMBuildSwitch (builder, lhs, new_bb, GPOINTER_TO_UINT (ins->klass)); for (i = 0; i < GPOINTER_TO_UINT (ins->klass); ++i) { MonoBasicBlock *target_bb = ins->inst_many_bb [i]; LLVMAddCase (v, LLVMConstInt (LLVMInt32Type (), i, FALSE), get_bb (ctx, target_bb)); } new_builder = create_builder (ctx); LLVMPositionBuilderAtEnd (new_builder, new_bb); LLVMBuildUnreachable (new_builder); has_terminator = TRUE; g_assert (!ins->next); break; } case OP_SETRET: switch (linfo->ret.storage) { case LLVMArgNormal: case LLVMArgVtypeInReg: case LLVMArgVtypeAsScalar: case LLVMArgWasmVtypeAsScalar: { LLVMTypeRef ret_type = LLVMGetReturnType (LLVMGetElementType (LLVMTypeOf (method))); LLVMValueRef retval = LLVMGetUndef (ret_type); gboolean src_in_reg = FALSE; gboolean is_simd = MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (sig->ret)); switch (linfo->ret.storage) { case LLVMArgNormal: src_in_reg = TRUE; break; case LLVMArgVtypeInReg: case LLVMArgVtypeAsScalar: src_in_reg = is_simd; break; } if (src_in_reg && (!lhs || ctx->is_dead [ins->sreg1])) { /* * The method did not set its return value, probably because it * ends with a throw. */ LLVMBuildRet (builder, retval); break; } switch (linfo->ret.storage) { case LLVMArgNormal: retval = convert (ctx, lhs, type_to_llvm_type (ctx, sig->ret)); break; case LLVMArgVtypeInReg: if (is_simd) { /* The return type is an LLVM aggregate type, so a bare bitcast cannot be used to do this conversion. */ int width = mono_type_size (sig->ret, NULL); int elems = width / TARGET_SIZEOF_VOID_P; /* The return value might not be set if there is a throw */ LLVMValueRef val = LLVMBuildBitCast (builder, lhs, LLVMVectorType (IntPtrType (), elems), ""); for (int i = 0; i < elems; ++i) { LLVMValueRef element = LLVMBuildExtractElement (builder, val, const_int32 (i), ""); retval = LLVMBuildInsertValue (builder, retval, element, i, "setret_simd_vtype_in_reg"); } } else { LLVMValueRef addr = LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (ret_type, 0), ""); for (int i = 0; i < 2; ++i) { if (linfo->ret.pair_storage [i] == LLVMArgInIReg) { LLVMValueRef indexes [2], part_addr; indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); indexes [1] = LLVMConstInt (LLVMInt32Type (), i, FALSE); part_addr = LLVMBuildGEP (builder, addr, indexes, 2, ""); retval = LLVMBuildInsertValue (builder, retval, LLVMBuildLoad (builder, part_addr, ""), i, ""); } else { g_assert (linfo->ret.pair_storage [i] == LLVMArgNone); } } } break; case LLVMArgVtypeAsScalar: if (is_simd) { retval = LLVMBuildBitCast (builder, values [ins->sreg1], ret_type, "setret_simd_vtype_as_scalar"); } else { g_assert (addresses [ins->sreg1]); retval = LLVMBuildLoad (builder, LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (ret_type, 0), ""), ""); } break; case LLVMArgWasmVtypeAsScalar: g_assert (addresses [ins->sreg1]); retval = LLVMBuildLoad (builder, LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (ret_type, 0), ""), ""); break; } LLVMBuildRet (builder, retval); break; } case LLVMArgVtypeByRef: { LLVMBuildRetVoid (builder); break; } case LLVMArgGsharedvtFixed: { LLVMTypeRef ret_type = type_to_llvm_type (ctx, sig->ret); /* The return value is in lhs, need to store to the vret argument */ /* sreg1 might not be set */ if (lhs) { g_assert (cfg->vret_addr); g_assert (values [cfg->vret_addr->dreg]); LLVMBuildStore (builder, convert (ctx, lhs, ret_type), convert (ctx, values [cfg->vret_addr->dreg], LLVMPointerType 
(ret_type, 0))); } LLVMBuildRetVoid (builder); break; } case LLVMArgGsharedvtFixedVtype: { /* Already set */ LLVMBuildRetVoid (builder); break; } case LLVMArgGsharedvtVariable: { /* Already set */ LLVMBuildRetVoid (builder); break; } case LLVMArgVtypeRetAddr: { LLVMBuildRetVoid (builder); break; } case LLVMArgAsIArgs: case LLVMArgFpStruct: { LLVMTypeRef ret_type = LLVMGetReturnType (LLVMGetElementType (LLVMTypeOf (method))); LLVMValueRef retval; g_assert (addresses [ins->sreg1]); retval = LLVMBuildLoad (builder, convert (ctx, addresses [ins->sreg1], LLVMPointerType (ret_type, 0)), ""); LLVMBuildRet (builder, retval); break; } case LLVMArgNone: LLVMBuildRetVoid (builder); break; default: g_assert_not_reached (); break; } has_terminator = TRUE; break; case OP_ICOMPARE: case OP_FCOMPARE: case OP_RCOMPARE: case OP_LCOMPARE: case OP_COMPARE: case OP_ICOMPARE_IMM: case OP_LCOMPARE_IMM: case OP_COMPARE_IMM: { CompRelation rel; LLVMValueRef cmp, args [16]; gboolean likely = (ins->flags & MONO_INST_LIKELY) != 0; gboolean unlikely = FALSE; if (MONO_IS_COND_BRANCH_OP (ins->next)) { if (ins->next->inst_false_bb->out_of_line) likely = TRUE; else if (ins->next->inst_true_bb->out_of_line) unlikely = TRUE; } if (ins->next->opcode == OP_NOP) break; if (ins->next->opcode == OP_BR) /* The comparison result is not needed */ continue; rel = mono_opcode_to_cond (ins->next->opcode); if (ins->opcode == OP_ICOMPARE_IMM) { lhs = convert (ctx, lhs, LLVMInt32Type ()); rhs = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE); } if (ins->opcode == OP_LCOMPARE_IMM) { lhs = convert (ctx, lhs, LLVMInt64Type ()); rhs = LLVMConstInt (LLVMInt64Type (), GET_LONG_IMM (ins), FALSE); } if (ins->opcode == OP_LCOMPARE) { lhs = convert (ctx, lhs, LLVMInt64Type ()); rhs = convert (ctx, rhs, LLVMInt64Type ()); } if (ins->opcode == OP_ICOMPARE) { lhs = convert (ctx, lhs, LLVMInt32Type ()); rhs = convert (ctx, rhs, LLVMInt32Type ()); } if (lhs && rhs) { if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind) rhs = convert (ctx, rhs, LLVMTypeOf (lhs)); else if (LLVMGetTypeKind (LLVMTypeOf (rhs)) == LLVMPointerTypeKind) lhs = convert (ctx, lhs, LLVMTypeOf (rhs)); } /* We use COMPARE+SETcc/Bcc, llvm uses SETcc+br cond */ if (ins->opcode == OP_FCOMPARE) { cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMDoubleType ()), convert (ctx, rhs, LLVMDoubleType ()), ""); } else if (ins->opcode == OP_RCOMPARE) { cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMFloatType ()), convert (ctx, rhs, LLVMFloatType ()), ""); } else if (ins->opcode == OP_COMPARE_IMM) { LLVMIntPredicate llvm_pred = cond_to_llvm_cond [rel]; if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind && ins->inst_imm == 0) { // We are emitting a NULL check for a pointer gboolean nonnull = mono_llvm_is_nonnull (lhs); if (nonnull && llvm_pred == LLVMIntEQ) cmp = LLVMConstInt (LLVMInt1Type (), FALSE, FALSE); else if (nonnull && llvm_pred == LLVMIntNE) cmp = LLVMConstInt (LLVMInt1Type (), TRUE, FALSE); else cmp = LLVMBuildICmp (builder, llvm_pred, lhs, LLVMConstNull (LLVMTypeOf (lhs)), ""); } else { cmp = LLVMBuildICmp (builder, llvm_pred, convert (ctx, lhs, IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), ""); } } else if (ins->opcode == OP_LCOMPARE_IMM) { cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], lhs, rhs, ""); } else if (ins->opcode == OP_COMPARE) { if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind && LLVMTypeOf (lhs) == LLVMTypeOf (rhs)) cmp = LLVMBuildICmp 
(builder, cond_to_llvm_cond [rel], lhs, rhs, ""); else cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], convert (ctx, lhs, IntPtrType ()), convert (ctx, rhs, IntPtrType ()), ""); } else cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], lhs, rhs, ""); if (likely || unlikely) { args [0] = cmp; args [1] = LLVMConstInt (LLVMInt1Type (), likely ? 1 : 0, FALSE); cmp = call_intrins (ctx, INTRINS_EXPECT_I1, args, ""); } if (MONO_IS_COND_BRANCH_OP (ins->next)) { if (ins->next->inst_true_bb == ins->next->inst_false_bb) { /* * If the target bb contains PHI instructions, LLVM requires * two PHI entries for this bblock, while we only generate one. * So convert this to an unconditional bblock. (bxc #171). */ LLVMBuildBr (builder, get_bb (ctx, ins->next->inst_true_bb)); } else { LLVMBuildCondBr (builder, cmp, get_bb (ctx, ins->next->inst_true_bb), get_bb (ctx, ins->next->inst_false_bb)); } has_terminator = TRUE; } else if (MONO_IS_SETCC (ins->next)) { sprintf (dname_buf, "t%d", ins->next->dreg); dname = dname_buf; values [ins->next->dreg] = LLVMBuildZExt (builder, cmp, LLVMInt32Type (), dname); /* Add stores for volatile variables */ emit_volatile_store (ctx, ins->next->dreg); } else if (MONO_IS_COND_EXC (ins->next)) { gboolean force_explicit_branch = FALSE; if (bb->region != -1) { /* Don't tag null check branches in exception-handling * regions with `make.implicit`. */ force_explicit_branch = TRUE; } emit_cond_system_exception (ctx, bb, (const char*)ins->next->inst_p1, cmp, force_explicit_branch); if (!ctx_ok (ctx)) break; builder = ctx->builder; } else { set_failure (ctx, "next"); break; } ins = ins->next; break; } case OP_FCEQ: case OP_FCNEQ: case OP_FCLT: case OP_FCLT_UN: case OP_FCGT: case OP_FCGT_UN: case OP_FCGE: case OP_FCLE: { CompRelation rel; LLVMValueRef cmp; rel = mono_opcode_to_cond (ins->opcode); cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMDoubleType ()), convert (ctx, rhs, LLVMDoubleType ()), ""); values [ins->dreg] = LLVMBuildZExt (builder, cmp, LLVMInt32Type (), dname); break; } case OP_RCEQ: case OP_RCNEQ: case OP_RCLT: case OP_RCLT_UN: case OP_RCGT: case OP_RCGT_UN: { CompRelation rel; LLVMValueRef cmp; rel = mono_opcode_to_cond (ins->opcode); cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMFloatType ()), convert (ctx, rhs, LLVMFloatType ()), ""); values [ins->dreg] = LLVMBuildZExt (builder, cmp, LLVMInt32Type (), dname); break; } case OP_PHI: case OP_FPHI: case OP_VPHI: case OP_XPHI: { // Handled above skip_volatile_store = TRUE; break; } case OP_MOVE: case OP_LMOVE: case OP_XMOVE: case OP_SETFRET: g_assert (lhs); values [ins->dreg] = lhs; break; case OP_FMOVE: case OP_RMOVE: { MonoInst *var = get_vreg_to_inst (cfg, ins->dreg); g_assert (lhs); values [ins->dreg] = lhs; if (var && m_class_get_byval_arg (var->klass)->type == MONO_TYPE_R4) { /* * This is added by the spilling pass in case of the JIT, * but we have to do it ourselves. 
*/ values [ins->dreg] = convert (ctx, values [ins->dreg], LLVMFloatType ()); } break; } case OP_MOVE_F_TO_I4: { values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildFPTrunc (builder, lhs, LLVMFloatType (), ""), LLVMInt32Type (), ""); break; } case OP_MOVE_I4_TO_F: { values [ins->dreg] = LLVMBuildFPExt (builder, LLVMBuildBitCast (builder, lhs, LLVMFloatType (), ""), LLVMDoubleType (), ""); break; } case OP_MOVE_F_TO_I8: { values [ins->dreg] = LLVMBuildBitCast (builder, lhs, LLVMInt64Type (), ""); break; } case OP_MOVE_I8_TO_F: { values [ins->dreg] = LLVMBuildBitCast (builder, lhs, LLVMDoubleType (), ""); break; } case OP_IADD: case OP_ISUB: case OP_IAND: case OP_IMUL: case OP_IDIV: case OP_IDIV_UN: case OP_IREM: case OP_IREM_UN: case OP_IOR: case OP_IXOR: case OP_ISHL: case OP_ISHR: case OP_ISHR_UN: case OP_FADD: case OP_FSUB: case OP_FMUL: case OP_FDIV: case OP_LADD: case OP_LSUB: case OP_LMUL: case OP_LDIV: case OP_LDIV_UN: case OP_LREM: case OP_LREM_UN: case OP_LAND: case OP_LOR: case OP_LXOR: case OP_LSHL: case OP_LSHR: case OP_LSHR_UN: lhs = convert (ctx, lhs, regtype_to_llvm_type (spec [MONO_INST_DEST])); rhs = convert (ctx, rhs, regtype_to_llvm_type (spec [MONO_INST_DEST])); emit_div_check (ctx, builder, bb, ins, lhs, rhs); if (!ctx_ok (ctx)) break; builder = ctx->builder; switch (ins->opcode) { case OP_IADD: case OP_LADD: values [ins->dreg] = LLVMBuildAdd (builder, lhs, rhs, dname); break; case OP_ISUB: case OP_LSUB: values [ins->dreg] = LLVMBuildSub (builder, lhs, rhs, dname); break; case OP_IMUL: case OP_LMUL: values [ins->dreg] = LLVMBuildMul (builder, lhs, rhs, dname); break; case OP_IREM: case OP_LREM: values [ins->dreg] = LLVMBuildSRem (builder, lhs, rhs, dname); break; case OP_IREM_UN: case OP_LREM_UN: values [ins->dreg] = LLVMBuildURem (builder, lhs, rhs, dname); break; case OP_IDIV: case OP_LDIV: values [ins->dreg] = LLVMBuildSDiv (builder, lhs, rhs, dname); break; case OP_IDIV_UN: case OP_LDIV_UN: values [ins->dreg] = LLVMBuildUDiv (builder, lhs, rhs, dname); break; case OP_FDIV: case OP_RDIV: values [ins->dreg] = LLVMBuildFDiv (builder, lhs, rhs, dname); break; case OP_IAND: case OP_LAND: values [ins->dreg] = LLVMBuildAnd (builder, lhs, rhs, dname); break; case OP_IOR: case OP_LOR: values [ins->dreg] = LLVMBuildOr (builder, lhs, rhs, dname); break; case OP_IXOR: case OP_LXOR: values [ins->dreg] = LLVMBuildXor (builder, lhs, rhs, dname); break; case OP_ISHL: case OP_LSHL: values [ins->dreg] = LLVMBuildShl (builder, lhs, rhs, dname); break; case OP_ISHR: case OP_LSHR: values [ins->dreg] = LLVMBuildAShr (builder, lhs, rhs, dname); break; case OP_ISHR_UN: case OP_LSHR_UN: values [ins->dreg] = LLVMBuildLShr (builder, lhs, rhs, dname); break; case OP_FADD: values [ins->dreg] = LLVMBuildFAdd (builder, lhs, rhs, dname); break; case OP_FSUB: values [ins->dreg] = LLVMBuildFSub (builder, lhs, rhs, dname); break; case OP_FMUL: values [ins->dreg] = LLVMBuildFMul (builder, lhs, rhs, dname); break; default: g_assert_not_reached (); } break; case OP_RADD: case OP_RSUB: case OP_RMUL: case OP_RDIV: { lhs = convert (ctx, lhs, LLVMFloatType ()); rhs = convert (ctx, rhs, LLVMFloatType ()); switch (ins->opcode) { case OP_RADD: values [ins->dreg] = LLVMBuildFAdd (builder, lhs, rhs, dname); break; case OP_RSUB: values [ins->dreg] = LLVMBuildFSub (builder, lhs, rhs, dname); break; case OP_RMUL: values [ins->dreg] = LLVMBuildFMul (builder, lhs, rhs, dname); break; case OP_RDIV: values [ins->dreg] = LLVMBuildFDiv (builder, lhs, rhs, dname); break; default: g_assert_not_reached (); break; } 
break; } case OP_IADD_IMM: case OP_ISUB_IMM: case OP_IMUL_IMM: case OP_IREM_IMM: case OP_IREM_UN_IMM: case OP_IDIV_IMM: case OP_IDIV_UN_IMM: case OP_IAND_IMM: case OP_IOR_IMM: case OP_IXOR_IMM: case OP_ISHL_IMM: case OP_ISHR_IMM: case OP_ISHR_UN_IMM: case OP_LADD_IMM: case OP_LSUB_IMM: case OP_LMUL_IMM: case OP_LREM_IMM: case OP_LAND_IMM: case OP_LOR_IMM: case OP_LXOR_IMM: case OP_LSHL_IMM: case OP_LSHR_IMM: case OP_LSHR_UN_IMM: case OP_ADD_IMM: case OP_AND_IMM: case OP_MUL_IMM: case OP_SHL_IMM: case OP_SHR_IMM: case OP_SHR_UN_IMM: { LLVMValueRef imm; if (spec [MONO_INST_SRC1] == 'l') { imm = LLVMConstInt (LLVMInt64Type (), GET_LONG_IMM (ins), FALSE); } else { imm = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE); } emit_div_check (ctx, builder, bb, ins, lhs, imm); if (!ctx_ok (ctx)) break; builder = ctx->builder; #if TARGET_SIZEOF_VOID_P == 4 if (ins->opcode == OP_LSHL_IMM || ins->opcode == OP_LSHR_IMM || ins->opcode == OP_LSHR_UN_IMM) imm = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE); #endif if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind) lhs = convert (ctx, lhs, IntPtrType ()); imm = convert (ctx, imm, LLVMTypeOf (lhs)); switch (ins->opcode) { case OP_IADD_IMM: case OP_LADD_IMM: case OP_ADD_IMM: values [ins->dreg] = LLVMBuildAdd (builder, lhs, imm, dname); break; case OP_ISUB_IMM: case OP_LSUB_IMM: values [ins->dreg] = LLVMBuildSub (builder, lhs, imm, dname); break; case OP_IMUL_IMM: case OP_MUL_IMM: case OP_LMUL_IMM: values [ins->dreg] = LLVMBuildMul (builder, lhs, imm, dname); break; case OP_IDIV_IMM: case OP_LDIV_IMM: values [ins->dreg] = LLVMBuildSDiv (builder, lhs, imm, dname); break; case OP_IDIV_UN_IMM: case OP_LDIV_UN_IMM: values [ins->dreg] = LLVMBuildUDiv (builder, lhs, imm, dname); break; case OP_IREM_IMM: case OP_LREM_IMM: values [ins->dreg] = LLVMBuildSRem (builder, lhs, imm, dname); break; case OP_IREM_UN_IMM: values [ins->dreg] = LLVMBuildURem (builder, lhs, imm, dname); break; case OP_IAND_IMM: case OP_LAND_IMM: case OP_AND_IMM: values [ins->dreg] = LLVMBuildAnd (builder, lhs, imm, dname); break; case OP_IOR_IMM: case OP_LOR_IMM: values [ins->dreg] = LLVMBuildOr (builder, lhs, imm, dname); break; case OP_IXOR_IMM: case OP_LXOR_IMM: values [ins->dreg] = LLVMBuildXor (builder, lhs, imm, dname); break; case OP_ISHL_IMM: case OP_LSHL_IMM: values [ins->dreg] = LLVMBuildShl (builder, lhs, imm, dname); break; case OP_SHL_IMM: if (TARGET_SIZEOF_VOID_P == 8) { /* The IL is not regular */ lhs = convert (ctx, lhs, LLVMInt64Type ()); imm = convert (ctx, imm, LLVMInt64Type ()); } values [ins->dreg] = LLVMBuildShl (builder, lhs, imm, dname); break; case OP_ISHR_IMM: case OP_LSHR_IMM: case OP_SHR_IMM: values [ins->dreg] = LLVMBuildAShr (builder, lhs, imm, dname); break; case OP_ISHR_UN_IMM: /* This is used to implement conv.u4, so the lhs could be an i8 */ lhs = convert (ctx, lhs, LLVMInt32Type ()); imm = convert (ctx, imm, LLVMInt32Type ()); values [ins->dreg] = LLVMBuildLShr (builder, lhs, imm, dname); break; case OP_LSHR_UN_IMM: case OP_SHR_UN_IMM: values [ins->dreg] = LLVMBuildLShr (builder, lhs, imm, dname); break; default: g_assert_not_reached (); } break; } case OP_INEG: values [ins->dreg] = LLVMBuildSub (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), convert (ctx, lhs, LLVMInt32Type ()), dname); break; case OP_LNEG: if (LLVMTypeOf (lhs) != LLVMInt64Type ()) lhs = convert (ctx, lhs, LLVMInt64Type ()); values [ins->dreg] = LLVMBuildSub (builder, LLVMConstInt (LLVMInt64Type (), 0, FALSE), lhs, dname); break; case OP_FNEG: lhs = convert (ctx, 
lhs, LLVMDoubleType ()); values [ins->dreg] = LLVMBuildFNeg (builder, lhs, dname); break; case OP_RNEG: lhs = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = LLVMBuildFNeg (builder, lhs, dname); break; case OP_INOT: { guint32 v = 0xffffffff; values [ins->dreg] = LLVMBuildXor (builder, LLVMConstInt (LLVMInt32Type (), v, FALSE), convert (ctx, lhs, LLVMInt32Type ()), dname); break; } case OP_LNOT: { if (LLVMTypeOf (lhs) != LLVMInt64Type ()) lhs = convert (ctx, lhs, LLVMInt64Type ()); guint64 v = 0xffffffffffffffffLL; values [ins->dreg] = LLVMBuildXor (builder, LLVMConstInt (LLVMInt64Type (), v, FALSE), lhs, dname); break; } #if defined(TARGET_X86) || defined(TARGET_AMD64) case OP_X86_LEA: { LLVMValueRef v1, v2; rhs = LLVMBuildSExt (builder, convert (ctx, rhs, LLVMInt32Type ()), LLVMInt64Type (), ""); v1 = LLVMBuildMul (builder, convert (ctx, rhs, IntPtrType ()), LLVMConstInt (IntPtrType (), ((unsigned long long)1 << ins->backend.shift_amount), FALSE), ""); v2 = LLVMBuildAdd (builder, convert (ctx, lhs, IntPtrType ()), v1, ""); values [ins->dreg] = LLVMBuildAdd (builder, v2, LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), dname); break; } case OP_X86_BSF32: case OP_X86_BSF64: { LLVMValueRef args [] = { lhs, LLVMConstInt (LLVMInt1Type (), 1, TRUE), }; int op = ins->opcode == OP_X86_BSF32 ? INTRINS_CTTZ_I32 : INTRINS_CTTZ_I64; values [ins->dreg] = call_intrins (ctx, op, args, dname); break; } case OP_X86_BSR32: case OP_X86_BSR64: { LLVMValueRef args [] = { lhs, LLVMConstInt (LLVMInt1Type (), 1, TRUE), }; int op = ins->opcode == OP_X86_BSR32 ? INTRINS_CTLZ_I32 : INTRINS_CTLZ_I64; LLVMValueRef width = ins->opcode == OP_X86_BSR32 ? const_int32 (31) : const_int64 (63); LLVMValueRef tz = call_intrins (ctx, op, args, ""); values [ins->dreg] = LLVMBuildXor (builder, tz, width, dname); break; } #endif case OP_ICONV_TO_I1: case OP_ICONV_TO_I2: case OP_ICONV_TO_I4: case OP_ICONV_TO_U1: case OP_ICONV_TO_U2: case OP_ICONV_TO_U4: case OP_LCONV_TO_I1: case OP_LCONV_TO_I2: case OP_LCONV_TO_U1: case OP_LCONV_TO_U2: case OP_LCONV_TO_U4: { gboolean sign; sign = (ins->opcode == OP_ICONV_TO_I1) || (ins->opcode == OP_ICONV_TO_I2) || (ins->opcode == OP_ICONV_TO_I4) || (ins->opcode == OP_LCONV_TO_I1) || (ins->opcode == OP_LCONV_TO_I2); /* Have to do two casts since our vregs have type int */ v = LLVMBuildTrunc (builder, lhs, op_to_llvm_type (ins->opcode), ""); if (sign) values [ins->dreg] = LLVMBuildSExt (builder, v, LLVMInt32Type (), dname); else values [ins->dreg] = LLVMBuildZExt (builder, v, LLVMInt32Type (), dname); break; } case OP_ICONV_TO_I8: values [ins->dreg] = LLVMBuildSExt (builder, lhs, LLVMInt64Type (), dname); break; case OP_ICONV_TO_U8: values [ins->dreg] = LLVMBuildZExt (builder, lhs, LLVMInt64Type (), dname); break; case OP_FCONV_TO_I4: case OP_RCONV_TO_I4: values [ins->dreg] = LLVMBuildFPToSI (builder, lhs, LLVMInt32Type (), dname); break; case OP_FCONV_TO_I1: case OP_RCONV_TO_I1: values [ins->dreg] = LLVMBuildSExt (builder, LLVMBuildFPToSI (builder, lhs, LLVMInt8Type (), dname), LLVMInt32Type (), ""); break; case OP_FCONV_TO_U1: case OP_RCONV_TO_U1: values [ins->dreg] = LLVMBuildZExt (builder, LLVMBuildTrunc (builder, LLVMBuildFPToUI (builder, lhs, IntPtrType (), dname), LLVMInt8Type (), ""), LLVMInt32Type (), ""); break; case OP_FCONV_TO_I2: case OP_RCONV_TO_I2: values [ins->dreg] = LLVMBuildSExt (builder, LLVMBuildFPToSI (builder, lhs, LLVMInt16Type (), dname), LLVMInt32Type (), ""); break; case OP_FCONV_TO_U2: case OP_RCONV_TO_U2: values [ins->dreg] = LLVMBuildZExt (builder, 
LLVMBuildFPToUI (builder, lhs, LLVMInt16Type (), dname), LLVMInt32Type (), ""); break; case OP_FCONV_TO_U4: case OP_RCONV_TO_U4: values [ins->dreg] = LLVMBuildFPToUI (builder, lhs, LLVMInt32Type (), dname); break; case OP_FCONV_TO_U8: case OP_RCONV_TO_U8: values [ins->dreg] = LLVMBuildFPToUI (builder, lhs, LLVMInt64Type (), dname); break; case OP_FCONV_TO_I8: case OP_RCONV_TO_I8: values [ins->dreg] = LLVMBuildFPToSI (builder, lhs, LLVMInt64Type (), dname); break; case OP_ICONV_TO_R8: case OP_LCONV_TO_R8: values [ins->dreg] = LLVMBuildSIToFP (builder, lhs, LLVMDoubleType (), dname); break; case OP_ICONV_TO_R_UN: case OP_LCONV_TO_R_UN: values [ins->dreg] = LLVMBuildUIToFP (builder, lhs, LLVMDoubleType (), dname); break; #if TARGET_SIZEOF_VOID_P == 4 case OP_LCONV_TO_U: #endif case OP_LCONV_TO_I4: values [ins->dreg] = LLVMBuildTrunc (builder, lhs, LLVMInt32Type (), dname); break; case OP_ICONV_TO_R4: case OP_LCONV_TO_R4: v = LLVMBuildSIToFP (builder, lhs, LLVMFloatType (), ""); if (cfg->r4fp) values [ins->dreg] = v; else values [ins->dreg] = LLVMBuildFPExt (builder, v, LLVMDoubleType (), dname); break; case OP_FCONV_TO_R4: v = LLVMBuildFPTrunc (builder, lhs, LLVMFloatType (), ""); if (cfg->r4fp) values [ins->dreg] = v; else values [ins->dreg] = LLVMBuildFPExt (builder, v, LLVMDoubleType (), dname); break; case OP_RCONV_TO_R8: values [ins->dreg] = LLVMBuildFPExt (builder, lhs, LLVMDoubleType (), dname); break; case OP_RCONV_TO_R4: values [ins->dreg] = lhs; break; case OP_SEXT_I4: values [ins->dreg] = LLVMBuildSExt (builder, convert (ctx, lhs, LLVMInt32Type ()), LLVMInt64Type (), dname); break; case OP_ZEXT_I4: values [ins->dreg] = LLVMBuildZExt (builder, convert (ctx, lhs, LLVMInt32Type ()), LLVMInt64Type (), dname); break; case OP_TRUNC_I4: values [ins->dreg] = LLVMBuildTrunc (builder, lhs, LLVMInt32Type (), dname); break; case OP_LOCALLOC_IMM: { LLVMValueRef v; guint32 size = ins->inst_imm; size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1); v = mono_llvm_build_alloca (builder, LLVMInt8Type (), LLVMConstInt (LLVMInt32Type (), size, FALSE), MONO_ARCH_FRAME_ALIGNMENT, ""); if (ins->flags & MONO_INST_INIT) emit_memset (ctx, builder, v, const_int32 (size), MONO_ARCH_FRAME_ALIGNMENT); values [ins->dreg] = v; break; } case OP_LOCALLOC: { LLVMValueRef v, size; size = LLVMBuildAnd (builder, LLVMBuildAdd (builder, convert (ctx, lhs, LLVMInt32Type ()), LLVMConstInt (LLVMInt32Type (), MONO_ARCH_FRAME_ALIGNMENT - 1, FALSE), ""), LLVMConstInt (LLVMInt32Type (), ~ (MONO_ARCH_FRAME_ALIGNMENT - 1), FALSE), ""); v = mono_llvm_build_alloca (builder, LLVMInt8Type (), size, MONO_ARCH_FRAME_ALIGNMENT, ""); if (ins->flags & MONO_INST_INIT) emit_memset (ctx, builder, v, size, MONO_ARCH_FRAME_ALIGNMENT); values [ins->dreg] = v; break; } case OP_LOADI1_MEMBASE: case OP_LOADU1_MEMBASE: case OP_LOADI2_MEMBASE: case OP_LOADU2_MEMBASE: case OP_LOADI4_MEMBASE: case OP_LOADU4_MEMBASE: case OP_LOADI8_MEMBASE: case OP_LOADR4_MEMBASE: case OP_LOADR8_MEMBASE: case OP_LOAD_MEMBASE: case OP_LOADI8_MEM: case OP_LOADU1_MEM: case OP_LOADU2_MEM: case OP_LOADI4_MEM: case OP_LOADU4_MEM: case OP_LOAD_MEM: { int size = 8; LLVMValueRef base, index, addr; LLVMTypeRef t; gboolean sext = FALSE, zext = FALSE; gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0; gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0; gboolean is_unaligned = (ins->flags & MONO_INST_UNALIGNED) != 0; t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext); if (sext || zext) dname = (char*)""; if 
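/* the _MEM variants load from an absolute address taken from ins->inst_imm */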
((ins->opcode == OP_LOADI8_MEM) || (ins->opcode == OP_LOAD_MEM) || (ins->opcode == OP_LOADI4_MEM) || (ins->opcode == OP_LOADU4_MEM) || (ins->opcode == OP_LOADU1_MEM) || (ins->opcode == OP_LOADU2_MEM)) { addr = LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE); base = addr; } else { /* _MEMBASE */ base = lhs; if (ins->inst_offset == 0) { LLVMValueRef gep_base, gep_offset; if (mono_llvm_can_be_gep (base, &gep_base, &gep_offset)) { addr = LLVMBuildGEP (builder, convert (ctx, gep_base, LLVMPointerType (LLVMInt8Type (), 0)), &gep_offset, 1, ""); } else { addr = base; } } else if (ins->inst_offset % size != 0) { /* Unaligned load */ index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (LLVMInt8Type (), 0)), &index, 1, ""); } else { index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, ""); } } addr = convert (ctx, addr, LLVMPointerType (t, 0)); if (is_unaligned) values [ins->dreg] = mono_llvm_build_aligned_load (builder, addr, dname, is_volatile, 1); else values [ins->dreg] = emit_load (ctx, bb, &builder, size, addr, base, dname, is_faulting, is_volatile, LLVM_BARRIER_NONE); if (!(is_faulting || is_volatile) && (ins->flags & MONO_INST_INVARIANT_LOAD)) { /* These will signal LLVM that these loads do not alias any stores, and they can't fail, allowing them to be hoisted out of loops. */ set_invariant_load_flag (values [ins->dreg]); } if (sext) values [ins->dreg] = LLVMBuildSExt (builder, values [ins->dreg], LLVMInt32Type (), dname); else if (zext) values [ins->dreg] = LLVMBuildZExt (builder, values [ins->dreg], LLVMInt32Type (), dname); else if (!cfg->r4fp && ins->opcode == OP_LOADR4_MEMBASE) values [ins->dreg] = LLVMBuildFPExt (builder, values [ins->dreg], LLVMDoubleType (), dname); break; } case OP_STOREI1_MEMBASE_REG: case OP_STOREI2_MEMBASE_REG: case OP_STOREI4_MEMBASE_REG: case OP_STOREI8_MEMBASE_REG: case OP_STORER4_MEMBASE_REG: case OP_STORER8_MEMBASE_REG: case OP_STORE_MEMBASE_REG: { int size = 8; LLVMValueRef index, addr, base; LLVMTypeRef t; gboolean sext = FALSE, zext = FALSE; gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0; gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0; gboolean is_unaligned = (ins->flags & MONO_INST_UNALIGNED) != 0; if (!values [ins->inst_destbasereg]) { set_failure (ctx, "inst_destbasereg"); break; } t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext); base = values [ins->inst_destbasereg]; LLVMValueRef gep_base, gep_offset; if (ins->inst_offset == 0 && mono_llvm_can_be_gep (base, &gep_base, &gep_offset)) { addr = LLVMBuildGEP (builder, convert (ctx, gep_base, LLVMPointerType (LLVMInt8Type (), 0)), &gep_offset, 1, ""); } else if (ins->inst_offset % size != 0) { /* Unaligned store */ index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (LLVMInt8Type (), 0)), &index, 1, ""); } else { index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, ""); } if (is_faulting && LLVMGetInstructionOpcode (base) == LLVMAlloca && !(ins->flags & MONO_INST_VOLATILE)) /* Storing to an alloca cannot fail */ is_faulting = FALSE; LLVMValueRef srcval = convert (ctx, values [ins->sreg1], t); LLVMValueRef ptrdst = convert (ctx, addr, LLVMPointerType (t, 0)); if (is_unaligned) 
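/* force 1-byte alignment so LLVM does not assume the address is naturally aligned */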
mono_llvm_build_aligned_store (builder, srcval, ptrdst, is_volatile, 1); else emit_store (ctx, bb, &builder, size, srcval, ptrdst, base, is_faulting, is_volatile); break; } case OP_STOREI1_MEMBASE_IMM: case OP_STOREI2_MEMBASE_IMM: case OP_STOREI4_MEMBASE_IMM: case OP_STOREI8_MEMBASE_IMM: case OP_STORE_MEMBASE_IMM: { int size = 8; LLVMValueRef index, addr, base; LLVMTypeRef t; gboolean sext = FALSE, zext = FALSE; gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0; gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0; gboolean is_unaligned = (ins->flags & MONO_INST_UNALIGNED) != 0; t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext); base = values [ins->inst_destbasereg]; LLVMValueRef gep_base, gep_offset; if (ins->inst_offset == 0 && mono_llvm_can_be_gep (base, &gep_base, &gep_offset)) { addr = LLVMBuildGEP (builder, convert (ctx, gep_base, LLVMPointerType (LLVMInt8Type (), 0)), &gep_offset, 1, ""); } else if (ins->inst_offset % size != 0) { /* Unaligned store */ index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (LLVMInt8Type (), 0)), &index, 1, ""); } else { index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, ""); } LLVMValueRef srcval = convert (ctx, LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), t); LLVMValueRef ptrdst = convert (ctx, addr, LLVMPointerType (t, 0)); if (is_unaligned) mono_llvm_build_aligned_store (builder, srcval, ptrdst, is_volatile, 1); else emit_store (ctx, bb, &builder, size, srcval, ptrdst, base, is_faulting, is_volatile); break; } case OP_CHECK_THIS: emit_load (ctx, bb, &builder, TARGET_SIZEOF_VOID_P, convert (ctx, lhs, LLVMPointerType (IntPtrType (), 0)), lhs, "", TRUE, FALSE, LLVM_BARRIER_NONE); break; case OP_OUTARG_VTRETADDR: break; case OP_VOIDCALL: case OP_CALL: case OP_LCALL: case OP_FCALL: case OP_RCALL: case OP_VCALL: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_FCALL_MEMBASE: case OP_RCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VOIDCALL_REG: case OP_CALL_REG: case OP_LCALL_REG: case OP_FCALL_REG: case OP_RCALL_REG: case OP_VCALL_REG: { process_call (ctx, bb, &builder, ins); break; } case OP_AOTCONST: { MonoJumpInfoType ji_type = ins->inst_c1; gpointer ji_data = ins->inst_p0; if (ji_type == MONO_PATCH_INFO_ICALL_ADDR) { char *symbol = mono_aot_get_direct_call_symbol (MONO_PATCH_INFO_ICALL_ADDR_CALL, ji_data); if (symbol) { /* * Avoid emitting a got entry for these since the method is directly called, and it might not be * resolvable at runtime using dlsym (). 
*/ g_free (symbol); values [ins->dreg] = LLVMConstInt (IntPtrType (), 0, FALSE); break; } } values [ins->dreg] = get_aotconst (ctx, ji_type, ji_data, LLVMPointerType (IntPtrType (), 0)); break; } case OP_MEMMOVE: { int argn = 0; LLVMValueRef args [5]; args [argn++] = convert (ctx, values [ins->sreg1], LLVMPointerType (LLVMInt8Type (), 0)); args [argn++] = convert (ctx, values [ins->sreg2], LLVMPointerType (LLVMInt8Type (), 0)); args [argn++] = convert (ctx, values [ins->sreg3], LLVMInt64Type ()); args [argn++] = LLVMConstInt (LLVMInt1Type (), 0, FALSE); // is_volatile call_intrins (ctx, INTRINS_MEMMOVE, args, ""); break; } case OP_NOT_REACHED: LLVMBuildUnreachable (builder); has_terminator = TRUE; g_assert (bb->block_num < cfg->max_block_num); ctx->unreachable [bb->block_num] = TRUE; /* Might have instructions after this */ while (ins->next) { MonoInst *next = ins->next; /* * FIXME: If later code uses the regs defined by these instructions, * compilation will fail. */ const char *spec = INS_INFO (next->opcode); if (spec [MONO_INST_DEST] == 'i' && !MONO_IS_STORE_MEMBASE (next)) ctx->values [next->dreg] = LLVMConstNull (LLVMInt32Type ()); MONO_DELETE_INS (bb, next); } break; case OP_LDADDR: { MonoInst *var = ins->inst_i0; MonoClass *klass = var->klass; if (var->opcode == OP_VTARG_ADDR && !MONO_CLASS_IS_SIMD(cfg, klass)) { /* The variable contains the vtype address */ values [ins->dreg] = values [var->dreg]; } else if (var->opcode == OP_GSHAREDVT_LOCAL) { values [ins->dreg] = emit_gsharedvt_ldaddr (ctx, var->dreg); } else { values [ins->dreg] = addresses [var->dreg]; } break; } case OP_SIN: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_SIN, args, dname); break; } case OP_SINF: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_SINF, args, dname); break; } case OP_EXP: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_EXP, args, dname); break; } case OP_EXPF: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_EXPF, args, dname); break; } case OP_LOG2: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_LOG2, args, dname); break; } case OP_LOG2F: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_LOG2F, args, dname); break; } case OP_LOG10: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_LOG10, args, dname); break; } case OP_LOG10F: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_LOG10F, args, dname); break; } case OP_LOG: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_LOG, args, dname); break; } case OP_TRUNC: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_TRUNC, args, dname); break; } case OP_TRUNCF: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_TRUNCF, args, dname); break; } case OP_COS: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins 
(ctx, INTRINS_COS, args, dname); break; } case OP_COSF: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_COSF, args, dname); break; } case OP_SQRT: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_SQRT, args, dname); break; } case OP_SQRTF: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_SQRTF, args, dname); break; } case OP_FLOOR: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_FLOOR, args, dname); break; } case OP_FLOORF: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_FLOORF, args, dname); break; } case OP_CEIL: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_CEIL, args, dname); break; } case OP_CEILF: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_CEILF, args, dname); break; } case OP_FMA: { LLVMValueRef args [3]; args [0] = convert (ctx, values [ins->sreg1], LLVMDoubleType ()); args [1] = convert (ctx, values [ins->sreg2], LLVMDoubleType ()); args [2] = convert (ctx, values [ins->sreg3], LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_FMA, args, dname); break; } case OP_FMAF: { LLVMValueRef args [3]; args [0] = convert (ctx, values [ins->sreg1], LLVMFloatType ()); args [1] = convert (ctx, values [ins->sreg2], LLVMFloatType ()); args [2] = convert (ctx, values [ins->sreg3], LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_FMAF, args, dname); break; } case OP_ABS: { LLVMValueRef args [1]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_FABS, args, dname); break; } case OP_ABSF: { LLVMValueRef args [1]; #ifdef TARGET_AMD64 args [0] = convert (ctx, lhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_ABSF, args, dname); #else /* llvm.fabs not supported on all platforms */ args [0] = convert (ctx, lhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_FABS, args, dname); values [ins->dreg] = convert (ctx, values [ins->dreg], LLVMFloatType ()); #endif break; } case OP_RPOW: { LLVMValueRef args [2]; args [0] = convert (ctx, lhs, LLVMFloatType ()); args [1] = convert (ctx, rhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_POWF, args, dname); break; } case OP_FPOW: { LLVMValueRef args [2]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); args [1] = convert (ctx, rhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_POW, args, dname); break; } case OP_FCOPYSIGN: { LLVMValueRef args [2]; args [0] = convert (ctx, lhs, LLVMDoubleType ()); args [1] = convert (ctx, rhs, LLVMDoubleType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_COPYSIGN, args, dname); break; } case OP_RCOPYSIGN: { LLVMValueRef args [2]; args [0] = convert (ctx, lhs, LLVMFloatType ()); args [1] = convert (ctx, rhs, LLVMFloatType ()); values [ins->dreg] = call_intrins (ctx, INTRINS_COPYSIGNF, args, dname); break; } case OP_IMIN: case OP_LMIN: case OP_IMAX: case OP_LMAX: case OP_IMIN_UN: case OP_LMIN_UN: case OP_IMAX_UN: case OP_LMAX_UN: case OP_FMIN: case OP_FMAX: case OP_RMIN: case OP_RMAX: { LLVMValueRef v; lhs = convert (ctx, lhs, 
regtype_to_llvm_type (spec [MONO_INST_DEST])); rhs = convert (ctx, rhs, regtype_to_llvm_type (spec [MONO_INST_DEST])); switch (ins->opcode) { case OP_IMIN: case OP_LMIN: v = LLVMBuildICmp (builder, LLVMIntSLE, lhs, rhs, ""); break; case OP_IMAX: case OP_LMAX: v = LLVMBuildICmp (builder, LLVMIntSGE, lhs, rhs, ""); break; case OP_IMIN_UN: case OP_LMIN_UN: v = LLVMBuildICmp (builder, LLVMIntULE, lhs, rhs, ""); break; case OP_IMAX_UN: case OP_LMAX_UN: v = LLVMBuildICmp (builder, LLVMIntUGE, lhs, rhs, ""); break; case OP_FMAX: case OP_RMAX: v = LLVMBuildFCmp (builder, LLVMRealUGE, lhs, rhs, ""); break; case OP_FMIN: case OP_RMIN: v = LLVMBuildFCmp (builder, LLVMRealULE, lhs, rhs, ""); break; default: g_assert_not_reached (); break; } values [ins->dreg] = LLVMBuildSelect (builder, v, lhs, rhs, dname); break; } /* * See the ARM64 comment in mono/utils/atomic.h for an explanation of why this * hack is necessary (for now). */ #ifdef TARGET_ARM64 #define ARM64_ATOMIC_FENCE_FIX mono_llvm_build_fence (builder, LLVM_BARRIER_SEQ) #else #define ARM64_ATOMIC_FENCE_FIX #endif case OP_ATOMIC_EXCHANGE_I4: case OP_ATOMIC_EXCHANGE_I8: { LLVMValueRef args [2]; LLVMTypeRef t; if (ins->opcode == OP_ATOMIC_EXCHANGE_I4) t = LLVMInt32Type (); else t = LLVMInt64Type (); g_assert (ins->inst_offset == 0); args [0] = convert (ctx, lhs, LLVMPointerType (t, 0)); args [1] = convert (ctx, rhs, t); ARM64_ATOMIC_FENCE_FIX; values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_XCHG, args [0], args [1]); ARM64_ATOMIC_FENCE_FIX; break; } case OP_ATOMIC_ADD_I4: case OP_ATOMIC_ADD_I8: case OP_ATOMIC_AND_I4: case OP_ATOMIC_AND_I8: case OP_ATOMIC_OR_I4: case OP_ATOMIC_OR_I8: { LLVMValueRef args [2]; LLVMTypeRef t; if (ins->type == STACK_I4) t = LLVMInt32Type (); else t = LLVMInt64Type (); g_assert (ins->inst_offset == 0); args [0] = convert (ctx, lhs, LLVMPointerType (t, 0)); args [1] = convert (ctx, rhs, t); ARM64_ATOMIC_FENCE_FIX; if (ins->opcode == OP_ATOMIC_ADD_I4 || ins->opcode == OP_ATOMIC_ADD_I8) // Interlocked.Add returns new value (that's why we emit additional Add here) // see https://github.com/dotnet/runtime/pull/33102 values [ins->dreg] = LLVMBuildAdd (builder, mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_ADD, args [0], args [1]), args [1], dname); else if (ins->opcode == OP_ATOMIC_AND_I4 || ins->opcode == OP_ATOMIC_AND_I8) values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_AND, args [0], args [1]); else if (ins->opcode == OP_ATOMIC_OR_I4 || ins->opcode == OP_ATOMIC_OR_I8) values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_OR, args [0], args [1]); else g_assert_not_reached (); ARM64_ATOMIC_FENCE_FIX; break; } case OP_ATOMIC_CAS_I4: case OP_ATOMIC_CAS_I8: { LLVMValueRef args [3], val; LLVMTypeRef t; if (ins->opcode == OP_ATOMIC_CAS_I4) t = LLVMInt32Type (); else t = LLVMInt64Type (); args [0] = convert (ctx, lhs, LLVMPointerType (t, 0)); /* comparand */ args [1] = convert (ctx, values [ins->sreg3], t); /* new value */ args [2] = convert (ctx, values [ins->sreg2], t); ARM64_ATOMIC_FENCE_FIX; val = mono_llvm_build_cmpxchg (builder, args [0], args [1], args [2]); ARM64_ATOMIC_FENCE_FIX; /* cmpxchg returns a pair */ values [ins->dreg] = LLVMBuildExtractValue (builder, val, 0, ""); break; } case OP_MEMORY_BARRIER: { mono_llvm_build_fence (builder, (BarrierKind) ins->backend.memory_barrier_kind); break; } case OP_ATOMIC_LOAD_I1: case OP_ATOMIC_LOAD_I2: case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_LOAD_I8: case OP_ATOMIC_LOAD_U1: case OP_ATOMIC_LOAD_U2: 
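/* atomic loads: emitted as ordinary loads plus the requested memory barrier, bracketed by ARM64_ATOMIC_FENCE_FIX on arm64 */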
case OP_ATOMIC_LOAD_U4: case OP_ATOMIC_LOAD_U8: case OP_ATOMIC_LOAD_R4: case OP_ATOMIC_LOAD_R8: { int size; gboolean sext, zext; LLVMTypeRef t; gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0; gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0; BarrierKind barrier = (BarrierKind) ins->backend.memory_barrier_kind; LLVMValueRef index, addr; t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext); if (sext || zext) dname = (char *)""; if (ins->inst_offset != 0) { index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, lhs, LLVMPointerType (t, 0)), &index, 1, ""); } else { addr = lhs; } addr = convert (ctx, addr, LLVMPointerType (t, 0)); ARM64_ATOMIC_FENCE_FIX; values [ins->dreg] = emit_load (ctx, bb, &builder, size, addr, lhs, dname, is_faulting, is_volatile, barrier); ARM64_ATOMIC_FENCE_FIX; if (sext) values [ins->dreg] = LLVMBuildSExt (builder, values [ins->dreg], LLVMInt32Type (), dname); else if (zext) values [ins->dreg] = LLVMBuildZExt (builder, values [ins->dreg], LLVMInt32Type (), dname); break; } case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_I8: case OP_ATOMIC_STORE_U1: case OP_ATOMIC_STORE_U2: case OP_ATOMIC_STORE_U4: case OP_ATOMIC_STORE_U8: case OP_ATOMIC_STORE_R4: case OP_ATOMIC_STORE_R8: { int size; gboolean sext, zext; LLVMTypeRef t; gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0; gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0; BarrierKind barrier = (BarrierKind) ins->backend.memory_barrier_kind; LLVMValueRef index, addr, value, base; if (!values [ins->inst_destbasereg]) { set_failure (ctx, "inst_destbasereg"); break; } t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext); base = values [ins->inst_destbasereg]; index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE); addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, ""); value = convert (ctx, values [ins->sreg1], t); ARM64_ATOMIC_FENCE_FIX; emit_store_general (ctx, bb, &builder, size, value, addr, base, is_faulting, is_volatile, barrier); ARM64_ATOMIC_FENCE_FIX; break; } case OP_RELAXED_NOP: { #if defined(TARGET_AMD64) || defined(TARGET_X86) call_intrins (ctx, INTRINS_SSE_PAUSE, NULL, ""); break; #else break; #endif } case OP_TLS_GET: { #if (defined(TARGET_AMD64) || defined(TARGET_X86)) && defined(__linux__) #ifdef TARGET_AMD64 // 257 == FS segment register LLVMTypeRef ptrtype = LLVMPointerType (IntPtrType (), 257); #else // 256 == GS segment register LLVMTypeRef ptrtype = LLVMPointerType (IntPtrType (), 256); #endif // FIXME: XEN values [ins->dreg] = LLVMBuildLoad (builder, LLVMBuildIntToPtr (builder, LLVMConstInt (IntPtrType (), ins->inst_offset, TRUE), ptrtype, ""), ""); #elif defined(TARGET_AMD64) && defined(TARGET_OSX) /* See mono_amd64_emit_tls_get () */ int offset = mono_amd64_get_tls_gs_offset () + (ins->inst_offset * 8); // 256 == GS segment register LLVMTypeRef ptrtype = LLVMPointerType (IntPtrType (), 256); values [ins->dreg] = LLVMBuildLoad (builder, LLVMBuildIntToPtr (builder, LLVMConstInt (IntPtrType (), offset, TRUE), ptrtype, ""), ""); #else set_failure (ctx, "opcode tls-get"); break; #endif break; } case OP_GC_SAFE_POINT: { LLVMValueRef val, cmp, callee, call; LLVMBasicBlockRef poll_bb, cont_bb; LLVMValueRef args [2]; static LLVMTypeRef sig; const char *icall_name = "mono_threads_state_poll"; /* * Create the cold wrapper around the icall, along with a managed method for it so * unwinding 
works. */ if (!cfg->compile_aot && !ctx->module->gc_poll_cold_wrapper_compiled) { ERROR_DECL (error); /* Compiling a method here is a bit ugly, but it works */ MonoMethod *wrapper = mono_marshal_get_llvm_func_wrapper (LLVM_FUNC_WRAPPER_GC_POLL); ctx->module->gc_poll_cold_wrapper_compiled = mono_jit_compile_method (wrapper, error); mono_error_assert_ok (error); } if (!sig) sig = LLVMFunctionType0 (LLVMVoidType (), FALSE); /* * if (!*sreg1) * mono_threads_state_poll (); */ val = mono_llvm_build_load (builder, convert (ctx, lhs, LLVMPointerType (IntPtrType (), 0)), "", TRUE); cmp = LLVMBuildICmp (builder, LLVMIntEQ, val, LLVMConstNull (LLVMTypeOf (val)), ""); poll_bb = gen_bb (ctx, "POLL_BB"); cont_bb = gen_bb (ctx, "CONT_BB"); args [0] = cmp; args [1] = LLVMConstInt (LLVMInt1Type (), 1, FALSE); cmp = call_intrins (ctx, INTRINS_EXPECT_I1, args, ""); mono_llvm_build_weighted_branch (builder, cmp, cont_bb, poll_bb, 1000, 1); ctx->builder = builder = create_builder (ctx); LLVMPositionBuilderAtEnd (builder, poll_bb); if (ctx->cfg->compile_aot) { callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll)); call = LLVMBuildCall (builder, callee, NULL, 0, ""); } else { callee = get_jit_callee (ctx, icall_name, sig, MONO_PATCH_INFO_ABS, ctx->module->gc_poll_cold_wrapper_compiled); call = LLVMBuildCall (builder, callee, NULL, 0, ""); set_call_cold_cconv (call); } LLVMBuildBr (builder, cont_bb); ctx->builder = builder = create_builder (ctx); LLVMPositionBuilderAtEnd (builder, cont_bb); ctx->bblocks [bb->block_num].end_bblock = cont_bb; break; } /* * Overflow opcodes. */ case OP_IADD_OVF: case OP_IADD_OVF_UN: case OP_ISUB_OVF: case OP_ISUB_OVF_UN: case OP_IMUL_OVF: case OP_IMUL_OVF_UN: case OP_LADD_OVF: case OP_LADD_OVF_UN: case OP_LSUB_OVF: case OP_LSUB_OVF_UN: case OP_LMUL_OVF: case OP_LMUL_OVF_UN: { LLVMValueRef args [2], val, ovf; IntrinsicId intrins; args [0] = convert (ctx, lhs, op_to_llvm_type (ins->opcode)); args [1] = convert (ctx, rhs, op_to_llvm_type (ins->opcode)); intrins = ovf_op_to_intrins (ins->opcode); val = call_intrins (ctx, intrins, args, ""); values [ins->dreg] = LLVMBuildExtractValue (builder, val, 0, dname); ovf = LLVMBuildExtractValue (builder, val, 1, ""); emit_cond_system_exception (ctx, bb, ins->inst_exc_name, ovf, FALSE); if (!ctx_ok (ctx)) break; builder = ctx->builder; break; } /* * Valuetypes. * We currently model them using arrays. Promotion to local vregs is * disabled for them in mono_handle_global_vregs () in the LLVM case, * so we always have an entry in cfg->varinfo for them. * FIXME: Is this needed ? 
*/ case OP_VZERO: { MonoClass *klass = ins->klass; if (!klass) { // FIXME: set_failure (ctx, "!klass"); break; } if (!addresses [ins->dreg]) addresses [ins->dreg] = build_named_alloca (ctx, m_class_get_byval_arg (klass), "vzero"); LLVMValueRef ptr = LLVMBuildBitCast (builder, addresses [ins->dreg], LLVMPointerType (LLVMInt8Type (), 0), ""); emit_memset (ctx, builder, ptr, const_int32 (mono_class_value_size (klass, NULL)), 0); break; } case OP_DUMMY_VZERO: break; case OP_STOREV_MEMBASE: case OP_LOADV_MEMBASE: case OP_VMOVE: { MonoClass *klass = ins->klass; LLVMValueRef src = NULL, dst, args [5]; gboolean done = FALSE; gboolean is_volatile = FALSE; if (!klass) { // FIXME: set_failure (ctx, "!klass"); break; } if (mini_is_gsharedvt_klass (klass)) { // FIXME: set_failure (ctx, "gsharedvt"); break; } switch (ins->opcode) { case OP_STOREV_MEMBASE: if (cfg->gen_write_barriers && m_class_has_references (klass) && ins->inst_destbasereg != cfg->frame_reg && LLVMGetInstructionOpcode (values [ins->inst_destbasereg]) != LLVMAlloca) { /* Decomposed earlier */ g_assert_not_reached (); break; } if (!addresses [ins->sreg1]) { /* SIMD */ g_assert (values [ins->sreg1]); dst = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_destbasereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (type_to_llvm_type (ctx, m_class_get_byval_arg (klass)), 0)); LLVMBuildStore (builder, values [ins->sreg1], dst); done = TRUE; } else { src = LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (LLVMInt8Type (), 0), ""); dst = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_destbasereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (LLVMInt8Type (), 0)); } break; case OP_LOADV_MEMBASE: if (!addresses [ins->dreg]) addresses [ins->dreg] = build_alloca (ctx, m_class_get_byval_arg (klass)); src = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_basereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (LLVMInt8Type (), 0)); dst = LLVMBuildBitCast (builder, addresses [ins->dreg], LLVMPointerType (LLVMInt8Type (), 0), ""); break; case OP_VMOVE: if (!addresses [ins->sreg1]) addresses [ins->sreg1] = build_alloca (ctx, m_class_get_byval_arg (klass)); if (!addresses [ins->dreg]) addresses [ins->dreg] = build_alloca (ctx, m_class_get_byval_arg (klass)); src = LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (LLVMInt8Type (), 0), ""); dst = LLVMBuildBitCast (builder, addresses [ins->dreg], LLVMPointerType (LLVMInt8Type (), 0), ""); break; default: g_assert_not_reached (); } if (!ctx_ok (ctx)) break; if (done) break; #ifdef TARGET_WASM is_volatile = m_class_has_references (klass); #endif int aindex = 0; args [aindex ++] = dst; args [aindex ++] = src; args [aindex ++] = LLVMConstInt (LLVMInt32Type (), mono_class_value_size (klass, NULL), FALSE); args [aindex ++] = LLVMConstInt (LLVMInt1Type (), is_volatile ? 
1 : 0, FALSE); call_intrins (ctx, INTRINS_MEMCPY, args, ""); break; } case OP_LLVM_OUTARG_VT: { LLVMArgInfo *ainfo = (LLVMArgInfo*)ins->inst_p0; MonoType *t = mini_get_underlying_type (ins->inst_vtype); if (ainfo->storage == LLVMArgGsharedvtVariable) { MonoInst *var = get_vreg_to_inst (cfg, ins->sreg1); if (var && var->opcode == OP_GSHAREDVT_LOCAL) { addresses [ins->dreg] = convert (ctx, emit_gsharedvt_ldaddr (ctx, var->dreg), LLVMPointerType (IntPtrType (), 0)); } else { g_assert (addresses [ins->sreg1]); addresses [ins->dreg] = addresses [ins->sreg1]; } } else if (ainfo->storage == LLVMArgGsharedvtFixed) { if (!addresses [ins->sreg1]) { addresses [ins->sreg1] = build_alloca (ctx, t); g_assert (values [ins->sreg1]); } LLVMBuildStore (builder, convert (ctx, values [ins->sreg1], LLVMGetElementType (LLVMTypeOf (addresses [ins->sreg1]))), addresses [ins->sreg1]); addresses [ins->dreg] = addresses [ins->sreg1]; } else { if (!addresses [ins->sreg1]) { addresses [ins->sreg1] = build_named_alloca (ctx, t, "llvm_outarg_vt"); g_assert (values [ins->sreg1]); LLVMBuildStore (builder, convert (ctx, values [ins->sreg1], type_to_llvm_type (ctx, t)), addresses [ins->sreg1]); addresses [ins->dreg] = addresses [ins->sreg1]; } else if (ainfo->storage == LLVMArgVtypeAddr || values [ins->sreg1] == addresses [ins->sreg1]) { /* LLVMArgVtypeByRef/LLVMArgVtypeAddr, have to make a copy */ addresses [ins->dreg] = build_alloca (ctx, t); LLVMValueRef v = LLVMBuildLoad (builder, addresses [ins->sreg1], "llvm_outarg_vt_copy"); LLVMBuildStore (builder, convert (ctx, v, type_to_llvm_type (ctx, t)), addresses [ins->dreg]); } else { if (values [ins->sreg1]) { LLVMTypeRef src_t = LLVMTypeOf (values [ins->sreg1]); LLVMValueRef dst = convert (ctx, addresses [ins->sreg1], LLVMPointerType (src_t, 0)); LLVMBuildStore (builder, values [ins->sreg1], dst); } addresses [ins->dreg] = addresses [ins->sreg1]; } } break; } case OP_OBJC_GET_SELECTOR: { const char *name = (const char*)ins->inst_p0; LLVMValueRef var; if (!ctx->module->objc_selector_to_var) { ctx->module->objc_selector_to_var = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); LLVMValueRef info_var = LLVMAddGlobal (ctx->lmodule, LLVMArrayType (LLVMInt8Type (), 8), "@OBJC_IMAGE_INFO"); int32_t objc_imageinfo [] = { 0, 16 }; LLVMSetInitializer (info_var, mono_llvm_create_constant_data_array ((uint8_t *) &objc_imageinfo, 8)); LLVMSetLinkage (info_var, LLVMPrivateLinkage); LLVMSetExternallyInitialized (info_var, TRUE); LLVMSetSection (info_var, "__DATA, __objc_imageinfo,regular,no_dead_strip"); LLVMSetAlignment (info_var, sizeof (target_mgreg_t)); mark_as_used (ctx->module, info_var); } var = (LLVMValueRef)g_hash_table_lookup (ctx->module->objc_selector_to_var, name); if (!var) { LLVMValueRef indexes [16]; LLVMValueRef name_var = LLVMAddGlobal (ctx->lmodule, LLVMArrayType (LLVMInt8Type (), strlen (name) + 1), "@OBJC_METH_VAR_NAME_"); LLVMSetInitializer (name_var, mono_llvm_create_constant_data_array ((const uint8_t*)name, strlen (name) + 1)); LLVMSetLinkage (name_var, LLVMPrivateLinkage); LLVMSetSection (name_var, "__TEXT,__objc_methname,cstring_literals"); mark_as_used (ctx->module, name_var); LLVMValueRef ref_var = LLVMAddGlobal (ctx->lmodule, LLVMPointerType (LLVMInt8Type (), 0), "@OBJC_SELECTOR_REFERENCES_"); indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, 0); indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, 0); LLVMSetInitializer (ref_var, LLVMConstGEP (name_var, indexes, 2)); LLVMSetLinkage (ref_var, LLVMPrivateLinkage); LLVMSetExternallyInitialized 
(ref_var, TRUE); LLVMSetSection (ref_var, "__DATA, __objc_selrefs, literal_pointers, no_dead_strip"); LLVMSetAlignment (ref_var, sizeof (target_mgreg_t)); mark_as_used (ctx->module, ref_var); g_hash_table_insert (ctx->module->objc_selector_to_var, g_strdup (name), ref_var); var = ref_var; } values [ins->dreg] = LLVMBuildLoad (builder, var, ""); break; } #if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_WASM) case OP_EXTRACTX_U2: case OP_XEXTRACT_I1: case OP_XEXTRACT_I2: case OP_XEXTRACT_I4: case OP_XEXTRACT_I8: case OP_XEXTRACT_R4: case OP_XEXTRACT_R8: case OP_EXTRACT_I1: case OP_EXTRACT_I2: case OP_EXTRACT_I4: case OP_EXTRACT_I8: case OP_EXTRACT_R4: case OP_EXTRACT_R8: { MonoTypeEnum mono_elt_t = inst_c1_type (ins); LLVMTypeRef elt_t = primitive_type_to_llvm_type (mono_elt_t); gboolean sext = FALSE; gboolean zext = FALSE; switch (mono_elt_t) { case MONO_TYPE_I1: case MONO_TYPE_I2: sext = TRUE; break; case MONO_TYPE_U1: case MONO_TYPE_U2: zext = TRUE; break; } LLVMValueRef element_ix = NULL; switch (ins->opcode) { case OP_XEXTRACT_I1: case OP_XEXTRACT_I2: case OP_XEXTRACT_I4: case OP_XEXTRACT_R4: case OP_XEXTRACT_R8: case OP_XEXTRACT_I8: element_ix = rhs; break; default: element_ix = const_int32 (ins->inst_c0); } LLVMTypeRef lhs_t = LLVMTypeOf (lhs); int vec_width = mono_llvm_get_prim_size_bits (lhs_t); int elem_width = mono_llvm_get_prim_size_bits (elt_t); int elements = vec_width / elem_width; element_ix = LLVMBuildAnd (builder, element_ix, const_int32 (elements - 1), "extract"); LLVMTypeRef ret_t = LLVMVectorType (elt_t, elements); LLVMValueRef src = LLVMBuildBitCast (builder, lhs, ret_t, "extract"); LLVMValueRef result = LLVMBuildExtractElement (builder, src, element_ix, "extract"); if (zext) result = LLVMBuildZExt (builder, result, i4_t, "extract_zext"); else if (sext) result = LLVMBuildSExt (builder, result, i4_t, "extract_sext"); values [ins->dreg] = result; break; } case OP_XINSERT_I1: case OP_XINSERT_I2: case OP_XINSERT_I4: case OP_XINSERT_I8: case OP_XINSERT_R4: case OP_XINSERT_R8: { MonoTypeEnum primty = inst_c1_type (ins); LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMTypeRef elem_t = LLVMGetElementType (ret_t); int elements = LLVMGetVectorSize (ret_t); LLVMValueRef element_ix = LLVMBuildAnd (builder, arg3, const_int32 (elements - 1), "xinsert"); LLVMValueRef vec = convert (ctx, lhs, ret_t); LLVMValueRef val = convert_full (ctx, rhs, elem_t, primitive_type_is_unsigned (primty)); LLVMValueRef result = LLVMBuildInsertElement (builder, vec, val, element_ix, "xinsert"); values [ins->dreg] = result; break; } case OP_EXPAND_I1: case OP_EXPAND_I2: case OP_EXPAND_I4: case OP_EXPAND_I8: case OP_EXPAND_R4: case OP_EXPAND_R8: { LLVMTypeRef t; LLVMValueRef mask [MAX_VECTOR_ELEMS], v; int i; t = simd_class_to_llvm_type (ctx, ins->klass); for (i = 0; i < MAX_VECTOR_ELEMS; ++i) mask [i] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); v = convert (ctx, values [ins->sreg1], LLVMGetElementType (t)); values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (t), v, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); values [ins->dreg] = LLVMBuildShuffleVector (builder, values [ins->dreg], LLVMGetUndef (t), LLVMConstVector (mask, LLVMGetVectorSize (t)), ""); break; } case OP_XZERO: { values [ins->dreg] = LLVMConstNull (type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass))); break; } case OP_LOADX_MEMBASE: { LLVMTypeRef t = type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass)); LLVMValueRef src; src = convert (ctx, 
LLVMBuildAdd (builder, convert (ctx, values [ins->inst_basereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (t, 0)); values [ins->dreg] = mono_llvm_build_aligned_load (builder, src, "", FALSE, 1); break; } case OP_STOREX_MEMBASE: { LLVMTypeRef t = LLVMTypeOf (values [ins->sreg1]); LLVMValueRef dest; dest = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_destbasereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (t, 0)); mono_llvm_build_aligned_store (builder, values [ins->sreg1], dest, FALSE, 1); break; } case OP_XBINOP: case OP_XBINOP_SCALAR: case OP_XBINOP_BYSCALAR: { gboolean scalar = ins->opcode == OP_XBINOP_SCALAR; gboolean byscalar = ins->opcode == OP_XBINOP_BYSCALAR; LLVMValueRef result = NULL; LLVMValueRef args [] = { lhs, rhs }; if (scalar) for (int i = 0; i < 2; ++i) args [i] = scalar_from_vector (ctx, args [i]); if (byscalar) { LLVMTypeRef t = LLVMTypeOf (args [0]); unsigned int elems = LLVMGetVectorSize (t); args [1] = broadcast_element (ctx, scalar_from_vector (ctx, args [1]), elems); } LLVMValueRef l = args [0]; LLVMValueRef r = args [1]; switch (ins->inst_c0) { case OP_IADD: result = LLVMBuildAdd (builder, l, r, ""); break; case OP_ISUB: result = LLVMBuildSub (builder, l, r, ""); break; case OP_IMUL: result = LLVMBuildMul (builder, l, r, ""); break; case OP_IAND: result = LLVMBuildAnd (builder, l, r, ""); break; case OP_IOR: result = LLVMBuildOr (builder, l, r, ""); break; case OP_IXOR: result = LLVMBuildXor (builder, l, r, ""); break; case OP_FADD: result = LLVMBuildFAdd (builder, l, r, ""); break; case OP_FSUB: result = LLVMBuildFSub (builder, l, r, ""); break; case OP_FMUL: result = LLVMBuildFMul (builder, l, r, ""); break; case OP_FDIV: result = LLVMBuildFDiv (builder, l, r, ""); break; case OP_FMAX: case OP_FMIN: { #if defined(TARGET_X86) || defined(TARGET_AMD64) LLVMValueRef args [] = { l, r }; LLVMTypeRef t = LLVMTypeOf (l); LLVMTypeRef elem_t = LLVMGetElementType (t); unsigned int elems = LLVMGetVectorSize (t); unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t); unsigned int v_size = elems * elem_bits; if (v_size == 128) { gboolean is_r4 = ins->inst_c1 == MONO_TYPE_R4; int iid = -1; if (ins->inst_c0 == OP_FMAX) { if (elems == 1) iid = is_r4 ? INTRINS_SSE_MAXSS : INTRINS_SSE_MAXSD; else iid = is_r4 ? INTRINS_SSE_MAXPS : INTRINS_SSE_MAXPD; } else { if (elems == 1) iid = is_r4 ? INTRINS_SSE_MINSS : INTRINS_SSE_MINSD; else iid = is_r4 ? INTRINS_SSE_MINPS : INTRINS_SSE_MINPD; } result = call_intrins (ctx, iid, args, dname); } else { LLVMRealPredicate op = ins->inst_c0 == OP_FMAX ? LLVMRealUGE : LLVMRealULE; LLVMValueRef cmp = LLVMBuildFCmp (builder, op, l, r, ""); result = LLVMBuildSelect (builder, cmp, l, r, ""); } #elif defined(TARGET_ARM64) LLVMValueRef args [] = { l, r }; IntrinsicId iid = ins->inst_c0 == OP_FMAX ? INTRINS_AARCH64_ADV_SIMD_FMAX : INTRINS_AARCH64_ADV_SIMD_FMIN; llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); result = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); #else NOT_IMPLEMENTED; #endif break; } case OP_IMAX: case OP_IMIN: { gboolean is_unsigned = ins->inst_c1 == MONO_TYPE_U1 || ins->inst_c1 == MONO_TYPE_U2 || ins->inst_c1 == MONO_TYPE_U4 || ins->inst_c1 == MONO_TYPE_U8; LLVMIntPredicate op; switch (ins->inst_c0) { case OP_IMAX: op = is_unsigned ? LLVMIntUGT : LLVMIntSGT; break; case OP_IMIN: op = is_unsigned ? 
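/* unsigned or signed less-than, depending on the element type */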
LLVMIntULT : LLVMIntSLT; break; default: g_assert_not_reached (); } #if defined(TARGET_ARM64) if ((ins->inst_c1 == MONO_TYPE_U8) || (ins->inst_c1 == MONO_TYPE_I8)) { LLVMValueRef cmp = LLVMBuildICmp (builder, op, l, r, ""); result = LLVMBuildSelect (builder, cmp, l, r, ""); } else { IntrinsicId iid; switch (ins->inst_c0) { case OP_IMAX: iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UMAX : INTRINS_AARCH64_ADV_SIMD_SMAX; break; case OP_IMIN: iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UMIN : INTRINS_AARCH64_ADV_SIMD_SMIN; break; default: g_assert_not_reached (); } LLVMValueRef args [] = { l, r }; llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); result = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); } #else LLVMValueRef cmp = LLVMBuildICmp (builder, op, l, r, ""); result = LLVMBuildSelect (builder, cmp, l, r, ""); #endif break; } default: g_assert_not_reached (); } if (scalar) result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result); values [ins->dreg] = result; break; } case OP_XBINOP_FORCEINT: { LLVMTypeRef t = LLVMTypeOf (lhs); LLVMTypeRef elem_t = LLVMGetElementType (t); unsigned int elems = LLVMGetVectorSize (t); unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t); LLVMTypeRef intermediate_elem_t = LLVMIntType (elem_bits); LLVMTypeRef intermediate_t = LLVMVectorType (intermediate_elem_t, elems); LLVMValueRef lhs_int = convert (ctx, lhs, intermediate_t); LLVMValueRef rhs_int = convert (ctx, rhs, intermediate_t); LLVMValueRef result = NULL; switch (ins->inst_c0) { case XBINOP_FORCEINT_and: result = LLVMBuildAnd (builder, lhs_int, rhs_int, ""); break; case XBINOP_FORCEINT_or: result = LLVMBuildOr (builder, lhs_int, rhs_int, ""); break; case XBINOP_FORCEINT_ornot: result = LLVMBuildNot (builder, rhs_int, ""); result = LLVMBuildOr (builder, result, lhs_int, ""); break; case XBINOP_FORCEINT_xor: result = LLVMBuildXor (builder, lhs_int, rhs_int, ""); break; } values [ins->dreg] = LLVMBuildBitCast (builder, result, t, ""); break; } case OP_CREATE_SCALAR: case OP_CREATE_SCALAR_UNSAFE: { MonoTypeEnum primty = inst_c1_type (ins); LLVMTypeRef type = simd_class_to_llvm_type (ctx, ins->klass); // use undef vector (most likely empty but may contain garbage values) for OP_CREATE_SCALAR_UNSAFE // and zero one for OP_CREATE_SCALAR LLVMValueRef vector = (ins->opcode == OP_CREATE_SCALAR) ? 
LLVMConstNull (type) : LLVMGetUndef (type); LLVMValueRef val = convert_full (ctx, lhs, primitive_type_to_llvm_type (primty), primitive_type_is_unsigned (primty)); values [ins->dreg] = LLVMBuildInsertElement (builder, vector, val, const_int32 (0), ""); break; } case OP_INSERT_I1: values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt8Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname); break; case OP_INSERT_I2: values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt16Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname); break; case OP_INSERT_I4: values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt32Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname); break; case OP_INSERT_I8: values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt64Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname); break; case OP_INSERT_R4: values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMFloatType ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname); break; case OP_INSERT_R8: values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMDoubleType ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname); break; case OP_XCAST: { LLVMTypeRef t = simd_class_to_llvm_type (ctx, ins->klass); values [ins->dreg] = LLVMBuildBitCast (builder, lhs, t, ""); break; } case OP_XCONCAT: { values [ins->dreg] = concatenate_vectors (ctx, lhs, rhs); break; } case OP_XINSERT_LOWER: case OP_XINSERT_UPPER: { const char *oname = ins->opcode == OP_XINSERT_LOWER ? "xinsert_lower" : "xinsert_upper"; int ix = ins->opcode == OP_XINSERT_LOWER ? 0 : 1; LLVMTypeRef src_t = LLVMTypeOf (lhs); unsigned int width = mono_llvm_get_prim_size_bits (src_t); LLVMTypeRef int_t = LLVMIntType (width / 2); LLVMTypeRef intvec_t = LLVMVectorType (int_t, 2); LLVMValueRef insval = LLVMBuildBitCast (builder, rhs, int_t, oname); LLVMValueRef val = LLVMBuildBitCast (builder, lhs, intvec_t, oname); val = LLVMBuildInsertElement (builder, val, insval, const_int32 (ix), oname); val = LLVMBuildBitCast (builder, val, src_t, oname); values [ins->dreg] = val; break; } case OP_XLOWER: case OP_XUPPER: { const char *oname = ins->opcode == OP_XLOWER ? "xlower" : "xupper"; LLVMTypeRef src_t = LLVMTypeOf (lhs); unsigned int elems = LLVMGetVectorSize (src_t); g_assert (elems >= 2 && elems <= MAX_VECTOR_ELEMS); unsigned int ret_elems = elems / 2; int startix = ins->opcode == OP_XLOWER ? 0 : ret_elems; LLVMValueRef val = LLVMBuildShuffleVector (builder, lhs, LLVMGetUndef (src_t), create_const_vector_i32 (&mask_0_incr_1 [startix], ret_elems), oname); values [ins->dreg] = val; break; } case OP_XWIDEN: case OP_XWIDEN_UNSAFE: { const char *oname = ins->opcode == OP_XWIDEN ? "xwiden" : "xwiden_unsafe"; LLVMTypeRef src_t = LLVMTypeOf (lhs); unsigned int elems = LLVMGetVectorSize (src_t); g_assert (elems <= MAX_VECTOR_ELEMS / 2); unsigned int ret_elems = elems * 2; LLVMValueRef upper = ins->opcode == OP_XWIDEN ? 
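/* OP_XWIDEN zero-fills the new upper lanes; the _UNSAFE variant leaves them undefined */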
LLVMConstNull (src_t) : LLVMGetUndef (src_t); LLVMValueRef val = LLVMBuildShuffleVector (builder, lhs, upper, create_const_vector_i32 (mask_0_incr_1, ret_elems), oname); values [ins->dreg] = val; break; } #endif // defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_WASM) #if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_WASM) case OP_PADDB: case OP_PADDW: case OP_PADDD: case OP_PADDQ: values [ins->dreg] = LLVMBuildAdd (builder, lhs, rhs, ""); break; case OP_ADDPD: case OP_ADDPS: values [ins->dreg] = LLVMBuildFAdd (builder, lhs, rhs, ""); break; case OP_PSUBB: case OP_PSUBW: case OP_PSUBD: case OP_PSUBQ: values [ins->dreg] = LLVMBuildSub (builder, lhs, rhs, ""); break; case OP_SUBPD: case OP_SUBPS: values [ins->dreg] = LLVMBuildFSub (builder, lhs, rhs, ""); break; case OP_MULPD: case OP_MULPS: values [ins->dreg] = LLVMBuildFMul (builder, lhs, rhs, ""); break; case OP_DIVPD: case OP_DIVPS: values [ins->dreg] = LLVMBuildFDiv (builder, lhs, rhs, ""); break; case OP_PAND: values [ins->dreg] = LLVMBuildAnd (builder, lhs, rhs, ""); break; case OP_POR: values [ins->dreg] = LLVMBuildOr (builder, lhs, rhs, ""); break; case OP_PXOR: values [ins->dreg] = LLVMBuildXor (builder, lhs, rhs, ""); break; case OP_PMULW: case OP_PMULD: values [ins->dreg] = LLVMBuildMul (builder, lhs, rhs, ""); break; case OP_ANDPS: case OP_ANDNPS: case OP_ORPS: case OP_XORPS: case OP_ANDPD: case OP_ANDNPD: case OP_ORPD: case OP_XORPD: { LLVMTypeRef t, rt; LLVMValueRef v = NULL; switch (ins->opcode) { case OP_ANDPS: case OP_ANDNPS: case OP_ORPS: case OP_XORPS: t = LLVMVectorType (LLVMInt32Type (), 4); rt = LLVMVectorType (LLVMFloatType (), 4); break; case OP_ANDPD: case OP_ANDNPD: case OP_ORPD: case OP_XORPD: t = LLVMVectorType (LLVMInt64Type (), 2); rt = LLVMVectorType (LLVMDoubleType (), 2); break; default: t = LLVMInt32Type (); rt = LLVMInt32Type (); g_assert_not_reached (); } lhs = LLVMBuildBitCast (builder, lhs, t, ""); rhs = LLVMBuildBitCast (builder, rhs, t, ""); switch (ins->opcode) { case OP_ANDPS: case OP_ANDPD: v = LLVMBuildAnd (builder, lhs, rhs, ""); break; case OP_ORPS: case OP_ORPD: v = LLVMBuildOr (builder, lhs, rhs, ""); break; case OP_XORPS: case OP_XORPD: v = LLVMBuildXor (builder, lhs, rhs, ""); break; case OP_ANDNPS: case OP_ANDNPD: v = LLVMBuildAnd (builder, rhs, LLVMBuildNot (builder, lhs, ""), ""); break; } values [ins->dreg] = LLVMBuildBitCast (builder, v, rt, ""); break; } case OP_PMIND_UN: case OP_PMINW_UN: case OP_PMINB_UN: { LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntULT, lhs, rhs, ""); values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, ""); break; } case OP_PMAXD_UN: case OP_PMAXW_UN: case OP_PMAXB_UN: { LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntUGT, lhs, rhs, ""); values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, ""); break; } case OP_PMINW: { LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSLT, lhs, rhs, ""); values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, ""); break; } case OP_PMAXW: { LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSGT, lhs, rhs, ""); values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, ""); break; } case OP_PAVGB_UN: case OP_PAVGW_UN: { LLVMValueRef ones_vec; LLVMValueRef ones [MAX_VECTOR_ELEMS]; int vector_size = LLVMGetVectorSize (LLVMTypeOf (lhs)); LLVMTypeRef ext_elem_type = vector_size == 16 ? 
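/* 16 x i8 lanes widen to i16, 8 x i16 lanes widen to i32 */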
LLVMInt16Type () : LLVMInt32Type (); for (int i = 0; i < MAX_VECTOR_ELEMS; ++i) ones [i] = LLVMConstInt (ext_elem_type, 1, FALSE); ones_vec = LLVMConstVector (ones, vector_size); LLVMValueRef val; LLVMTypeRef ext_type = LLVMVectorType (ext_elem_type, vector_size); /* Have to increase the vector element size to prevent overflows */ /* res = trunc ((zext (lhs) + zext (rhs) + 1) >> 1) */ val = LLVMBuildAdd (builder, LLVMBuildZExt (builder, lhs, ext_type, ""), LLVMBuildZExt (builder, rhs, ext_type, ""), ""); val = LLVMBuildAdd (builder, val, ones_vec, ""); val = LLVMBuildLShr (builder, val, ones_vec, ""); values [ins->dreg] = LLVMBuildTrunc (builder, val, LLVMTypeOf (lhs), ""); break; } case OP_PCMPEQB: case OP_PCMPEQW: case OP_PCMPEQD: case OP_PCMPEQQ: case OP_PCMPGTB: { LLVMValueRef pcmp; LLVMTypeRef retType; LLVMIntPredicate cmpOp; if (ins->opcode == OP_PCMPGTB) cmpOp = LLVMIntSGT; else cmpOp = LLVMIntEQ; if (LLVMTypeOf (lhs) == LLVMTypeOf (rhs)) { pcmp = LLVMBuildICmp (builder, cmpOp, lhs, rhs, ""); retType = LLVMTypeOf (lhs); } else { LLVMTypeRef flatType = LLVMVectorType (LLVMInt8Type (), 16); LLVMValueRef flatRHS = convert (ctx, rhs, flatType); LLVMValueRef flatLHS = convert (ctx, lhs, flatType); pcmp = LLVMBuildICmp (builder, cmpOp, flatLHS, flatRHS, ""); retType = flatType; } values [ins->dreg] = LLVMBuildSExt (builder, pcmp, retType, ""); break; } case OP_CVTDQ2PS: { LLVMValueRef i4 = LLVMBuildBitCast (builder, lhs, sse_i4_t, ""); values [ins->dreg] = LLVMBuildSIToFP (builder, i4, sse_r4_t, dname); break; } case OP_CVTDQ2PD: { LLVMValueRef indexes [16]; indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); indexes [1] = LLVMConstInt (LLVMInt32Type (), 1, FALSE); LLVMValueRef mask = LLVMConstVector (indexes, 2); LLVMValueRef shuffle = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), mask, ""); values [ins->dreg] = LLVMBuildSIToFP (builder, shuffle, LLVMVectorType (LLVMDoubleType (), 2), dname); break; } case OP_SSE2_CVTSS2SD: { LLVMValueRef rhs_elem = LLVMBuildExtractElement (builder, rhs, const_int32 (0), ""); LLVMValueRef fpext = LLVMBuildFPExt (builder, rhs_elem, LLVMDoubleType (), dname); values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, fpext, const_int32 (0), ""); break; } case OP_CVTPS2PD: { LLVMValueRef indexes [16]; indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE); indexes [1] = LLVMConstInt (LLVMInt32Type (), 1, FALSE); LLVMValueRef mask = LLVMConstVector (indexes, 2); LLVMValueRef shuffle = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), mask, ""); values [ins->dreg] = LLVMBuildFPExt (builder, shuffle, LLVMVectorType (LLVMDoubleType (), 2), dname); break; } case OP_CVTTPS2DQ: values [ins->dreg] = LLVMBuildFPToSI (builder, lhs, LLVMVectorType (LLVMInt32Type (), 4), dname); break; case OP_CVTPD2DQ: case OP_CVTPS2DQ: case OP_CVTPD2PS: case OP_CVTTPD2DQ: { LLVMValueRef v; v = convert (ctx, values [ins->sreg1], simd_op_to_llvm_type (ins->opcode)); values [ins->dreg] = call_intrins (ctx, simd_ins_to_intrins (ins->opcode), &v, dname); break; } case OP_COMPPS: case OP_COMPPD: { LLVMRealPredicate op; switch (ins->inst_c0) { case SIMD_COMP_EQ: op = LLVMRealOEQ; break; case SIMD_COMP_LT: op = LLVMRealOLT; break; case SIMD_COMP_LE: op = LLVMRealOLE; break; case SIMD_COMP_UNORD: op = LLVMRealUNO; break; case SIMD_COMP_NEQ: op = LLVMRealUNE; break; case SIMD_COMP_NLT: op = LLVMRealUGE; break; case SIMD_COMP_NLE: op = LLVMRealUGT; break; case SIMD_COMP_ORD: op = LLVMRealORD; break; default: g_assert_not_reached (); } 
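/* the vector fcmp yields an i1 vector; sign-extend it so each lane is all-ones or all-zeroes (the CMPPS/CMPPD result format), then bitcast back to the operand type */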
LLVMValueRef cmp = LLVMBuildFCmp (builder, op, lhs, rhs, ""); if (ins->opcode == OP_COMPPD) values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt64Type (), 2), ""), LLVMTypeOf (lhs), ""); else values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt32Type (), 4), ""), LLVMTypeOf (lhs), ""); break; } case OP_ICONV_TO_X: /* This is only used for implementing shifts by non-immediate */ values [ins->dreg] = lhs; break; case OP_SHUFPS: case OP_SHUFPD: case OP_PSHUFLED: case OP_PSHUFLEW_LOW: case OP_PSHUFLEW_HIGH: { int mask [16]; LLVMValueRef v1 = NULL, v2 = NULL, mask_values [16]; int i, mask_size = 0; int imask = ins->inst_c0; /* Convert the x86 shuffle mask to LLVM's */ switch (ins->opcode) { case OP_SHUFPS: mask_size = 4; mask [0] = ((imask >> 0) & 3); mask [1] = ((imask >> 2) & 3); mask [2] = ((imask >> 4) & 3) + 4; mask [3] = ((imask >> 6) & 3) + 4; v1 = values [ins->sreg1]; v2 = values [ins->sreg2]; break; case OP_SHUFPD: mask_size = 2; mask [0] = ((imask >> 0) & 1); mask [1] = ((imask >> 1) & 1) + 2; v1 = values [ins->sreg1]; v2 = values [ins->sreg2]; break; case OP_PSHUFLEW_LOW: mask_size = 8; mask [0] = ((imask >> 0) & 3); mask [1] = ((imask >> 2) & 3); mask [2] = ((imask >> 4) & 3); mask [3] = ((imask >> 6) & 3); mask [4] = 4 + 0; mask [5] = 4 + 1; mask [6] = 4 + 2; mask [7] = 4 + 3; v1 = values [ins->sreg1]; v2 = LLVMGetUndef (LLVMTypeOf (v1)); break; case OP_PSHUFLEW_HIGH: mask_size = 8; mask [0] = 0; mask [1] = 1; mask [2] = 2; mask [3] = 3; mask [4] = 4 + ((imask >> 0) & 3); mask [5] = 4 + ((imask >> 2) & 3); mask [6] = 4 + ((imask >> 4) & 3); mask [7] = 4 + ((imask >> 6) & 3); v1 = values [ins->sreg1]; v2 = LLVMGetUndef (LLVMTypeOf (v1)); break; case OP_PSHUFLED: mask_size = 4; mask [0] = ((imask >> 0) & 3); mask [1] = ((imask >> 2) & 3); mask [2] = ((imask >> 4) & 3); mask [3] = ((imask >> 6) & 3); v1 = values [ins->sreg1]; v2 = LLVMGetUndef (LLVMTypeOf (v1)); break; default: g_assert_not_reached (); } for (i = 0; i < mask_size; ++i) mask_values [i] = LLVMConstInt (LLVMInt32Type (), mask [i], FALSE); values [ins->dreg] = LLVMBuildShuffleVector (builder, v1, v2, LLVMConstVector (mask_values, mask_size), dname); break; } case OP_UNPACK_LOWB: case OP_UNPACK_LOWW: case OP_UNPACK_LOWD: case OP_UNPACK_LOWQ: case OP_UNPACK_LOWPS: case OP_UNPACK_LOWPD: case OP_UNPACK_HIGHB: case OP_UNPACK_HIGHW: case OP_UNPACK_HIGHD: case OP_UNPACK_HIGHQ: case OP_UNPACK_HIGHPS: case OP_UNPACK_HIGHPD: { int mask [16]; LLVMValueRef mask_values [16]; int i, mask_size = 0; gboolean low = FALSE; switch (ins->opcode) { case OP_UNPACK_LOWB: mask_size = 16; low = TRUE; break; case OP_UNPACK_LOWW: mask_size = 8; low = TRUE; break; case OP_UNPACK_LOWD: case OP_UNPACK_LOWPS: mask_size = 4; low = TRUE; break; case OP_UNPACK_LOWQ: case OP_UNPACK_LOWPD: mask_size = 2; low = TRUE; break; case OP_UNPACK_HIGHB: mask_size = 16; break; case OP_UNPACK_HIGHW: mask_size = 8; break; case OP_UNPACK_HIGHD: case OP_UNPACK_HIGHPS: mask_size = 4; break; case OP_UNPACK_HIGHQ: case OP_UNPACK_HIGHPD: mask_size = 2; break; default: g_assert_not_reached (); } if (low) { for (i = 0; i < (mask_size / 2); ++i) { mask [(i * 2)] = i; mask [(i * 2) + 1] = mask_size + i; } } else { for (i = 0; i < (mask_size / 2); ++i) { mask [(i * 2)] = (mask_size / 2) + i; mask [(i * 2) + 1] = mask_size + (mask_size / 2) + i; } } for (i = 0; i < mask_size; ++i) mask_values [i] = LLVMConstInt (LLVMInt32Type (), mask [i], FALSE); values [ins->dreg] = 
LLVMBuildShuffleVector (builder, values [ins->sreg1], values [ins->sreg2], LLVMConstVector (mask_values, mask_size), dname); break; } case OP_DUPPD: { LLVMTypeRef t = simd_op_to_llvm_type (ins->opcode); LLVMValueRef v, val; v = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); val = LLVMConstNull (t); val = LLVMBuildInsertElement (builder, val, v, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); val = LLVMBuildInsertElement (builder, val, v, LLVMConstInt (LLVMInt32Type (), 1, FALSE), dname); values [ins->dreg] = val; break; } case OP_DUPPS_LOW: case OP_DUPPS_HIGH: { LLVMTypeRef t = simd_op_to_llvm_type (ins->opcode); LLVMValueRef v1, v2, val; if (ins->opcode == OP_DUPPS_LOW) { v1 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); v2 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 2, FALSE), ""); } else { v1 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 1, FALSE), ""); v2 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 3, FALSE), ""); } val = LLVMConstNull (t); val = LLVMBuildInsertElement (builder, val, v1, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); val = LLVMBuildInsertElement (builder, val, v1, LLVMConstInt (LLVMInt32Type (), 1, FALSE), ""); val = LLVMBuildInsertElement (builder, val, v2, LLVMConstInt (LLVMInt32Type (), 2, FALSE), ""); val = LLVMBuildInsertElement (builder, val, v2, LLVMConstInt (LLVMInt32Type (), 3, FALSE), ""); values [ins->dreg] = val; break; } case OP_FCONV_TO_R8_X: { values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (sse_r8_t), lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); break; } case OP_FCONV_TO_R4_X: { values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (sse_r4_t), lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); break; } #if defined(TARGET_X86) || defined(TARGET_AMD64) case OP_SSE_MOVMSK: { LLVMValueRef args [1]; if (ins->inst_c1 == MONO_TYPE_R4) { args [0] = lhs; values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_MOVMSK_PS, args, dname); } else if (ins->inst_c1 == MONO_TYPE_R8) { args [0] = lhs; values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_MOVMSK_PD, args, dname); } else { args [0] = convert (ctx, lhs, sse_i1_t); values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_PMOVMSKB, args, dname); } break; } case OP_SSE_MOVS: case OP_SSE_MOVS2: { if (ins->inst_c1 == MONO_TYPE_R4) values [ins->dreg] = LLVMBuildShuffleVector (builder, rhs, lhs, create_const_vector_4_i32 (0, 5, 6, 7), ""); else if (ins->inst_c1 == MONO_TYPE_R8) values [ins->dreg] = LLVMBuildShuffleVector (builder, rhs, lhs, create_const_vector_2_i32 (0, 3), ""); else if (ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8) values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, LLVMConstInt (LLVMInt64Type (), 0, FALSE), LLVMConstInt (LLVMInt32Type (), 1, FALSE), ""); else g_assert_not_reached (); // will be needed for other types later break; } case OP_SSE_MOVEHL: { if (ins->inst_c1 == MONO_TYPE_R4) values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (6, 7, 2, 3), ""); else g_assert_not_reached (); break; } case OP_SSE_MOVELH: { if (ins->inst_c1 == MONO_TYPE_R4) values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (0, 1, 4, 5), ""); else g_assert_not_reached (); break; } case OP_SSE_UNPACKLO: { if (ins->inst_c1 == MONO_TYPE_R8 || ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8) { values [ins->dreg] = 
LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_2_i32 (0, 2), ""); } else if (ins->inst_c1 == MONO_TYPE_R4 || ins->inst_c1 == MONO_TYPE_I4 || ins->inst_c1 == MONO_TYPE_U4) { values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (0, 4, 1, 5), ""); } else if (ins->inst_c1 == MONO_TYPE_I2 || ins->inst_c1 == MONO_TYPE_U2) { const int mask_values [] = { 0, 8, 1, 9, 2, 10, 3, 11 }; LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, convert (ctx, lhs, sse_i2_t), convert (ctx, rhs, sse_i2_t), create_const_vector_i32 (mask_values, 8), ""); values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1)); } else if (ins->inst_c1 == MONO_TYPE_I1 || ins->inst_c1 == MONO_TYPE_U1) { const int mask_values [] = { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 }; LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, convert (ctx, lhs, sse_i1_t), convert (ctx, rhs, sse_i1_t), create_const_vector_i32 (mask_values, 16), ""); values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1)); } else { g_assert_not_reached (); } break; } case OP_SSE_UNPACKHI: { if (ins->inst_c1 == MONO_TYPE_R8 || ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8) { values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_2_i32 (1, 3), ""); } else if (ins->inst_c1 == MONO_TYPE_R4 || ins->inst_c1 == MONO_TYPE_I4 || ins->inst_c1 == MONO_TYPE_U4) { values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (2, 6, 3, 7), ""); } else if (ins->inst_c1 == MONO_TYPE_I2 || ins->inst_c1 == MONO_TYPE_U2) { const int mask_values [] = { 4, 12, 5, 13, 6, 14, 7, 15 }; LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, convert (ctx, lhs, sse_i2_t), convert (ctx, rhs, sse_i2_t), create_const_vector_i32 (mask_values, 8), ""); values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1)); } else if (ins->inst_c1 == MONO_TYPE_I1 || ins->inst_c1 == MONO_TYPE_U1) { const int mask_values [] = { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 }; LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, convert (ctx, lhs, sse_i1_t), convert (ctx, rhs, sse_i1_t), create_const_vector_i32 (mask_values, 16), ""); values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1)); } else { g_assert_not_reached (); } break; } case OP_SSE_LOADU: { LLVMValueRef dst_ptr = convert (ctx, lhs, LLVMPointerType (primitive_type_to_llvm_type (inst_c1_type (ins)), 0)); LLVMValueRef dst_vec = LLVMBuildBitCast (builder, dst_ptr, LLVMPointerType (type_to_sse_type (ins->inst_c1), 0), ""); values [ins->dreg] = mono_llvm_build_aligned_load (builder, dst_vec, "", FALSE, ins->inst_c0); // inst_c0 is alignment break; } case OP_SSE_MOVSS: { LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMFloatType (), 0)); LLVMValueRef val = mono_llvm_build_load (builder, addr, "", FALSE); values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (type_to_sse_type (ins->inst_c1)), val, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); break; } case OP_SSE_MOVSS_STORE: { LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMFloatType (), 0)); LLVMValueRef val = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); mono_llvm_build_store (builder, val, addr, FALSE, LLVM_BARRIER_NONE); break; } case OP_SSE2_MOVD: case OP_SSE2_MOVQ: case OP_SSE2_MOVUPD: { LLVMTypeRef rty = NULL; switch (ins->opcode) { case OP_SSE2_MOVD: rty = sse_i4_t; 
		break;
		case OP_SSE2_MOVQ: rty = sse_i8_t; break;
		case OP_SSE2_MOVUPD: rty = sse_r8_t; break;
		}
		LLVMTypeRef srcty = LLVMGetElementType (rty);
		LLVMValueRef zero = LLVMConstNull (rty);
		LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (srcty, 0));
		LLVMValueRef val = mono_llvm_build_aligned_load (builder, addr, "", FALSE, 1);
		values [ins->dreg] = LLVMBuildInsertElement (builder, zero, val, const_int32 (0), dname);
		break;
	}
	case OP_SSE_MOVLPS_LOAD:
	case OP_SSE_MOVHPS_LOAD: {
		LLVMTypeRef t = LLVMFloatType ();
		int size = 4;
		gboolean high = ins->opcode == OP_SSE_MOVHPS_LOAD;
		/* Load two floats from rhs and store them in the low/high part of lhs */
		LLVMValueRef addr = rhs;
		LLVMValueRef addr1 = convert (ctx, addr, LLVMPointerType (t, 0));
		LLVMValueRef addr2 = convert (ctx, LLVMBuildAdd (builder, convert (ctx, addr, IntPtrType ()), convert (ctx, LLVMConstInt (LLVMInt32Type (), size, FALSE), IntPtrType ()), ""), LLVMPointerType (t, 0));
		LLVMValueRef val1 = mono_llvm_build_load (builder, addr1, "", FALSE);
		LLVMValueRef val2 = mono_llvm_build_load (builder, addr2, "", FALSE);
		int index1, index2;
		index1 = high ? 2 : 0;
		index2 = high ? 3 : 1;
		values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMBuildInsertElement (builder, lhs, val1, LLVMConstInt (LLVMInt32Type (), index1, FALSE), ""), val2, LLVMConstInt (LLVMInt32Type (), index2, FALSE), "");
		break;
	}
	case OP_SSE2_MOVLPD_LOAD:
	case OP_SSE2_MOVHPD_LOAD: {
		LLVMTypeRef t = LLVMDoubleType ();
		LLVMValueRef addr = convert (ctx, rhs, LLVMPointerType (t, 0));
		LLVMValueRef val = mono_llvm_build_load (builder, addr, "", FALSE);
		int index = ins->opcode == OP_SSE2_MOVHPD_LOAD ? 1 : 0;
		values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, val, const_int32 (index), "");
		break;
	}
	case OP_SSE_MOVLPS_STORE:
	case OP_SSE_MOVHPS_STORE: {
		/* Store two floats from the low/high part of rhs into lhs */
		LLVMValueRef addr = lhs;
		LLVMValueRef addr1 = convert (ctx, addr, LLVMPointerType (LLVMFloatType (), 0));
		LLVMValueRef addr2 = convert (ctx, LLVMBuildAdd (builder, convert (ctx, addr, IntPtrType ()), convert (ctx, LLVMConstInt (LLVMInt32Type (), 4, FALSE), IntPtrType ()), ""), LLVMPointerType (LLVMFloatType (), 0));
		int index1 = ins->opcode == OP_SSE_MOVLPS_STORE ? 0 : 2;
		int index2 = ins->opcode == OP_SSE_MOVLPS_STORE ? 1 : 3;
		LLVMValueRef val1 = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), index1, FALSE), "");
		LLVMValueRef val2 = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), index2, FALSE), "");
		mono_llvm_build_store (builder, val1, addr1, FALSE, LLVM_BARRIER_NONE);
		mono_llvm_build_store (builder, val2, addr2, FALSE, LLVM_BARRIER_NONE);
		break;
	}
	case OP_SSE2_MOVLPD_STORE:
	case OP_SSE2_MOVHPD_STORE: {
		LLVMTypeRef t = LLVMDoubleType ();
		LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (t, 0));
		int index = ins->opcode == OP_SSE2_MOVHPD_STORE ?
1 : 0; LLVMValueRef val = LLVMBuildExtractElement (builder, rhs, const_int32 (index), ""); mono_llvm_build_store (builder, val, addr, FALSE, LLVM_BARRIER_NONE); break; } case OP_SSE_STORE: { LLVMValueRef dst_vec = convert (ctx, lhs, LLVMPointerType (LLVMTypeOf (rhs), 0)); mono_llvm_build_aligned_store (builder, rhs, dst_vec, FALSE, ins->inst_c0); break; } case OP_SSE_STORES: { LLVMValueRef first_elem = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); LLVMValueRef dst = convert (ctx, lhs, LLVMPointerType (LLVMTypeOf (first_elem), 0)); mono_llvm_build_aligned_store (builder, first_elem, dst, FALSE, 1); break; } case OP_SSE_MOVNTPS: { LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMTypeOf (rhs), 0)); LLVMValueRef store = mono_llvm_build_aligned_store (builder, rhs, addr, FALSE, ins->inst_c0); set_nontemporal_flag (store); break; } case OP_SSE_PREFETCHT0: { LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0)); LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (3), const_int32 (1) }; call_intrins (ctx, INTRINS_PREFETCH, args, ""); break; } case OP_SSE_PREFETCHT1: { LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0)); LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (2), const_int32 (1) }; call_intrins (ctx, INTRINS_PREFETCH, args, ""); break; } case OP_SSE_PREFETCHT2: { LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0)); LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (1), const_int32 (1) }; call_intrins (ctx, INTRINS_PREFETCH, args, ""); break; } case OP_SSE_PREFETCHNTA: { LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0)); LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (0), const_int32 (1) }; call_intrins (ctx, INTRINS_PREFETCH, args, ""); break; } case OP_SSE_OR: { LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t); LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t); LLVMValueRef vec_and = LLVMBuildOr (builder, vec_lhs_i64, vec_rhs_i64, ""); values [ins->dreg] = LLVMBuildBitCast (builder, vec_and, type_to_sse_type (ins->inst_c1), ""); break; } case OP_SSE_XOR: { LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t); LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t); LLVMValueRef vec_and = LLVMBuildXor (builder, vec_lhs_i64, vec_rhs_i64, ""); values [ins->dreg] = LLVMBuildBitCast (builder, vec_and, type_to_sse_type (ins->inst_c1), ""); break; } case OP_SSE_AND: { LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t); LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t); LLVMValueRef vec_and = LLVMBuildAnd (builder, vec_lhs_i64, vec_rhs_i64, ""); values [ins->dreg] = LLVMBuildBitCast (builder, vec_and, type_to_sse_type (ins->inst_c1), ""); break; } case OP_SSE_ANDN: { LLVMValueRef minus_one [2]; minus_one [0] = LLVMConstInt (LLVMInt64Type (), -1, FALSE); minus_one [1] = LLVMConstInt (LLVMInt64Type (), -1, FALSE); LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t); LLVMValueRef vec_xor = LLVMBuildXor (builder, vec_lhs_i64, LLVMConstVector (minus_one, 2), ""); LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t); LLVMValueRef vec_and = LLVMBuildAnd (builder, vec_rhs_i64, vec_xor, ""); values [ins->dreg] = LLVMBuildBitCast (builder, vec_and, type_to_sse_type (ins->inst_c1), ""); break; } case OP_SSE_ADDSS: case OP_SSE_SUBSS: case OP_SSE_DIVSS: case OP_SSE_MULSS: case OP_SSE2_ADDSD: case OP_SSE2_SUBSD: case OP_SSE2_DIVSD: case OP_SSE2_MULSD: { LLVMValueRef v1 = 
LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); LLVMValueRef v2 = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); LLVMValueRef v = NULL; switch (ins->opcode) { case OP_SSE_ADDSS: case OP_SSE2_ADDSD: v = LLVMBuildFAdd (builder, v1, v2, ""); break; case OP_SSE_SUBSS: case OP_SSE2_SUBSD: v = LLVMBuildFSub (builder, v1, v2, ""); break; case OP_SSE_DIVSS: case OP_SSE2_DIVSD: v = LLVMBuildFDiv (builder, v1, v2, ""); break; case OP_SSE_MULSS: case OP_SSE2_MULSD: v = LLVMBuildFMul (builder, v1, v2, ""); break; default: g_assert_not_reached (); } values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, v, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); break; } case OP_SSE_CMPSS: case OP_SSE2_CMPSD: { int imm = -1; gboolean swap = FALSE; switch (ins->inst_c0) { case CMP_EQ: imm = SSE_eq_ord_nosignal; break; case CMP_GT: imm = SSE_lt_ord_signal; swap = TRUE; break; case CMP_GE: imm = SSE_le_ord_signal; swap = TRUE; break; case CMP_LT: imm = SSE_lt_ord_signal; break; case CMP_LE: imm = SSE_le_ord_signal; break; case CMP_GT_UN: imm = SSE_nle_unord_signal; break; case CMP_GE_UN: imm = SSE_nlt_unord_signal; break; case CMP_LT_UN: imm = SSE_nle_unord_signal; swap = TRUE; break; case CMP_LE_UN: imm = SSE_nlt_unord_signal; swap = TRUE; break; case CMP_NE: imm = SSE_neq_unord_nosignal; break; case CMP_ORD: imm = SSE_ord_nosignal; break; case CMP_UNORD: imm = SSE_unord_nosignal; break; default: g_assert_not_reached (); break; } LLVMValueRef cmp = LLVMConstInt (LLVMInt8Type (), imm, FALSE); LLVMValueRef args [] = { lhs, rhs, cmp }; if (swap) { args [0] = rhs; args [1] = lhs; } IntrinsicId id = (IntrinsicId) 0; switch (ins->opcode) { case OP_SSE_CMPSS: id = INTRINS_SSE_CMPSS; break; case OP_SSE2_CMPSD: id = INTRINS_SSE_CMPSD; break; default: g_assert_not_reached (); break; } int elements = LLVMGetVectorSize (LLVMTypeOf (lhs)); int mask_values [MAX_VECTOR_ELEMS] = { 0 }; for (int i = 1; i < elements; ++i) { mask_values [i] = elements + i; } LLVMValueRef result = call_intrins (ctx, id, args, ""); result = LLVMBuildShuffleVector (builder, result, lhs, create_const_vector_i32 (mask_values, elements), ""); values [ins->dreg] = result; break; } case OP_SSE_COMISS: { LLVMValueRef args [] = { lhs, rhs }; IntrinsicId id = (IntrinsicId)0; switch (ins->inst_c0) { case CMP_EQ: id = INTRINS_SSE_COMIEQ_SS; break; case CMP_GT: id = INTRINS_SSE_COMIGT_SS; break; case CMP_GE: id = INTRINS_SSE_COMIGE_SS; break; case CMP_LT: id = INTRINS_SSE_COMILT_SS; break; case CMP_LE: id = INTRINS_SSE_COMILE_SS; break; case CMP_NE: id = INTRINS_SSE_COMINEQ_SS; break; default: g_assert_not_reached (); break; } values [ins->dreg] = call_intrins (ctx, id, args, ""); break; } case OP_SSE_UCOMISS: { LLVMValueRef args [] = { lhs, rhs }; IntrinsicId id = (IntrinsicId)0; switch (ins->inst_c0) { case CMP_EQ: id = INTRINS_SSE_UCOMIEQ_SS; break; case CMP_GT: id = INTRINS_SSE_UCOMIGT_SS; break; case CMP_GE: id = INTRINS_SSE_UCOMIGE_SS; break; case CMP_LT: id = INTRINS_SSE_UCOMILT_SS; break; case CMP_LE: id = INTRINS_SSE_UCOMILE_SS; break; case CMP_NE: id = INTRINS_SSE_UCOMINEQ_SS; break; default: g_assert_not_reached (); break; } values [ins->dreg] = call_intrins (ctx, id, args, ""); break; } case OP_SSE2_COMISD: { LLVMValueRef args [] = { lhs, rhs }; IntrinsicId id = (IntrinsicId)0; switch (ins->inst_c0) { case CMP_EQ: id = INTRINS_SSE_COMIEQ_SD; break; case CMP_GT: id = INTRINS_SSE_COMIGT_SD; break; case CMP_GE: id = INTRINS_SSE_COMIGE_SD; break; case CMP_LT: id = 
INTRINS_SSE_COMILT_SD; break; case CMP_LE: id = INTRINS_SSE_COMILE_SD; break; case CMP_NE: id = INTRINS_SSE_COMINEQ_SD; break; default: g_assert_not_reached (); break; } values [ins->dreg] = call_intrins (ctx, id, args, ""); break; } case OP_SSE2_UCOMISD: { LLVMValueRef args [] = { lhs, rhs }; IntrinsicId id = (IntrinsicId)0; switch (ins->inst_c0) { case CMP_EQ: id = INTRINS_SSE_UCOMIEQ_SD; break; case CMP_GT: id = INTRINS_SSE_UCOMIGT_SD; break; case CMP_GE: id = INTRINS_SSE_UCOMIGE_SD; break; case CMP_LT: id = INTRINS_SSE_UCOMILT_SD; break; case CMP_LE: id = INTRINS_SSE_UCOMILE_SD; break; case CMP_NE: id = INTRINS_SSE_UCOMINEQ_SD; break; default: g_assert_not_reached (); break; } values [ins->dreg] = call_intrins (ctx, id, args, ""); break; } case OP_SSE_CVTSI2SS: case OP_SSE_CVTSI2SS64: case OP_SSE2_CVTSI2SD: case OP_SSE2_CVTSI2SD64: { LLVMTypeRef ty = LLVMFloatType (); switch (ins->opcode) { case OP_SSE2_CVTSI2SD: case OP_SSE2_CVTSI2SD64: ty = LLVMDoubleType (); break; } LLVMValueRef fp = LLVMBuildSIToFP (builder, rhs, ty, ""); values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, fp, const_int32 (0), dname); break; } case OP_SSE2_PMULUDQ: { LLVMValueRef i32_max = LLVMConstInt (LLVMInt64Type (), UINT32_MAX, FALSE); LLVMValueRef maskvals [] = { i32_max, i32_max }; LLVMValueRef mask = LLVMConstVector (maskvals, 2); LLVMValueRef l = LLVMBuildAnd (builder, convert (ctx, lhs, sse_i8_t), mask, ""); LLVMValueRef r = LLVMBuildAnd (builder, convert (ctx, rhs, sse_i8_t), mask, ""); values [ins->dreg] = LLVMBuildNUWMul (builder, l, r, dname); break; } case OP_SSE_SQRTSS: case OP_SSE2_SQRTSD: { LLVMValueRef upper = values [ins->sreg1]; LLVMValueRef lower = values [ins->sreg2]; LLVMValueRef scalar = LLVMBuildExtractElement (builder, lower, const_int32 (0), ""); LLVMValueRef result = call_intrins (ctx, simd_ins_to_intrins (ins->opcode), &scalar, dname); values [ins->dreg] = LLVMBuildInsertElement (builder, upper, result, const_int32 (0), ""); break; } case OP_SSE_RCPSS: case OP_SSE_RSQRTSS: { IntrinsicId id = (IntrinsicId)0; switch (ins->opcode) { case OP_SSE_RCPSS: id = INTRINS_SSE_RCP_SS; break; case OP_SSE_RSQRTSS: id = INTRINS_SSE_RSQRT_SS; break; default: g_assert_not_reached (); break; }; LLVMValueRef result = call_intrins (ctx, id, &rhs, dname); const int mask[] = { 0, 5, 6, 7 }; LLVMValueRef shufmask = create_const_vector_i32 (mask, 4); values [ins->dreg] = LLVMBuildShuffleVector (builder, result, lhs, shufmask, ""); break; } case OP_XOP: { IntrinsicId id = (IntrinsicId)ins->inst_c0; call_intrins (ctx, id, NULL, ""); break; } case OP_XOP_X_I: case OP_XOP_X_X: case OP_XOP_I4_X: case OP_XOP_I8_X: case OP_XOP_X_X_X: case OP_XOP_X_X_I4: case OP_XOP_X_X_I8: { IntrinsicId id = (IntrinsicId)ins->inst_c0; LLVMValueRef args [] = { lhs, rhs }; values [ins->dreg] = call_intrins (ctx, id, args, ""); break; } case OP_XOP_I4_X_X: { gboolean to_i8_t = FALSE; gboolean ret_bool = FALSE; IntrinsicId id = (IntrinsicId)ins->inst_c0; switch (ins->inst_c0) { case INTRINS_SSE_TESTC: to_i8_t = TRUE; ret_bool = TRUE; break; case INTRINS_SSE_TESTZ: to_i8_t = TRUE; ret_bool = TRUE; break; case INTRINS_SSE_TESTNZ: to_i8_t = TRUE; ret_bool = TRUE; break; default: g_assert_not_reached (); break; } LLVMValueRef args [] = { lhs, rhs }; if (to_i8_t) { args [0] = convert (ctx, args [0], sse_i8_t); args [1] = convert (ctx, args [1], sse_i8_t); } LLVMValueRef call = call_intrins (ctx, id, args, ""); if (ret_bool) { // if return type is bool (it's still i32) we need to normalize it to 1/0 LLVMValueRef cmp_zero = 
LLVMBuildICmp (builder, LLVMIntNE, call, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""); values [ins->dreg] = LLVMBuildZExt (builder, cmp_zero, LLVMInt8Type (), ""); } else { values [ins->dreg] = call; } break; } case OP_SSE2_MASKMOVDQU: { LLVMTypeRef i8ptr = LLVMPointerType (LLVMInt8Type (), 0); LLVMValueRef dstaddr = convert (ctx, values [ins->sreg3], i8ptr); LLVMValueRef src = convert (ctx, lhs, sse_i1_t); LLVMValueRef mask = convert (ctx, rhs, sse_i1_t); LLVMValueRef args[] = { src, mask, dstaddr }; call_intrins (ctx, INTRINS_SSE_MASKMOVDQU, args, ""); break; } case OP_PADDB_SAT: case OP_PADDW_SAT: case OP_PSUBB_SAT: case OP_PSUBW_SAT: case OP_PADDB_SAT_UN: case OP_PADDW_SAT_UN: case OP_PSUBB_SAT_UN: case OP_PSUBW_SAT_UN: case OP_SSE2_ADDS: case OP_SSE2_SUBS: { IntrinsicId id = (IntrinsicId)0; int type = 0; gboolean is_add = TRUE; switch (ins->opcode) { case OP_PADDB_SAT: type = MONO_TYPE_I1; break; case OP_PADDW_SAT: type = MONO_TYPE_I2; break; case OP_PSUBB_SAT: type = MONO_TYPE_I1; is_add = FALSE; break; case OP_PSUBW_SAT: type = MONO_TYPE_I2; is_add = FALSE; break; case OP_PADDB_SAT_UN: type = MONO_TYPE_U1; break; case OP_PADDW_SAT_UN: type = MONO_TYPE_U2; break; case OP_PSUBB_SAT_UN: type = MONO_TYPE_U1; is_add = FALSE; break; case OP_PSUBW_SAT_UN: type = MONO_TYPE_U2; is_add = FALSE; break; case OP_SSE2_ADDS: type = ins->inst_c1; break; case OP_SSE2_SUBS: type = ins->inst_c1; is_add = FALSE; break; default: g_assert_not_reached (); } if (is_add) { switch (type) { case MONO_TYPE_I1: id = INTRINS_SSE_SADD_SATI8; break; case MONO_TYPE_U1: id = INTRINS_SSE_UADD_SATI8; break; case MONO_TYPE_I2: id = INTRINS_SSE_SADD_SATI16; break; case MONO_TYPE_U2: id = INTRINS_SSE_UADD_SATI16; break; default: g_assert_not_reached (); break; } } else { switch (type) { case MONO_TYPE_I1: id = INTRINS_SSE_SSUB_SATI8; break; case MONO_TYPE_U1: id = INTRINS_SSE_USUB_SATI8; break; case MONO_TYPE_I2: id = INTRINS_SSE_SSUB_SATI16; break; case MONO_TYPE_U2: id = INTRINS_SSE_USUB_SATI16; break; default: g_assert_not_reached (); break; } } LLVMTypeRef vecty = type_to_sse_type (type); LLVMValueRef args [] = { convert (ctx, lhs, vecty), convert (ctx, rhs, vecty) }; LLVMValueRef result = call_intrins (ctx, id, args, dname); values [ins->dreg] = convert (ctx, result, vecty); break; } case OP_SSE2_PACKUS: { LLVMValueRef args [2]; args [0] = convert (ctx, lhs, sse_i2_t); args [1] = convert (ctx, rhs, sse_i2_t); values [ins->dreg] = convert (ctx, call_intrins (ctx, INTRINS_SSE_PACKUSWB, args, dname), type_to_sse_type (ins->inst_c1)); break; } case OP_SSE2_SRLI: { LLVMValueRef args [] = { lhs, rhs }; values [ins->dreg] = convert (ctx, call_intrins (ctx, INTRINS_SSE_PSRLI_W, args, dname), type_to_sse_type (ins->inst_c1)); break; } case OP_SSE2_PSLLDQ: case OP_SSE2_PSRLDQ: { LLVMBasicBlockRef bbs [16 + 1]; LLVMValueRef switch_ins; LLVMValueRef value = lhs; LLVMValueRef index = rhs; LLVMValueRef phi_values [16 + 1]; LLVMTypeRef t = sse_i1_t; int nelems = 16; int i; gboolean shift_right = (ins->opcode == OP_SSE2_PSRLDQ); value = convert (ctx, value, t); // No corresponding LLVM intrinsics // FIXME: Optimize const count for (i = 0; i < nelems; ++i) bbs [i] = gen_bb (ctx, "PSLLDQ_CASE_BB"); bbs [nelems] = gen_bb (ctx, "PSLLDQ_DEF_BB"); cbb = gen_bb (ctx, "PSLLDQ_COND_BB"); switch_ins = LLVMBuildSwitch (builder, index, bbs [nelems], 0); for (i = 0; i < nelems; ++i) { LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]); LLVMPositionBuilderAtEnd (builder, bbs [i]); int mask_values [16]; // Implement 
			// shift using a shuffle
			if (shift_right) {
				for (int j = 0; j < nelems - i; ++j)
					mask_values [j] = i + j;
				for (int j = nelems - i; j < nelems; ++j)
					mask_values [j] = nelems;
			} else {
				for (int j = 0; j < i; ++j)
					mask_values [j] = nelems;
				for (int j = 0; j < nelems - i; ++j)
					mask_values [j + i] = j;
			}
			phi_values [i] = LLVMBuildShuffleVector (builder, value, LLVMGetUndef (t), create_const_vector_i32 (mask_values, nelems), "");
			LLVMBuildBr (builder, cbb);
		}
		/* Default case */
		LLVMPositionBuilderAtEnd (builder, bbs [nelems]);
		phi_values [nelems] = LLVMConstNull (t);
		LLVMBuildBr (builder, cbb);
		LLVMPositionBuilderAtEnd (builder, cbb);
		values [ins->dreg] = LLVMBuildPhi (builder, LLVMTypeOf (phi_values [0]), "");
		LLVMAddIncoming (values [ins->dreg], phi_values, bbs, nelems + 1);
		values [ins->dreg] = convert (ctx, values [ins->dreg], type_to_sse_type (ins->inst_c1));
		ctx->bblocks [bb->block_num].end_bblock = cbb;
		break;
	}
	case OP_SSE2_PSRAW_IMM:
	case OP_SSE2_PSRAD_IMM:
	case OP_SSE2_PSRLW_IMM:
	case OP_SSE2_PSRLD_IMM:
	case OP_SSE2_PSRLQ_IMM: {
		LLVMValueRef value = lhs;
		LLVMValueRef index = rhs;
		IntrinsicId id;
		// FIXME: Optimize const index case
		/* Use the non-immediate version */
		switch (ins->opcode) {
		case OP_SSE2_PSRAW_IMM: id = INTRINS_SSE_PSRA_W; break;
		case OP_SSE2_PSRAD_IMM: id = INTRINS_SSE_PSRA_D; break;
		case OP_SSE2_PSRLW_IMM: id = INTRINS_SSE_PSRL_W; break;
		case OP_SSE2_PSRLD_IMM: id = INTRINS_SSE_PSRL_D; break;
		case OP_SSE2_PSRLQ_IMM: id = INTRINS_SSE_PSRL_Q; break;
		default: g_assert_not_reached (); break;
		}
		LLVMTypeRef t = LLVMTypeOf (value);
		LLVMValueRef index_vect = LLVMBuildInsertElement (builder, LLVMConstNull (t), convert (ctx, index, LLVMGetElementType (t)), const_int32 (0), "");
		LLVMValueRef args [] = { value, index_vect };
		values [ins->dreg] = call_intrins (ctx, id, args, "");
		break;
	}
	case OP_SSE_SHUFPS:
	case OP_SSE2_SHUFPD:
	case OP_SSE2_PSHUFD:
	case OP_SSE2_PSHUFHW:
	case OP_SSE2_PSHUFLW: {
		LLVMTypeRef ret_t = LLVMTypeOf (lhs);
		LLVMValueRef l = lhs;
		LLVMValueRef r = rhs;
		LLVMValueRef ctl = arg3;
		const char *oname = "";
		int ncases = 0;
		switch (ins->opcode) {
		case OP_SSE_SHUFPS: ncases = 256; break;
		case OP_SSE2_SHUFPD: ncases = 4; break;
		case OP_SSE2_PSHUFD: case OP_SSE2_PSHUFHW: case OP_SSE2_PSHUFLW: ncases = 256; r = lhs; ctl = rhs; break;
		}
		switch (ins->opcode) {
		case OP_SSE_SHUFPS: oname = "sse_shufps"; break;
		case OP_SSE2_SHUFPD: oname = "sse2_shufpd"; break;
		case OP_SSE2_PSHUFD: oname = "sse2_pshufd"; break;
		case OP_SSE2_PSHUFHW: oname = "sse2_pshufhw"; break;
		case OP_SSE2_PSHUFLW: oname = "sse2_pshuflw"; break;
		}
		ctl = LLVMBuildAnd (builder, ctl, const_int32 (ncases - 1), "");
		ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, ncases, ctl, ret_t, oname);
		int mask_values [8];
		int mask_len = 0;
		int i = 0;
		while (immediate_unroll_next (&ictx, &i)) {
			switch (ins->opcode) {
			case OP_SSE_SHUFPS:
				mask_len = 4;
				mask_values [0] = ((i >> 0) & 0x3) + 0; // take two elements from lhs
				mask_values [1] = ((i >> 2) & 0x3) + 0;
				mask_values [2] = ((i >> 4) & 0x3) + 4; // and two from rhs
				mask_values [3] = ((i >> 6) & 0x3) + 4;
				break;
			case OP_SSE2_SHUFPD:
				mask_len = 2;
				mask_values [0] = ((i >> 0) & 0x1) + 0;
				mask_values [1] = ((i >> 1) & 0x1) + 2;
				break;
			case OP_SSE2_PSHUFD:
				/*
				 * Each 2 bits in mask selects 1 dword from the source and copies it to the
				 * destination.
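				 * For example, an immediate of 0x1B (0b00011011) selects dwords 3, 2, 1 and 0,
				 * i.e. it reverses the four dwords.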
*/ mask_len = 4; for (int j = 0; j < 4; ++j) { int windex = (i >> (j * 2)) & 0x3; mask_values [j] = windex; } break; case OP_SSE2_PSHUFHW: /* * Each 2 bits in mask selects 1 word from the high quadword of the source and copies it to the * high quadword of the destination. */ mask_len = 8; /* The low quadword stays the same */ for (int j = 0; j < 4; ++j) mask_values [j] = j; for (int j = 0; j < 4; ++j) { int windex = (i >> (j * 2)) & 0x3; mask_values [j + 4] = 4 + windex; } break; case OP_SSE2_PSHUFLW: mask_len = 8; /* The high quadword stays the same */ for (int j = 0; j < 4; ++j) mask_values [j + 4] = j + 4; for (int j = 0; j < 4; ++j) { int windex = (i >> (j * 2)) & 0x3; mask_values [j] = windex; } break; } LLVMValueRef mask = create_const_vector_i32 (mask_values, mask_len); LLVMValueRef result = LLVMBuildShuffleVector (builder, l, r, mask, oname); immediate_unroll_commit (&ictx, i, result); } immediate_unroll_default (&ictx); immediate_unroll_commit_default (&ictx, LLVMGetUndef (ret_t)); values [ins->dreg] = immediate_unroll_end (&ictx, &cbb); break; } case OP_SSE3_MOVDDUP: { int mask [] = { 0, 0 }; values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, LLVMGetUndef (LLVMTypeOf (lhs)), create_const_vector_i32 (mask, 2), ""); break; } case OP_SSE3_MOVDDUP_MEM: { LLVMValueRef undef = LLVMGetUndef (v128_r8_t); LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (r8_t, 0)); LLVMValueRef elem = mono_llvm_build_aligned_load (builder, addr, "sse3_movddup_mem", FALSE, 1); LLVMValueRef val = LLVMBuildInsertElement (builder, undef, elem, const_int32 (0), "sse3_movddup_mem"); values [ins->dreg] = LLVMBuildShuffleVector (builder, val, undef, LLVMConstNull (LLVMVectorType (i4_t, 2)), "sse3_movddup_mem"); break; } case OP_SSE3_MOVSHDUP: { int mask [] = { 1, 1, 3, 3 }; values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), create_const_vector_i32 (mask, 4), ""); break; } case OP_SSE3_MOVSLDUP: { int mask [] = { 0, 0, 2, 2 }; values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), create_const_vector_i32 (mask, 4), ""); break; } case OP_SSSE3_SHUFFLE: { LLVMValueRef args [] = { lhs, rhs }; values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_PSHUFB, args, dname); break; } case OP_SSSE3_ABS: { // %sub = sub <16 x i8> zeroinitializer, %arg // %cmp = icmp sgt <16 x i8> %arg, zeroinitializer // %abs = select <16 x i1> %cmp, <16 x i8> %arg, <16 x i8> %sub LLVMTypeRef typ = type_to_sse_type (ins->inst_c1); LLVMValueRef sub = LLVMBuildSub(builder, LLVMConstNull(typ), lhs, ""); LLVMValueRef cmp = LLVMBuildICmp(builder, LLVMIntSGT, lhs, LLVMConstNull(typ), ""); LLVMValueRef abs = LLVMBuildSelect (builder, cmp, lhs, sub, ""); values [ins->dreg] = convert (ctx, abs, typ); break; } case OP_SSSE3_ALIGNR: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMValueRef zero = LLVMConstNull (v128_i1_t); LLVMValueRef hivec = convert (ctx, lhs, v128_i1_t); LLVMValueRef lovec = convert (ctx, rhs, v128_i1_t); LLVMValueRef rshift_amount = convert (ctx, arg3, i1_t); ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 32, rshift_amount, v128_i1_t, "ssse3_alignr"); LLVMValueRef mask_values [16]; // 128-bit vector, 8-bit elements, 16 total elements int i = 0; while (immediate_unroll_next (&ictx, &i)) { LLVMValueRef hi = NULL; LLVMValueRef lo = NULL; if (i <= 16) { for (int j = 0; j < 16; j++) mask_values [j] = const_int32 (i + j); lo = lovec; hi = hivec; } else { for (int j = 0; j < 16; j++) mask_values [j] = const_int32 (i + 
j - 16); lo = hivec; hi = zero; } LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, lo, hi, LLVMConstVector (mask_values, 16), "ssse3_alignr"); immediate_unroll_commit (&ictx, i, shuffled); } immediate_unroll_default (&ictx); immediate_unroll_commit_default (&ictx, zero); LLVMValueRef result = immediate_unroll_end (&ictx, &cbb); values [ins->dreg] = convert (ctx, result, ret_t); break; } case OP_SSE41_ROUNDP: { LLVMValueRef args [] = { lhs, LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE) }; values [ins->dreg] = call_intrins (ctx, ins->inst_c1 == MONO_TYPE_R4 ? INTRINS_SSE_ROUNDPS : INTRINS_SSE_ROUNDPD, args, dname); break; } case OP_SSE41_ROUNDS: { LLVMValueRef args [3]; args [0] = lhs; args [1] = rhs; args [2] = LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE); values [ins->dreg] = call_intrins (ctx, ins->inst_c1 == MONO_TYPE_R4 ? INTRINS_SSE_ROUNDSS : INTRINS_SSE_ROUNDSD, args, dname); break; } case OP_SSE41_DPPS: case OP_SSE41_DPPD: { /* Bits 0, 1, 4, 5 are meaningful for the control mask * in dppd; all bits are meaningful for dpps. */ LLVMTypeRef ret_t = NULL; LLVMValueRef mask = NULL; int mask_bits = 0; int high_shift = 0; int low_mask = 0; IntrinsicId iid = (IntrinsicId) 0; const char *oname = ""; switch (ins->opcode) { case OP_SSE41_DPPS: ret_t = v128_r4_t; mask = const_int8 (0xff); // 0b11111111 mask_bits = 8; high_shift = 4; low_mask = 0xf; iid = INTRINS_SSE_DPPS; oname = "sse41_dpps"; break; case OP_SSE41_DPPD: ret_t = v128_r8_t; mask = const_int8 (0x33); // 0b00110011 mask_bits = 4; high_shift = 2; low_mask = 0x3; iid = INTRINS_SSE_DPPD; oname = "sse41_dppd"; break; } LLVMValueRef args [] = { lhs, rhs, NULL }; LLVMValueRef index = LLVMBuildAnd (builder, convert (ctx, arg3, i1_t), mask, oname); ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 1 << mask_bits, index, ret_t, oname); int i = 0; while (immediate_unroll_next (&ictx, &i)) { int imm = ((i >> high_shift) << 4) | (i & low_mask); args [2] = const_int8 (imm); LLVMValueRef result = call_intrins (ctx, iid, args, dname); immediate_unroll_commit (&ictx, imm, result); } immediate_unroll_default (&ictx); immediate_unroll_commit_default (&ictx, LLVMGetUndef (ret_t)); values [ins->dreg] = immediate_unroll_end (&ictx, &cbb); break; } case OP_SSE41_MPSADBW: { LLVMValueRef args [] = { convert (ctx, lhs, sse_i1_t), convert (ctx, rhs, sse_i1_t), NULL, }; LLVMValueRef ctl = convert (ctx, arg3, i1_t); // Only 3 bits (bits 0-2) are used by mpsadbw and llvm.x86.sse41.mpsadbw int used_bits = 0x7; ctl = LLVMBuildAnd (builder, ctl, const_int8 (used_bits), "sse41_mpsadbw"); ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, used_bits + 1, ctl, v128_i2_t, "sse41_mpsadbw"); int i = 0; while (immediate_unroll_next (&ictx, &i)) { args [2] = const_int8 (i); LLVMValueRef result = call_intrins (ctx, INTRINS_SSE_MPSADBW, args, "sse41_mpsadbw"); immediate_unroll_commit (&ictx, i, result); } immediate_unroll_unreachable_default (&ictx); values [ins->dreg] = immediate_unroll_end (&ictx, &cbb); break; } case OP_SSE41_INSERTPS: { LLVMValueRef ctl = convert (ctx, arg3, i1_t); LLVMValueRef args [] = { lhs, rhs, NULL }; ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 256, ctl, v128_r4_t, "sse41_insertps"); int i = 0; while (immediate_unroll_next (&ictx, &i)) { args [2] = const_int8 (i); LLVMValueRef result = call_intrins (ctx, INTRINS_SSE_INSERTPS, args, dname); immediate_unroll_commit (&ictx, i, result); } immediate_unroll_unreachable_default (&ictx); values [ins->dreg] = immediate_unroll_end (&ictx, &cbb); break; } 
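	/*
	 * Note on the immediate_unroll_* helpers used in several cases here: the
	 * underlying x86 instructions encode their control byte as an immediate, but
	 * the managed argument is only known at run time at this point. The helpers
	 * emit a switch over every admissible immediate value, one block per value,
	 * and merge the specialized results with a phi; roughly:
	 *   switch i8 %ctl, label %default [ i8 0, label %case0  i8 1, label %case1 ... ]
	 *   case0: %r0 = <op specialized for immediate 0> ; br label %merge
	 *   merge: %res = phi <ret_t> [ %r0, %case0 ], [ %r1, %case1 ], ...
	 */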
case OP_SSE41_BLEND: { LLVMTypeRef ret_t = LLVMTypeOf (lhs); int nelem = LLVMGetVectorSize (ret_t); g_assert (nelem >= 2 && nelem <= 8); // I2, U2, R4, R8 int unique_ctl_patterns = 1 << nelem; int ctlmask = unique_ctl_patterns - 1; LLVMValueRef ctl = convert (ctx, arg3, i1_t); ctl = LLVMBuildAnd (builder, ctl, const_int8 (ctlmask), "sse41_blend"); ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, unique_ctl_patterns, ctl, ret_t, "sse41_blend"); int i = 0; int mask_values [MAX_VECTOR_ELEMS] = { 0 }; while (immediate_unroll_next (&ictx, &i)) { for (int lane = 0; lane < nelem; ++lane) { // n-bit in inst_c0 (control byte) is set to 1 gboolean bit_set = (i & (1 << lane)) >> lane; mask_values [lane] = lane + (bit_set ? nelem : 0); } LLVMValueRef mask = create_const_vector_i32 (mask_values, nelem); LLVMValueRef result = LLVMBuildShuffleVector (builder, lhs, rhs, mask, "sse41_blend"); immediate_unroll_commit (&ictx, i, result); } immediate_unroll_default (&ictx); immediate_unroll_commit_default (&ictx, LLVMGetUndef (ret_t)); values [ins->dreg] = immediate_unroll_end (&ictx, &cbb); break; } case OP_SSE41_BLENDV: { LLVMValueRef args [] = { lhs, rhs, values [ins->sreg3] }; if (ins->inst_c1 == MONO_TYPE_R4) { values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_BLENDVPS, args, dname); } else if (ins->inst_c1 == MONO_TYPE_R8) { values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_BLENDVPD, args, dname); } else { // for other non-fp type just convert to <16 x i8> and pass to @llvm.x86.sse41.pblendvb args [0] = LLVMBuildBitCast (ctx->builder, args [0], sse_i1_t, ""); args [1] = LLVMBuildBitCast (ctx->builder, args [1], sse_i1_t, ""); args [2] = LLVMBuildBitCast (ctx->builder, args [2], sse_i1_t, ""); values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_PBLENDVB, args, dname); } break; } case OP_SSE_CVTII: { gboolean is_signed = (ins->inst_c1 == MONO_TYPE_I1) || (ins->inst_c1 == MONO_TYPE_I2) || (ins->inst_c1 == MONO_TYPE_I4); LLVMTypeRef vec_type; if ((ins->inst_c1 == MONO_TYPE_I1) || (ins->inst_c1 == MONO_TYPE_U1)) vec_type = sse_i1_t; else if ((ins->inst_c1 == MONO_TYPE_I2) || (ins->inst_c1 == MONO_TYPE_U2)) vec_type = sse_i2_t; else vec_type = sse_i4_t; LLVMValueRef value; if (LLVMGetTypeKind (LLVMTypeOf (lhs)) != LLVMVectorTypeKind) { LLVMValueRef bitcasted = LLVMBuildBitCast (ctx->builder, lhs, LLVMPointerType (vec_type, 0), ""); value = mono_llvm_build_aligned_load (builder, bitcasted, "", FALSE, 1); } else { value = LLVMBuildBitCast (ctx->builder, lhs, vec_type, ""); } LLVMValueRef mask_vec; LLVMTypeRef dst_type; if (ins->inst_c0 == MONO_TYPE_I2) { mask_vec = create_const_vector_i32 (mask_0_incr_1, 8); dst_type = sse_i2_t; } else if (ins->inst_c0 == MONO_TYPE_I4) { mask_vec = create_const_vector_i32 (mask_0_incr_1, 4); dst_type = sse_i4_t; } else { g_assert (ins->inst_c0 == MONO_TYPE_I8); mask_vec = create_const_vector_i32 (mask_0_incr_1, 2); dst_type = sse_i8_t; } LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, value, LLVMGetUndef (vec_type), mask_vec, ""); if (is_signed) values [ins->dreg] = LLVMBuildSExt (ctx->builder, shuffled, dst_type, ""); else values [ins->dreg] = LLVMBuildZExt (ctx->builder, shuffled, dst_type, ""); break; } case OP_SSE41_LOADANT: { LLVMValueRef dst_ptr = convert (ctx, lhs, LLVMPointerType (primitive_type_to_llvm_type (inst_c1_type (ins)), 0)); LLVMValueRef dst_vec = LLVMBuildBitCast (builder, dst_ptr, LLVMPointerType (type_to_sse_type (ins->inst_c1), 0), ""); LLVMValueRef load = mono_llvm_build_aligned_load (builder, dst_vec, "", FALSE, 16); 
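		/*
		 * LOADANT is the non-temporal aligned load (MOVNTDQA): the call below tags
		 * the load just emitted, presumably via LLVM's !nontemporal metadata, so the
		 * backend can pick the streaming form and avoid polluting the cache.
		 */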
set_nontemporal_flag (load); values [ins->dreg] = load; break; } case OP_SSE41_MUL: { const int shift_vals [] = { 32, 32 }; const LLVMValueRef args [] = { convert (ctx, lhs, sse_i8_t), convert (ctx, rhs, sse_i8_t), }; LLVMValueRef mul_args [2] = { 0 }; LLVMValueRef shift_vec = create_const_vector (LLVMInt64Type (), shift_vals, 2); for (int i = 0; i < 2; ++i) { LLVMValueRef padded = LLVMBuildShl (builder, args [i], shift_vec, ""); mul_args[i] = mono_llvm_build_exact_ashr (builder, padded, shift_vec); } values [ins->dreg] = LLVMBuildNSWMul (builder, mul_args [0], mul_args [1], dname); break; } case OP_SSE41_MULLO: { values [ins->dreg] = LLVMBuildMul (ctx->builder, lhs, rhs, ""); break; } case OP_SSE42_CRC32: case OP_SSE42_CRC64: { LLVMValueRef args [2]; args [0] = lhs; args [1] = convert (ctx, rhs, primitive_type_to_llvm_type (ins->inst_c0)); IntrinsicId id; switch (ins->inst_c0) { case MONO_TYPE_U1: id = INTRINS_SSE_CRC32_32_8; break; case MONO_TYPE_U2: id = INTRINS_SSE_CRC32_32_16; break; case MONO_TYPE_U4: id = INTRINS_SSE_CRC32_32_32; break; case MONO_TYPE_U8: id = INTRINS_SSE_CRC32_64_64; break; default: g_assert_not_reached (); break; } values [ins->dreg] = call_intrins (ctx, id, args, ""); break; } case OP_PCLMULQDQ: { LLVMValueRef args [] = { lhs, rhs, NULL }; LLVMValueRef ctl = convert (ctx, arg3, i1_t); // Only bits 0 and 4 of the immediate operand are used by PCLMULQDQ. ctl = LLVMBuildAnd (builder, ctl, const_int8 (0x11), "pclmulqdq"); ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 1 << 2, ctl, v128_i8_t, "pclmulqdq"); int i = 0; while (immediate_unroll_next (&ictx, &i)) { int imm = ((i & 0x2) << 3) | (i & 0x1); args [2] = const_int8 (imm); LLVMValueRef result = call_intrins (ctx, INTRINS_PCLMULQDQ, args, "pclmulqdq"); immediate_unroll_commit (&ictx, imm, result); } immediate_unroll_unreachable_default (&ictx); values [ins->dreg] = immediate_unroll_end (&ictx, &cbb); break; } case OP_AES_KEYGENASSIST: { LLVMValueRef roundconstant = convert (ctx, rhs, i1_t); LLVMValueRef args [] = { convert (ctx, lhs, v128_i8_t), NULL }; ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 256, roundconstant, v128_i8_t, "aes_keygenassist"); int i = 0; while (immediate_unroll_next (&ictx, &i)) { args [1] = const_int8 (i); LLVMValueRef result = call_intrins (ctx, INTRINS_AESNI_AESKEYGENASSIST, args, "aes_keygenassist"); immediate_unroll_commit (&ictx, i, result); } immediate_unroll_unreachable_default (&ictx); LLVMValueRef result = immediate_unroll_end (&ictx, &cbb); values [ins->dreg] = convert (ctx, result, v128_i1_t); break; } #endif case OP_XCOMPARE_FP: { LLVMRealPredicate pred = fpcond_to_llvm_cond [ins->inst_c0]; LLVMValueRef cmp = LLVMBuildFCmp (builder, pred, lhs, rhs, ""); int nelems = LLVMGetVectorSize (LLVMTypeOf (cmp)); g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs)); if (ins->inst_c1 == MONO_TYPE_R8) values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt64Type (), nelems), ""), LLVMTypeOf (lhs), ""); else values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt32Type (), nelems), ""), LLVMTypeOf (lhs), ""); break; } case OP_XCOMPARE: { LLVMIntPredicate pred = cond_to_llvm_cond [ins->inst_c0]; LLVMValueRef cmp = LLVMBuildICmp (builder, pred, lhs, rhs, ""); g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs)); values [ins->dreg] = LLVMBuildSExt (builder, cmp, LLVMTypeOf (lhs), ""); break; } case OP_POPCNT32: values [ins->dreg] = call_intrins (ctx, INTRINS_CTPOP_I32, &lhs, ""); break; case 
OP_POPCNT64: values [ins->dreg] = call_intrins (ctx, INTRINS_CTPOP_I64, &lhs, ""); break; case OP_CTTZ32: case OP_CTTZ64: { LLVMValueRef args [2]; args [0] = lhs; args [1] = LLVMConstInt (LLVMInt1Type (), 0, FALSE); values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_CTTZ32 ? INTRINS_CTTZ_I32 : INTRINS_CTTZ_I64, args, ""); break; } case OP_BMI1_BEXTR32: case OP_BMI1_BEXTR64: { LLVMValueRef args [2]; args [0] = lhs; args [1] = convert (ctx, rhs, ins->opcode == OP_BMI1_BEXTR32 ? i4_t : i8_t); // cast ushort to u32/u64 values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_BMI1_BEXTR32 ? INTRINS_BEXTR_I32 : INTRINS_BEXTR_I64, args, ""); break; } case OP_BZHI32: case OP_BZHI64: { LLVMValueRef args [2]; args [0] = lhs; args [1] = rhs; values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_BZHI32 ? INTRINS_BZHI_I32 : INTRINS_BZHI_I64, args, ""); break; } case OP_MULX_H32: case OP_MULX_H64: case OP_MULX_HL32: case OP_MULX_HL64: { gboolean is_64 = ins->opcode == OP_MULX_H64 || ins->opcode == OP_MULX_HL64; gboolean only_high = ins->opcode == OP_MULX_H32 || ins->opcode == OP_MULX_H64; LLVMValueRef lx = LLVMBuildZExt (ctx->builder, lhs, LLVMInt128Type (), ""); LLVMValueRef rx = LLVMBuildZExt (ctx->builder, rhs, LLVMInt128Type (), ""); LLVMValueRef mulx = LLVMBuildMul (ctx->builder, lx, rx, ""); if (!only_high) { LLVMValueRef addr = convert (ctx, arg3, LLVMPointerType (is_64 ? i8_t : i4_t, 0)); LLVMValueRef lowx = LLVMBuildTrunc (ctx->builder, mulx, is_64 ? LLVMInt64Type () : LLVMInt32Type (), ""); LLVMBuildStore (ctx->builder, lowx, addr); } LLVMValueRef shift = LLVMConstInt (LLVMInt128Type (), is_64 ? 64 : 32, FALSE); LLVMValueRef highx = LLVMBuildLShr (ctx->builder, mulx, shift, ""); values [ins->dreg] = LLVMBuildTrunc (ctx->builder, highx, is_64 ? LLVMInt64Type () : LLVMInt32Type (), ""); break; } case OP_PEXT32: case OP_PEXT64: { LLVMValueRef args [2]; args [0] = lhs; args [1] = rhs; values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_PEXT32 ? INTRINS_PEXT_I32 : INTRINS_PEXT_I64, args, ""); break; } case OP_PDEP32: case OP_PDEP64: { LLVMValueRef args [2]; args [0] = lhs; args [1] = rhs; values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_PDEP32 ? INTRINS_PDEP_I32 : INTRINS_PDEP_I64, args, ""); break; } #endif /* defined(TARGET_X86) || defined(TARGET_AMD64) */ // Shared between ARM64 and X86 #if defined(TARGET_ARM64) || defined(TARGET_X86) || defined(TARGET_AMD64) case OP_LZCNT32: case OP_LZCNT64: { IntrinsicId iid = ins->opcode == OP_LZCNT32 ? INTRINS_CTLZ_I32 : INTRINS_CTLZ_I64; LLVMValueRef args [] = { lhs, const_int1 (FALSE) }; values [ins->dreg] = call_intrins (ctx, iid, args, ""); break; } #endif #if defined(TARGET_ARM64) || defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_WASM) case OP_XEQUAL: { LLVMTypeRef t; LLVMValueRef cmp, mask [MAX_VECTOR_ELEMS], shuffle; int nelems; #if defined(TARGET_WASM) /* The wasm code generator doesn't understand the shuffle/and code sequence below */ LLVMValueRef val; if (LLVMIsNull (lhs) || LLVMIsNull (rhs)) { val = LLVMIsNull (lhs) ? 
rhs : lhs;
			nelems = LLVMGetVectorSize (LLVMTypeOf (lhs));
			IntrinsicId intrins = (IntrinsicId)0;
			switch (nelems) {
			case 16: intrins = INTRINS_WASM_ANYTRUE_V16; break;
			case 8: intrins = INTRINS_WASM_ANYTRUE_V8; break;
			case 4: intrins = INTRINS_WASM_ANYTRUE_V4; break;
			case 2: intrins = INTRINS_WASM_ANYTRUE_V2; break;
			default: g_assert_not_reached ();
			}
			/* res = !wasm.anytrue (val) */
			values [ins->dreg] = call_intrins (ctx, intrins, &val, "");
			values [ins->dreg] = LLVMBuildZExt (builder, LLVMBuildICmp (builder, LLVMIntEQ, values [ins->dreg], LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""), LLVMInt32Type (), dname);
			break;
		}
#endif
		LLVMTypeRef srcelemt = LLVMGetElementType (LLVMTypeOf (lhs));
		// %c = icmp eq <16 x i8> %a0, %a1
		if (srcelemt == LLVMDoubleType () || srcelemt == LLVMFloatType ())
			cmp = LLVMBuildFCmp (builder, LLVMRealOEQ, lhs, rhs, "");
		else
			cmp = LLVMBuildICmp (builder, LLVMIntEQ, lhs, rhs, "");
		nelems = LLVMGetVectorSize (LLVMTypeOf (cmp));
		LLVMTypeRef elemt;
		if (srcelemt == LLVMDoubleType ())
			elemt = LLVMInt64Type ();
		else if (srcelemt == LLVMFloatType ())
			elemt = LLVMInt32Type ();
		else
			elemt = srcelemt;
		t = LLVMVectorType (elemt, nelems);
		cmp = LLVMBuildSExt (builder, cmp, t, "");
		// cmp is a <nelems x elemt> vector, each element is either 0xff... or 0
		int half = nelems / 2;
		while (half >= 1) {
			// AND the top and bottom halves into the bottom half
			for (int i = 0; i < half; ++i)
				mask [i] = LLVMConstInt (LLVMInt32Type (), half + i, FALSE);
			for (int i = half; i < nelems; ++i)
				mask [i] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
			shuffle = LLVMBuildShuffleVector (builder, cmp, LLVMGetUndef (t), LLVMConstVector (mask, LLVMGetVectorSize (t)), "");
			cmp = LLVMBuildAnd (builder, cmp, shuffle, "");
			half = half / 2;
		}
		// Extract [0]
		LLVMValueRef first_elem = LLVMBuildExtractElement (builder, cmp, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
		// convert to 0/1
		LLVMValueRef cmp_zero = LLVMBuildICmp (builder, LLVMIntNE, first_elem, LLVMConstInt (elemt, 0, FALSE), "");
		values [ins->dreg] = LLVMBuildZExt (builder, cmp_zero, LLVMInt8Type (), "");
		break;
	}
#endif
#if defined(TARGET_ARM64)
	case OP_XOP_I4_I4:
	case OP_XOP_I8_I8: {
		IntrinsicId id = (IntrinsicId)ins->inst_c0;
		values [ins->dreg] = call_intrins (ctx, id, &lhs, "");
		break;
	}
	case OP_XOP_X_X_X:
	case OP_XOP_I4_I4_I4:
	case OP_XOP_I4_I4_I8: {
		IntrinsicId id = (IntrinsicId)ins->inst_c0;
		gboolean zext_last = FALSE, bitcast_result = FALSE, getElement = FALSE;
		int element_idx = -1;
		switch (id) {
		case INTRINS_AARCH64_PMULL64:
			getElement = TRUE;
			bitcast_result = TRUE;
			element_idx = ins->inst_c1;
			break;
		case INTRINS_AARCH64_CRC32B:
		case INTRINS_AARCH64_CRC32H:
		case INTRINS_AARCH64_CRC32W:
		case INTRINS_AARCH64_CRC32CB:
		case INTRINS_AARCH64_CRC32CH:
		case INTRINS_AARCH64_CRC32CW:
			zext_last = TRUE;
			break;
		default:
			break;
		}
		LLVMValueRef arg1 = rhs;
		if (zext_last)
			arg1 = LLVMBuildZExt (ctx->builder, arg1, LLVMInt32Type (), "");
		LLVMValueRef args [] = { lhs, arg1 };
		if (getElement) {
			args [0] = LLVMBuildExtractElement (ctx->builder, args [0], const_int32 (element_idx), "");
			args [1] = LLVMBuildExtractElement (ctx->builder, args [1], const_int32 (element_idx), "");
		}
		values [ins->dreg] = call_intrins (ctx, id, args, "");
		if (bitcast_result)
			values [ins->dreg] = convert (ctx, values [ins->dreg], LLVMVectorType (LLVMInt64Type (), 2));
		break;
	}
	case OP_XOP_X_X_X_X: {
		IntrinsicId id = (IntrinsicId)ins->inst_c0;
		gboolean getLowerElement = FALSE;
		int arg_idx = -1;
		switch (id) {
		case INTRINS_AARCH64_SHA1C:
		case INTRINS_AARCH64_SHA1M:
		case INTRINS_AARCH64_SHA1P:
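			/*
			 * The SHA1 hash-update intrinsics take the rotated hash element (argument 1)
			 * as a scalar i32, so lane 0 is extracted from that vector argument below.
			 */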
getLowerElement = TRUE; arg_idx = 1; break; default: break; } LLVMValueRef args [] = { lhs, rhs, arg3 }; if (getLowerElement) args [arg_idx] = LLVMBuildExtractElement (ctx->builder, args [arg_idx], const_int32 (0), ""); values [ins->dreg] = call_intrins (ctx, id, args, ""); break; } case OP_XOP_X_X: { IntrinsicId id = (IntrinsicId)ins->inst_c0; LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); gboolean getLowerElement = FALSE; switch (id) { case INTRINS_AARCH64_SHA1H: getLowerElement = TRUE; break; default: break; } LLVMValueRef arg0 = lhs; if (getLowerElement) arg0 = LLVMBuildExtractElement (ctx->builder, arg0, const_int32 (0), ""); LLVMValueRef result = call_intrins (ctx, id, &arg0, ""); if (getLowerElement) result = vector_from_scalar (ctx, ret_t, result); values [ins->dreg] = result; break; } case OP_XCOMPARE_FP_SCALAR: case OP_XCOMPARE_FP: { g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs)); gboolean scalar = ins->opcode == OP_XCOMPARE_FP_SCALAR; LLVMRealPredicate pred = fpcond_to_llvm_cond [ins->inst_c0]; LLVMTypeRef ret_t = LLVMTypeOf (lhs); LLVMTypeRef reti_t = to_integral_vector_type (ret_t); LLVMValueRef args [] = { lhs, rhs }; if (scalar) for (int i = 0; i < 2; ++i) args [i] = scalar_from_vector (ctx, args [i]); LLVMValueRef result = LLVMBuildFCmp (builder, pred, args [0], args [1], "xcompare_fp"); if (scalar) result = vector_from_scalar (ctx, LLVMVectorType (LLVMIntType (1), LLVMGetVectorSize (reti_t)), result); result = LLVMBuildSExt (builder, result, reti_t, ""); result = LLVMBuildBitCast (builder, result, ret_t, ""); values [ins->dreg] = result; break; } case OP_XCOMPARE_SCALAR: case OP_XCOMPARE: { g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs)); gboolean scalar = ins->opcode == OP_XCOMPARE_SCALAR; LLVMIntPredicate pred = cond_to_llvm_cond [ins->inst_c0]; LLVMTypeRef ret_t = LLVMTypeOf (lhs); LLVMValueRef args [] = { lhs, rhs }; if (scalar) for (int i = 0; i < 2; ++i) args [i] = scalar_from_vector (ctx, args [i]); LLVMValueRef result = LLVMBuildICmp (builder, pred, args [0], args [1], "xcompare"); if (scalar) result = vector_from_scalar (ctx, LLVMVectorType (LLVMIntType (1), LLVMGetVectorSize (ret_t)), result); values [ins->dreg] = LLVMBuildSExt (builder, result, ret_t, ""); break; } case OP_ARM64_EXT: { LLVMTypeRef ret_t = LLVMTypeOf (lhs); unsigned int elems = LLVMGetVectorSize (ret_t); g_assert (elems <= ARM64_MAX_VECTOR_ELEMS); LLVMValueRef index = arg3; LLVMValueRef default_value = lhs; ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, elems, index, ret_t, "arm64_ext"); int i = 0; while (immediate_unroll_next (&ictx, &i)) { LLVMValueRef mask = create_const_vector_i32 (&mask_0_incr_1 [i], elems); LLVMValueRef result = LLVMBuildShuffleVector (builder, lhs, rhs, mask, "arm64_ext"); immediate_unroll_commit (&ictx, i, result); } immediate_unroll_default (&ictx); immediate_unroll_commit_default (&ictx, default_value); values [ins->dreg] = immediate_unroll_end (&ictx, &cbb); break; } case OP_ARM64_MVN: { LLVMTypeRef ret_t = LLVMTypeOf (lhs); LLVMValueRef result = bitcast_to_integral (ctx, lhs); result = LLVMBuildNot (builder, result, "arm64_mvn"); result = convert (ctx, result, ret_t); values [ins->dreg] = result; break; } case OP_ARM64_BIC: { LLVMTypeRef ret_t = LLVMTypeOf (lhs); LLVMValueRef result = bitcast_to_integral (ctx, lhs); LLVMValueRef mask = bitcast_to_integral (ctx, rhs); mask = LLVMBuildNot (builder, mask, ""); result = LLVMBuildAnd (builder, mask, result, "arm64_bic"); result = convert (ctx, result, ret_t); values [ins->dreg] = result; break; 
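		/*
		 * A minimal IR sketch of the BIC ("bit clear") lowering above:
		 *   %m = xor <t> %rhs, <all ones>   ; complement the mask
		 *   %r = and <t> %m, %lhs           ; i.e. lhs & ~rhs
		 */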
} case OP_ARM64_BSL: { LLVMTypeRef ret_t = LLVMTypeOf (rhs); LLVMValueRef select = bitcast_to_integral (ctx, lhs); LLVMValueRef left = bitcast_to_integral (ctx, rhs); LLVMValueRef right = bitcast_to_integral (ctx, arg3); LLVMValueRef result1 = LLVMBuildAnd (builder, select, left, "arm64_bsl"); LLVMValueRef result2 = LLVMBuildAnd (builder, LLVMBuildNot (builder, select, ""), right, ""); LLVMValueRef result = LLVMBuildOr (builder, result1, result2, ""); result = convert (ctx, result, ret_t); values [ins->dreg] = result; break; } case OP_ARM64_CMTST: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMValueRef l = bitcast_to_integral (ctx, lhs); LLVMValueRef r = bitcast_to_integral (ctx, rhs); LLVMValueRef result = LLVMBuildAnd (builder, l, r, "arm64_cmtst"); LLVMTypeRef t = LLVMTypeOf (l); result = LLVMBuildICmp (builder, LLVMIntNE, result, LLVMConstNull (t), ""); result = LLVMBuildSExt (builder, result, t, ""); result = convert (ctx, result, ret_t); values [ins->dreg] = result; break; } case OP_ARM64_FCVTL: case OP_ARM64_FCVTL2: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); gboolean high = ins->opcode == OP_ARM64_FCVTL2; LLVMValueRef result = lhs; if (high) result = extract_high_elements (ctx, result); result = LLVMBuildFPExt (builder, result, ret_t, "arm64_fcvtl"); values [ins->dreg] = result; break; } case OP_ARM64_FCVTXN: case OP_ARM64_FCVTXN2: case OP_ARM64_FCVTN: case OP_ARM64_FCVTN2: { gboolean high = FALSE; int iid = 0; switch (ins->opcode) { case OP_ARM64_FCVTXN2: high = TRUE; case OP_ARM64_FCVTXN: iid = INTRINS_AARCH64_ADV_SIMD_FCVTXN; break; case OP_ARM64_FCVTN2: high = TRUE; break; } LLVMValueRef result = lhs; if (high) result = rhs; if (iid) result = call_intrins (ctx, iid, &result, ""); else result = LLVMBuildFPTrunc (builder, result, v64_r4_t, ""); if (high) result = concatenate_vectors (ctx, lhs, result); values [ins->dreg] = result; break; } case OP_ARM64_UCVTF: case OP_ARM64_SCVTF: case OP_ARM64_UCVTF_SCALAR: case OP_ARM64_SCVTF_SCALAR: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); gboolean scalar = FALSE; gboolean is_unsigned = FALSE; switch (ins->opcode) { case OP_ARM64_UCVTF_SCALAR: scalar = TRUE; case OP_ARM64_UCVTF: is_unsigned = TRUE; break; case OP_ARM64_SCVTF_SCALAR: scalar = TRUE; break; } LLVMValueRef result = lhs; LLVMTypeRef cvt_t = ret_t; if (scalar) { result = scalar_from_vector (ctx, result); cvt_t = LLVMGetElementType (ret_t); } if (is_unsigned) result = LLVMBuildUIToFP (builder, result, cvt_t, "arm64_ucvtf"); else result = LLVMBuildSIToFP (builder, result, cvt_t, "arm64_scvtf"); if (scalar) result = vector_from_scalar (ctx, ret_t, result); values [ins->dreg] = result; break; } case OP_ARM64_FCVTZS: case OP_ARM64_FCVTZS_SCALAR: case OP_ARM64_FCVTZU: case OP_ARM64_FCVTZU_SCALAR: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); gboolean scalar = FALSE; gboolean is_unsigned = FALSE; switch (ins->opcode) { case OP_ARM64_FCVTZU_SCALAR: scalar = TRUE; case OP_ARM64_FCVTZU: is_unsigned = TRUE; break; case OP_ARM64_FCVTZS_SCALAR: scalar = TRUE; break; } LLVMValueRef result = lhs; LLVMTypeRef cvt_t = ret_t; if (scalar) { result = scalar_from_vector (ctx, result); cvt_t = LLVMGetElementType (ret_t); } if (is_unsigned) result = LLVMBuildFPToUI (builder, result, cvt_t, "arm64_fcvtzu"); else result = LLVMBuildFPToSI (builder, result, cvt_t, "arm64_fcvtzs"); if (scalar) result = vector_from_scalar (ctx, ret_t, result); values [ins->dreg] = result; break; } case OP_ARM64_SELECT_SCALAR: { LLVMValueRef 
result = LLVMBuildExtractElement (builder, lhs, rhs, ""); LLVMTypeRef elem_t = LLVMTypeOf (result); unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t); LLVMTypeRef t = LLVMVectorType (elem_t, 64 / elem_bits); result = vector_from_scalar (ctx, t, result); values [ins->dreg] = result; break; } case OP_ARM64_SELECT_QUAD: { LLVMTypeRef src_type = simd_class_to_llvm_type (ctx, ins->data.op [1].klass); LLVMTypeRef ret_type = simd_class_to_llvm_type (ctx, ins->klass); unsigned int src_type_bits = mono_llvm_get_prim_size_bits (src_type); unsigned int ret_type_bits = mono_llvm_get_prim_size_bits (ret_type); unsigned int src_intermediate_elems = src_type_bits / 32; unsigned int ret_intermediate_elems = ret_type_bits / 32; LLVMTypeRef intermediate_type = LLVMVectorType (i4_t, src_intermediate_elems); LLVMValueRef result = LLVMBuildBitCast (builder, lhs, intermediate_type, "arm64_select_quad"); result = LLVMBuildExtractElement (builder, result, rhs, "arm64_select_quad"); result = broadcast_element (ctx, result, ret_intermediate_elems); result = LLVMBuildBitCast (builder, result, ret_type, "arm64_select_quad"); values [ins->dreg] = result; break; } case OP_LSCNT32: case OP_LSCNT64: { // %shr = ashr i32 %x, 31 // %xor = xor i32 %shr, %x // %mul = shl i32 %xor, 1 // %add = or i32 %mul, 1 // %0 = tail call i32 @llvm.ctlz.i32(i32 %add, i1 false) LLVMValueRef shr = LLVMBuildAShr (builder, lhs, ins->opcode == OP_LSCNT32 ? LLVMConstInt (LLVMInt32Type (), 31, FALSE) : LLVMConstInt (LLVMInt64Type (), 63, FALSE), ""); LLVMValueRef one = ins->opcode == OP_LSCNT32 ? LLVMConstInt (LLVMInt32Type (), 1, FALSE) : LLVMConstInt (LLVMInt64Type (), 1, FALSE); LLVMValueRef xor = LLVMBuildXor (builder, shr, lhs, ""); LLVMValueRef mul = LLVMBuildShl (builder, xor, one, ""); LLVMValueRef add = LLVMBuildOr (builder, mul, one, ""); LLVMValueRef args [2]; args [0] = add; args [1] = LLVMConstInt (LLVMInt1Type (), 0, FALSE); values [ins->dreg] = LLVMBuildCall (builder, get_intrins (ctx, ins->opcode == OP_LSCNT32 ? INTRINS_CTLZ_I32 : INTRINS_CTLZ_I64), args, 2, ""); break; } case OP_ARM64_SQRDMLAH: case OP_ARM64_SQRDMLAH_BYSCALAR: case OP_ARM64_SQRDMLAH_SCALAR: case OP_ARM64_SQRDMLSH: case OP_ARM64_SQRDMLSH_BYSCALAR: case OP_ARM64_SQRDMLSH_SCALAR: { gboolean byscalar = FALSE; gboolean scalar = FALSE; gboolean subtract = FALSE; switch (ins->opcode) { case OP_ARM64_SQRDMLAH_BYSCALAR: byscalar = TRUE; break; case OP_ARM64_SQRDMLAH_SCALAR: scalar = TRUE; break; case OP_ARM64_SQRDMLSH: subtract = TRUE; break; case OP_ARM64_SQRDMLSH_BYSCALAR: subtract = TRUE; byscalar = TRUE; break; case OP_ARM64_SQRDMLSH_SCALAR: subtract = TRUE; scalar = TRUE; break; } int acc_iid = subtract ? 
INTRINS_AARCH64_ADV_SIMD_SQSUB : INTRINS_AARCH64_ADV_SIMD_SQADD; LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (ret_t); ScalarOpFromVectorOpCtx sctx = scalar_op_from_vector_op (ctx, ret_t, ins); LLVMValueRef args [] = { lhs, rhs, arg3 }; if (byscalar) { unsigned int elems = LLVMGetVectorSize (ret_t); args [2] = broadcast_element (ctx, scalar_from_vector (ctx, args [2]), elems); } if (scalar) { ovr_tag = sctx.ovr_tag; scalar_op_from_vector_op_process_args (&sctx, args, 3); } LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_SQRDMULH, ovr_tag, &args [1], "arm64_sqrdmlxh"); args [1] = result; result = call_overloaded_intrins (ctx, acc_iid, ovr_tag, &args [0], "arm64_sqrdmlxh"); if (scalar) result = scalar_op_from_vector_op_process_result (&sctx, result); values [ins->dreg] = result; break; } case OP_ARM64_SMULH: case OP_ARM64_UMULH: { LLVMValueRef op1, op2; if (ins->opcode == OP_ARM64_SMULH) { op1 = LLVMBuildSExt (builder, lhs, LLVMInt128Type (), ""); op2 = LLVMBuildSExt (builder, rhs, LLVMInt128Type (), ""); } else { op1 = LLVMBuildZExt (builder, lhs, LLVMInt128Type (), ""); op2 = LLVMBuildZExt (builder, rhs, LLVMInt128Type (), ""); } LLVMValueRef mul = LLVMBuildMul (builder, op1, op2, ""); LLVMValueRef hi64 = LLVMBuildLShr (builder, mul, LLVMConstInt (LLVMInt128Type (), 64, FALSE), ""); values [ins->dreg] = LLVMBuildTrunc (builder, hi64, LLVMInt64Type (), ""); break; } case OP_ARM64_XNARROW_SCALAR: { // Unfortunately, @llvm.aarch64.neon.scalar.sqxtun isn't available for i8 or i16. LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (ret_t); LLVMTypeRef elem_t = LLVMGetElementType (ret_t); LLVMValueRef result = NULL; int iid = ins->inst_c0; int scalar_iid = 0; switch (iid) { case INTRINS_AARCH64_ADV_SIMD_SQXTUN: scalar_iid = INTRINS_AARCH64_ADV_SIMD_SCALAR_SQXTUN; break; case INTRINS_AARCH64_ADV_SIMD_SQXTN: scalar_iid = INTRINS_AARCH64_ADV_SIMD_SCALAR_SQXTN; break; case INTRINS_AARCH64_ADV_SIMD_UQXTN: scalar_iid = INTRINS_AARCH64_ADV_SIMD_SCALAR_UQXTN; break; default: g_assert_not_reached (); } if (elem_t == i4_t) { LLVMValueRef arg = scalar_from_vector (ctx, lhs); result = call_intrins (ctx, scalar_iid, &arg, "arm64_xnarrow_scalar"); result = vector_from_scalar (ctx, ret_t, result); } else { LLVMTypeRef arg_t = LLVMTypeOf (lhs); LLVMTypeRef argelem_t = LLVMGetElementType (arg_t); unsigned int argelems = LLVMGetVectorSize (arg_t); LLVMValueRef arg = keep_lowest_element (ctx, LLVMVectorType (argelem_t, argelems * 2), lhs); result = call_overloaded_intrins (ctx, iid, ovr_tag, &arg, "arm64_xnarrow_scalar"); result = keep_lowest_element (ctx, LLVMTypeOf (result), result); } values [ins->dreg] = result; break; } case OP_ARM64_SQXTUN2: case OP_ARM64_UQXTN2: case OP_ARM64_SQXTN2: case OP_ARM64_XTN: case OP_ARM64_XTN2: { llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); gboolean high = FALSE; int iid = 0; switch (ins->opcode) { case OP_ARM64_SQXTUN2: high = TRUE; iid = INTRINS_AARCH64_ADV_SIMD_SQXTUN; break; case OP_ARM64_UQXTN2: high = TRUE; iid = INTRINS_AARCH64_ADV_SIMD_UQXTN; break; case OP_ARM64_SQXTN2: high = TRUE; iid = INTRINS_AARCH64_ADV_SIMD_SQXTN; break; case OP_ARM64_XTN2: high = TRUE; break; } LLVMValueRef result = lhs; if (high) { result = rhs; ovr_tag = ovr_tag_smaller_vector (ovr_tag); } LLVMTypeRef t = LLVMTypeOf (result); LLVMTypeRef elem_t = LLVMGetElementType (t); unsigned int elems = LLVMGetVectorSize (t); 
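/* Narrowing: the destination element type is half the source width; the "2" (high) variants narrow rhs and then concatenate the narrowed half onto lhs below. */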
unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t); LLVMTypeRef result_t = LLVMVectorType (LLVMIntType (elem_bits / 2), elems); if (iid != 0) result = call_overloaded_intrins (ctx, iid, ovr_tag, &result, ""); else result = LLVMBuildTrunc (builder, result, result_t, "arm64_xtn"); if (high) result = concatenate_vectors (ctx, lhs, result); values [ins->dreg] = result; break; } case OP_ARM64_CLZ: { llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); LLVMValueRef args [] = { lhs, const_int1 (0) }; LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_CLZ, ovr_tag, args, ""); values [ins->dreg] = result; break; } case OP_ARM64_FMSUB: case OP_ARM64_FMSUB_BYSCALAR: case OP_ARM64_FMSUB_SCALAR: case OP_ARM64_FNMSUB_SCALAR: case OP_ARM64_FMADD: case OP_ARM64_FMADD_BYSCALAR: case OP_ARM64_FMADD_SCALAR: case OP_ARM64_FNMADD_SCALAR: { llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); gboolean scalar = FALSE; gboolean negate = FALSE; gboolean subtract = FALSE; gboolean byscalar = FALSE; switch (ins->opcode) { case OP_ARM64_FMSUB: subtract = TRUE; break; case OP_ARM64_FMSUB_BYSCALAR: subtract = TRUE; byscalar = TRUE; break; case OP_ARM64_FMSUB_SCALAR: subtract = TRUE; scalar = TRUE; break; case OP_ARM64_FNMSUB_SCALAR: subtract = TRUE; scalar = TRUE; negate = TRUE; break; case OP_ARM64_FMADD: break; case OP_ARM64_FMADD_BYSCALAR: byscalar = TRUE; break; case OP_ARM64_FMADD_SCALAR: scalar = TRUE; break; case OP_ARM64_FNMADD_SCALAR: scalar = TRUE; negate = TRUE; break; } // llvm.fma argument order: mulop1, mulop2, addend LLVMValueRef args [] = { rhs, arg3, lhs }; if (byscalar) { unsigned int elems = LLVMGetVectorSize (LLVMTypeOf (args [0])); args [1] = broadcast_element (ctx, scalar_from_vector (ctx, args [1]), elems); } if (scalar) { ovr_tag = ovr_tag_force_scalar (ovr_tag); for (int i = 0; i < 3; ++i) args [i] = scalar_from_vector (ctx, args [i]); } if (subtract) args [0] = LLVMBuildFNeg (builder, args [0], "arm64_fma_sub"); if (negate) { args [0] = LLVMBuildFNeg (builder, args [0], "arm64_fma_negate"); args [2] = LLVMBuildFNeg (builder, args [2], "arm64_fma_negate"); } LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_FMA, ovr_tag, args, "arm64_fma"); if (scalar) result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result); values [ins->dreg] = result; break; } case OP_ARM64_SQDMULL: case OP_ARM64_SQDMULL_BYSCALAR: case OP_ARM64_SQDMULL2: case OP_ARM64_SQDMULL2_BYSCALAR: case OP_ARM64_SQDMLAL: case OP_ARM64_SQDMLAL_BYSCALAR: case OP_ARM64_SQDMLAL2: case OP_ARM64_SQDMLAL2_BYSCALAR: case OP_ARM64_SQDMLSL: case OP_ARM64_SQDMLSL_BYSCALAR: case OP_ARM64_SQDMLSL2: case OP_ARM64_SQDMLSL2_BYSCALAR: { llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); gboolean scalar = FALSE; gboolean add = FALSE; gboolean subtract = FALSE; gboolean high = FALSE; switch (ins->opcode) { case OP_ARM64_SQDMULL_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMULL: break; case OP_ARM64_SQDMULL2_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMULL2: high = TRUE; break; case OP_ARM64_SQDMLAL_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLAL: add = TRUE; break; case OP_ARM64_SQDMLAL2_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLAL2: high = TRUE; add = TRUE; break; case OP_ARM64_SQDMLSL_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLSL: subtract = TRUE; break; case OP_ARM64_SQDMLSL2_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLSL2: high = TRUE; subtract = TRUE; break; } int iid = 0; if (add) iid = INTRINS_AARCH64_ADV_SIMD_SQADD; else if 
(subtract) iid = INTRINS_AARCH64_ADV_SIMD_SQSUB; LLVMValueRef mul1 = lhs; LLVMValueRef mul2 = rhs; if (iid != 0) { mul1 = rhs; mul2 = arg3; } if (scalar) { LLVMTypeRef t = LLVMTypeOf (mul1); unsigned int elems = LLVMGetVectorSize (t); mul2 = broadcast_element (ctx, scalar_from_vector (ctx, mul2), elems); } LLVMValueRef args [] = { mul1, mul2 }; if (high) for (int i = 0; i < 2; ++i) args [i] = extract_high_elements (ctx, args [i]); LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_SQDMULL, ovr_tag, args, ""); LLVMValueRef args2 [] = { lhs, result }; if (iid != 0) result = call_overloaded_intrins (ctx, iid, ovr_tag, args2, ""); values [ins->dreg] = result; break; } case OP_ARM64_SQDMULL_SCALAR: case OP_ARM64_SQDMLAL_SCALAR: case OP_ARM64_SQDMLSL_SCALAR: { /* * define dso_local i32 @__vqdmlslh_lane_s16(i32, i16, <4 x i16>, i32) local_unnamed_addr #0 { * %5 = insertelement <4 x i16> undef, i16 %1, i64 0 * %6 = shufflevector <4 x i16> %2, <4 x i16> undef, <4 x i32> <i32 3, i32 undef, i32 undef, i32 undef> * %7 = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %5, <4 x i16> %6) * %8 = extractelement <4 x i32> %7, i64 0 * %9 = tail call i32 @llvm.aarch64.neon.sqsub.i32(i32 %0, i32 %8) * ret i32 %9 * } * * define dso_local i64 @__vqdmlals_s32(i64, i32, i32) local_unnamed_addr #0 { * %4 = tail call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %1, i32 %2) #2 * %5 = tail call i64 @llvm.aarch64.neon.sqadd.i64(i64 %0, i64 %4) #2 * ret i64 %5 * } */ int mulid = INTRINS_AARCH64_ADV_SIMD_SQDMULL; int iid = 0; gboolean scalar_mul_result = FALSE; gboolean scalar_acc_result = FALSE; switch (ins->opcode) { case OP_ARM64_SQDMLAL_SCALAR: iid = INTRINS_AARCH64_ADV_SIMD_SQADD; break; case OP_ARM64_SQDMLSL_SCALAR: iid = INTRINS_AARCH64_ADV_SIMD_SQSUB; break; } LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMValueRef mularg = lhs; LLVMValueRef selected_scalar = rhs; if (iid != 0) { mularg = rhs; selected_scalar = arg3; } llvm_ovr_tag_t multag = ovr_tag_smaller_elements (ovr_tag_from_llvm_type (ret_t)); llvm_ovr_tag_t iidtag = ovr_tag_force_scalar (ovr_tag_from_llvm_type (ret_t)); LLVMTypeRef mularg_t = ovr_tag_to_llvm_type (multag); if (multag & INTRIN_int32) { /* The (i32, i32) -> i64 variant of aarch64_neon_sqdmull has * a unique, non-overloaded name. */ mulid = INTRINS_AARCH64_ADV_SIMD_SQDMULL_SCALAR; multag = 0; iidtag = INTRIN_int64 | INTRIN_scalar; scalar_mul_result = TRUE; scalar_acc_result = TRUE; } else if (multag & INTRIN_int16) { /* We were passed a (<4 x i16>, <4 x i16>) but the * widening multiplication intrinsic will yield a <4 x i32>. 
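 * The overload tag is widened to a 128-bit i32 vector accordingly before the intrinsic call.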
*/ multag = INTRIN_int32 | INTRIN_vector128; } else g_assert_not_reached (); if (scalar_mul_result) { mularg = scalar_from_vector (ctx, mularg); selected_scalar = scalar_from_vector (ctx, selected_scalar); } else { mularg = keep_lowest_element (ctx, mularg_t, mularg); selected_scalar = keep_lowest_element (ctx, mularg_t, selected_scalar); } LLVMValueRef mulargs [] = { mularg, selected_scalar }; LLVMValueRef result = call_overloaded_intrins (ctx, mulid, multag, mulargs, "arm64_sqdmull_scalar"); if (iid != 0) { LLVMValueRef acc = scalar_from_vector (ctx, lhs); if (!scalar_mul_result) result = scalar_from_vector (ctx, result); LLVMValueRef subargs [] = { acc, result }; result = call_overloaded_intrins (ctx, iid, iidtag, subargs, "arm64_sqdmlxl_scalar"); scalar_acc_result = TRUE; } if (scalar_acc_result) result = vector_from_scalar (ctx, ret_t, result); else result = keep_lowest_element (ctx, ret_t, result); values [ins->dreg] = result; break; } case OP_ARM64_FMUL_SEL: { LLVMValueRef mul2 = LLVMBuildExtractElement (builder, rhs, arg3, ""); LLVMValueRef mul1 = scalar_from_vector (ctx, lhs); LLVMValueRef result = LLVMBuildFMul (builder, mul1, mul2, "arm64_fmul_sel"); result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result); values [ins->dreg] = result; break; } case OP_ARM64_MLA: case OP_ARM64_MLA_SCALAR: case OP_ARM64_MLS: case OP_ARM64_MLS_SCALAR: { gboolean scalar = FALSE; gboolean add = FALSE; switch (ins->opcode) { case OP_ARM64_MLA_SCALAR: scalar = TRUE; case OP_ARM64_MLA: add = TRUE; break; case OP_ARM64_MLS_SCALAR: scalar = TRUE; case OP_ARM64_MLS: break; } LLVMTypeRef mul_t = LLVMTypeOf (rhs); unsigned int elems = LLVMGetVectorSize (mul_t); LLVMValueRef mul2 = arg3; if (scalar) mul2 = broadcast_element (ctx, scalar_from_vector (ctx, mul2), elems); LLVMValueRef result = LLVMBuildMul (builder, rhs, mul2, ""); if (add) result = LLVMBuildAdd (builder, lhs, result, ""); else result = LLVMBuildSub (builder, lhs, result, ""); values [ins->dreg] = result; break; } case OP_ARM64_SMULL: case OP_ARM64_SMULL_SCALAR: case OP_ARM64_SMULL2: case OP_ARM64_SMULL2_SCALAR: case OP_ARM64_UMULL: case OP_ARM64_UMULL_SCALAR: case OP_ARM64_UMULL2: case OP_ARM64_UMULL2_SCALAR: case OP_ARM64_SMLAL: case OP_ARM64_SMLAL_SCALAR: case OP_ARM64_SMLAL2: case OP_ARM64_SMLAL2_SCALAR: case OP_ARM64_UMLAL: case OP_ARM64_UMLAL_SCALAR: case OP_ARM64_UMLAL2: case OP_ARM64_UMLAL2_SCALAR: case OP_ARM64_SMLSL: case OP_ARM64_SMLSL_SCALAR: case OP_ARM64_SMLSL2: case OP_ARM64_SMLSL2_SCALAR: case OP_ARM64_UMLSL: case OP_ARM64_UMLSL_SCALAR: case OP_ARM64_UMLSL2: case OP_ARM64_UMLSL2_SCALAR: { llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); gboolean is_unsigned = FALSE; gboolean high = FALSE; gboolean add = FALSE; gboolean subtract = FALSE; gboolean scalar = FALSE; int opcode = ins->opcode; switch (opcode) { case OP_ARM64_SMULL_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMULL; break; case OP_ARM64_UMULL_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMULL; break; case OP_ARM64_SMLAL_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLAL; break; case OP_ARM64_UMLAL_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMLAL; break; case OP_ARM64_SMLSL_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLSL; break; case OP_ARM64_UMLSL_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMLSL; break; case OP_ARM64_SMULL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMULL2; break; case OP_ARM64_UMULL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMULL2; break; case OP_ARM64_SMLAL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLAL2; break; case OP_ARM64_UMLAL2_SCALAR: 
scalar = TRUE; opcode = OP_ARM64_UMLAL2; break; case OP_ARM64_SMLSL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLSL2; break; case OP_ARM64_UMLSL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMLSL2; break; } switch (opcode) { case OP_ARM64_SMULL2: high = TRUE; case OP_ARM64_SMULL: break; case OP_ARM64_UMULL2: high = TRUE; case OP_ARM64_UMULL: is_unsigned = TRUE; break; case OP_ARM64_SMLAL2: high = TRUE; case OP_ARM64_SMLAL: add = TRUE; break; case OP_ARM64_UMLAL2: high = TRUE; case OP_ARM64_UMLAL: add = TRUE; is_unsigned = TRUE; break; case OP_ARM64_SMLSL2: high = TRUE; case OP_ARM64_SMLSL: subtract = TRUE; break; case OP_ARM64_UMLSL2: high = TRUE; case OP_ARM64_UMLSL: subtract = TRUE; is_unsigned = TRUE; break; } int iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UMULL : INTRINS_AARCH64_ADV_SIMD_SMULL; LLVMValueRef intrin_args [] = { lhs, rhs }; if (add || subtract) { intrin_args [0] = rhs; intrin_args [1] = arg3; } if (scalar) { LLVMValueRef sarg = intrin_args [1]; LLVMTypeRef t = LLVMTypeOf (intrin_args [0]); unsigned int elems = LLVMGetVectorSize (t); sarg = broadcast_element (ctx, scalar_from_vector (ctx, sarg), elems); intrin_args [1] = sarg; } if (high) for (int i = 0; i < 2; ++i) intrin_args [i] = extract_high_elements (ctx, intrin_args [i]); LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, ""); if (add) result = LLVMBuildAdd (builder, lhs, result, ""); if (subtract) result = LLVMBuildSub (builder, lhs, result, ""); values [ins->dreg] = result; break; } case OP_ARM64_XNEG: case OP_ARM64_XNEG_SCALAR: { gboolean scalar = ins->opcode == OP_ARM64_XNEG_SCALAR; gboolean is_float = FALSE; switch (inst_c1_type (ins)) { case MONO_TYPE_R4: case MONO_TYPE_R8: is_float = TRUE; } LLVMValueRef result = lhs; if (scalar) result = scalar_from_vector (ctx, result); if (is_float) result = LLVMBuildFNeg (builder, result, "arm64_xneg"); else result = LLVMBuildNeg (builder, result, "arm64_xneg"); if (scalar) result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result); values [ins->dreg] = result; break; } case OP_ARM64_PMULL: case OP_ARM64_PMULL2: { gboolean high = ins->opcode == OP_ARM64_PMULL2; LLVMValueRef args [] = { lhs, rhs }; if (high) for (int i = 0; i < 2; ++i) args [i] = extract_high_elements (ctx, args [i]); LLVMValueRef result = call_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_PMULL, args, "arm64_pmull"); values [ins->dreg] = result; break; } case OP_ARM64_REVN: { LLVMTypeRef t = LLVMTypeOf (lhs); LLVMTypeRef elem_t = LLVMGetElementType (t); unsigned int group_bits = mono_llvm_get_prim_size_bits (elem_t); unsigned int vec_bits = mono_llvm_get_prim_size_bits (t); unsigned int tmp_bits = ins->inst_c0; unsigned int tmp_elements = vec_bits / tmp_bits; const int cycle8 [] = { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 }; const int cycle4 [] = { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }; const int cycle2 [] = { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 }; const int *cycle = NULL; switch (group_bits / tmp_bits) { case 2: cycle = cycle2; break; case 4: cycle = cycle4; break; case 8: cycle = cycle8; break; default: g_assert_not_reached (); } g_assert (tmp_elements <= ARM64_MAX_VECTOR_ELEMS); LLVMTypeRef tmp_t = LLVMVectorType (LLVMIntType (tmp_bits), tmp_elements); LLVMValueRef tmp = LLVMBuildBitCast (builder, lhs, tmp_t, "arm64_revn"); LLVMValueRef result = LLVMBuildShuffleVector (builder, tmp, LLVMGetUndef (tmp_t), create_const_vector_i32 (cycle, tmp_elements), ""); result = LLVMBuildBitCast (builder, result, t, ""); values [ins->dreg] = result; 
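/* REVN: the vector is reinterpreted as ins->inst_c0-bit elements whose order is reversed within each original-element-width group by the cycle shuffle mask chosen above. */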
break; } case OP_ARM64_SHL: case OP_ARM64_SSHR: case OP_ARM64_SSRA: case OP_ARM64_USHR: case OP_ARM64_USRA: { gboolean right = FALSE; gboolean add = FALSE; gboolean arith = FALSE; switch (ins->opcode) { case OP_ARM64_USHR: right = TRUE; break; case OP_ARM64_USRA: right = TRUE; add = TRUE; break; case OP_ARM64_SSHR: arith = TRUE; break; case OP_ARM64_SSRA: arith = TRUE; add = TRUE; break; } LLVMValueRef shiftarg = lhs; LLVMValueRef shift = rhs; if (add) { shiftarg = rhs; shift = arg3; } shift = create_shift_vector (ctx, shiftarg, shift); LLVMValueRef result = NULL; if (right) result = LLVMBuildLShr (builder, shiftarg, shift, ""); else if (arith) result = LLVMBuildAShr (builder, shiftarg, shift, ""); else result = LLVMBuildShl (builder, shiftarg, shift, ""); if (add) result = LLVMBuildAdd (builder, lhs, result, "arm64_usra"); values [ins->dreg] = result; break; } case OP_ARM64_SHRN: case OP_ARM64_SHRN2: { LLVMValueRef shiftarg = lhs; LLVMValueRef shift = rhs; gboolean high = ins->opcode == OP_ARM64_SHRN2; if (high) { shiftarg = rhs; shift = arg3; } LLVMTypeRef arg_t = LLVMTypeOf (shiftarg); LLVMTypeRef elem_t = LLVMGetElementType (arg_t); unsigned int elems = LLVMGetVectorSize (arg_t); unsigned int bits = mono_llvm_get_prim_size_bits (elem_t); LLVMTypeRef trunc_t = LLVMVectorType (LLVMIntType (bits / 2), elems); shift = create_shift_vector (ctx, shiftarg, shift); LLVMValueRef result = LLVMBuildLShr (builder, shiftarg, shift, "shrn"); result = LLVMBuildTrunc (builder, result, trunc_t, ""); if (high) { result = concatenate_vectors (ctx, lhs, result); } values [ins->dreg] = result; break; } case OP_ARM64_SRSHR: case OP_ARM64_SRSRA: case OP_ARM64_URSHR: case OP_ARM64_URSRA: { llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); LLVMValueRef shiftarg = lhs; LLVMValueRef shift = rhs; gboolean right = FALSE; gboolean add = FALSE; switch (ins->opcode) { case OP_ARM64_URSRA: add = TRUE; case OP_ARM64_URSHR: right = TRUE; break; case OP_ARM64_SRSRA: add = TRUE; case OP_ARM64_SRSHR: right = TRUE; break; } int iid = 0; switch (ins->opcode) { case OP_ARM64_URSRA: case OP_ARM64_URSHR: iid = INTRINS_AARCH64_ADV_SIMD_URSHL; break; case OP_ARM64_SRSRA: case OP_ARM64_SRSHR: iid = INTRINS_AARCH64_ADV_SIMD_SRSHL; break; } if (add) { shiftarg = rhs; shift = arg3; } if (right) shift = LLVMBuildNeg (builder, shift, ""); shift = create_shift_vector (ctx, shiftarg, shift); LLVMValueRef args [] = { shiftarg, shift }; LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); if (add) result = LLVMBuildAdd (builder, result, lhs, ""); values [ins->dreg] = result; break; } case OP_ARM64_XNSHIFT_SCALAR: case OP_ARM64_XNSHIFT: case OP_ARM64_XNSHIFT2: { LLVMTypeRef intrin_result_t = simd_class_to_llvm_type (ctx, ins->klass); llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (intrin_result_t); LLVMValueRef shift_arg = lhs; LLVMValueRef shift_amount = rhs; gboolean high = FALSE; gboolean scalar = FALSE; int iid = ins->inst_c0; switch (ins->opcode) { case OP_ARM64_XNSHIFT_SCALAR: scalar = TRUE; break; case OP_ARM64_XNSHIFT2: high = TRUE; break; } if (high) { shift_arg = rhs; shift_amount = arg3; ovr_tag = ovr_tag_smaller_vector (ovr_tag); intrin_result_t = ovr_tag_to_llvm_type (ovr_tag); } LLVMTypeRef shift_arg_t = LLVMTypeOf (shift_arg); LLVMTypeRef shift_arg_elem_t = LLVMGetElementType (shift_arg_t); unsigned int element_bits = mono_llvm_get_prim_size_bits (shift_arg_elem_t); int range_min = 1; int range_max = element_bits / 2; if (scalar) { unsigned int elems = LLVMGetVectorSize 
(shift_arg_t); LLVMValueRef lo = scalar_from_vector (ctx, shift_arg); shift_arg = vector_from_scalar (ctx, LLVMVectorType (shift_arg_elem_t, elems * 2), lo); } int max_index = range_max - range_min + 1; ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, max_index, shift_amount, intrin_result_t, "arm64_xnshift"); int i = 0; while (immediate_unroll_next (&ictx, &i)) { int shift_const = i + range_min; LLVMValueRef intrin_args [] = { shift_arg, const_int32 (shift_const) }; LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, ""); immediate_unroll_commit (&ictx, shift_const, result); } { immediate_unroll_default (&ictx); LLVMValueRef intrin_args [] = { shift_arg, const_int32 (range_max) }; LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, ""); immediate_unroll_commit_default (&ictx, result); } LLVMValueRef result = immediate_unroll_end (&ictx, &cbb); if (high) result = concatenate_vectors (ctx, lhs, result); if (scalar) result = keep_lowest_element (ctx, LLVMTypeOf (result), result); values [ins->dreg] = result; break; } case OP_ARM64_SQSHLU: case OP_ARM64_SQSHLU_SCALAR: { gboolean scalar = ins->opcode == OP_ARM64_SQSHLU_SCALAR; LLVMTypeRef intrin_result_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMTypeRef elem_t = LLVMGetElementType (intrin_result_t); unsigned int element_bits = mono_llvm_get_prim_size_bits (elem_t); llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (intrin_result_t); int max_index = element_bits; ScalarOpFromVectorOpCtx sctx = scalar_op_from_vector_op (ctx, intrin_result_t, ins); intrin_result_t = scalar ? sctx.intermediate_type : intrin_result_t; ovr_tag = scalar ? sctx.ovr_tag : ovr_tag; ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, max_index, rhs, intrin_result_t, "arm64_sqshlu"); int i = 0; while (immediate_unroll_next (&ictx, &i)) { int shift_const = i; LLVMValueRef args [2] = { lhs, create_shift_vector (ctx, lhs, const_int32 (shift_const)) }; if (scalar) scalar_op_from_vector_op_process_args (&sctx, args, 2); LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_SQSHLU, ovr_tag, args, ""); immediate_unroll_commit (&ictx, shift_const, result); } { immediate_unroll_default (&ictx); LLVMValueRef srcarg = lhs; if (scalar) scalar_op_from_vector_op_process_args (&sctx, &srcarg, 1); immediate_unroll_commit_default (&ictx, srcarg); } LLVMValueRef result = immediate_unroll_end (&ictx, &cbb); if (scalar) result = scalar_op_from_vector_op_process_result (&sctx, result); values [ins->dreg] = result; break; } case OP_ARM64_SSHLL: case OP_ARM64_SSHLL2: case OP_ARM64_USHLL: case OP_ARM64_USHLL2: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); gboolean high = FALSE; gboolean is_unsigned = FALSE; switch (ins->opcode) { case OP_ARM64_SSHLL2: high = TRUE; break; case OP_ARM64_USHLL2: high = TRUE; case OP_ARM64_USHLL: is_unsigned = TRUE; break; } LLVMValueRef result = lhs; if (high) result = extract_high_elements (ctx, result); if (is_unsigned) result = LLVMBuildZExt (builder, result, ret_t, "arm64_ushll"); else result = LLVMBuildSExt (builder, result, ret_t, "arm64_ushll"); result = LLVMBuildShl (builder, result, create_shift_vector (ctx, result, rhs), ""); values [ins->dreg] = result; break; } case OP_ARM64_SLI: case OP_ARM64_SRI: { LLVMTypeRef intrin_result_t = simd_class_to_llvm_type (ctx, ins->klass); llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (intrin_result_t); unsigned int element_bits = mono_llvm_get_prim_size_bits (LLVMGetElementType (intrin_result_t)); int 
range_min = 0; int range_max = element_bits - 1; if (ins->opcode == OP_ARM64_SRI) { ++range_min; ++range_max; } int iid = ins->opcode == OP_ARM64_SRI ? INTRINS_AARCH64_ADV_SIMD_SRI : INTRINS_AARCH64_ADV_SIMD_SLI; int max_index = range_max - range_min + 1; ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, max_index, arg3, intrin_result_t, "arm64_ext"); LLVMValueRef intrin_args [3] = { lhs, rhs, arg3 }; int i = 0; while (immediate_unroll_next (&ictx, &i)) { int shift_const = i + range_min; intrin_args [2] = const_int32 (shift_const); LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, ""); immediate_unroll_commit (&ictx, shift_const, result); } immediate_unroll_default (&ictx); immediate_unroll_commit_default (&ictx, lhs); LLVMValueRef result = immediate_unroll_end (&ictx, &cbb); values [ins->dreg] = result; break; } case OP_ARM64_SQRT_SCALAR: { int iid = ins->inst_c0 == MONO_TYPE_R8 ? INTRINS_SQRT : INTRINS_SQRTF; LLVMTypeRef t = LLVMTypeOf (lhs); LLVMValueRef scalar = LLVMBuildExtractElement (builder, lhs, const_int32 (0), ""); LLVMValueRef result = call_intrins (ctx, iid, &scalar, "arm64_sqrt_scalar"); values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMGetUndef (t), result, const_int32 (0), ""); break; } case OP_ARM64_STP: case OP_ARM64_STP_SCALAR: case OP_ARM64_STNP: case OP_ARM64_STNP_SCALAR: { gboolean nontemporal = FALSE; gboolean scalar = FALSE; switch (ins->opcode) { case OP_ARM64_STNP: nontemporal = TRUE; break; case OP_ARM64_STNP_SCALAR: nontemporal = TRUE; scalar = TRUE; break; case OP_ARM64_STP_SCALAR: scalar = TRUE; break; } LLVMTypeRef rhs_t = LLVMTypeOf (rhs); LLVMValueRef val = NULL; LLVMTypeRef dst_t = LLVMPointerType (rhs_t, 0); if (scalar) val = LLVMBuildShuffleVector (builder, rhs, arg3, create_const_vector_2_i32 (0, 2), ""); else { unsigned int rhs_elems = LLVMGetVectorSize (rhs_t); LLVMTypeRef rhs_elt_t = LLVMGetElementType (rhs_t); dst_t = LLVMPointerType (LLVMVectorType (rhs_elt_t, rhs_elems * 2), 0); val = concatenate_vectors (ctx, rhs, arg3); } LLVMValueRef address = convert (ctx, lhs, dst_t); LLVMValueRef store = mono_llvm_build_store (builder, val, address, FALSE, LLVM_BARRIER_NONE); if (nontemporal) set_nontemporal_flag (store); break; } case OP_ARM64_LD1_INSERT: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMTypeRef elem_t = LLVMGetElementType (ret_t); LLVMValueRef address = convert (ctx, arg3, LLVMPointerType (elem_t, 0)); unsigned int alignment = mono_llvm_get_prim_size_bits (ret_t) / 8; LLVMValueRef result = mono_llvm_build_aligned_load (builder, address, "arm64_ld1_insert", FALSE, alignment); result = LLVMBuildInsertElement (builder, lhs, result, rhs, "arm64_ld1_insert"); values [ins->dreg] = result; break; } case OP_ARM64_LD1R: case OP_ARM64_LD1: { gboolean replicate = ins->opcode == OP_ARM64_LD1R; LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); unsigned int alignment = mono_llvm_get_prim_size_bits (ret_t) / 8; LLVMValueRef address = lhs; LLVMTypeRef address_t = LLVMPointerType (ret_t, 0); if (replicate) { LLVMTypeRef elem_t = LLVMGetElementType (ret_t); address_t = LLVMPointerType (elem_t, 0); } address = convert (ctx, address, address_t); LLVMValueRef result = mono_llvm_build_aligned_load (builder, address, "arm64_ld1", FALSE, alignment); if (replicate) { unsigned int elems = LLVMGetVectorSize (ret_t); result = broadcast_element (ctx, result, elems); } values [ins->dreg] = result; break; } case OP_ARM64_LDNP: case OP_ARM64_LDNP_SCALAR: case OP_ARM64_LDP: case 
OP_ARM64_LDP_SCALAR: { const char *oname = NULL; gboolean nontemporal = FALSE; gboolean scalar = FALSE; switch (ins->opcode) { case OP_ARM64_LDNP: oname = "arm64_ldnp"; nontemporal = TRUE; break; case OP_ARM64_LDNP_SCALAR: oname = "arm64_ldnp_scalar"; nontemporal = TRUE; scalar = TRUE; break; case OP_ARM64_LDP: oname = "arm64_ldp"; break; case OP_ARM64_LDP_SCALAR: oname = "arm64_ldp_scalar"; scalar = TRUE; break; } if (!addresses [ins->dreg]) addresses [ins->dreg] = build_named_alloca (ctx, m_class_get_byval_arg (ins->klass), oname); LLVMTypeRef ret_t = simd_valuetuple_to_llvm_type (ctx, ins->klass); LLVMTypeRef vec_t = LLVMGetElementType (ret_t); LLVMValueRef ix = const_int32 (1); LLVMTypeRef src_t = LLVMPointerType (scalar ? LLVMGetElementType (vec_t) : vec_t, 0); LLVMValueRef src0 = convert (ctx, lhs, src_t); LLVMValueRef src1 = LLVMBuildGEP (builder, src0, &ix, 1, oname); LLVMValueRef vals [] = { src0, src1 }; for (int i = 0; i < 2; ++i) { vals [i] = LLVMBuildLoad (builder, vals [i], oname); if (nontemporal) set_nontemporal_flag (vals [i]); } unsigned int vec_sz = mono_llvm_get_prim_size_bits (vec_t); if (scalar) { g_assert (vec_sz == 64); LLVMValueRef undef = LLVMGetUndef (vec_t); for (int i = 0; i < 2; ++i) vals [i] = LLVMBuildInsertElement (builder, undef, vals [i], const_int32 (0), oname); } LLVMValueRef val = LLVMGetUndef (ret_t); for (int i = 0; i < 2; ++i) val = LLVMBuildInsertValue (builder, val, vals [i], i, oname); LLVMTypeRef retptr_t = LLVMPointerType (ret_t, 0); LLVMValueRef dst = convert (ctx, addresses [ins->dreg], retptr_t); LLVMBuildStore (builder, val, dst); values [ins->dreg] = vec_sz == 64 ? val : NULL; break; } case OP_ARM64_ST1: { LLVMTypeRef t = LLVMTypeOf (rhs); LLVMValueRef address = convert (ctx, lhs, LLVMPointerType (t, 0)); unsigned int alignment = mono_llvm_get_prim_size_bits (t) / 8; mono_llvm_build_aligned_store (builder, rhs, address, FALSE, alignment); break; } case OP_ARM64_ST1_SCALAR: { LLVMTypeRef t = LLVMGetElementType (LLVMTypeOf (rhs)); LLVMValueRef val = LLVMBuildExtractElement (builder, rhs, arg3, "arm64_st1_scalar"); LLVMValueRef address = convert (ctx, lhs, LLVMPointerType (t, 0)); unsigned int alignment = mono_llvm_get_prim_size_bits (t) / 8; mono_llvm_build_aligned_store (builder, val, address, FALSE, alignment); break; } case OP_ARM64_ADDHN: case OP_ARM64_ADDHN2: case OP_ARM64_SUBHN: case OP_ARM64_SUBHN2: case OP_ARM64_RADDHN: case OP_ARM64_RADDHN2: case OP_ARM64_RSUBHN: case OP_ARM64_RSUBHN2: { LLVMValueRef args [2] = { lhs, rhs }; gboolean high = FALSE; gboolean subtract = FALSE; int iid = 0; switch (ins->opcode) { case OP_ARM64_ADDHN2: high = TRUE; case OP_ARM64_ADDHN: break; case OP_ARM64_SUBHN2: high = TRUE; case OP_ARM64_SUBHN: subtract = TRUE; break; case OP_ARM64_RSUBHN2: high = TRUE; case OP_ARM64_RSUBHN: iid = INTRINS_AARCH64_ADV_SIMD_RSUBHN; break; case OP_ARM64_RADDHN2: high = TRUE; case OP_ARM64_RADDHN: iid = INTRINS_AARCH64_ADV_SIMD_RADDHN; break; } llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); if (high) { args [0] = rhs; args [1] = arg3; ovr_tag = ovr_tag_smaller_vector (ovr_tag); } LLVMValueRef result = NULL; if (iid != 0) result = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); else { LLVMTypeRef t = LLVMTypeOf (args [0]); LLVMTypeRef elt_t = LLVMGetElementType (t); unsigned int elems = LLVMGetVectorSize (t); unsigned int elem_bits = mono_llvm_get_prim_size_bits (elt_t); if (subtract) result = LLVMBuildSub (builder, args [0], args [1], ""); else result = LLVMBuildAdd (builder, args [0], args 
[1], ""); result = LLVMBuildLShr (builder, result, broadcast_constant (elem_bits / 2, elt_t, elems), ""); result = LLVMBuildTrunc (builder, result, LLVMVectorType (LLVMIntType (elem_bits / 2), elems), ""); } if (high) result = concatenate_vectors (ctx, lhs, result); values [ins->dreg] = result; break; } case OP_ARM64_SADD: case OP_ARM64_UADD: case OP_ARM64_SADD2: case OP_ARM64_UADD2: case OP_ARM64_SSUB: case OP_ARM64_USUB: case OP_ARM64_SSUB2: case OP_ARM64_USUB2: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); gboolean is_unsigned = FALSE; gboolean high = FALSE; gboolean subtract = FALSE; switch (ins->opcode) { case OP_ARM64_SADD2: high = TRUE; case OP_ARM64_SADD: break; case OP_ARM64_UADD2: high = TRUE; case OP_ARM64_UADD: is_unsigned = TRUE; break; case OP_ARM64_SSUB2: high = TRUE; case OP_ARM64_SSUB: subtract = TRUE; break; case OP_ARM64_USUB2: high = TRUE; case OP_ARM64_USUB: subtract = TRUE; is_unsigned = TRUE; break; } LLVMValueRef args [] = { lhs, rhs }; for (int i = 0; i < 2; ++i) { LLVMValueRef arg = args [i]; LLVMTypeRef arg_t = LLVMTypeOf (arg); if (high && arg_t != ret_t) arg = extract_high_elements (ctx, arg); if (is_unsigned) arg = LLVMBuildZExt (builder, arg, ret_t, ""); else arg = LLVMBuildSExt (builder, arg, ret_t, ""); args [i] = arg; } LLVMValueRef result = NULL; if (subtract) result = LLVMBuildSub (builder, args [0], args [1], "arm64_sub"); else result = LLVMBuildAdd (builder, args [0], args [1], "arm64_add"); values [ins->dreg] = result; break; } case OP_ARM64_SABAL: case OP_ARM64_SABAL2: case OP_ARM64_UABAL: case OP_ARM64_UABAL2: case OP_ARM64_SABDL: case OP_ARM64_SABDL2: case OP_ARM64_UABDL: case OP_ARM64_UABDL2: case OP_ARM64_SABA: case OP_ARM64_UABA: case OP_ARM64_SABD: case OP_ARM64_UABD: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); gboolean is_unsigned = FALSE; gboolean high = FALSE; gboolean add = FALSE; gboolean widen = FALSE; switch (ins->opcode) { case OP_ARM64_SABAL2: high = TRUE; case OP_ARM64_SABAL: widen = TRUE; add = TRUE; break; case OP_ARM64_UABAL2: high = TRUE; case OP_ARM64_UABAL: widen = TRUE; add = TRUE; is_unsigned = TRUE; break; case OP_ARM64_SABDL2: high = TRUE; case OP_ARM64_SABDL: widen = TRUE; break; case OP_ARM64_UABDL2: high = TRUE; case OP_ARM64_UABDL: widen = TRUE; is_unsigned = TRUE; break; case OP_ARM64_SABA: add = TRUE; break; case OP_ARM64_UABA: add = TRUE; is_unsigned = TRUE; break; case OP_ARM64_UABD: is_unsigned = TRUE; break; } LLVMValueRef args [] = { lhs, rhs }; if (add) { args [0] = rhs; args [1] = arg3; } if (high) for (int i = 0; i < 2; ++i) args [i] = extract_high_elements (ctx, args [i]); int iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UABD : INTRINS_AARCH64_ADV_SIMD_SABD; llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (LLVMTypeOf (args [0])); LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); if (widen) result = LLVMBuildZExt (builder, result, ret_t, ""); if (add) result = LLVMBuildAdd (builder, result, lhs, ""); values [ins->dreg] = result; break; } case OP_ARM64_XHORIZ: { gboolean truncate = FALSE; LLVMTypeRef arg_t = LLVMTypeOf (lhs); LLVMTypeRef elem_t = LLVMGetElementType (arg_t); LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (arg_t); if (elem_t == i1_t || elem_t == i2_t) truncate = TRUE; LLVMValueRef result = call_overloaded_intrins (ctx, ins->inst_c0, ovr_tag, &lhs, ""); if (truncate) { // @llvm.aarch64.neon.saddv.i32.v8i16 ought to return an i16, but doesn't in LLVM 9. 
result = LLVMBuildTrunc (builder, result, elem_t, ""); } result = vector_from_scalar (ctx, ret_t, result); values [ins->dreg] = result; break; } case OP_ARM64_SADDLV: case OP_ARM64_UADDLV: { LLVMTypeRef arg_t = LLVMTypeOf (lhs); LLVMTypeRef elem_t = LLVMGetElementType (arg_t); LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (arg_t); gboolean truncate = elem_t == i1_t; int iid = ins->opcode == OP_ARM64_UADDLV ? INTRINS_AARCH64_ADV_SIMD_UADDLV : INTRINS_AARCH64_ADV_SIMD_SADDLV; LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, &lhs, ""); if (truncate) { // @llvm.aarch64.neon.saddlv.i32.v16i8 ought to return an i16, but doesn't in LLVM 9. result = LLVMBuildTrunc (builder, result, i2_t, ""); } result = vector_from_scalar (ctx, ret_t, result); values [ins->dreg] = result; break; } case OP_ARM64_UADALP: case OP_ARM64_SADALP: { llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); int iid = ins->opcode == OP_ARM64_UADALP ? INTRINS_AARCH64_ADV_SIMD_UADDLP : INTRINS_AARCH64_ADV_SIMD_SADDLP; LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, &rhs, ""); result = LLVMBuildAdd (builder, result, lhs, ""); values [ins->dreg] = result; break; } case OP_ARM64_ADDP_SCALAR: { llvm_ovr_tag_t ovr_tag = INTRIN_vector128 | INTRIN_int64; LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_UADDV, ovr_tag, &lhs, "arm64_addp_scalar"); result = LLVMBuildInsertElement (builder, LLVMConstNull (v64_i8_t), result, const_int32 (0), ""); values [ins->dreg] = result; break; } case OP_ARM64_FADDP_SCALAR: { LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMValueRef hi = LLVMBuildExtractElement (builder, lhs, const_int32 (0), ""); LLVMValueRef lo = LLVMBuildExtractElement (builder, lhs, const_int32 (1), ""); LLVMValueRef result = LLVMBuildFAdd (builder, hi, lo, "arm64_faddp_scalar"); result = LLVMBuildInsertElement (builder, LLVMConstNull (ret_t), result, const_int32 (0), ""); values [ins->dreg] = result; break; } case OP_ARM64_SXTL: case OP_ARM64_SXTL2: case OP_ARM64_UXTL: case OP_ARM64_UXTL2: { gboolean high = FALSE; gboolean is_unsigned = FALSE; switch (ins->opcode) { case OP_ARM64_SXTL2: high = TRUE; break; case OP_ARM64_UXTL2: high = TRUE; case OP_ARM64_UXTL: is_unsigned = TRUE; break; } LLVMTypeRef t = LLVMTypeOf (lhs); unsigned int elem_bits = LLVMGetIntTypeWidth (LLVMGetElementType (t)); unsigned int src_elems = LLVMGetVectorSize (t); unsigned int dst_elems = src_elems; LLVMValueRef arg = lhs; if (high) { arg = extract_high_elements (ctx, lhs); dst_elems = LLVMGetVectorSize (LLVMTypeOf (arg)); } LLVMTypeRef result_t = LLVMVectorType (LLVMIntType (elem_bits * 2), dst_elems); LLVMValueRef result = NULL; if (is_unsigned) result = LLVMBuildZExt (builder, arg, result_t, "arm64_uxtl"); else result = LLVMBuildSExt (builder, arg, result_t, "arm64_sxtl"); values [ins->dreg] = result; break; } case OP_ARM64_TRN1: case OP_ARM64_TRN2: { gboolean high = ins->opcode == OP_ARM64_TRN2; LLVMTypeRef t = LLVMTypeOf (lhs); unsigned int src_elems = LLVMGetVectorSize (t); int mask [MAX_VECTOR_ELEMS] = { 0 }; int laneix = high ? 
1 : 0; for (unsigned int i = 0; i < src_elems; i += 2) { mask [i] = laneix; mask [i + 1] = laneix + src_elems; laneix += 2; } values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_i32 (mask, src_elems), "arm64_uzp"); break; } case OP_ARM64_UZP1: case OP_ARM64_UZP2: { gboolean high = ins->opcode == OP_ARM64_UZP2; LLVMTypeRef t = LLVMTypeOf (lhs); unsigned int src_elems = LLVMGetVectorSize (t); int mask [MAX_VECTOR_ELEMS] = { 0 }; int laneix = high ? 1 : 0; for (unsigned int i = 0; i < src_elems; ++i) { mask [i] = laneix; laneix += 2; } values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_i32 (mask, src_elems), "arm64_uzp"); break; } case OP_ARM64_ZIP1: case OP_ARM64_ZIP2: { gboolean high = ins->opcode == OP_ARM64_ZIP2; LLVMTypeRef t = LLVMTypeOf (lhs); unsigned int src_elems = LLVMGetVectorSize (t); int mask [MAX_VECTOR_ELEMS] = { 0 }; int laneix = high ? src_elems / 2 : 0; for (unsigned int i = 0; i < src_elems; i += 2) { mask [i] = laneix; mask [i + 1] = laneix + src_elems; ++laneix; } values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_i32 (mask, src_elems), "arm64_zip"); break; } case OP_ARM64_ABSCOMPARE: { IntrinsicId iid = (IntrinsicId) ins->inst_c0; gboolean scalar = ins->inst_c1; LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); LLVMTypeRef elem_t = LLVMGetElementType (ret_t); llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); ovr_tag = ovr_tag_corresponding_integer (ovr_tag); LLVMValueRef args [] = { lhs, rhs }; LLVMTypeRef result_t = ret_t; if (scalar) { ovr_tag = ovr_tag_force_scalar (ovr_tag); result_t = elem_t; for (int i = 0; i < 2; ++i) args [i] = scalar_from_vector (ctx, args [i]); } LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); result = LLVMBuildBitCast (builder, result, result_t, ""); if (scalar) result = vector_from_scalar (ctx, ret_t, result); values [ins->dreg] = result; break; } case OP_XOP_OVR_X_X: { IntrinsicId iid = (IntrinsicId) ins->inst_c0; llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, &lhs, ""); break; } case OP_XOP_OVR_X_X_X: { IntrinsicId iid = (IntrinsicId) ins->inst_c0; llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); LLVMValueRef args [] = { lhs, rhs }; values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); break; } case OP_XOP_OVR_X_X_X_X: { IntrinsicId iid = (IntrinsicId) ins->inst_c0; llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); LLVMValueRef args [] = { lhs, rhs, arg3 }; values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); break; } case OP_XOP_OVR_BYSCALAR_X_X_X: { IntrinsicId iid = (IntrinsicId) ins->inst_c0; llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass); LLVMTypeRef t = LLVMTypeOf (lhs); unsigned int elems = LLVMGetVectorSize (t); LLVMValueRef arg2 = broadcast_element (ctx, scalar_from_vector (ctx, rhs), elems); LLVMValueRef args [] = { lhs, arg2 }; values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, args, ""); break; } case OP_XOP_OVR_SCALAR_X_X: case OP_XOP_OVR_SCALAR_X_X_X: case OP_XOP_OVR_SCALAR_X_X_X_X: { int num_args = 0; IntrinsicId iid = (IntrinsicId) ins->inst_c0; LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass); switch (ins->opcode) { case OP_XOP_OVR_SCALAR_X_X: num_args = 1; break; case OP_XOP_OVR_SCALAR_X_X_X: num_args = 2; break; case OP_XOP_OVR_SCALAR_X_X_X_X: 
num_args = 3; break; } /* LLVM 9 NEON intrinsic functions have scalar overloads. Unfortunately * only overloads for 32 and 64-bit integers and floating point types are * supported. 8 and 16-bit integers are unsupported, and will fail during * instruction selection. This is worked around by using a vector * operation and then explicitly clearing the upper bits of the register. */ ScalarOpFromVectorOpCtx sctx = scalar_op_from_vector_op (ctx, ret_t, ins); LLVMValueRef args [3] = { lhs, rhs, arg3 }; scalar_op_from_vector_op_process_args (&sctx, args, num_args); LLVMValueRef result = call_overloaded_intrins (ctx, iid, sctx.ovr_tag, args, ""); result = scalar_op_from_vector_op_process_result (&sctx, result); values [ins->dreg] = result; break; } #endif case OP_DUMMY_USE: break; /* * EXCEPTION HANDLING */ case OP_IMPLICIT_EXCEPTION: /* This marks a place where an implicit exception can happen */ if (bb->region != -1) set_failure (ctx, "implicit-exception"); break; case OP_THROW: case OP_RETHROW: { gboolean rethrow = (ins->opcode == OP_RETHROW); if (ctx->llvm_only) { emit_llvmonly_throw (ctx, bb, rethrow, lhs); has_terminator = TRUE; ctx->unreachable [bb->block_num] = TRUE; } else { emit_throw (ctx, bb, rethrow, lhs); builder = ctx->builder; } break; } case OP_CALL_HANDLER: { /* * We don't 'call' handlers, but instead simply branch to them. * The code generated by ENDFINALLY will branch back to us. */ LLVMBasicBlockRef noex_bb; GSList *bb_list; BBInfo *info = &bblocks [ins->inst_target_bb->block_num]; bb_list = info->call_handler_return_bbs; /* * Set the indicator variable for the finally clause. */ lhs = info->finally_ind; g_assert (lhs); LLVMBuildStore (builder, LLVMConstInt (LLVMInt32Type (), g_slist_length (bb_list) + 1, FALSE), lhs); /* Branch to the finally clause */ LLVMBuildBr (builder, info->call_handler_target_bb); noex_bb = gen_bb (ctx, "CALL_HANDLER_CONT_BB"); info->call_handler_return_bbs = g_slist_append_mempool (cfg->mempool, info->call_handler_return_bbs, noex_bb); builder = ctx->builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, noex_bb); bblocks [bb->block_num].end_bblock = noex_bb; break; } case OP_START_HANDLER: { break; } case OP_ENDFINALLY: { LLVMBasicBlockRef resume_bb; MonoBasicBlock *handler_bb; LLVMValueRef val, switch_ins, callee; GSList *bb_list; BBInfo *info; gboolean is_fault = MONO_REGION_FLAGS (bb->region) == MONO_EXCEPTION_CLAUSE_FAULT; /* * Fault clauses are like finally clauses, but they are only called if an exception is thrown. */ if (!is_fault) { handler_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->region_to_handler, GUINT_TO_POINTER (mono_get_block_region_notry (cfg, bb->region))); g_assert (handler_bb); info = &bblocks [handler_bb->block_num]; lhs = info->finally_ind; g_assert (lhs); bb_list = info->call_handler_return_bbs; resume_bb = gen_bb (ctx, "ENDFINALLY_RESUME_BB"); /* Load the finally variable */ val = LLVMBuildLoad (builder, lhs, ""); /* Reset the variable */ LLVMBuildStore (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), lhs); /* Branch to either resume_bb, or to the bblocks in bb_list */ switch_ins = LLVMBuildSwitch (builder, val, resume_bb, g_slist_length (bb_list)); /* * The other targets are added at the end to handle OP_CALL_HANDLER * opcodes processed later. 
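 * Each OP_CALL_HANDLER stores a distinct index into the finally indicator variable before branching here, and one switch case per return point is appended so control resumes at the right continuation block.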
*/ info->endfinally_switch_ins_list = g_slist_append_mempool (cfg->mempool, info->endfinally_switch_ins_list, switch_ins); builder = ctx->builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, resume_bb); } if (ctx->llvm_only) { if (!cfg->deopt) { emit_resume_eh (ctx, bb); } else { /* Not needed */ LLVMBuildUnreachable (builder); } } else { LLVMTypeRef icall_sig = LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE); if (ctx->cfg->compile_aot) { callee = get_callee (ctx, icall_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_llvm_resume_unwind_trampoline)); } else { callee = get_jit_callee (ctx, "llvm_resume_unwind_trampoline", icall_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_llvm_resume_unwind_trampoline)); } LLVMBuildCall (builder, callee, NULL, 0, ""); LLVMBuildUnreachable (builder); } has_terminator = TRUE; break; } case OP_ENDFILTER: { g_assert (cfg->llvm_only && cfg->deopt); LLVMBuildUnreachable (builder); has_terminator = TRUE; break; } case OP_IL_SEQ_POINT: break; default: { char reason [128]; sprintf (reason, "opcode %s", mono_inst_name (ins->opcode)); set_failure (ctx, reason); break; } } if (!ctx_ok (ctx)) break; /* Convert the value to the type required by phi nodes */ if (spec [MONO_INST_DEST] != ' ' && !MONO_IS_STORE_MEMBASE (ins) && ctx->vreg_types [ins->dreg]) { if (ctx->is_vphi [ins->dreg]) /* vtypes */ values [ins->dreg] = addresses [ins->dreg]; else values [ins->dreg] = convert (ctx, values [ins->dreg], ctx->vreg_types [ins->dreg]); } /* Add stores for volatile/ref variables */ if (spec [MONO_INST_DEST] != ' ' && spec [MONO_INST_DEST] != 'v' && !MONO_IS_STORE_MEMBASE (ins)) { if (!skip_volatile_store) emit_volatile_store (ctx, ins->dreg); #ifdef TARGET_WASM if (vreg_is_ref (cfg, ins->dreg) && ctx->values [ins->dreg]) emit_gc_pin (ctx, builder, ins->dreg); #endif } } if (!ctx_ok (ctx)) return; if (!has_terminator && bb->next_bb && (bb == cfg->bb_entry || bb->in_count > 0)) { LLVMBuildBr (builder, get_bb (ctx, bb->next_bb)); } if (bb == cfg->bb_exit && sig->ret->type == MONO_TYPE_VOID) { emit_dbg_loc (ctx, builder, cfg->header->code + cfg->header->code_size - 1); LLVMBuildRetVoid (builder); } if (bb == cfg->bb_entry) ctx->last_alloca = LLVMGetLastInstruction (get_bb (ctx, cfg->bb_entry)); } /* * mono_llvm_check_method_supported: * * Do some quick checks to decide whether cfg->method can be compiled by LLVM, to avoid * compiling a method twice. */ void mono_llvm_check_method_supported (MonoCompile *cfg) { int i, j; #ifdef TARGET_WASM if (mono_method_signature_internal (cfg->method)->call_convention == MONO_CALL_VARARG) { cfg->exception_message = g_strdup ("vararg callconv"); cfg->disable_llvm = TRUE; return; } #endif if (cfg->llvm_only) return; if (cfg->method->save_lmf) { cfg->exception_message = g_strdup ("lmf"); cfg->disable_llvm = TRUE; } if (cfg->disable_llvm) return; /* * Nested clauses where one of the clauses is a finally clause are * not supported, because LLVM can't figure out the control flow, * probably because we resume exception handling by calling our * own function instead of using the 'resume' llvm instruction. */ for (i = 0; i < cfg->header->num_clauses; ++i) { for (j = 0; j < cfg->header->num_clauses; ++j) { MonoExceptionClause *clause1 = &cfg->header->clauses [i]; MonoExceptionClause *clause2 = &cfg->header->clauses [j]; // FIXME: Nested try clauses fail in some cases too, i.e.
#37273 if (i != j && clause1->try_offset >= clause2->try_offset && clause1->handler_offset <= clause2->handler_offset) { //(clause1->flags == MONO_EXCEPTION_CLAUSE_FINALLY || clause2->flags == MONO_EXCEPTION_CLAUSE_FINALLY)) { cfg->exception_message = g_strdup ("nested clauses"); cfg->disable_llvm = TRUE; break; } } } if (cfg->disable_llvm) return; /* FIXME: */ if (cfg->method->dynamic) { cfg->exception_message = g_strdup ("dynamic."); cfg->disable_llvm = TRUE; } if (cfg->disable_llvm) return; } static LLVMCallInfo* get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig) { LLVMCallInfo *linfo; int i; if (cfg->gsharedvt && cfg->llvm_only && mini_is_gsharedvt_variable_signature (sig)) { int i, n, pindex; /* * Gsharedvt methods have the following calling convention: * - all arguments are passed by ref, even non generic ones * - the return value is returned by ref too, using a vret * argument passed after 'this'. */ n = sig->param_count + sig->hasthis; linfo = (LLVMCallInfo*)mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n)); pindex = 0; if (sig->hasthis) linfo->args [pindex ++].storage = LLVMArgNormal; if (sig->ret->type != MONO_TYPE_VOID) { if (mini_is_gsharedvt_variable_type (sig->ret)) linfo->ret.storage = LLVMArgGsharedvtVariable; else if (mini_type_is_vtype (sig->ret)) linfo->ret.storage = LLVMArgGsharedvtFixedVtype; else linfo->ret.storage = LLVMArgGsharedvtFixed; linfo->vret_arg_index = pindex; } else { linfo->ret.storage = LLVMArgNone; } for (i = 0; i < sig->param_count; ++i) { if (m_type_is_byref (sig->params [i])) linfo->args [pindex].storage = LLVMArgNormal; else if (mini_is_gsharedvt_variable_type (sig->params [i])) linfo->args [pindex].storage = LLVMArgGsharedvtVariable; else if (mini_type_is_vtype (sig->params [i])) linfo->args [pindex].storage = LLVMArgGsharedvtFixedVtype; else linfo->args [pindex].storage = LLVMArgGsharedvtFixed; linfo->args [pindex].type = sig->params [i]; pindex ++; } return linfo; } linfo = mono_arch_get_llvm_call_info (cfg, sig); linfo->dummy_arg_pindex = -1; for (i = 0; i < sig->param_count; ++i) linfo->args [i + sig->hasthis].type = sig->params [i]; return linfo; } static void emit_method_inner (EmitContext *ctx); static void free_ctx (EmitContext *ctx) { GSList *l; g_free (ctx->values); g_free (ctx->addresses); g_free (ctx->vreg_types); g_free (ctx->is_vphi); g_free (ctx->vreg_cli_types); g_free (ctx->is_dead); g_free (ctx->unreachable); g_free (ctx->gc_var_indexes); g_ptr_array_free (ctx->phi_values, TRUE); g_free (ctx->bblocks); g_hash_table_destroy (ctx->region_to_handler); g_hash_table_destroy (ctx->clause_to_handler); g_hash_table_destroy (ctx->jit_callees); g_ptr_array_free (ctx->callsite_list, TRUE); g_free (ctx->method_name); g_ptr_array_free (ctx->bblock_list, TRUE); for (l = ctx->builders; l; l = l->next) { LLVMBuilderRef builder = (LLVMBuilderRef)l->data; LLVMDisposeBuilder (builder); } g_free (ctx); } static gboolean is_linkonce_method (MonoMethod *method) { #ifdef TARGET_WASM /* * Under wasm, linkonce works, so use it instead of the dedup pass for wrappers at least. * FIXME: Use for everything, i.e. can_dedup (). * FIXME: Fails System.Core tests * -> amodule->sorted_methods contains duplicates, screwing up jit tables. 
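 * Until those issues are resolved, this helper conservatively returns FALSE for every method.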
*/ // FIXME: This works, but the aot data for the methods is still kept, so size still increases #if 0 if (method->wrapper_type == MONO_WRAPPER_OTHER) { WrapperInfo *info = mono_marshal_get_wrapper_info (method); if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG) return TRUE; } #endif #endif return FALSE; } /* * mono_llvm_emit_method: * * Emit LLVM IR from the mono IL, and compile it to native code using LLVM. */ void mono_llvm_emit_method (MonoCompile *cfg) { EmitContext *ctx; char *method_name; gboolean is_linkonce = FALSE; int i; if (cfg->skip) return; /* The code below might acquire the loader lock, so use it for global locking */ mono_loader_lock (); ctx = g_new0 (EmitContext, 1); ctx->cfg = cfg; ctx->mempool = cfg->mempool; /* * This maps vregs to the LLVM instruction defining them */ ctx->values = g_new0 (LLVMValueRef, cfg->next_vreg); /* * This maps vregs for volatile variables to the LLVM instruction defining their * address. */ ctx->addresses = g_new0 (LLVMValueRef, cfg->next_vreg); ctx->vreg_types = g_new0 (LLVMTypeRef, cfg->next_vreg); ctx->is_vphi = g_new0 (gboolean, cfg->next_vreg); ctx->vreg_cli_types = g_new0 (MonoType*, cfg->next_vreg); ctx->phi_values = g_ptr_array_sized_new (256); /* * This signals whether the vreg was defined by a phi node with no input vars * (i.e. all its input bblocks end with NOT_REACHABLE). */ ctx->is_dead = g_new0 (gboolean, cfg->next_vreg); /* Whether the bblock is unreachable */ ctx->unreachable = g_new0 (gboolean, cfg->max_block_num); ctx->bblock_list = g_ptr_array_sized_new (256); ctx->region_to_handler = g_hash_table_new (NULL, NULL); ctx->clause_to_handler = g_hash_table_new (NULL, NULL); ctx->callsite_list = g_ptr_array_new (); ctx->jit_callees = g_hash_table_new (NULL, NULL); if (cfg->compile_aot) { ctx->module = &aot_module; /* * Allow the linker to discard duplicate copies of wrappers, generic instances etc. by using the 'linkonce' * linkage for them. This requires the following: * - the method needs to have a unique mangled name * - llvmonly mode, since the code in aot-runtime.c would initialize got slots in the wrong aot image etc.
*/ if (ctx->module->llvm_only && ctx->module->static_link && is_linkonce_method (cfg->method)) is_linkonce = TRUE; if (is_linkonce || mono_aot_is_externally_callable (cfg->method)) method_name = mono_aot_get_mangled_method_name (cfg->method); else method_name = mono_aot_get_method_name (cfg); cfg->llvm_method_name = g_strdup (method_name); } else { ctx->module = init_jit_module (); method_name = mono_method_full_name (cfg->method, TRUE); } ctx->method_name = method_name; ctx->is_linkonce = is_linkonce; if (cfg->compile_aot) { ctx->lmodule = ctx->module->lmodule; } else { ctx->lmodule = LLVMModuleCreateWithName (g_strdup_printf ("jit-module-%s", cfg->method->name)); } ctx->llvm_only = ctx->module->llvm_only; #ifdef TARGET_WASM ctx->emit_dummy_arg = TRUE; #endif emit_method_inner (ctx); if (!ctx_ok (ctx)) { if (ctx->lmethod) { /* Need to add unused phi nodes as they can be referenced by other values */ LLVMBasicBlockRef phi_bb = LLVMAppendBasicBlock (ctx->lmethod, "PHI_BB"); LLVMBuilderRef builder; builder = create_builder (ctx); LLVMPositionBuilderAtEnd (builder, phi_bb); for (i = 0; i < ctx->phi_values->len; ++i) { LLVMValueRef v = (LLVMValueRef)g_ptr_array_index (ctx->phi_values, i); if (LLVMGetInstructionParent (v) == NULL) LLVMInsertIntoBuilder (builder, v); } if (ctx->module->llvm_only && ctx->module->static_link && cfg->interp) { /* The caller will retry compilation */ LLVMDeleteFunction (ctx->lmethod); } else if (ctx->module->llvm_only && ctx->module->static_link) { // Keep a stub for the function since it might be called directly int nbbs = LLVMCountBasicBlocks (ctx->lmethod); LLVMBasicBlockRef *bblocks = g_new0 (LLVMBasicBlockRef, nbbs); LLVMGetBasicBlocks (ctx->lmethod, bblocks); for (int i = 0; i < nbbs; ++i) LLVMRemoveBasicBlockFromParent (bblocks [i]); LLVMBasicBlockRef entry_bb = LLVMAppendBasicBlock (ctx->lmethod, "ENTRY"); builder = create_builder (ctx); LLVMPositionBuilderAtEnd (builder, entry_bb); ctx->builder = builder; LLVMTypeRef sig = LLVMFunctionType0 (LLVMVoidType (), FALSE); LLVMValueRef callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_nullref_exception)); LLVMBuildCall (builder, callee, NULL, 0, ""); LLVMBuildUnreachable (builder); /* Clean references to instructions inside the method */ for (int i = 0; i < ctx->callsite_list->len; ++i) { CallSite *callsite = (CallSite*)g_ptr_array_index (ctx->callsite_list, i); if (callsite->lmethod == ctx->lmethod) callsite->load = NULL; } } else { LLVMDeleteFunction (ctx->lmethod); } } } free_ctx (ctx); mono_loader_unlock (); } static void emit_method_inner (EmitContext *ctx) { MonoCompile *cfg = ctx->cfg; MonoMethodSignature *sig; MonoBasicBlock *bb; LLVMTypeRef method_type; LLVMValueRef method = NULL; LLVMValueRef *values = ctx->values; int i, max_block_num, bb_index; gboolean llvmonly_fail = FALSE; LLVMCallInfo *linfo; LLVMModuleRef lmodule = ctx->lmodule; BBInfo *bblocks; GPtrArray *bblock_list = ctx->bblock_list; MonoMethodHeader *header; MonoExceptionClause *clause; char **names; LLVMBuilderRef entry_builder = NULL; LLVMBasicBlockRef entry_bb = NULL; if (cfg->gsharedvt && !cfg->llvm_only) { set_failure (ctx, "gsharedvt"); return; } #if 0 { static int count = 0; count ++; char *llvm_count_str = g_getenv ("LLVM_COUNT"); if (llvm_count_str) { int lcount = atoi (llvm_count_str); g_free (llvm_count_str); if (count == lcount) { printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE)); fflush (stdout); } if (count > lcount) { set_failure (ctx, "count"); 
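/* LLVM_COUNT is a bisection aid: every method past the given count is failed out of the LLVM path. */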
			return;
		}
	}
#endif

	// If we come upon one of the init_method wrappers, we need to find
	// the method that we have already emitted and tell LLVM that this
	// managed method info for the wrapper is associated with this method
	// we constructed ourselves from LLVM IR.
	//
	// This is necessary to unwind through the init_method, in the case that
	// it has to run a static cctor that throws an exception
	if (cfg->method->wrapper_type == MONO_WRAPPER_OTHER) {
		WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
		if (info->subtype == WRAPPER_SUBTYPE_AOT_INIT) {
			method = get_init_func (ctx->module, info->d.aot_init.subtype);
			ctx->lmethod = method;
			ctx->module->max_method_idx = MAX (ctx->module->max_method_idx, cfg->method_index);

			const char *init_name = mono_marshal_get_aot_init_wrapper_name (info->d.aot_init.subtype);
			ctx->method_name = g_strdup_printf ("%s_%s", ctx->module->global_prefix, init_name);
			ctx->cfg->asm_symbol = g_strdup (ctx->method_name);

			if (!cfg->llvm_only && ctx->module->external_symbols) {
				LLVMSetLinkage (method, LLVMExternalLinkage);
				LLVMSetVisibility (method, LLVMHiddenVisibility);
			}
			/* Not looked up at runtime */
			g_hash_table_insert (ctx->module->no_method_table_lmethods, method, method);

			goto after_codegen;
		} else if (info->subtype == WRAPPER_SUBTYPE_LLVM_FUNC) {
			g_assert (info->d.llvm_func.subtype == LLVM_FUNC_WRAPPER_GC_POLL);

			if (cfg->compile_aot) {
				method = ctx->module->gc_poll_cold_wrapper;
				g_assert (method);
			} else {
				method = emit_icall_cold_wrapper (ctx->module, lmodule, MONO_JIT_ICALL_mono_threads_state_poll, FALSE);
			}
			ctx->lmethod = method;
			ctx->module->max_method_idx = MAX (ctx->module->max_method_idx, cfg->method_index);

			ctx->method_name = g_strdup (LLVMGetValueName (method)); //g_strdup_printf ("%s_%s", ctx->module->global_prefix, LLVMGetValueName (method));
			ctx->cfg->asm_symbol = g_strdup (ctx->method_name);

			if (!cfg->llvm_only && ctx->module->external_symbols) {
				LLVMSetLinkage (method, LLVMExternalLinkage);
				LLVMSetVisibility (method, LLVMHiddenVisibility);
			}

			goto after_codegen;
		}
	}

	sig = mono_method_signature_internal (cfg->method);
	ctx->sig = sig;

	linfo = get_llvm_call_info (cfg, sig);
	ctx->linfo = linfo;
	if (!ctx_ok (ctx))
		return;

	if (cfg->rgctx_var)
		linfo->rgctx_arg = TRUE;
	else if (needs_extra_arg (ctx, cfg->method))
		linfo->dummy_arg = TRUE;
	ctx->method_type = method_type = sig_to_llvm_sig_full (ctx, sig, linfo);
	if (!ctx_ok (ctx))
		return;

	method = LLVMAddFunction (lmodule, ctx->method_name, method_type);
	ctx->lmethod = method;

	if (!cfg->llvm_only)
		LLVMSetFunctionCallConv (method, LLVMMono1CallConv);

	/* if the method doesn't contain
	 *  (1) a call (so it's a leaf method)
	 *  (2) any loops
	 * we can skip the GC safepoint on method entry.
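	 * (A leaf method with no loops runs for a bounded amount of time, so the GC can
	 * simply wait for it to return instead of suspending it at a poll.)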
*/ gboolean requires_safepoint; requires_safepoint = cfg->has_calls; if (!requires_safepoint) { for (bb = cfg->bb_entry->next_bb; bb; bb = bb->next_bb) { if (bb->loop_body_start || (bb->flags & BB_EXCEPTION_HANDLER)) { requires_safepoint = TRUE; } } } if (cfg->method->wrapper_type) { if (cfg->method->wrapper_type == MONO_WRAPPER_ALLOC || cfg->method->wrapper_type == MONO_WRAPPER_WRITE_BARRIER) { requires_safepoint = FALSE; } else { WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method); switch (info->subtype) { case WRAPPER_SUBTYPE_GSHAREDVT_IN: case WRAPPER_SUBTYPE_GSHAREDVT_OUT: case WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG: case WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG: /* Arguments are not used after the call */ requires_safepoint = FALSE; break; } } } ctx->has_safepoints = requires_safepoint; if (!cfg->llvm_only && mono_threads_are_safepoints_enabled () && requires_safepoint) { if (!cfg->compile_aot) { LLVMSetGC (method, "coreclr"); emit_gc_safepoint_poll (ctx->module, ctx->lmodule, cfg); } else { LLVMSetGC (method, "coreclr"); } } LLVMSetLinkage (method, LLVMPrivateLinkage); mono_llvm_add_func_attr (method, LLVM_ATTR_UW_TABLE); if (cfg->disable_omit_fp) mono_llvm_add_func_attr_nv (method, "frame-pointer", "all"); if (cfg->compile_aot) { if (mono_aot_is_externally_callable (cfg->method)) { LLVMSetLinkage (method, LLVMExternalLinkage); } else { LLVMSetLinkage (method, LLVMInternalLinkage); //all methods have internal visibility when doing llvm_only if (!cfg->llvm_only && ctx->module->external_symbols) { LLVMSetLinkage (method, LLVMExternalLinkage); LLVMSetVisibility (method, LLVMHiddenVisibility); } } if (ctx->is_linkonce) { LLVMSetLinkage (method, LLVMLinkOnceAnyLinkage); LLVMSetVisibility (method, LLVMDefaultVisibility); } } else { LLVMSetLinkage (method, LLVMExternalLinkage); } if (cfg->method->save_lmf && !cfg->llvm_only) { set_failure (ctx, "lmf"); return; } if (sig->pinvoke && cfg->method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE && !cfg->llvm_only) { set_failure (ctx, "pinvoke signature"); return; } #ifdef TARGET_WASM if (ctx->module->interp && cfg->header->code_size > 100000 && !cfg->interp_entry_only) { /* Large methods slow down llvm too much */ set_failure (ctx, "il code too large."); return; } #endif header = cfg->header; for (i = 0; i < header->num_clauses; ++i) { clause = &header->clauses [i]; if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT && clause->flags != MONO_EXCEPTION_CLAUSE_NONE) { if (cfg->llvm_only) { if (!cfg->deopt && !cfg->interp_entry_only) llvmonly_fail = TRUE; } else { set_failure (ctx, "non-finally/catch/fault clause."); return; } } } if (header->num_clauses || (cfg->method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) || cfg->no_inline) /* We can't handle inlined methods with clauses */ mono_llvm_add_func_attr (method, LLVM_ATTR_NO_INLINE); for (int i = 0; i < cfg->header->num_clauses; i++) { MonoExceptionClause *clause = &cfg->header->clauses [i]; if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE || clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) ctx->has_catch = TRUE; } if (linfo->rgctx_arg) { ctx->rgctx_arg = LLVMGetParam (method, linfo->rgctx_arg_pindex); ctx->rgctx_arg_pindex = linfo->rgctx_arg_pindex; /* * We mark the rgctx parameter with the inreg attribute, which is mapped to * MONO_ARCH_RGCTX_REG in the Mono calling convention in llvm, i.e. * CC_X86_64_Mono in X86CallingConv.td. 
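		 * That way the rgctx argument arrives in the register the rest of the runtime expects.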
*/ if (!ctx->llvm_only) mono_llvm_add_param_attr (ctx->rgctx_arg, LLVM_ATTR_IN_REG); LLVMSetValueName (ctx->rgctx_arg, "rgctx"); } else { ctx->rgctx_arg_pindex = -1; } if (cfg->vret_addr) { values [cfg->vret_addr->dreg] = LLVMGetParam (method, linfo->vret_arg_pindex); LLVMSetValueName (values [cfg->vret_addr->dreg], "vret"); if (linfo->ret.storage == LLVMArgVtypeByRef) { mono_llvm_add_param_attr (LLVMGetParam (method, linfo->vret_arg_pindex), LLVM_ATTR_STRUCT_RET); mono_llvm_add_param_attr (LLVMGetParam (method, linfo->vret_arg_pindex), LLVM_ATTR_NO_ALIAS); } } if (sig->hasthis) { ctx->this_arg_pindex = linfo->this_arg_pindex; ctx->this_arg = LLVMGetParam (method, linfo->this_arg_pindex); values [cfg->args [0]->dreg] = ctx->this_arg; LLVMSetValueName (values [cfg->args [0]->dreg], "this"); } if (linfo->dummy_arg) LLVMSetValueName (LLVMGetParam (method, linfo->dummy_arg_pindex), "dummy_arg"); names = g_new (char *, sig->param_count); mono_method_get_param_names (cfg->method, (const char **) names); /* Set parameter names/attributes */ for (i = 0; i < sig->param_count; ++i) { LLVMArgInfo *ainfo = &linfo->args [i + sig->hasthis]; char *name; int pindex = ainfo->pindex + ainfo->ndummy_fpargs; int j; for (j = 0; j < ainfo->ndummy_fpargs; ++j) { name = g_strdup_printf ("dummy_%d_%d", i, j); LLVMSetValueName (LLVMGetParam (method, ainfo->pindex + j), name); g_free (name); } if (ainfo->storage == LLVMArgVtypeInReg && ainfo->pair_storage [0] == LLVMArgNone && ainfo->pair_storage [1] == LLVMArgNone) continue; values [cfg->args [i + sig->hasthis]->dreg] = LLVMGetParam (method, pindex); if (ainfo->storage == LLVMArgGsharedvtFixed || ainfo->storage == LLVMArgGsharedvtFixedVtype) { if (names [i] && names [i][0] != '\0') name = g_strdup_printf ("p_arg_%s", names [i]); else name = g_strdup_printf ("p_arg_%d", i); } else { if (names [i] && names [i][0] != '\0') name = g_strdup_printf ("arg_%s", names [i]); else name = g_strdup_printf ("arg_%d", i); } LLVMSetValueName (LLVMGetParam (method, pindex), name); g_free (name); if (ainfo->storage == LLVMArgVtypeByVal) mono_llvm_add_param_attr (LLVMGetParam (method, pindex), LLVM_ATTR_BY_VAL); if (ainfo->storage == LLVMArgVtypeByRef || ainfo->storage == LLVMArgVtypeAddr) { /* For OP_LDADDR */ cfg->args [i + sig->hasthis]->opcode = OP_VTARG_ADDR; } #ifdef TARGET_WASM if (ainfo->storage == LLVMArgVtypeByRef) { /* This causes llvm to make a copy of the value which is what we need */ mono_llvm_add_param_byval_attr (LLVMGetParam (method, pindex), LLVMGetElementType (LLVMTypeOf (LLVMGetParam (method, pindex)))); } #endif } g_free (names); if (ctx->module->emit_dwarf && cfg->compile_aot && mono_debug_enabled ()) { ctx->minfo = mono_debug_lookup_method (cfg->method); ctx->dbg_md = emit_dbg_subprogram (ctx, cfg, method, ctx->method_name); } max_block_num = 0; for (bb = cfg->bb_entry; bb; bb = bb->next_bb) max_block_num = MAX (max_block_num, bb->block_num); ctx->bblocks = bblocks = g_new0 (BBInfo, max_block_num + 1); /* Add branches between non-consecutive bblocks */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) && bb->next_bb != bb->last_ins->inst_false_bb) { MonoInst *inst = (MonoInst*)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst)); inst->opcode = OP_BR; inst->inst_target_bb = bb->last_ins->inst_false_bb; mono_bblock_add_inst (bb, inst); } } /* * Make a first pass over the code to precreate PHI nodes/set INDIRECT flags. 
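	 * (Variables whose address is taken via OP_LDADDR are flagged MONO_INST_INDIRECT so
	 * they are given memory storage instead of becoming pure SSA values.)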
*/ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins; LLVMBuilderRef builder; char *dname; char dname_buf[128]; builder = create_builder (ctx); for (ins = bb->code; ins; ins = ins->next) { switch (ins->opcode) { case OP_PHI: case OP_FPHI: case OP_VPHI: case OP_XPHI: { LLVMTypeRef phi_type = llvm_type_to_stack_type (cfg, type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass))); if (!ctx_ok (ctx)) return; if (cfg->interp_entry_only) break; if (ins->opcode == OP_VPHI) { /* Treat valuetype PHI nodes as operating on the address itself */ g_assert (ins->klass); phi_type = LLVMPointerType (type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass)), 0); } /* * Have to precreate these, as they can be referenced by * earlier instructions. */ sprintf (dname_buf, "t%d", ins->dreg); dname = dname_buf; values [ins->dreg] = LLVMBuildPhi (builder, phi_type, dname); if (ins->opcode == OP_VPHI) ctx->addresses [ins->dreg] = values [ins->dreg]; g_ptr_array_add (ctx->phi_values, values [ins->dreg]); /* * Set the expected type of the incoming arguments since these have * to have the same type. */ for (i = 0; i < ins->inst_phi_args [0]; i++) { int sreg1 = ins->inst_phi_args [i + 1]; if (sreg1 != -1) { if (ins->opcode == OP_VPHI) ctx->is_vphi [sreg1] = TRUE; ctx->vreg_types [sreg1] = phi_type; } } break; } case OP_LDADDR: ((MonoInst*)ins->inst_p0)->flags |= MONO_INST_INDIRECT; break; default: break; } } } /* * Create an ordering for bblocks, use the depth first order first, then * put the exception handling bblocks last. */ for (bb_index = 0; bb_index < cfg->num_bblocks; ++bb_index) { bb = cfg->bblocks [bb_index]; if (!(bb->region != -1 && !MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_TRY))) { g_ptr_array_add (bblock_list, bb); bblocks [bb->block_num].added = TRUE; } } for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { if (!bblocks [bb->block_num].added) g_ptr_array_add (bblock_list, bb); } /* * Second pass: generate code. */ // Emit entry point entry_builder = create_builder (ctx); entry_bb = get_bb (ctx, cfg->bb_entry); LLVMPositionBuilderAtEnd (entry_builder, entry_bb); emit_entry_bb (ctx, entry_builder); if (llvmonly_fail) /* * In llvmonly mode, we want to emit an llvm method for every method even if it fails to compile, * so direct calls can be made from outside the assembly. */ goto after_codegen_1; for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { int clause_index; char name [128]; if (ctx->cfg->interp_entry_only || !(bb->region != -1 && (bb->flags & BB_EXCEPTION_HANDLER))) continue; if (ctx->cfg->deopt && MONO_REGION_FLAGS (bb->region) == MONO_EXCEPTION_CLAUSE_FILTER) continue; clause_index = MONO_REGION_CLAUSE_INDEX (bb->region); g_hash_table_insert (ctx->region_to_handler, GUINT_TO_POINTER (mono_get_block_region_notry (cfg, bb->region)), bb); g_hash_table_insert (ctx->clause_to_handler, GINT_TO_POINTER (clause_index), bb); /* * Create a new bblock which CALL_HANDLER/landing pads can branch to, because branching to the * LLVM bblock containing a landing pad causes problems for the * LLVM optimizer passes. 
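		 * (A landing pad must be the first non-PHI instruction of its bblock, so that
		 * bblock cannot double as an ordinary branch target.)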
*/ sprintf (name, "BB%d_CALL_HANDLER_TARGET", bb->block_num); ctx->bblocks [bb->block_num].call_handler_target_bb = LLVMAppendBasicBlock (ctx->lmethod, name); } // Make landing pads first ctx->exc_meta = g_hash_table_new_full (NULL, NULL, NULL, NULL); if (ctx->llvm_only && !ctx->cfg->interp_entry_only) { size_t group_index = 0; while (group_index < cfg->header->num_clauses) { if (cfg->clause_is_dead [group_index]) { group_index ++; continue; } int count = 0; size_t cursor = group_index; while (cursor < cfg->header->num_clauses && CLAUSE_START (&cfg->header->clauses [cursor]) == CLAUSE_START (&cfg->header->clauses [group_index]) && CLAUSE_END (&cfg->header->clauses [cursor]) == CLAUSE_END (&cfg->header->clauses [group_index])) { count++; cursor++; } LLVMBasicBlockRef lpad_bb = emit_landing_pad (ctx, group_index, count); intptr_t key = CLAUSE_END (&cfg->header->clauses [group_index]); g_hash_table_insert (ctx->exc_meta, (gpointer)key, lpad_bb); group_index = cursor; } } for (bb_index = 0; bb_index < bblock_list->len; ++bb_index) { bb = (MonoBasicBlock*)g_ptr_array_index (bblock_list, bb_index); // Prune unreachable mono BBs. if (!(bb == cfg->bb_entry || bb->in_count > 0)) continue; process_bb (ctx, bb); if (!ctx_ok (ctx)) return; } g_hash_table_destroy (ctx->exc_meta); mono_memory_barrier (); /* Add incoming phi values */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { GSList *l, *ins_list; ins_list = bblocks [bb->block_num].phi_nodes; for (l = ins_list; l; l = l->next) { PhiNode *node = (PhiNode*)l->data; MonoInst *phi = node->phi; int sreg1 = node->sreg; LLVMBasicBlockRef in_bb; if (sreg1 == -1) continue; in_bb = get_end_bb (ctx, node->in_bb); if (ctx->unreachable [node->in_bb->block_num]) continue; if (phi->opcode == OP_VPHI) { g_assert (LLVMTypeOf (ctx->addresses [sreg1]) == LLVMTypeOf (values [phi->dreg])); LLVMAddIncoming (values [phi->dreg], &ctx->addresses [sreg1], &in_bb, 1); } else { if (!values [sreg1]) { /* Can happen with values in EH clauses */ set_failure (ctx, "incoming phi sreg1"); return; } if (LLVMTypeOf (values [sreg1]) != LLVMTypeOf (values [phi->dreg])) { set_failure (ctx, "incoming phi arg type mismatch"); return; } g_assert (LLVMTypeOf (values [sreg1]) == LLVMTypeOf (values [phi->dreg])); LLVMAddIncoming (values [phi->dreg], &values [sreg1], &in_bb, 1); } } } /* Nullify empty phi instructions */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { GSList *l, *ins_list; ins_list = bblocks [bb->block_num].phi_nodes; for (l = ins_list; l; l = l->next) { PhiNode *node = (PhiNode*)l->data; MonoInst *phi = node->phi; LLVMValueRef phi_ins = values [phi->dreg]; if (!phi_ins) /* Already removed */ continue; if (LLVMCountIncoming (phi_ins) == 0) { mono_llvm_replace_uses_of (phi_ins, LLVMConstNull (LLVMTypeOf (phi_ins))); LLVMInstructionEraseFromParent (phi_ins); values [phi->dreg] = NULL; } } } /* Create the SWITCH statements for ENDFINALLY instructions */ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { BBInfo *info = &bblocks [bb->block_num]; GSList *l; for (l = info->endfinally_switch_ins_list; l; l = l->next) { LLVMValueRef switch_ins = (LLVMValueRef)l->data; GSList *bb_list = info->call_handler_return_bbs; GSList *bb_list_iter; i = 0; for (bb_list_iter = bb_list; bb_list_iter; bb_list_iter = g_slist_next (bb_list_iter)) { LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i + 1, FALSE), (LLVMBasicBlockRef)bb_list_iter->data); i ++; } } } ctx->module->max_method_idx = MAX (ctx->module->max_method_idx, cfg->method_index); after_codegen_1: if (llvmonly_fail) { /* * FIXME: 
Maybe fallback to interpreter */ static LLVMTypeRef sig; ctx->builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, ctx->inited_bb); char *name = mono_method_get_full_name (cfg->method); int len = strlen (name); LLVMTypeRef type = LLVMArrayType (LLVMInt8Type (), len + 1); LLVMValueRef name_var = LLVMAddGlobal (ctx->lmodule, type, "missing_method_name"); LLVMSetVisibility (name_var, LLVMHiddenVisibility); LLVMSetLinkage (name_var, LLVMInternalLinkage); LLVMSetInitializer (name_var, mono_llvm_create_constant_data_array ((guint8*)name, len + 1)); mono_llvm_set_is_constant (name_var); g_free (name); if (!sig) sig = LLVMFunctionType1 (LLVMVoidType (), ctx->module->ptr_type, FALSE); LLVMValueRef callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_aot_failed_exception)); LLVMValueRef args [] = { convert (ctx, name_var, ctx->module->ptr_type) }; LLVMBuildCall (ctx->builder, callee, args, 1, ""); LLVMBuildUnreachable (ctx->builder); } /* Initialize the method if needed */ if (cfg->compile_aot) { // FIXME: Add more shared got entries ctx->builder = create_builder (ctx); LLVMPositionBuilderAtEnd (ctx->builder, ctx->init_bb); // FIXME: beforefieldinit /* * NATIVE_TO_MANAGED methods might be called on a thread not attached to the runtime, so they are initialized when loaded * in load_method (). */ gboolean needs_init = ctx->cfg->got_access_count > 0; MonoMethod *cctor = NULL; if (!needs_init && (cctor = mono_class_get_cctor (cfg->method->klass))) { /* Needs init to run the cctor */ if (cfg->method->flags & METHOD_ATTRIBUTE_STATIC) needs_init = TRUE; if (cctor == cfg->method) needs_init = FALSE; // If we are a constructor, we need to init so the static // constructor gets called. if (!strcmp (cfg->method->name, ".ctor")) needs_init = TRUE; } if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) needs_init = FALSE; if (needs_init) emit_method_init (ctx); else LLVMBuildBr (ctx->builder, ctx->inited_bb); // Was observing LLVM moving field accesses into the caller's method // body before the init call (the inlined one), leading to NULL derefs // after the init_method returns (GOT is filled out though) if (needs_init) mono_llvm_add_func_attr (method, LLVM_ATTR_NO_INLINE); } if (mini_get_debug_options ()->llvm_disable_inlining) mono_llvm_add_func_attr (method, LLVM_ATTR_NO_INLINE); after_codegen: if (cfg->compile_aot) g_ptr_array_add (ctx->module->cfgs, cfg); if (cfg->llvm_only) { /* * Add the contents of ctx->callsite_list to module->callsite_list. * We can't do this earlier, as it contains llvm instructions which can be * freed if compilation fails. * FIXME: Get rid of this when all methods can be llvm compiled. 
*/ for (int i = 0; i < ctx->callsite_list->len; ++i) g_ptr_array_add (ctx->module->callsite_list, g_ptr_array_index (ctx->callsite_list, i)); } if (cfg->verbose_level > 1) { g_print ("\n*** Unoptimized LLVM IR for %s ***\n", mono_method_full_name (cfg->method, TRUE)); if (cfg->compile_aot) { mono_llvm_dump_value (method); } else { mono_llvm_dump_module (ctx->lmodule); } g_print ("***\n\n"); } if (cfg->compile_aot && !cfg->llvm_only) mark_as_used (ctx->module, method); if (!cfg->llvm_only) { LLVMValueRef md_args [16]; LLVMValueRef md_node; int method_index; if (cfg->compile_aot) method_index = mono_aot_get_method_index (cfg->orig_method); else method_index = 1; md_args [0] = LLVMMDString (ctx->method_name, strlen (ctx->method_name)); md_args [1] = LLVMConstInt (LLVMInt32Type (), method_index, FALSE); md_node = LLVMMDNode (md_args, 2); LLVMAddNamedMetadataOperand (lmodule, "mono.function_indexes", md_node); //LLVMSetMetadata (method, md_kind, LLVMMDNode (&md_arg, 1)); } if (cfg->compile_aot) { /* Don't generate native code, keep the LLVM IR */ if (cfg->verbose_level) { char *name = mono_method_get_full_name (cfg->method); printf ("%s emitted as %s\n", name, ctx->method_name); g_free (name); } #if 0 int err = LLVMVerifyFunction (ctx->lmethod, LLVMPrintMessageAction); if (err != 0) LLVMDumpValue (ctx->lmethod); g_assert (err == 0); #endif } else { //LLVMVerifyFunction (method, 0); llvm_jit_finalize_method (ctx); } if (ctx->module->method_to_lmethod) g_hash_table_insert (ctx->module->method_to_lmethod, cfg->method, ctx->lmethod); if (ctx->module->idx_to_lmethod) g_hash_table_insert (ctx->module->idx_to_lmethod, GINT_TO_POINTER (cfg->method_index), ctx->lmethod); if (ctx->llvm_only && m_class_is_valuetype (cfg->orig_method->klass) && !(cfg->orig_method->flags & METHOD_ATTRIBUTE_STATIC)) emit_unbox_tramp (ctx, ctx->method_name, ctx->method_type, ctx->lmethod, cfg->method_index); } /* * mono_llvm_create_vars: * * Same as mono_arch_create_vars () for LLVM. */ void mono_llvm_create_vars (MonoCompile *cfg) { MonoMethodSignature *sig; sig = mono_method_signature_internal (cfg->method); if (cfg->gsharedvt && cfg->llvm_only) { gboolean vretaddr = FALSE; if (mini_is_gsharedvt_variable_signature (sig) && sig->ret->type != MONO_TYPE_VOID) { vretaddr = TRUE; } else { MonoMethodSignature *sig = mono_method_signature_internal (cfg->method); LLVMCallInfo *linfo; linfo = get_llvm_call_info (cfg, sig); vretaddr = (linfo->ret.storage == LLVMArgVtypeRetAddr || linfo->ret.storage == LLVMArgVtypeByRef || linfo->ret.storage == LLVMArgGsharedvtFixed || linfo->ret.storage == LLVMArgGsharedvtVariable || linfo->ret.storage == LLVMArgGsharedvtFixedVtype); } if (vretaddr) { /* * Creating vret_addr forces CEE_SETRET to store the result into it, * so we don't have to generate any code in our OP_SETRET case. */ cfg->vret_addr = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_get_intptr_class ()), OP_ARG); if (G_UNLIKELY (cfg->verbose_level > 1)) { printf ("vret_addr = "); mono_print_ins (cfg->vret_addr); } } } else { mono_arch_create_vars (cfg); } cfg->lmf_ir = TRUE; } /* * mono_llvm_emit_call: * * Same as mono_arch_emit_call () for LLVM. 
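 * Lowers each argument into a MOVE/OP_LLVM_OUTARG_VT instruction according to the
 * LLVM call info computed by get_llvm_call_info ().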
*/ void mono_llvm_emit_call (MonoCompile *cfg, MonoCallInst *call) { MonoInst *in; MonoMethodSignature *sig; int i, n; LLVMArgInfo *ainfo; sig = call->signature; n = sig->param_count + sig->hasthis; if (sig->call_convention == MONO_CALL_VARARG) { cfg->exception_message = g_strdup ("varargs"); cfg->disable_llvm = TRUE; return; } call->cinfo = get_llvm_call_info (cfg, sig); if (cfg->disable_llvm) return; for (i = 0; i < n; ++i) { MonoInst *ins; ainfo = call->cinfo->args + i; in = call->args [i]; /* Simply remember the arguments */ switch (ainfo->storage) { case LLVMArgNormal: { MonoType *t = (sig->hasthis && i == 0) ? m_class_get_byval_arg (mono_get_intptr_class ()) : ainfo->type; int opcode; opcode = mono_type_to_regmove (cfg, t); if (opcode == OP_FMOVE) { MONO_INST_NEW (cfg, ins, OP_FMOVE); ins->dreg = mono_alloc_freg (cfg); } else if (opcode == OP_LMOVE) { MONO_INST_NEW (cfg, ins, OP_LMOVE); ins->dreg = mono_alloc_lreg (cfg); } else if (opcode == OP_RMOVE) { MONO_INST_NEW (cfg, ins, OP_RMOVE); ins->dreg = mono_alloc_freg (cfg); } else { MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); } ins->sreg1 = in->dreg; break; } case LLVMArgVtypeByVal: case LLVMArgVtypeByRef: case LLVMArgVtypeInReg: case LLVMArgVtypeAddr: case LLVMArgVtypeAsScalar: case LLVMArgAsIArgs: case LLVMArgAsFpArgs: case LLVMArgGsharedvtVariable: case LLVMArgGsharedvtFixed: case LLVMArgGsharedvtFixedVtype: case LLVMArgWasmVtypeAsScalar: MONO_INST_NEW (cfg, ins, OP_LLVM_OUTARG_VT); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = in->dreg; ins->inst_p0 = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMArgInfo)); memcpy (ins->inst_p0, ainfo, sizeof (LLVMArgInfo)); ins->inst_vtype = ainfo->type; ins->klass = mono_class_from_mono_type_internal (ainfo->type); break; default: cfg->exception_message = g_strdup ("ainfo->storage"); cfg->disable_llvm = TRUE; return; } if (!cfg->disable_llvm) { MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, 0, FALSE); } } } static inline void add_func (LLVMModuleRef module, const char *name, LLVMTypeRef ret_type, LLVMTypeRef *param_types, int nparams) { LLVMAddFunction (module, name, LLVMFunctionType (ret_type, param_types, nparams, FALSE)); } static LLVMValueRef add_intrins (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef *params, int nparams) { return mono_llvm_register_overloaded_intrinsic (module, id, params, nparams); } static LLVMValueRef add_intrins1 (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef param1) { return mono_llvm_register_overloaded_intrinsic (module, id, &param1, 1); } static LLVMValueRef add_intrins2 (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef param1, LLVMTypeRef param2) { LLVMTypeRef params [] = { param1, param2 }; return mono_llvm_register_overloaded_intrinsic (module, id, params, 2); } static LLVMValueRef add_intrins3 (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef param1, LLVMTypeRef param2, LLVMTypeRef param3) { LLVMTypeRef params [] = { param1, param2, param3 }; return mono_llvm_register_overloaded_intrinsic (module, id, params, 3); } static void add_intrinsic (LLVMModuleRef module, int id) { /* Register simple intrinsics */ LLVMValueRef intrins = mono_llvm_register_intrinsic (module, (IntrinsicId)id); if (intrins) { g_hash_table_insert (intrins_id_to_intrins, GINT_TO_POINTER (id), intrins); return; } if (intrin_arm64_ovr [id] != 0) { llvm_ovr_tag_t spec = intrin_arm64_ovr [id]; for (int vw = 0; vw < INTRIN_vectorwidths; ++vw) { for (int ew = 0; ew < INTRIN_elementwidths; ++ew) { llvm_ovr_tag_t vec_bit = 
					INTRIN_vector128 >> ((INTRIN_vectorwidths - 1) - vw);
				llvm_ovr_tag_t elem_bit = INTRIN_int8 << ew;
				llvm_ovr_tag_t test = vec_bit | elem_bit;
				if ((spec & test) == test) {
					uint8_t kind = intrin_kind [id];
					LLVMTypeRef distinguishing_type = intrin_types [vw][ew];
					if (kind == INTRIN_kind_ftoi && (elem_bit & (INTRIN_int32 | INTRIN_int64))) {
						/*
						 * @llvm.aarch64.neon.fcvtas.v4i32.v4f32
						 * @llvm.aarch64.neon.fcvtas.v2i64.v2f64
						 */
						intrins = add_intrins2 (module, id, distinguishing_type, intrin_types [vw][ew + 2]);
					} else if (kind == INTRIN_kind_widen) {
						/*
						 * @llvm.aarch64.neon.saddlp.v2i64.v4i32
						 * @llvm.aarch64.neon.saddlp.v4i16.v8i8
						 */
						intrins = add_intrins2 (module, id, distinguishing_type, intrin_types [vw][ew - 1]);
					} else if (kind == INTRIN_kind_widen_across) {
						/*
						 * @llvm.aarch64.neon.saddlv.i64.v4i32
						 * @llvm.aarch64.neon.saddlv.i32.v8i16
						 * @llvm.aarch64.neon.saddlv.i32.v16i8
						 * i8/i16 return types for NEON intrinsics will make isel fail as of LLVM 9.
						 */
						int associated_prim = MAX(ew + 1, 2);
						LLVMTypeRef associated_scalar_type = intrin_types [0][associated_prim];
						intrins = add_intrins2 (module, id, associated_scalar_type, distinguishing_type);
					} else if (kind == INTRIN_kind_across) {
						/*
						 * @llvm.aarch64.neon.uaddv.i64.v4i64
						 * @llvm.aarch64.neon.uaddv.i32.v4i32
						 * @llvm.aarch64.neon.uaddv.i32.v8i16
						 * @llvm.aarch64.neon.uaddv.i32.v16i8
						 * i8/i16 return types for NEON intrinsics will make isel fail as of LLVM 9.
						 */
						int associated_prim = MAX(ew, 2);
						LLVMTypeRef associated_scalar_type = intrin_types [0][associated_prim];
						intrins = add_intrins2 (module, id, associated_scalar_type, distinguishing_type);
					} else if (kind == INTRIN_kind_arm64_dot_prod) {
						/*
						 * @llvm.aarch64.neon.sdot.v2i32.v8i8
						 * @llvm.aarch64.neon.sdot.v4i32.v16i8
						 */
						LLVMTypeRef associated_type = intrin_types [vw][0];
						intrins = add_intrins2 (module, id, distinguishing_type, associated_type);
					} else
						intrins = add_intrins1 (module, id, distinguishing_type);
					int key = key_from_id_and_tag (id, test);
					g_hash_table_insert (intrins_id_to_intrins, GINT_TO_POINTER (key), intrins);
				}
			}
		}
		return;
	}

	/* Register overloaded intrinsics */
	switch (id) {
#define INTRINS(intrin_name, llvm_id, arch)
#define INTRINS_OVR(intrin_name, llvm_id, arch, llvm_type) case INTRINS_ ## intrin_name: intrins = add_intrins1(module, id, llvm_type); break;
#define INTRINS_OVR_2_ARG(intrin_name, llvm_id, arch, llvm_type1, llvm_type2) case INTRINS_ ## intrin_name: intrins = add_intrins2(module, id, llvm_type1, llvm_type2); break;
#define INTRINS_OVR_3_ARG(intrin_name, llvm_id, arch, llvm_type1, llvm_type2, llvm_type3) case INTRINS_ ## intrin_name: intrins = add_intrins3(module, id, llvm_type1, llvm_type2, llvm_type3); break;
#define INTRINS_OVR_TAG(...)
#define INTRINS_OVR_TAG_KIND(...)
#include "llvm-intrinsics.h"
	default:
		g_assert_not_reached ();
		break;
	}
	g_assert (intrins);
	g_hash_table_insert (intrins_id_to_intrins, GINT_TO_POINTER (id), intrins);
}

static LLVMValueRef
get_intrins_from_module (LLVMModuleRef lmodule, int id)
{
	LLVMValueRef res;

	res = (LLVMValueRef)g_hash_table_lookup (intrins_id_to_intrins, GINT_TO_POINTER (id));
	g_assert (res);
	return res;
}

static LLVMValueRef
get_intrins (EmitContext *ctx, int id)
{
	return get_intrins_from_module (ctx->lmodule, id);
}

static void
add_intrinsics (LLVMModuleRef module)
{
	int i;

	/* Emit declarations of intrinsics */
	/*
	 * It would be nicer to emit only the intrinsics actually used, but LLVM's Module
	 * type doesn't seem to do any locking.
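	 * (Lazily registering intrinsics from multiple compilation threads could therefore race.)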
*/ for (i = 0; i < INTRINS_NUM; ++i) add_intrinsic (module, i); /* EH intrinsics */ add_func (module, "mono_personality", LLVMVoidType (), NULL, 0); add_func (module, "llvm_resume_unwind_trampoline", LLVMVoidType (), NULL, 0); } static void add_types (MonoLLVMModule *module) { module->ptr_type = LLVMPointerType (TARGET_SIZEOF_VOID_P == 8 ? LLVMInt64Type () : LLVMInt32Type (), 0); } void mono_llvm_init (gboolean enable_jit) { intrin_types [0][0] = i1_t = LLVMInt8Type (); intrin_types [0][1] = i2_t = LLVMInt16Type (); intrin_types [0][2] = i4_t = LLVMInt32Type (); intrin_types [0][3] = i8_t = LLVMInt64Type (); intrin_types [0][4] = r4_t = LLVMFloatType (); intrin_types [0][5] = r8_t = LLVMDoubleType (); intrin_types [1][0] = v64_i1_t = LLVMVectorType (LLVMInt8Type (), 8); intrin_types [1][1] = v64_i2_t = LLVMVectorType (LLVMInt16Type (), 4); intrin_types [1][2] = v64_i4_t = LLVMVectorType (LLVMInt32Type (), 2); intrin_types [1][3] = v64_i8_t = LLVMVectorType (LLVMInt64Type (), 1); intrin_types [1][4] = v64_r4_t = LLVMVectorType (LLVMFloatType (), 2); intrin_types [1][5] = v64_r8_t = LLVMVectorType (LLVMDoubleType (), 1); intrin_types [2][0] = v128_i1_t = sse_i1_t = type_to_sse_type (MONO_TYPE_I1); intrin_types [2][1] = v128_i2_t = sse_i2_t = type_to_sse_type (MONO_TYPE_I2); intrin_types [2][2] = v128_i4_t = sse_i4_t = type_to_sse_type (MONO_TYPE_I4); intrin_types [2][3] = v128_i8_t = sse_i8_t = type_to_sse_type (MONO_TYPE_I8); intrin_types [2][4] = v128_r4_t = sse_r4_t = type_to_sse_type (MONO_TYPE_R4); intrin_types [2][5] = v128_r8_t = sse_r8_t = type_to_sse_type (MONO_TYPE_R8); intrins_id_to_intrins = g_hash_table_new (NULL, NULL); void_func_t = LLVMFunctionType0 (LLVMVoidType (), FALSE); if (enable_jit) mono_llvm_jit_init (); } void mono_llvm_free_mem_manager (MonoJitMemoryManager *mem_manager) { MonoLLVMModule *module = (MonoLLVMModule*)mem_manager->llvm_module; int i; if (!module) return; g_hash_table_destroy (module->llvm_types); mono_llvm_dispose_ee (module->mono_ee); if (module->bb_names) { for (i = 0; i < module->bb_names_len; ++i) g_free (module->bb_names [i]); g_free (module->bb_names); } //LLVMDisposeModule (module->module); g_free (module); mem_manager->llvm_module = NULL; } void mono_llvm_create_aot_module (MonoAssembly *assembly, const char *global_prefix, int initial_got_size, LLVMModuleFlags flags) { MonoLLVMModule *module = &aot_module; gboolean emit_dwarf = (flags & LLVM_MODULE_FLAG_DWARF) ? 1 : 0; #ifdef TARGET_WIN32_MSVC gboolean emit_codeview = (flags & LLVM_MODULE_FLAG_CODEVIEW) ? 1 : 0; #endif gboolean static_link = (flags & LLVM_MODULE_FLAG_STATIC) ? 1 : 0; gboolean llvm_only = (flags & LLVM_MODULE_FLAG_LLVM_ONLY) ? 1 : 0; gboolean interp = (flags & LLVM_MODULE_FLAG_INTERP) ? 
1 : 0; /* Delete previous module */ g_hash_table_destroy (module->plt_entries); if (module->lmodule) LLVMDisposeModule (module->lmodule); memset (module, 0, sizeof (aot_module)); module->lmodule = LLVMModuleCreateWithName ("aot"); module->assembly = assembly; module->global_prefix = g_strdup (global_prefix); module->eh_frame_symbol = g_strdup_printf ("%s_eh_frame", global_prefix); module->get_method_symbol = g_strdup_printf ("%s_get_method", global_prefix); module->get_unbox_tramp_symbol = g_strdup_printf ("%s_get_unbox_tramp", global_prefix); module->init_aotconst_symbol = g_strdup_printf ("%s_init_aotconst", global_prefix); module->external_symbols = TRUE; module->emit_dwarf = emit_dwarf; module->static_link = static_link; module->llvm_only = llvm_only; module->interp = interp; /* The first few entries are reserved */ module->max_got_offset = initial_got_size; module->context = LLVMGetGlobalContext (); module->cfgs = g_ptr_array_new (); module->aotconst_vars = g_hash_table_new (NULL, NULL); module->llvm_types = g_hash_table_new (NULL, NULL); module->plt_entries = g_hash_table_new (g_str_hash, g_str_equal); module->plt_entries_ji = g_hash_table_new (NULL, NULL); module->direct_callables = g_hash_table_new (g_str_hash, g_str_equal); module->idx_to_lmethod = g_hash_table_new (NULL, NULL); module->method_to_lmethod = g_hash_table_new (NULL, NULL); module->method_to_call_info = g_hash_table_new (NULL, NULL); module->idx_to_unbox_tramp = g_hash_table_new (NULL, NULL); module->no_method_table_lmethods = g_hash_table_new (NULL, NULL); module->callsite_list = g_ptr_array_new (); if (llvm_only) /* clang ignores our debug info because it has an invalid version */ module->emit_dwarf = FALSE; add_intrinsics (module->lmodule); add_types (module); #ifdef MONO_ARCH_LLVM_TARGET_LAYOUT LLVMSetDataLayout (module->lmodule, MONO_ARCH_LLVM_TARGET_LAYOUT); #else g_assert_not_reached (); #endif #ifdef MONO_ARCH_LLVM_TARGET_TRIPLE LLVMSetTarget (module->lmodule, MONO_ARCH_LLVM_TARGET_TRIPLE); #endif if (module->emit_dwarf) { char *dir, *build_info, *s, *cu_name; module->di_builder = mono_llvm_create_di_builder (module->lmodule); // FIXME: dir = g_strdup ("."); build_info = mono_get_runtime_build_info (); s = g_strdup_printf ("Mono AOT Compiler %s (LLVM)", build_info); cu_name = g_path_get_basename (assembly->image->name); module->cu = mono_llvm_di_create_compile_unit (module->di_builder, cu_name, dir, s); g_free (dir); g_free (build_info); g_free (s); } #ifdef TARGET_WIN32_MSVC if (emit_codeview) { LLVMValueRef codeview_option_args[3]; codeview_option_args[0] = LLVMConstInt (LLVMInt32Type (), 2, FALSE); codeview_option_args[1] = LLVMMDString ("CodeView", 8); codeview_option_args[2] = LLVMConstInt (LLVMInt32Type (), 1, FALSE); LLVMAddNamedMetadataOperand (module->lmodule, "llvm.module.flags", LLVMMDNode (codeview_option_args, G_N_ELEMENTS (codeview_option_args))); } if (!static_link) { const char linker_options[] = "Linker Options"; const char *default_dynamic_lib_names[] = { "/DEFAULTLIB:msvcrt", "/DEFAULTLIB:ucrt.lib", "/DEFAULTLIB:vcruntime.lib" }; LLVMValueRef default_lib_args[G_N_ELEMENTS (default_dynamic_lib_names)]; LLVMValueRef default_lib_nodes[G_N_ELEMENTS(default_dynamic_lib_names)]; const char *default_lib_name = NULL; for (int i = 0; i < G_N_ELEMENTS (default_dynamic_lib_names); ++i) { const char *default_lib_name = default_dynamic_lib_names[i]; default_lib_args[i] = LLVMMDString (default_lib_name, strlen (default_lib_name)); default_lib_nodes[i] = LLVMMDNode (default_lib_args + i, 1); } 
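		/* Attach the default CRT libraries as 'llvm.linker.options' metadata so the MSVC linker pulls them in automatically. */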
		LLVMAddNamedMetadataOperand (module->lmodule, "llvm.linker.options", LLVMMDNode (default_lib_args, G_N_ELEMENTS (default_lib_args)));
	}
#endif

	{
		LLVMTypeRef got_type = LLVMArrayType (module->ptr_type, 16);

		module->dummy_got_var = LLVMAddGlobal (module->lmodule, got_type, "dummy_got");
		module->got_idx_to_type = g_hash_table_new (NULL, NULL);
		LLVMSetInitializer (module->dummy_got_var, LLVMConstNull (got_type));
		LLVMSetVisibility (module->dummy_got_var, LLVMHiddenVisibility);
		LLVMSetLinkage (module->dummy_got_var, LLVMInternalLinkage);
	}

	/* Add initialization array */
	LLVMTypeRef inited_type = LLVMArrayType (LLVMInt8Type (), 0);

	module->inited_var = LLVMAddGlobal (aot_module.lmodule, inited_type, "mono_inited_tmp");
	LLVMSetInitializer (module->inited_var, LLVMConstNull (inited_type));

	create_aot_info_var (module);

	emit_gc_safepoint_poll (module, module->lmodule, NULL);

	emit_llvm_code_start (module);

	// Needs idx_to_lmethod
	emit_init_funcs (module);

	/* Add a dummy personality function */
	if (!use_mono_personality_debug) {
		LLVMValueRef personality = LLVMAddFunction (module->lmodule, default_personality_name, LLVMFunctionType (LLVMInt32Type (), NULL, 0, TRUE));
		LLVMSetLinkage (personality, LLVMExternalLinkage);

		//EMCC chokes if the personality function is referenced in the 'used' array
#ifndef TARGET_WASM
		mark_as_used (module, personality);
#endif
	}

	/* Add a reference to the c++ exception we throw/catch */
	{
		LLVMTypeRef exc = LLVMPointerType (LLVMInt8Type (), 0);
		module->sentinel_exception = LLVMAddGlobal (module->lmodule, exc, "_ZTIPi");
		LLVMSetLinkage (module->sentinel_exception, LLVMExternalLinkage);
		mono_llvm_set_is_constant (module->sentinel_exception);
	}
}

void
mono_llvm_fixup_aot_module (void)
{
	MonoLLVMModule *module = &aot_module;
	MonoMethod *method;

	/*
	 * Replace GOT entries for directly callable methods with the methods themselves.
	 * It would be easier to implement this by predefining all methods before compiling
	 * their bodies, but that couldn't handle the case when a method fails to compile
	 * with llvm.
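	 * Instead, the already emitted code is patched up after the fact, using the placeholder
	 * loads recorded in module->callsite_list.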
*/ GHashTable *specializable = g_hash_table_new (NULL, NULL); GHashTable *patches_to_null = g_hash_table_new (mono_patch_info_hash, mono_patch_info_equal); for (int sindex = 0; sindex < module->callsite_list->len; ++sindex) { CallSite *site = (CallSite*)g_ptr_array_index (module->callsite_list, sindex); method = site->method; LLVMValueRef lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, method); LLVMValueRef placeholder = (LLVMValueRef)site->load; LLVMValueRef load; if (placeholder == NULL) /* Method failed LLVM compilation */ continue; gboolean can_direct_call = FALSE; /* Replace sharable instances with their shared version */ if (!lmethod && method->is_inflated) { if (mono_method_is_generic_sharable_full (method, FALSE, TRUE, FALSE)) { ERROR_DECL (error); MonoMethod *shared = mini_get_shared_method_full (method, SHARE_MODE_NONE, error); if (is_ok (error)) { lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, shared); if (lmethod) method = shared; } } } if (lmethod && !m_method_is_synchronized (method)) { can_direct_call = TRUE; } else if (m_method_is_wrapper (method) && !method->is_inflated) { WrapperInfo *info = mono_marshal_get_wrapper_info (method); /* This is a call from the synchronized wrapper to the real method */ if (info->subtype == WRAPPER_SUBTYPE_SYNCHRONIZED_INNER) { method = info->d.synchronized.method; lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, method); if (lmethod) can_direct_call = TRUE; } } if (can_direct_call) { mono_llvm_replace_uses_of (placeholder, lmethod); if (mono_aot_can_specialize (method)) g_hash_table_insert (specializable, lmethod, method); g_hash_table_insert (patches_to_null, site->ji, site->ji); } else { // FIXME: LLVMBuilderRef builder = LLVMCreateBuilder (); LLVMPositionBuilderBefore (builder, placeholder); load = get_aotconst_module (module, builder, site->ji->type, site->ji->data.target, site->type, NULL, NULL); LLVMReplaceAllUsesWith (placeholder, load); } g_free (site); } mono_llvm_propagate_nonnull_final (specializable, module); g_hash_table_destroy (specializable); for (int i = 0; i < module->cfgs->len; ++i) { /* * Nullify the patches pointing to direct calls. This is needed to * avoid allocating extra got slots, which is a perf problem and it * makes module->max_got_offset invalid. * It would be better to just store the patch_info in CallSite, but * cfg->patch_info is copied in aot-compiler.c. 
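		 * (patches_to_null collects the MonoJumpInfos whose call sites were turned into direct calls above.)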
*/ MonoCompile *cfg = (MonoCompile *)g_ptr_array_index (module->cfgs, i); for (MonoJumpInfo *patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { if (patch_info->type == MONO_PATCH_INFO_METHOD) { if (g_hash_table_lookup (patches_to_null, patch_info)) { patch_info->type = MONO_PATCH_INFO_NONE; /* Nullify the call to init_method () if possible */ g_assert (cfg->got_access_count); cfg->got_access_count --; if (cfg->got_access_count == 0) { LLVMValueRef br = (LLVMValueRef)cfg->llvmonly_init_cond; if (br) LLVMSetSuccessor (br, 0, LLVMGetSuccessor (br, 1)); } } } } } g_hash_table_destroy (patches_to_null); } static LLVMValueRef llvm_array_from_uints (LLVMTypeRef el_type, guint32 *values, int nvalues) { int i; LLVMValueRef res, *vals; vals = g_new0 (LLVMValueRef, nvalues); for (i = 0; i < nvalues; ++i) vals [i] = LLVMConstInt (LLVMInt32Type (), values [i], FALSE); res = LLVMConstArray (LLVMInt32Type (), vals, nvalues); g_free (vals); return res; } static LLVMValueRef llvm_array_from_bytes (guint8 *values, int nvalues) { int i; LLVMValueRef res, *vals; vals = g_new0 (LLVMValueRef, nvalues); for (i = 0; i < nvalues; ++i) vals [i] = LLVMConstInt (LLVMInt8Type (), values [i], FALSE); res = LLVMConstArray (LLVMInt8Type (), vals, nvalues); g_free (vals); return res; } /* * mono_llvm_emit_aot_file_info: * * Emit the MonoAotFileInfo structure. * Same as emit_aot_file_info () in aot-compiler.c. */ void mono_llvm_emit_aot_file_info (MonoAotFileInfo *info, gboolean has_jitted_code) { MonoLLVMModule *module = &aot_module; /* Save these for later */ memcpy (&module->aot_info, info, sizeof (MonoAotFileInfo)); module->has_jitted_code = has_jitted_code; } /* * mono_llvm_emit_aot_data: * * Emit the binary data DATA pointed to by symbol SYMBOL. * Return the LLVM variable for the data. 
gpointer
mono_llvm_emit_aot_data_aligned (const char *symbol, guint8 *data, int data_len, int align)
{
	MonoLLVMModule *module = &aot_module;
	LLVMTypeRef type;
	LLVMValueRef d;

	type = LLVMArrayType (LLVMInt8Type (), data_len);
	d = LLVMAddGlobal (module->lmodule, type, symbol);
	LLVMSetVisibility (d, LLVMHiddenVisibility);
	LLVMSetLinkage (d, LLVMInternalLinkage);
	LLVMSetInitializer (d, mono_llvm_create_constant_data_array (data, data_len));
	if (align != 1)
		LLVMSetAlignment (d, align);
	mono_llvm_set_is_constant (d);
	return d;
}

gpointer
mono_llvm_emit_aot_data (const char *symbol, guint8 *data, int data_len)
{
	return mono_llvm_emit_aot_data_aligned (symbol, data, data_len, 8);
}

/* Add a reference to a global defined in JITted code */
static LLVMValueRef
AddJitGlobal (MonoLLVMModule *module, LLVMTypeRef type, const char *name)
{
	char *s;
	LLVMValueRef v;

	s = g_strdup_printf ("%s%s", module->global_prefix, name);
	v = LLVMAddGlobal (module->lmodule, LLVMInt8Type (), s);
	LLVMSetVisibility (v, LLVMHiddenVisibility);
	g_free (s);
	return v;
}

#define FILE_INFO_NUM_HEADER_FIELDS 2
#define FILE_INFO_NUM_SCALAR_FIELDS 23
#define FILE_INFO_NUM_ARRAY_FIELDS 5
#define FILE_INFO_NUM_AOTID_FIELDS 1
#define FILE_INFO_NFIELDS (FILE_INFO_NUM_HEADER_FIELDS + MONO_AOT_FILE_INFO_NUM_SYMBOLS + FILE_INFO_NUM_SCALAR_FIELDS + FILE_INFO_NUM_ARRAY_FIELDS + FILE_INFO_NUM_AOTID_FIELDS)

static void
create_aot_info_var (MonoLLVMModule *module)
{
	LLVMTypeRef file_info_type;
	LLVMTypeRef *eltypes;
	LLVMValueRef info_var;
	int i, nfields, tindex;
	LLVMModuleRef lmodule = module->lmodule;

	/* Create an LLVM type to represent MonoAotFileInfo */
	nfields = FILE_INFO_NFIELDS;
	eltypes = g_new (LLVMTypeRef, nfields);
	tindex = 0;
	eltypes [tindex ++] = LLVMInt32Type ();
	eltypes [tindex ++] = LLVMInt32Type ();
	/* Symbols */
	for (i = 0; i < MONO_AOT_FILE_INFO_NUM_SYMBOLS; ++i)
		eltypes [tindex ++] = LLVMPointerType (LLVMInt8Type (), 0);
	/* Scalars */
	for (i = 0; i < FILE_INFO_NUM_SCALAR_FIELDS; ++i)
		eltypes [tindex ++] = LLVMInt32Type ();
	/* Arrays */
	eltypes [tindex ++] = LLVMArrayType (LLVMInt32Type (), MONO_AOT_TABLE_NUM);
	for (i = 0; i < FILE_INFO_NUM_ARRAY_FIELDS - 1; ++i)
		eltypes [tindex ++] = LLVMArrayType (LLVMInt32Type (), MONO_AOT_TRAMP_NUM);
	eltypes [tindex ++] = LLVMArrayType (LLVMInt8Type (), 16);
	g_assert (tindex == nfields);
	file_info_type = LLVMStructCreateNamed (module->context, "MonoAotFileInfo");
	LLVMStructSetBody (file_info_type, eltypes, nfields, FALSE);

	info_var = LLVMAddGlobal (lmodule, file_info_type, "mono_aot_file_info");

	module->info_var = info_var;
	module->info_var_eltypes = eltypes;
}

static void
emit_aot_file_info (MonoLLVMModule *module)
{
	LLVMTypeRef *eltypes, eltype;
	LLVMValueRef info_var;
	LLVMValueRef *fields;
	int i, nfields, tindex;
	MonoAotFileInfo *info;
	LLVMModuleRef lmodule = module->lmodule;

	info = &module->aot_info;
	info_var = module->info_var;
	eltypes = module->info_var_eltypes;
	nfields = FILE_INFO_NFIELDS;

	if (module->static_link) {
		LLVMSetVisibility (info_var, LLVMHiddenVisibility);
		LLVMSetLinkage (info_var, LLVMInternalLinkage);
	}

#ifdef TARGET_WIN32
	if (!module->static_link) {
		LLVMSetDLLStorageClass (info_var, LLVMDLLExportStorageClass);
	}
#endif

	fields = g_new (LLVMValueRef, nfields);

	tindex = 0;
	fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->version, FALSE);
	fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->dummy, FALSE);

	/* Symbols */
	/*
	 * We use LLVMGetNamedGlobal () for symbols which are defined in LLVM code, and LLVMAddGlobal ()
	 * for symbols defined in the .s file emitted
by the aot compiler. */ eltype = eltypes [tindex]; if (module->llvm_only) fields [tindex ++] = LLVMConstNull (eltype); else fields [tindex ++] = AddJitGlobal (module, eltype, "jit_got"); /* llc defines this directly */ if (!module->llvm_only) { fields [tindex ++] = LLVMAddGlobal (lmodule, eltype, module->eh_frame_symbol); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); } else { fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = module->get_method; fields [tindex ++] = module->get_unbox_tramp ? module->get_unbox_tramp : LLVMConstNull (eltype); } fields [tindex ++] = module->init_aotconst_func; if (module->has_jitted_code) { fields [tindex ++] = AddJitGlobal (module, eltype, "jit_code_start"); fields [tindex ++] = AddJitGlobal (module, eltype, "jit_code_end"); } else { fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); } if (!module->llvm_only) fields [tindex ++] = AddJitGlobal (module, eltype, "method_addresses"); else fields [tindex ++] = LLVMConstNull (eltype); if (module->llvm_only && module->unbox_tramp_indexes) { fields [tindex ++] = module->unbox_tramp_indexes; fields [tindex ++] = module->unbox_trampolines; } else { fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); } if (info->flags & MONO_AOT_FILE_FLAG_SEPARATE_DATA) { for (i = 0; i < MONO_AOT_TABLE_NUM; ++i) fields [tindex ++] = LLVMConstNull (eltype); } else { fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "blob"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "class_name_table"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "class_info_offsets"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "method_info_offsets"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "ex_info_offsets"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "extra_method_info_offsets"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "extra_method_table"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "got_info_offsets"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "llvm_got_info_offsets"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "image_table"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "weak_field_indexes"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "method_flags_table"); } /* Not needed (mem_end) */ fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "assembly_guid"); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "runtime_version"); if (info->trampoline_size [0]) { fields [tindex ++] = AddJitGlobal (module, eltype, "specific_trampolines"); fields [tindex ++] = AddJitGlobal (module, eltype, "static_rgctx_trampolines"); fields [tindex ++] = AddJitGlobal (module, eltype, "imt_trampolines"); fields [tindex ++] = AddJitGlobal (module, eltype, "gsharedvt_arg_trampolines"); fields [tindex ++] = AddJitGlobal (module, eltype, "ftnptr_arg_trampolines"); fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_arbitrary_trampolines"); } else { fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); } if (module->static_link && !module->llvm_only) fields [tindex ++] = AddJitGlobal (module, eltype, "globals"); else fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMGetNamedGlobal (lmodule, 
"assembly_name"); if (!module->llvm_only) { fields [tindex ++] = AddJitGlobal (module, eltype, "plt"); fields [tindex ++] = AddJitGlobal (module, eltype, "plt_end"); fields [tindex ++] = AddJitGlobal (module, eltype, "unwind_info"); fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_trampolines"); fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_trampolines_end"); fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_trampoline_addresses"); } else { fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); fields [tindex ++] = LLVMConstNull (eltype); } for (i = 0; i < MONO_AOT_FILE_INFO_NUM_SYMBOLS; ++i) { g_assert (fields [FILE_INFO_NUM_HEADER_FIELDS + i]); fields [FILE_INFO_NUM_HEADER_FIELDS + i] = LLVMConstBitCast (fields [FILE_INFO_NUM_HEADER_FIELDS + i], eltype); } /* Scalars */ fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->plt_got_offset_base, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->plt_got_info_offset_base, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->got_size, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->llvm_got_size, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->plt_size, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->nmethods, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->nextra_methods, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->flags, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->opts, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->simd_opts, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->gc_name_index, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->num_rgctx_fetch_trampolines, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->double_align, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->long_align, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->generic_tramp_num, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->card_table_shift_bits, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->card_table_mask, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->tramp_page_size, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->call_table_entry_size, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->nshared_got_entries, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->datafile_size, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), module->unbox_tramp_num, FALSE); fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), module->unbox_tramp_elemsize, FALSE); /* Arrays */ fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->table_offsets, MONO_AOT_TABLE_NUM); fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->num_trampolines, MONO_AOT_TRAMP_NUM); fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->trampoline_got_offset_base, MONO_AOT_TRAMP_NUM); fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->trampoline_size, MONO_AOT_TRAMP_NUM); fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->tramp_page_code_offsets, MONO_AOT_TRAMP_NUM); fields [tindex ++] = llvm_array_from_bytes (info->aotid, 16); g_assert (tindex == nfields); 
	LLVMSetInitializer (info_var, LLVMConstNamedStruct (LLVMGetElementType (LLVMTypeOf (info_var)), fields, nfields));

	if (module->static_link) {
		char *s, *p;
		LLVMValueRef var;

		s = g_strdup_printf ("mono_aot_module_%s_info", module->assembly->aname.name);
		/* Get rid of characters which cannot occur in symbols */
		p = s;
		for (p = s; *p; ++p) {
			if (!(isalnum (*p) || *p == '_'))
				*p = '_';
		}
		var = LLVMAddGlobal (module->lmodule, LLVMPointerType (LLVMInt8Type (), 0), s);
		g_free (s);
		LLVMSetInitializer (var, LLVMConstBitCast (LLVMGetNamedGlobal (module->lmodule, "mono_aot_file_info"), LLVMPointerType (LLVMInt8Type (), 0)));
		LLVMSetLinkage (var, LLVMExternalLinkage);
	}
}

typedef struct {
	LLVMValueRef lmethod;
	int argument;
} NonnullPropWorkItem;

static void
mono_llvm_nonnull_state_update (EmitContext *ctx, LLVMValueRef lcall, MonoMethod *call_method, LLVMValueRef *args, int num_params)
{
	if (mono_aot_can_specialize (call_method)) {
		int num_passed = LLVMGetNumArgOperands (lcall);
		g_assert (num_params <= num_passed);

		g_assert (ctx->module->method_to_call_info);
		GArray *call_site_union = (GArray *) g_hash_table_lookup (ctx->module->method_to_call_info, call_method);

		if (!call_site_union) {
			call_site_union = g_array_sized_new (FALSE, TRUE, sizeof (gint32), num_params);
			int zero = 0;
			for (int i = 0; i < num_params; i++)
				g_array_insert_val (call_site_union, i, zero);
		}

		for (int i = 0; i < num_params; i++) {
			if (mono_llvm_is_nonnull (args [i])) {
				g_assert (i < LLVMGetNumArgOperands (lcall));
				mono_llvm_set_call_nonnull_arg (lcall, i);
			} else {
				gint32 *nullable_count = &g_array_index (call_site_union, gint32, i);
				*nullable_count = *nullable_count + 1;
			}
		}

		g_hash_table_insert (ctx->module->method_to_call_info, call_method, call_site_union);
	}
}

static void
mono_llvm_propagate_nonnull_final (GHashTable *all_specializable, MonoLLVMModule *module)
{
	// When we first traverse the mini IL, we mark the things that are
	// nonnull (the roots). Then, for all of the methods that can be specialized, we
	// see if their call sites have nonnull attributes.
	// If so, we mark the function's param. This param has uses to propagate
	// the attribute to. This propagation can trigger a need to mark more attributes
	// non-null, and so on and so forth.
	GSList *queue = NULL;

	GHashTableIter iter;
	LLVMValueRef lmethod;
	MonoMethod *method;
	g_hash_table_iter_init (&iter, all_specializable);
	while (g_hash_table_iter_next (&iter, (void**)&lmethod, (void**)&method)) {
		GArray *call_site_union = (GArray *) g_hash_table_lookup (module->method_to_call_info, method);

		// Basic sanity checking
		if (call_site_union)
			g_assert (call_site_union->len == LLVMCountParams (lmethod));

		// Add root to work queue
		for (int i = 0; call_site_union && i < call_site_union->len; i++) {
			if (g_array_index (call_site_union, gint32, i) == 0) {
				NonnullPropWorkItem *item = g_malloc (sizeof (NonnullPropWorkItem));
				item->lmethod = lmethod;
				item->argument = i;
				queue = g_slist_prepend (queue, item);
			}
		}
	}

	// This is essentially reference counting, and we are propagating
	// the refcount decrement here. We have less work to do than we may otherwise
	// because we are only working with a set of subgraphs of specializable functions.
	//
	// We rely on being able to see all of the references in the graph.
	// This is ensured by the function mono_aot_can_specialize. Everything in
	// all_specializable is a function that can be specialized, and is the resulting
	// node in the graph after all of the substitutions are done.
// // Anything disrupting the direct calls made with self-init will break this optimization. while (queue) { // Update the queue state. // Our only other per-iteration responsibility is now to free current NonnullPropWorkItem *current = (NonnullPropWorkItem *) queue->data; queue = queue->next; g_assert (current->argument < LLVMCountParams (current->lmethod)); // Does the actual leaf-node work here // Mark the function argument as nonnull for LLVM mono_llvm_set_func_nonnull_arg (current->lmethod, current->argument); // The rest of this is for propagating forward nullability changes // to calls that use the argument that is now nullable. // Get the actual LLVM value of the argument, so we can see which call instructions // used that argument LLVMValueRef caller_argument = LLVMGetParam (current->lmethod, current->argument); // Iterate over the calls using the newly-non-nullable argument GSList *calls = mono_llvm_calls_using (caller_argument); for (GSList *cursor = calls; cursor != NULL; cursor = cursor->next) { LLVMValueRef lcall = (LLVMValueRef) cursor->data; LLVMValueRef callee_lmethod = LLVMGetCalledValue (lcall); // If this wasn't a direct call for which mono_aot_can_specialize is true, // this lookup won't find a MonoMethod. MonoMethod *callee_method = (MonoMethod *) g_hash_table_lookup (all_specializable, callee_lmethod); if (!callee_method) continue; // Decrement number of nullable refs at that func's arg offset GArray *call_site_union = (GArray *) g_hash_table_lookup (module->method_to_call_info, callee_method); // It has module-local callers and is specializable, should have seen this call site // and inited this g_assert (call_site_union); // The function *definition* parameter arity should always be consistent int max_params = LLVMCountParams (callee_lmethod); if (call_site_union->len != max_params) { mono_llvm_dump_value (callee_lmethod); g_assert_not_reached (); } // Get the values that correspond to the parameters passed to the call // that used our argument LLVMValueRef *operands = mono_llvm_call_args (lcall); for (int call_argument = 0; call_argument < max_params; call_argument++) { // Every time we used the newly-non-nullable argument, decrement the nullable // refcount for that function. if (caller_argument == operands [call_argument]) { gint32 *nullable_count = &g_array_index (call_site_union, gint32, call_argument); g_assert (*nullable_count > 0); *nullable_count = *nullable_count - 1; // If we caused that callee's parameter to become newly nullable, add to work queue if (*nullable_count == 0) { NonnullPropWorkItem *item = g_malloc (sizeof (NonnullPropWorkItem)); item->lmethod = callee_lmethod; item->argument = call_argument; queue = g_slist_prepend (queue, item); } } } g_free (operands); // Update nullability refcount information for the callee now g_hash_table_insert (module->method_to_call_info, callee_method, call_site_union); } g_slist_free (calls); g_free (current); } } /* * Emit the aot module into the LLVM bitcode file FILENAME. */ void mono_llvm_emit_aot_module (const char *filename, const char *cu_name) { LLVMTypeRef inited_type; LLVMValueRef real_inited; MonoLLVMModule *module = &aot_module; emit_llvm_code_end (module); /* * Create the real init_var and replace all uses of the dummy variable with * the real one. 
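	 * (The dummy variable is needed because the required size, module->max_inited_idx + 1,
	 * is only known once all methods have been emitted.)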
*/ inited_type = LLVMArrayType (LLVMInt8Type (), module->max_inited_idx + 1); real_inited = LLVMAddGlobal (module->lmodule, inited_type, "mono_inited"); LLVMSetInitializer (real_inited, LLVMConstNull (inited_type)); LLVMSetLinkage (real_inited, LLVMInternalLinkage); mono_llvm_replace_uses_of (module->inited_var, real_inited); LLVMDeleteGlobal (module->inited_var); /* Replace the dummy info_ variables with the real ones */ for (int i = 0; i < module->cfgs->len; ++i) { MonoCompile *cfg = (MonoCompile *)g_ptr_array_index (module->cfgs, i); // FIXME: Eliminate unused vars // FIXME: Speed this up if (cfg->llvm_dummy_info_var) { if (cfg->llvm_info_var) { mono_llvm_replace_uses_of (cfg->llvm_dummy_info_var, cfg->llvm_info_var); LLVMDeleteGlobal (cfg->llvm_dummy_info_var); } else { // FIXME: How can this happen ? LLVMSetInitializer (cfg->llvm_dummy_info_var, mono_llvm_create_constant_data_array (NULL, 0)); } } } if (module->llvm_only) { emit_get_method (&aot_module); emit_get_unbox_tramp (&aot_module); } emit_init_aotconst (module); emit_llvm_used (&aot_module); emit_dbg_info (&aot_module, filename, cu_name); emit_aot_file_info (&aot_module); /* Replace PLT entries for directly callable methods with the methods themselves */ { GHashTableIter iter; MonoJumpInfo *ji; LLVMValueRef callee; GHashTable *specializable = g_hash_table_new (NULL, NULL); g_hash_table_iter_init (&iter, module->plt_entries_ji); while (g_hash_table_iter_next (&iter, (void**)&ji, (void**)&callee)) { if (mono_aot_is_direct_callable (ji)) { LLVMValueRef lmethod; lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, ji->data.method); /* The types might not match because the caller might pass an rgctx */ if (lmethod && LLVMTypeOf (callee) == LLVMTypeOf (lmethod)) { mono_llvm_replace_uses_of (callee, lmethod); if (mono_aot_can_specialize (ji->data.method)) g_hash_table_insert (specializable, lmethod, ji->data.method); mono_aot_mark_unused_llvm_plt_entry (ji); } } } mono_llvm_propagate_nonnull_final (specializable, module); g_hash_table_destroy (specializable); } #if 0 { char *verifier_err; if (LLVMVerifyModule (module->lmodule, LLVMReturnStatusAction, &verifier_err)) { printf ("%s\n", verifier_err); g_assert_not_reached (); } } #endif /* Note: You can still dump an invalid bitcode file by running `llvm-dis` * in a debugger, set a breakpoint on `LLVMVerifyModule` and fake its * result to 0 (indicating success). */ LLVMWriteBitcodeToFile (module->lmodule, filename); } static LLVMValueRef md_string (const char *s) { return LLVMMDString (s, strlen (s)); } /* Debugging support */ static void emit_dbg_info (MonoLLVMModule *module, const char *filename, const char *cu_name) { LLVMModuleRef lmodule = module->lmodule; LLVMValueRef args [16], ver; /* * This can only be enabled when LLVM code is emitted into a separate object * file, since the AOT compiler also emits dwarf info, * and the abbrev indexes will not be correct since llvm has added its own * abbrevs. 
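 *
 * What follows only records two LLVM module flags ("Dwarf Version" and
 * "Debug Info Version") as llvm.module.flags metadata; LLVM strips debug
 * info whose "Debug Info Version" does not match the version it expects,
 * so the flag has to be present for the DWARF to survive.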
*/ if (!module->emit_dwarf) return; mono_llvm_di_builder_finalize (module->di_builder); args [0] = LLVMConstInt (LLVMInt32Type (), 2, FALSE); args [1] = LLVMMDString ("Dwarf Version", strlen ("Dwarf Version")); args [2] = LLVMConstInt (LLVMInt32Type (), 2, FALSE); ver = LLVMMDNode (args, 3); LLVMAddNamedMetadataOperand (lmodule, "llvm.module.flags", ver); args [0] = LLVMConstInt (LLVMInt32Type (), 2, FALSE); args [1] = LLVMMDString ("Debug Info Version", strlen ("Debug Info Version")); args [2] = LLVMConstInt (LLVMInt64Type (), 3, FALSE); ver = LLVMMDNode (args, 3); LLVMAddNamedMetadataOperand (lmodule, "llvm.module.flags", ver); } static LLVMValueRef emit_dbg_subprogram (EmitContext *ctx, MonoCompile *cfg, LLVMValueRef method, const char *name) { MonoLLVMModule *module = ctx->module; MonoDebugMethodInfo *minfo = ctx->minfo; char *source_file, *dir, *filename; MonoSymSeqPoint *sym_seq_points; int n_seq_points; if (!minfo) return NULL; mono_debug_get_seq_points (minfo, &source_file, NULL, NULL, &sym_seq_points, &n_seq_points); if (!source_file) source_file = g_strdup ("<unknown>"); dir = g_path_get_dirname (source_file); filename = g_path_get_basename (source_file); g_free (source_file); return (LLVMValueRef)mono_llvm_di_create_function (module->di_builder, module->cu, method, cfg->method->name, name, dir, filename, n_seq_points ? sym_seq_points [0].line : 1); } static void emit_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder, const unsigned char *cil_code) { MonoCompile *cfg = ctx->cfg; if (ctx->minfo && cil_code && cil_code >= cfg->header->code && cil_code < cfg->header->code + cfg->header->code_size) { MonoDebugSourceLocation *loc; LLVMValueRef loc_md; loc = mono_debug_method_lookup_location (ctx->minfo, cil_code - cfg->header->code); if (loc) { loc_md = (LLVMValueRef)mono_llvm_di_create_location (ctx->module->di_builder, ctx->dbg_md, loc->row, loc->column); mono_llvm_di_set_location (builder, loc_md); mono_debug_free_source_location (loc); } } } static void emit_default_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder) { if (ctx->minfo) { LLVMValueRef loc_md; loc_md = (LLVMValueRef)mono_llvm_di_create_location (ctx->module->di_builder, ctx->dbg_md, 0, 0); mono_llvm_di_set_location (builder, loc_md); } } /* DESIGN: - Emit LLVM IR from the mono IR using the LLVM C API. - The original arch specific code remains, so we can fall back to it if we run into something we can't handle. */ /* A partial list of issues: - Handling of opcodes which can throw exceptions. In the mono JIT, these are implemented using code like this: method: <compare> throw_pos: b<cond> ex_label <rest of code> ex_label: push throw_pos - method call <exception trampoline> The problematic part is push throw_pos - method, which cannot be represented in the LLVM IR, since it does not support label values. -> this can be implemented in AOT mode using inline asm + labels, but cannot be implemented in JIT mode ? -> a possible but slower implementation would use the normal exception throwing code but it would need to control the placement of the throw code (it needs to be exactly after the compare+branch). -> perhaps add a PC offset intrinsics ? - efficient implementation of .ovf opcodes. These are currently implemented as: <ins which sets the condition codes> b<cond> ex_label Some overflow opcodes are now supported by LLVM SVN. - exception handling, unwinding. - SSA is disabled for methods with exception handlers - How to obtain unwind info for LLVM compiled methods ? 
-> this is now solved by converting the unwind info generated by LLVM into our format. - LLVM uses the c++ exception handling framework, while we use our home grown code, and couldn't use the c++ one: - it's not supported under VC++, other exotic platforms. - it might be impossible to support filter clauses with it. - trampolines. The trampolines need a predictable call sequence, since they need to disasm the calling code to obtain register numbers / offsets. LLVM currently generates this code in non-JIT mode: mov -0x98(%rax),%eax callq *%rax Here, the vtable pointer is lost. -> solution: use one vtable trampoline per class. - passing/receiving the IMT pointer/RGCTX. -> solution: pass them as normal arguments ? - argument passing. LLVM does not allow the specification of argument registers etc. This means that all calls are made according to the platform ABI. - passing/receiving vtypes. Vtypes passed/received in registers are handled by the front end by using a signature with scalar arguments, and loading the parts of the vtype into those arguments. Vtypes passed on the stack are handled using the 'byval' attribute. - ldaddr. Supported through alloca, we need to emit the load/store code. - types. The mono JIT uses pointer sized iregs/double fregs, while LLVM uses precisely typed registers, so we have to keep track of the precise LLVM type of each vreg. This is made easier because the IR is already in SSA form. An additional problem is that our IR is not consistent with types, i.e. i32/i64 types are frequently used incorrectly. */ /* AOT SUPPORT: Emit LLVM bytecode into a .bc file, compile it using llc into a .s file, then link it with the file containing the methods emitted by the JIT and the AOT data structures. */ /* FIXME: Normalize some aspects of the mono IR to allow easier translation, like: * - each bblock should end with a branch * - setting the return value, making cfg->ret non-volatile * - avoid some transformations in the JIT which make it harder for us to generate * code. * - use pointer types to help optimizations. */ #else /* DISABLE_JIT */ void mono_llvm_cleanup (void) { } void mono_llvm_free_mem_manager (MonoJitMemoryManager *mem_manager) { } void mono_llvm_init (gboolean enable_jit) { } #endif /* DISABLE_JIT */ #if !defined(DISABLE_JIT) && !defined(MONO_CROSS_COMPILE) /* LLVM JIT support */ /* * decode_llvm_eh_info: * * Decode the EH table emitted by llvm in jit mode, and store * the result into cfg. */ static void decode_llvm_eh_info (EmitContext *ctx, gpointer eh_frame) { MonoCompile *cfg = ctx->cfg; guint8 *cie, *fde; int fde_len; MonoLLVMFDEInfo info; MonoJitExceptionInfo *ei; guint8 *p = (guint8*)eh_frame; int version, fde_count, fde_offset; guint32 ei_len, i, nested_len; gpointer *type_info; gint32 *table; guint8 *unw_info; /* * Decode the one element EH table emitted by the MonoException class * in llvm.
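 *
 * Rough shape of that table, reconstructed from the parsing code below
 * (treat names and widths as illustrative; the code is authoritative):
 *   byte     version             ; must be 3
 *   byte     (unused)
 *   pad to a 4-byte boundary
 *   guint32  fde_count           ; at most 2 here
 *   gint32   table [fde_count*2] ; (method index, fde offset) pairs
 *   gint32   code_len, end_off   ; extra entry, fde_len = end_off - fde_offset
 *   ...      CIE data, then the FDE at eh_frame + fde_offset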
*/ /* Similar to decode_llvm_mono_eh_frame () in aot-runtime.c */ version = *p; g_assert (version == 3); p ++; p ++; p = (guint8 *)ALIGN_PTR_TO (p, 4); fde_count = *(guint32*)p; p += 4; table = (gint32*)p; g_assert (fde_count <= 2); /* The first entry is the real method */ g_assert (table [0] == 1); fde_offset = table [1]; table += fde_count * 2; /* Extra entry */ cfg->code_len = table [0]; fde_len = table [1] - fde_offset; table += 2; fde = (guint8*)eh_frame + fde_offset; cie = (guint8*)table; /* Compute lengths */ mono_unwind_decode_llvm_mono_fde (fde, fde_len, cie, cfg->native_code, &info, NULL, NULL, NULL); ei = (MonoJitExceptionInfo *)g_malloc0 (info.ex_info_len * sizeof (MonoJitExceptionInfo)); type_info = (gpointer *)g_malloc0 (info.ex_info_len * sizeof (gpointer)); unw_info = (guint8*)g_malloc0 (info.unw_info_len); mono_unwind_decode_llvm_mono_fde (fde, fde_len, cie, cfg->native_code, &info, ei, type_info, unw_info); cfg->encoded_unwind_ops = unw_info; cfg->encoded_unwind_ops_len = info.unw_info_len; if (cfg->verbose_level > 1) mono_print_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len); if (info.this_reg != -1) { cfg->llvm_this_reg = info.this_reg; cfg->llvm_this_offset = info.this_offset; } ei_len = info.ex_info_len; // Nested clauses are currently disabled nested_len = 0; cfg->llvm_ex_info = (MonoJitExceptionInfo*)mono_mempool_alloc0 (cfg->mempool, (ei_len + nested_len) * sizeof (MonoJitExceptionInfo)); cfg->llvm_ex_info_len = ei_len + nested_len; memcpy (cfg->llvm_ex_info, ei, ei_len * sizeof (MonoJitExceptionInfo)); /* Fill the rest of the information from the type info */ for (i = 0; i < ei_len; ++i) { gint32 clause_index = *(gint32*)type_info [i]; MonoExceptionClause *clause = &cfg->header->clauses [clause_index]; cfg->llvm_ex_info [i].flags = clause->flags; cfg->llvm_ex_info [i].data.catch_class = clause->data.catch_class; cfg->llvm_ex_info [i].clause_index = clause_index; } } static MonoLLVMModule* init_jit_module (void) { MonoJitMemoryManager *jit_mm; MonoLLVMModule *module; // FIXME: jit_mm = get_default_jit_mm (); if (jit_mm->llvm_module) return (MonoLLVMModule*)jit_mm->llvm_module; mono_loader_lock (); if (jit_mm->llvm_module) { mono_loader_unlock (); return (MonoLLVMModule*)jit_mm->llvm_module; } module = g_new0 (MonoLLVMModule, 1); module->context = LLVMGetGlobalContext (); module->mono_ee = (MonoEERef*)mono_llvm_create_ee (&module->ee); // This contains just the intrinsics module->lmodule = LLVMModuleCreateWithName ("jit-global-module"); add_intrinsics (module->lmodule); add_types (module); module->llvm_types = g_hash_table_new (NULL, NULL); mono_memory_barrier (); jit_mm->llvm_module = module; mono_loader_unlock (); return (MonoLLVMModule*)jit_mm->llvm_module; } static void llvm_jit_finalize_method (EmitContext *ctx) { MonoCompile *cfg = ctx->cfg; int nvars = g_hash_table_size (ctx->jit_callees); LLVMValueRef *callee_vars = g_new0 (LLVMValueRef, nvars); gpointer *callee_addrs = g_new0 (gpointer, nvars); GHashTableIter iter; LLVMValueRef var; MonoMethod *callee; gpointer eh_frame; int i; /* * Compute the addresses of the LLVM globals pointing to the * methods called by the current method. Pass it to the trampoline * code so it can update them after their corresponding method was * compiled. 
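 *
 * In other words, each direct call emitted by the JIT loads its target from
 * one of these globals; when a callee is eventually compiled, the trampoline
 * machinery stores the new native code address into the matching slot.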
*/ g_hash_table_iter_init (&iter, ctx->jit_callees); i = 0; while (g_hash_table_iter_next (&iter, NULL, (void**)&var)) callee_vars [i ++] = var; mono_llvm_optimize_method (ctx->lmethod); if (cfg->verbose_level > 1) { g_print ("\n*** Optimized LLVM IR for %s ***\n", mono_method_full_name (cfg->method, TRUE)); if (cfg->compile_aot) { mono_llvm_dump_value (ctx->lmethod); } else { mono_llvm_dump_module (ctx->lmodule); } g_print ("***\n\n"); } mono_codeman_enable_write (); cfg->native_code = (guint8*)mono_llvm_compile_method (ctx->module->mono_ee, cfg, ctx->lmethod, nvars, callee_vars, callee_addrs, &eh_frame); mono_llvm_remove_gc_safepoint_poll (ctx->lmodule); mono_codeman_disable_write (); decode_llvm_eh_info (ctx, eh_frame); // FIXME: MonoJitMemoryManager *jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); if (!jit_mm->llvm_jit_callees) jit_mm->llvm_jit_callees = g_hash_table_new (NULL, NULL); g_hash_table_iter_init (&iter, ctx->jit_callees); i = 0; while (g_hash_table_iter_next (&iter, (void**)&callee, (void**)&var)) { GSList *addrs = (GSList*)g_hash_table_lookup (jit_mm->llvm_jit_callees, callee); addrs = g_slist_prepend (addrs, callee_addrs [i]); g_hash_table_insert (jit_mm->llvm_jit_callees, callee, addrs); i ++; } jit_mm_unlock (jit_mm); } #else static MonoLLVMModule* init_jit_module (void) { g_assert_not_reached (); } static void llvm_jit_finalize_method (EmitContext *ctx) { g_assert_not_reached (); } #endif static MonoCPUFeatures cpu_features; MonoCPUFeatures mono_llvm_get_cpu_features (void) { static const CpuFeatureAliasFlag flags_map [] = { #if defined(TARGET_X86) || defined(TARGET_AMD64) { "sse", MONO_CPU_X86_SSE }, { "sse2", MONO_CPU_X86_SSE2 }, { "pclmul", MONO_CPU_X86_PCLMUL }, { "aes", MONO_CPU_X86_AES }, { "sse3", MONO_CPU_X86_SSE3 }, { "ssse3", MONO_CPU_X86_SSSE3 }, { "sse4.1", MONO_CPU_X86_SSE41 }, { "sse4.2", MONO_CPU_X86_SSE42 }, { "popcnt", MONO_CPU_X86_POPCNT }, { "avx", MONO_CPU_X86_AVX }, { "avx2", MONO_CPU_X86_AVX2 }, { "fma", MONO_CPU_X86_FMA }, { "lzcnt", MONO_CPU_X86_LZCNT }, { "bmi", MONO_CPU_X86_BMI1 }, { "bmi2", MONO_CPU_X86_BMI2 }, #endif #if defined(TARGET_ARM64) { "crc", MONO_CPU_ARM64_CRC }, { "crypto", MONO_CPU_ARM64_CRYPTO }, { "neon", MONO_CPU_ARM64_NEON }, { "rdm", MONO_CPU_ARM64_RDM }, { "dotprod", MONO_CPU_ARM64_DP }, #endif #if defined(TARGET_WASM) { "simd", MONO_CPU_WASM_SIMD }, #endif // flags_map cannot be zero length in MSVC, so add useless dummy entry for arm32 #if defined(TARGET_ARM) && defined(HOST_WIN32) { "inited", MONO_CPU_INITED}, #endif }; if (!cpu_features) cpu_features = MONO_CPU_INITED | (MonoCPUFeatures)mono_llvm_check_cpu_features (flags_map, G_N_ELEMENTS (flags_map)); return cpu_features; }
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/mini-mips.c
/** * \file * MIPS backend for the Mono code generator * * Authors: * Mark Mason ([email protected]) * * Based on mini-ppc.c by * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * * (C) 2006 Broadcom * (C) 2003 Ximian, Inc. */ #include "mini.h" #include <string.h> #include <asm/cachectl.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-hwcap.h> #include <mono/utils/unlocked.h> #include <mono/arch/mips/mips-codegen.h> #include "mini-mips.h" #include "cpu-mips.h" #include "ir-emit.h" #include "aot-runtime.h" #include "mini-runtime.h" #include "mono/utils/mono-tls-inline.h" #define SAVE_FP_REGS 0 #define ALWAYS_SAVE_RA 1 /* call-handler & switch currently clobber ra */ #define PROMOTE_R4_TO_R8 1 /* promote single values in registers to doubles */ #define USE_MUL 0 /* use mul instead of mult/mflo for multiply; remember to update cpu-mips.md if you change this */ /* Emit a call sequence to 'v', using 'D' as a scratch register if necessary */ #define mips_call(c,D,v) do { \ guint32 _target = (guint32)(v); \ if (1 || ((v) == NULL) || ((_target & 0xfc000000) != (((guint32)(c)) & 0xfc000000))) { \ mips_load_const (c, D, _target); \ mips_jalr (c, D, mips_ra); \ } \ else { \ mips_jumpl (c, _target >> 2); \ } \ mips_nop (c); \ } while (0) enum { TLS_MODE_DETECT, TLS_MODE_FAILED, TLS_MODE_LTHREADS, TLS_MODE_NPTL }; /* This mutex protects architecture specific caches */ #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex) #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex) static mono_mutex_t mini_arch_mutex; /* Whether the host is little-endian */ static int little_endian; /* Index of ls word/register */ static int ls_word_idx; /* Index of ms word/register */ static int ms_word_idx; /* Same for offsets */ static int ls_word_offset; static int ms_word_offset; /* * The code generated for sequence points reads from this location, which is * made read-only when single stepping is enabled.
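 *
 * Illustrative sketch of the mechanism (not the exact emitted sequence):
 * each sequence point performs a load such as
 *   lw $t0, 0($at)   ; $at holds the address of ss_trigger_page
 * which is harmless while the page is accessible; once the runtime changes
 * the page protection, the load faults and the signal handler takes over
 * to implement single stepping.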
*/ static gpointer ss_trigger_page; /* Enabled breakpoints read from this trigger page */ static gpointer bp_trigger_page; #undef DEBUG #define DEBUG(a) if (cfg->verbose_level > 1) a #undef DEBUG #define DEBUG(a) a #undef DEBUG #define DEBUG(a) #define EMIT_SYSTEM_EXCEPTION_NAME(exc_name) \ do { \ code = mips_emit_exc_by_name (code, exc_name); \ cfg->bb_exit->max_offset += 16; \ } while (0) #define MONO_EMIT_NEW_LOAD_R8(cfg,dr,addr) do { \ MonoInst *inst; \ MONO_INST_NEW ((cfg), (inst), OP_R8CONST); \ inst->type = STACK_R8; \ inst->dreg = (dr); \ inst->inst_p0 = (void*)(addr); \ mono_bblock_add_inst (cfg->cbb, inst); \ } while (0) #define ins_is_compare(ins) ((ins) && (((ins)->opcode == OP_COMPARE) \ || ((ins)->opcode == OP_ICOMPARE) \ || ((ins)->opcode == OP_LCOMPARE))) #define ins_is_compare_imm(ins) ((ins) && (((ins)->opcode == OP_COMPARE_IMM) \ || ((ins)->opcode == OP_ICOMPARE_IMM) \ || ((ins)->opcode == OP_LCOMPARE_IMM))) #define INS_REWRITE(ins, op, _s1, _s2) do { \ int s1 = _s1; \ int s2 = _s2; \ ins->opcode = (op); \ ins->sreg1 = (s1); \ ins->sreg2 = (s2); \ } while (0); #define INS_REWRITE_IMM(ins, op, _s1, _imm) do { \ int s1 = _s1; \ ins->opcode = (op); \ ins->sreg1 = (s1); \ ins->inst_imm = (_imm); \ } while (0); typedef struct InstList InstList; struct InstList { InstList *prev; InstList *next; MonoInst *data; }; typedef enum { ArgInIReg, ArgOnStack, ArgInFReg, ArgStructByVal, ArgStructByAddr } ArgStorage; typedef struct { gint32 offset; guint16 vtsize; /* in param area */ guint8 reg; ArgStorage storage; guint8 size : 4; /* 1, 2, 4, 8, or regs used by ArgStructByVal */ } ArgInfo; struct CallInfo { int nargs; int gr; int fr; gboolean gr_passed; gboolean on_stack; gboolean vtype_retaddr; int stack_size; guint32 stack_usage; guint32 struct_ret; ArgInfo ret; ArgInfo sig_cookie; ArgInfo args [1]; }; void patch_lui_addiu(guint32 *ip, guint32 val); static guint8 *mono_arch_emit_epilog_sub (MonoCompile *cfg); guint8 *mips_emit_cond_branch (MonoCompile *cfg, guint8 *code, int op, MonoInst *ins); void mips_adjust_stackframe(MonoCompile *cfg); void mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg); MonoInst *mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args); /* Not defined in asm/cachectl.h */ int cacheflush(char *addr, int nbytes, int cache); void mono_arch_flush_icache (guint8 *code, gint size) { /* Linux/MIPS specific */ cacheflush ((char*)code, size, BCACHE); } void mono_arch_flush_register_windows (void) { } gboolean mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm) { return TRUE; } static guint8 * mips_emit_exc_by_name(guint8 *code, const char *name) { gpointer addr; MonoClass *exc_class; exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", name); mips_load_const (code, mips_a0, m_class_get_type_token (exc_class)); addr = mono_get_throw_corlib_exception (); mips_call (code, mips_t9, addr); return code; } guint8 * mips_emit_load_const (guint8 *code, int dreg, target_mgreg_t v) { if (mips_is_imm16 (v)) mips_addiu (code, dreg, mips_zero, ((guint32)v) & 0xffff); else { #if SIZEOF_REGISTER == 8 if (v != (long) v) { /* v is not a sign-extended 32-bit value */ mips_lui (code, dreg, mips_zero, (guint32)((v >> (32+16)) & 0xffff)); mips_ori (code, dreg, dreg, (guint32)((v >> (32)) & 0xffff)); mips_dsll (code, dreg, dreg, 16); mips_ori (code, dreg, dreg, (guint32)((v >> (16)) & 0xffff)); mips_dsll (code, dreg, dreg, 16); mips_ori (code, 
dreg, dreg, (guint32)(v & 0xffff)); return code; } #endif if (((guint32)v) & (1 << 15)) { mips_lui (code, dreg, mips_zero, (((guint32)v)>>16)+1); } else { mips_lui (code, dreg, mips_zero, (((guint32)v)>>16)); } if (((guint32)v) & 0xffff) mips_addiu (code, dreg, dreg, ((guint32)v) & 0xffff); } return code; } guint8 * mips_emit_cond_branch (MonoCompile *cfg, guint8 *code, int op, MonoInst *ins) { g_assert (ins); if (cfg->arch.long_branch) { int br_offset = 5; /* Invert test and emit branch around jump */ switch (op) { case OP_MIPS_BEQ: mips_bne (code, ins->sreg1, ins->sreg2, br_offset); mips_nop (code); break; case OP_MIPS_BNE: mips_beq (code, ins->sreg1, ins->sreg2, br_offset); mips_nop (code); break; case OP_MIPS_BGEZ: mips_bltz (code, ins->sreg1, br_offset); mips_nop (code); break; case OP_MIPS_BGTZ: mips_blez (code, ins->sreg1, br_offset); mips_nop (code); break; case OP_MIPS_BLEZ: mips_bgtz (code, ins->sreg1, br_offset); mips_nop (code); break; case OP_MIPS_BLTZ: mips_bgez (code, ins->sreg1, br_offset); mips_nop (code); break; default: g_assert_not_reached (); } mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); mips_lui (code, mips_at, mips_zero, 0); mips_addiu (code, mips_at, mips_at, 0); mips_jr (code, mips_at); mips_nop (code); } else { mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); switch (op) { case OP_MIPS_BEQ: mips_beq (code, ins->sreg1, ins->sreg2, 0); mips_nop (code); break; case OP_MIPS_BNE: mips_bne (code, ins->sreg1, ins->sreg2, 0); mips_nop (code); break; case OP_MIPS_BGEZ: mips_bgez (code, ins->sreg1, 0); mips_nop (code); break; case OP_MIPS_BGTZ: mips_bgtz (code, ins->sreg1, 0); mips_nop (code); break; case OP_MIPS_BLEZ: mips_blez (code, ins->sreg1, 0); mips_nop (code); break; case OP_MIPS_BLTZ: mips_bltz (code, ins->sreg1, 0); mips_nop (code); break; default: g_assert_not_reached (); } } return (code); } /* XXX - big-endian dependent? 
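 *
 * Worked example of the immediate splitting handled below (value
 * illustrative): to have a lui/addiu pair produce val = 0x1234abcd, the
 * addiu immediate is the low half 0xabcd; since bit 15 is set it
 * sign-extends to -0x5433, so the lui immediate must be biased upward to
 * 0x1234 + 1 = 0x1235:
 *   lui   at, 0x1235        ; at = 0x12350000
 *   addiu at, at, 0xabcd    ; at = 0x12350000 - 0x5433 = 0x1234abcd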
*/ void patch_lui_addiu(guint32 *ip, guint32 val) { guint16 *__lui_addiu = (guint16*)(void *)(ip); #if 0 printf ("patch_lui_addiu ip=0x%08x (0x%08x, 0x%08x) to point to 0x%08x\n", ip, ((guint32 *)ip)[0], ((guint32 *)ip)[1], val); fflush (stdout); #endif if (((guint32)(val)) & (1 << 15)) __lui_addiu [MINI_LS_WORD_IDX] = ((((guint32)(val)) >> 16) & 0xffff) + 1; else __lui_addiu [MINI_LS_WORD_IDX] = (((guint32)(val)) >> 16) & 0xffff; __lui_addiu [MINI_LS_WORD_IDX + 2] = ((guint32)(val)) & 0xffff; mono_arch_flush_icache ((guint8 *)ip, 8); } guint32 trap_target; void mips_patch (guint32 *code, guint32 target) { guint32 ins = *code; guint32 op = ins >> 26; guint32 diff, offset; g_assert (trap_target != target); //printf ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target); switch (op) { case 0x00: /* jr ra */ if (ins == 0x3e00008) break; g_assert_not_reached (); break; case 0x02: /* j */ case 0x03: /* jal */ g_assert (!(target & 0x03)); g_assert ((target & 0xfc000000) == (((guint32)code) & 0xfc000000)); ins = (ins & 0xfc000000) | (((target) >> 2) & 0x03ffffff); *code = ins; mono_arch_flush_icache ((guint8 *)code, 4); break; case 0x01: /* BLTZ */ case 0x04: /* BEQ */ case 0x05: /* BNE */ case 0x06: /* BLEZ */ case 0x07: /* BGTZ */ case 0x11: /* bc1t */ diff = target - (guint32)(code + 1); g_assert (((diff & 0x0003ffff) == diff) || ((diff | 0xfffc0000) == diff)); g_assert (!(diff & 0x03)); offset = ((gint32)diff) >> 2; if (((int)offset) != ((int)(short)offset)) g_assert (((int)offset) == ((int)(short)offset)); ins = (ins & 0xffff0000) | (offset & 0x0000ffff); *code = ins; mono_arch_flush_icache ((guint8 *)code, 4); break; case 0x0f: /* LUI / ADDIU pair */ g_assert ((code[1] >> 26) == 0x9); patch_lui_addiu (code, target); mono_arch_flush_icache ((guint8 *)code, 8); break; default: printf ("unknown op 0x%02x (0x%08x) @ %p\n", op, ins, code); g_assert_not_reached (); } } static void mono_arch_compute_omit_fp (MonoCompile *cfg); const char* mono_arch_regname (int reg) { #if _MIPS_SIM == _ABIO32 static const char * rnames[] = { "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra" }; #elif _MIPS_SIM == _ABIN32 static const char * rnames[] = { "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra" }; #endif if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } const char* mono_arch_fregname (int reg) { static const char * rnames[] = { "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } /* this function overwrites at */ static guint8* emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset) { /* XXX write a loop, not an unrolled loop */ while (size > 0) { mips_lw (code, mips_at, sreg, soffset); mips_sw (code, mips_at, dreg, doffset); size -= 4; soffset += 4; doffset += 4; } return code; } /* * mono_arch_get_argument_info: * @csig: a method signature * @param_count: the number of parameters to consider * @arg_info: an array to store the result infos * * Gathers information on parameters such as size, alignment and * padding. 
arg_info should be large enough to hold param_count + 1 entries. * * Returns the size of the activation frame. */ int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) { int k, frame_size = 0; guint32 size, align, pad; int offset = 0; if (MONO_TYPE_ISSTRUCT (csig->ret)) { frame_size += sizeof (target_mgreg_t); offset += 4; } arg_info [0].offset = offset; if (csig->hasthis) { frame_size += sizeof (target_mgreg_t); offset += 4; } arg_info [0].size = frame_size; for (k = 0; k < param_count; k++) { size = mini_type_stack_size_full (csig->params [k], &align, csig->pinvoke && !csig->marshalling_disabled); /* ignore alignment for now */ align = 1; frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; frame_size += size; arg_info [k + 1].pad = 0; arg_info [k + 1].size = size; offset += pad; arg_info [k + 1].offset = offset; offset += size; } align = MONO_ARCH_FRAME_ALIGNMENT; frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; return frame_size; } /* The delegate object plus 3 params */ #define MAX_ARCH_DELEGATE_PARAMS (4 - 1) static guint8* get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, int param_count) { guint8 *code, *start; if (has_target) { start = code = mono_global_codeman_reserve (16); /* Replace the this argument with the target */ mips_lw (code, mips_temp, mips_a0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); mips_lw (code, mips_a0, mips_a0, MONO_STRUCT_OFFSET (MonoDelegate, target)); mips_jr (code, mips_temp); mips_nop (code); g_assert ((code - start) <= 16); mono_arch_flush_icache (start, 16); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); } else { int size, i; size = 16 + param_count * 4; start = code = mono_global_codeman_reserve (size); mips_lw (code, mips_temp, mips_a0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); /* slide down the arguments */ for (i = 0; i < param_count; ++i) { mips_move (code, mips_a0 + i, mips_a0 + i + 1); } mips_jr (code, mips_temp); mips_nop (code); g_assert ((code - start) <= size); mono_arch_flush_icache (start, size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); } if (has_target) { *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, NULL); } else { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count); *info = mono_tramp_info_create (name, start, code - start, NULL, NULL); g_free (name); } return start; } /* * mono_arch_get_delegate_invoke_impls: * * Return a list of MonoAotTrampInfo structures for the delegate invoke impl * trampolines.
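 *
 * For reference, the has_target variant built above by
 * get_delegate_invoke_impl () is just four instructions:
 *   lw  temp, MonoDelegate.method_ptr(a0)
 *   lw  a0,   MonoDelegate.target(a0)   ; replace 'this' with the target
 *   jr  temp
 *   nop                                 ; branch delay slot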
*/ GSList* mono_arch_get_delegate_invoke_impls (void) { GSList *res = NULL; MonoTrampInfo *info; int i; get_delegate_invoke_impl (&info, TRUE, 0); res = g_slist_prepend (res, info); for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) { get_delegate_invoke_impl (&info, FALSE, i); res = g_slist_prepend (res, info); } return res; } gpointer mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target) { guint8 *code, *start; /* FIXME: Support more cases */ if (MONO_TYPE_ISSTRUCT (sig->ret)) return NULL; if (has_target) { static guint8* cached = NULL; mono_mini_arch_lock (); if (cached) { mono_mini_arch_unlock (); return cached; } if (mono_ee_features.use_aot_trampolines) { start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target"); } else { MonoTrampInfo *info; start = get_delegate_invoke_impl (&info, TRUE, 0); mono_tramp_info_register (info, NULL); } cached = start; mono_mini_arch_unlock (); return cached; } else { static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL}; int i; if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS) return NULL; for (i = 0; i < sig->param_count; ++i) if (!mono_is_regsize_var (sig->params [i])) return NULL; mono_mini_arch_lock (); code = cache [sig->param_count]; if (code) { mono_mini_arch_unlock (); return code; } if (mono_ee_features.use_aot_trampolines) { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count); start = mono_aot_get_trampoline (name); g_free (name); } else { MonoTrampInfo *info; start = get_delegate_invoke_impl (&info, FALSE, sig->param_count); mono_tramp_info_register (info, NULL); } cache [sig->param_count] = start; mono_mini_arch_unlock (); return start; } return NULL; } gpointer mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg) { return NULL; } gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code) { g_assert(regs); return (gpointer)regs [mips_a0]; } /* * Initialize the cpu to execute managed code. */ void mono_arch_cpu_init (void) { #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN little_endian = 1; ls_word_idx = 0; ms_word_idx = 1; #else ls_word_idx = 1; ms_word_idx = 0; #endif ls_word_offset = ls_word_idx * 4; ms_word_offset = ms_word_idx * 4; } /* * Initialize architecture specific code. */ void mono_arch_init (void) { mono_os_mutex_init_recursive (&mini_arch_mutex); ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER); bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER); mono_mprotect (bp_trigger_page, mono_pagesize (), 0); } /* * Cleanup architecture specific code. */ void mono_arch_cleanup (void) { mono_os_mutex_destroy (&mini_arch_mutex); } gboolean mono_arch_have_fast_tls (void) { return FALSE; } /* * This function returns the optimizations supported on this cpu. 
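 *
 * (No MIPS-specific optimizations are implemented yet, so this simply
 * clears *exclude_mask and returns an empty mask.)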
*/ guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask) { guint32 opts = 0; /* no mips-specific optimizations yet */ *exclude_mask = 0; return opts; } GList * mono_arch_get_allocatable_int_vars (MonoCompile *cfg) { GList *vars = NULL; int i; for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); /* unused vars */ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos) continue; if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG)) continue; /* we can only allocate 32 bit values */ if (mono_is_regsize_var (ins->inst_vtype)) { g_assert (MONO_VARINFO (cfg, i)->reg == -1); g_assert (i == vmv->idx); vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE); } } return vars; } GList * mono_arch_get_global_int_regs (MonoCompile *cfg) { GList *regs = NULL; regs = g_list_prepend (regs, (gpointer)mips_s0); regs = g_list_prepend (regs, (gpointer)mips_s1); regs = g_list_prepend (regs, (gpointer)mips_s2); regs = g_list_prepend (regs, (gpointer)mips_s3); regs = g_list_prepend (regs, (gpointer)mips_s4); //regs = g_list_prepend (regs, (gpointer)mips_s5); regs = g_list_prepend (regs, (gpointer)mips_s6); regs = g_list_prepend (regs, (gpointer)mips_s7); return regs; } /* * mono_arch_regalloc_cost: * * Return the cost, in number of memory references, of the action of * allocating the variable VMV into a register during global register * allocation. */ guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv) { /* FIXME: */ return 2; } static void args_onto_stack (CallInfo *info) { g_assert (!info->on_stack); g_assert (info->stack_size <= MIPS_STACK_PARAM_OFFSET); info->on_stack = TRUE; info->stack_size = MIPS_STACK_PARAM_OFFSET; } #if _MIPS_SIM == _ABIO32 /* * O32 calling convention version */ static void add_int32_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack && info->gr > MIPS_LAST_ARG_REG) args_onto_stack (info); /* Now, place the argument */ if (info->on_stack) { ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; } else { ainfo->storage = ArgInIReg; ainfo->reg = info->gr; info->gr += 1; info->gr_passed = TRUE; } info->stack_size += 4; } static void add_int64_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack && info->gr+1 > MIPS_LAST_ARG_REG) args_onto_stack (info); /* Now, place the argument */ if (info->on_stack) { g_assert (info->stack_size % 4 == 0); info->stack_size += (info->stack_size % 8); ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; } else { // info->gr must be a0 or a2 info->gr += (info->gr - MIPS_FIRST_ARG_REG) % 2; g_assert(info->gr <= MIPS_LAST_ARG_REG); ainfo->storage = ArgInIReg; ainfo->reg = info->gr; info->gr += 2; info->gr_passed = TRUE; } info->stack_size += 8; } static void add_float32_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack && info->gr > MIPS_LAST_ARG_REG) args_onto_stack (info); /* Now, place the argument */ if (info->on_stack) { ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; } else { /* Only use FP regs for args if no int args passed yet */ if (!info->gr_passed && info->fr <= MIPS_LAST_FPARG_REG) { ainfo->storage = ArgInFReg; ainfo->reg = info->fr; /* Even though it's a 
single-precision float, it takes up two FP regs */ info->fr += 2; /* FP and GP slots do not overlap */ info->gr += 1; } else { /* Passing single-precision float arg in a GP register * such as: func (0, 1.0, 2, 3); * In this case, only one 'gr' register is consumed. */ ainfo->storage = ArgInIReg; ainfo->reg = info->gr; info->gr += 1; info->gr_passed = TRUE; } } info->stack_size += 4; } static void add_float64_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack && info->gr+1 > MIPS_LAST_ARG_REG) args_onto_stack (info); /* Now, place the argument */ if (info->on_stack) { g_assert(info->stack_size % 4 == 0); info->stack_size += (info->stack_size % 8); ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; } else { /* Only use FP regs for args if no int args passed yet */ if (!info->gr_passed && info->fr <= MIPS_LAST_FPARG_REG) { ainfo->storage = ArgInFReg; ainfo->reg = info->fr; info->fr += 2; /* FP and GP slots do not overlap */ info->gr += 2; } else { // info->gr must be a0 or a2 info->gr += (info->gr - MIPS_FIRST_ARG_REG) % 2; g_assert(info->gr <= MIPS_LAST_ARG_REG); ainfo->storage = ArgInIReg; ainfo->reg = info->gr; info->gr += 2; info->gr_passed = TRUE; } } info->stack_size += 8; } #elif _MIPS_SIM == _ABIN32 /* * N32 calling convention version */ static void add_int32_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack && info->gr > MIPS_LAST_ARG_REG) args_onto_stack (info); /* Now, place the argument */ if (info->on_stack) { ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; info->stack_size += SIZEOF_REGISTER; } else { ainfo->storage = ArgInIReg; ainfo->reg = info->gr; info->gr += 1; info->gr_passed = TRUE; } } static void add_int64_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack && info->gr > MIPS_LAST_ARG_REG) args_onto_stack (info); /* Now, place the argument */ if (info->on_stack) { g_assert (info->stack_size % 4 == 0); info->stack_size += (info->stack_size % 8); ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; info->stack_size += SIZEOF_REGISTER; } else { g_assert (info->gr <= MIPS_LAST_ARG_REG); ainfo->storage = ArgInIReg; ainfo->reg = info->gr; info->gr += 1; info->gr_passed = TRUE; } } static void add_float32_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack) { if (info->gr > MIPS_LAST_ARG_REG) args_onto_stack (info); else if (info->fr > MIPS_LAST_FPARG_REG) args_onto_stack (info); } /* Now, place the argument */ if (info->on_stack) { ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; info->stack_size += FREG_SIZE; } else { ainfo->storage = ArgInFReg; ainfo->reg = info->fr; info->fr += 1; /* FP and GP slots do not overlap */ info->gr += 1; } } static void add_float64_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack) { if (info->gr > MIPS_LAST_ARG_REG) args_onto_stack (info); else if (info->fr > MIPS_LAST_FPARG_REG) args_onto_stack (info); } /* Now, place the argument */ if (info->on_stack) { g_assert(info->stack_size % 4 == 0); info->stack_size += (info->stack_size % 8); ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; 
info->stack_size += FREG_SIZE; } else { ainfo->storage = ArgInFReg; ainfo->reg = info->fr; info->fr += 1; /* FP and GP slots do not overlap */ info->gr += 1; } } #endif static CallInfo* get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { guint i; int n = sig->hasthis + sig->param_count; int pstart; MonoType* simpletype; CallInfo *cinfo; gboolean is_pinvoke = sig->pinvoke; if (mp) cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n)); else cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n)); cinfo->fr = MIPS_FIRST_FPARG_REG; cinfo->gr = MIPS_FIRST_ARG_REG; cinfo->stack_size = 0; DEBUG(printf("calculate_sizes\n")); cinfo->vtype_retaddr = MONO_TYPE_ISSTRUCT (sig->ret) ? TRUE : FALSE; pstart = 0; n = 0; #if 0 /* handle returning a struct */ if (MONO_TYPE_ISSTRUCT (sig->ret)) { cinfo->struct_ret = cinfo->gr; add_int32_arg (cinfo, &cinfo->ret); } if (sig->hasthis) { add_int32_arg (cinfo, cinfo->args + n); n++; } #else /* * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after * the first argument, allowing 'this' to be always passed in the first arg reg. * Also do this if the first argument is a reference type, since virtual calls * are sometimes made using calli without sig->hasthis set, like in the delegate * invoke wrappers. */ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) { if (sig->hasthis) { add_int32_arg (cinfo, cinfo->args + n); n ++; } else { add_int32_arg (cinfo, cinfo->args + sig->hasthis); pstart = 1; n ++; } add_int32_arg (cinfo, &cinfo->ret); cinfo->struct_ret = cinfo->ret.reg; } else { /* this */ if (sig->hasthis) { add_int32_arg (cinfo, cinfo->args + n); n ++; } if (cinfo->vtype_retaddr) { add_int32_arg (cinfo, &cinfo->ret); cinfo->struct_ret = cinfo->ret.reg; } } #endif DEBUG(printf("params: %d\n", sig->param_count)); for (i = pstart; i < sig->param_count; ++i) { if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ args_onto_stack (cinfo); /* Emit the signature cookie just before the implicit arguments */ add_int32_arg (cinfo, &cinfo->sig_cookie); } DEBUG(printf("param %d: ", i)); simpletype = mini_get_underlying_type (sig->params [i]); switch (simpletype->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: DEBUG(printf("1 byte\n")); cinfo->args [n].size = 1; add_int32_arg (cinfo, &cinfo->args[n]); n++; break; case MONO_TYPE_I2: case MONO_TYPE_U2: DEBUG(printf("2 bytes\n")); cinfo->args [n].size = 2; add_int32_arg (cinfo, &cinfo->args[n]); n++; break; case MONO_TYPE_I4: case MONO_TYPE_U4: DEBUG(printf("4 bytes\n")); cinfo->args [n].size = 4; add_int32_arg (cinfo, &cinfo->args[n]); n++; break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: cinfo->args [n].size = sizeof (target_mgreg_t); add_int32_arg (cinfo, &cinfo->args[n]); n++; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (simpletype)) { cinfo->args [n].size = sizeof (target_mgreg_t); add_int32_arg (cinfo, &cinfo->args[n]); n++; break; } /* Fall through */ case MONO_TYPE_TYPEDBYREF: case MONO_TYPE_VALUETYPE: { int j; int nwords = 0; int has_offset = FALSE; ArgInfo dummy_arg; gint size, alignment; MonoClass *klass; if (simpletype->type == MONO_TYPE_TYPEDBYREF) { size = MONO_ABI_SIZEOF (MonoTypedRef); alignment = sizeof (target_mgreg_t); } else { klass = 
mono_class_from_mono_type_internal (sig->params [i]); if (sig->pinvoke && !sig->marshalling_disabled) size = mono_class_native_size (klass, NULL); else size = mono_class_value_size (klass, NULL); alignment = mono_class_min_align (klass); } #if MIPS_PASS_STRUCTS_BY_VALUE /* Need to do alignment if struct contains long or double */ if (alignment > 4) { /* Drop onto stack *before* looking at stack_size, if required. */ if (!cinfo->on_stack && cinfo->gr > MIPS_LAST_ARG_REG) args_onto_stack (cinfo); if (cinfo->stack_size & (alignment - 1)) { add_int32_arg (cinfo, &dummy_arg); } g_assert (!(cinfo->stack_size & (alignment - 1))); } #if 0 g_printf ("valuetype struct size=%d offset=%d align=%d\n", mono_class_native_size (sig->params [i]->data.klass, NULL), cinfo->stack_size, alignment); #endif nwords = (size + sizeof (target_mgreg_t) -1 ) / sizeof (target_mgreg_t); g_assert (cinfo->args [n].size == 0); g_assert (cinfo->args [n].vtsize == 0); for (j = 0; j < nwords; ++j) { if (j == 0) { add_int32_arg (cinfo, &cinfo->args [n]); if (cinfo->on_stack) has_offset = TRUE; } else { add_int32_arg (cinfo, &dummy_arg); if (!has_offset && cinfo->on_stack) { cinfo->args [n].offset = dummy_arg.offset; has_offset = TRUE; } } if (cinfo->on_stack) cinfo->args [n].vtsize += 1; else cinfo->args [n].size += 1; } //g_printf ("\tstack_size=%d vtsize=%d\n", cinfo->args [n].size, cinfo->args[n].vtsize); cinfo->args [n].storage = ArgStructByVal; #else add_int32_arg (cinfo, &cinfo->args[n]); cinfo->args [n].storage = ArgStructByAddr; #endif n++; break; } case MONO_TYPE_U8: case MONO_TYPE_I8: DEBUG(printf("8 bytes\n")); cinfo->args [n].size = 8; add_int64_arg (cinfo, &cinfo->args[n]); n++; break; case MONO_TYPE_R4: DEBUG(printf("R4\n")); cinfo->args [n].size = 4; add_float32_arg (cinfo, &cinfo->args[n]); n++; break; case MONO_TYPE_R8: DEBUG(printf("R8\n")); cinfo->args [n].size = 8; add_float64_arg (cinfo, &cinfo->args[n]); n++; break; default: g_error ("Can't trampoline 0x%x", sig->params [i]->type); } } /* Handle the case where there are no implicit arguments */ if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ args_onto_stack (cinfo); /* Emit the signature cookie just before the implicit arguments */ add_int32_arg (cinfo, &cinfo->sig_cookie); } { simpletype = mini_get_underlying_type (sig->ret); switch (simpletype->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: cinfo->ret.reg = mips_v0; break; case MONO_TYPE_U8: case MONO_TYPE_I8: cinfo->ret.reg = mips_v0; break; case MONO_TYPE_R4: case MONO_TYPE_R8: cinfo->ret.reg = mips_f0; cinfo->ret.storage = ArgInFReg; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (simpletype)) { cinfo->ret.reg = mips_v0; break; } break; case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: break; case MONO_TYPE_VOID: break; default: g_error ("Can't handle as return value 0x%x", sig->ret->type); } } /* align stack size to 16 */ cinfo->stack_size = (cinfo->stack_size + MIPS_STACK_ALIGNMENT - 1) & ~(MIPS_STACK_ALIGNMENT - 1); cinfo->stack_usage = cinfo->stack_size; return cinfo; } static gboolean debug_omit_fp (void) { #if 0 return mono_debug_count (); #else return TRUE; #endif } /** * mono_arch_compute_omit_fp: * Determine whether the frame pointer can be eliminated. 
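 *
 * Summary of the checks below: the frame pointer is kept whenever the method
 * saves an LMF, uses alloca, has exception clauses, or is a non-pinvoke
 * vararg method; otherwise it defaults to being omitted.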
*/ static void mono_arch_compute_omit_fp (MonoCompile *cfg) { MonoMethodSignature *sig; MonoMethodHeader *header; int i, locals_size; CallInfo *cinfo; if (cfg->arch.omit_fp_computed) return; header = cfg->header; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; /* * FIXME: Remove some of the restrictions. */ cfg->arch.omit_fp = TRUE; cfg->arch.omit_fp_computed = TRUE; if (cfg->disable_omit_fp) cfg->arch.omit_fp = FALSE; if (!debug_omit_fp ()) cfg->arch.omit_fp = FALSE; if (cfg->method->save_lmf) cfg->arch.omit_fp = FALSE; if (cfg->flags & MONO_CFG_HAS_ALLOCA) cfg->arch.omit_fp = FALSE; if (header->num_clauses) cfg->arch.omit_fp = FALSE; if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) cfg->arch.omit_fp = FALSE; /* * On MIPS, fp points to the bottom of the frame, so it can be eliminated even if * there are stack arguments. */ /* if (cinfo->stack_usage) cfg->arch.omit_fp = FALSE; */ locals_size = 0; for (i = cfg->locals_start; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; int ialign; locals_size += mono_type_size (ins->inst_vtype, &ialign); } //printf ("D: %s %d\n", cfg->method->name, cfg->arch.omit_fp); } /* * Set var information according to the calling convention. mips version. * The locals var stuff should most likely be split in another method. */ void mono_arch_allocate_vars (MonoCompile *cfg) { MonoMethodSignature *sig; MonoMethodHeader *header; MonoInst *inst; int i, offset, size, align, curinst; int frame_reg = mips_sp; guint32 iregs_to_save = 0; #if SAVE_FP_REGS guint32 fregs_to_restore; #endif CallInfo *cinfo; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; mono_arch_compute_omit_fp (cfg); /* spill down, we'll fix it in a separate pass */ // cfg->flags |= MONO_CFG_HAS_SPILLUP; /* this is bug #60332: remove when #59509 is fixed, so no weird vararg * call convs needs to be handled this way. */ if (cfg->flags & MONO_CFG_HAS_VARARGS) cfg->param_area = MAX (cfg->param_area, sizeof (target_mgreg_t)*8); /* gtk-sharp and other broken code will dllimport vararg functions even with * non-varargs signatures. Since there is little hope people will get this right * we assume they won't. 
*/ if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) cfg->param_area = MAX (cfg->param_area, sizeof (target_mgreg_t)*8); /* a0-a3 always present */ cfg->param_area = MAX (cfg->param_area, MIPS_STACK_PARAM_OFFSET); header = cfg->header; if (cfg->arch.omit_fp) frame_reg = mips_sp; else frame_reg = mips_fp; cfg->frame_reg = frame_reg; if (frame_reg != mips_sp) { cfg->used_int_regs |= 1 << frame_reg; } offset = 0; curinst = 0; if (!MONO_TYPE_ISSTRUCT (sig->ret)) { /* FIXME: handle long and FP values */ switch (mini_get_underlying_type (sig->ret)->type) { case MONO_TYPE_VOID: break; case MONO_TYPE_R4: case MONO_TYPE_R8: cfg->ret->opcode = OP_REGVAR; cfg->ret->inst_c0 = cfg->ret->dreg = mips_f0; break; default: cfg->ret->opcode = OP_REGVAR; cfg->ret->inst_c0 = mips_v0; break; } } /* Space for outgoing parameters, including a0-a3 */ offset += cfg->param_area; /* Now handle the local variables */ curinst = cfg->locals_start; for (i = curinst; i < cfg->num_varinfo; ++i) { inst = cfg->varinfo [i]; if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR) continue; /* inst->backend.is_pinvoke indicates native sized value types, this is used by the * pinvoke wrappers when they call functions returning structure */ if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) size = mono_class_native_size (mono_class_from_mono_type_internal (inst->inst_vtype), (unsigned int *) &align); else size = mono_type_size (inst->inst_vtype, &align); offset += align - 1; offset &= ~(align - 1); inst->inst_offset = offset; inst->opcode = OP_REGOFFSET; inst->inst_basereg = frame_reg; offset += size; // g_print ("allocating local %d to %d\n", i, inst->inst_offset); } /* Space for LMF (if needed) */ if (cfg->method->save_lmf) { /* align the offset to 16 bytes */ offset = (offset + MIPS_STACK_ALIGNMENT - 1) & ~(MIPS_STACK_ALIGNMENT - 1); cfg->arch.lmf_offset = offset; offset += sizeof (MonoLMF); } if (sig->call_convention == MONO_CALL_VARARG) { size = 4; align = 4; /* Allocate a local slot to hold the sig cookie address */ offset += align - 1; offset &= ~(align - 1); cfg->sig_cookie = offset; offset += size; } offset += SIZEOF_REGISTER - 1; offset &= ~(SIZEOF_REGISTER - 1); /* Space for saved registers */ cfg->arch.iregs_offset = offset; iregs_to_save = (cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS); if (iregs_to_save) { for (i = MONO_MAX_IREGS-1; i >= 0; --i) { if (iregs_to_save & (1 << i)) { offset += SIZEOF_REGISTER; } } } /* saved float registers */ #if SAVE_FP_REGS fregs_to_restore = (cfg->used_float_regs & MONO_ARCH_CALLEE_SAVED_FREGS); if (fregs_to_restore) { for (i = MONO_MAX_FREGS-1; i >= 0; --i) { if (fregs_to_restore & (1 << i)) { offset += sizeof(double); } } } #endif #if _MIPS_SIM == _ABIO32 /* Now add space for saving the ra */ offset += TARGET_SIZEOF_VOID_P; /* change sign? 
*/ offset = (offset + MIPS_STACK_ALIGNMENT - 1) & ~(MIPS_STACK_ALIGNMENT - 1); cfg->stack_offset = offset; cfg->arch.local_alloc_offset = cfg->stack_offset; #endif /* * Now allocate stack slots for the int arg regs (a0 - a3) * On MIPS o32, these are just above the incoming stack pointer * Even if the arg has been assigned to a regvar, it gets a stack slot */ /* Return struct-by-value results in a hidden first argument */ if (MONO_TYPE_ISSTRUCT (sig->ret)) { cfg->vret_addr->opcode = OP_REGOFFSET; cfg->vret_addr->inst_c0 = mips_a0; cfg->vret_addr->inst_offset = offset; cfg->vret_addr->inst_basereg = frame_reg; offset += SIZEOF_REGISTER; } for (i = 0; i < sig->param_count + sig->hasthis; ++i) { inst = cfg->args [i]; if (inst->opcode != OP_REGVAR) { MonoType *arg_type; if (sig->hasthis && (i == 0)) arg_type = mono_get_object_type (); else arg_type = sig->params [i - sig->hasthis]; inst->opcode = OP_REGOFFSET; size = mono_type_size (arg_type, &align); if (size < SIZEOF_REGISTER) { size = SIZEOF_REGISTER; align = SIZEOF_REGISTER; } inst->inst_basereg = frame_reg; offset = (offset + align - 1) & ~(align - 1); inst->inst_offset = offset; offset += size; if (cfg->verbose_level > 1) printf ("allocating param %d to fp[%d]\n", i, inst->inst_offset); } else { #if _MIPS_SIM == _ABIO32 /* o32: Even a0-a3 get stack slots */ size = SIZEOF_REGISTER; align = SIZEOF_REGISTER; inst->inst_basereg = frame_reg; offset = (offset + align - 1) & ~(align - 1); inst->inst_offset = offset; offset += size; if (cfg->verbose_level > 1) printf ("allocating param %d to fp[%d]\n", i, inst->inst_offset); #endif } } #if _MIPS_SIM == _ABIN32 /* Now add space for saving the ra */ offset += TARGET_SIZEOF_VOID_P; /* change sign? */ offset = (offset + MIPS_STACK_ALIGNMENT - 1) & ~(MIPS_STACK_ALIGNMENT - 1); cfg->stack_offset = offset; cfg->arch.local_alloc_offset = cfg->stack_offset; #endif } void mono_arch_create_vars (MonoCompile *cfg) { MonoMethodSignature *sig; sig = mono_method_signature_internal (cfg->method); if (MONO_TYPE_ISSTRUCT (sig->ret)) { cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG); if (G_UNLIKELY (cfg->verbose_level > 1)) { printf ("vret_addr = "); mono_print_ins (cfg->vret_addr); } } } /* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode, * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info */ /* * take the arguments and generate the arch-specific * instructions to properly call the function in call. * This includes pushing, moving arguments to the right register * etc. * Issue: who does the spilling if needed, and when? */ static void emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo) { MonoMethodSignature *tmp_sig; MonoInst *sig_arg; if (MONO_IS_TAILCALL_OPCODE (call)) NOT_IMPLEMENTED; /* FIXME: Add support for signature tokens to AOT */ cfg->disable_aot = TRUE; /* * mono_ArgIterator_Setup assumes the signature cookie is * passed first and all the arguments which were before it are * passed on the stack after the signature. So compensate by * passing a different signature. 
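 *
 * Hypothetical example: for a vararg call foo (int, __arglist (double, char*)),
 * sentinelpos == 1, so tmp_sig keeps only { double, char* } and the cookie
 * describing them is stored at cinfo->sig_cookie.offset, just below those
 * stack arguments.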
*/ tmp_sig = mono_metadata_signature_dup (call->signature); tmp_sig->param_count -= call->signature->sentinelpos; tmp_sig->sentinelpos = 0; memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*)); MONO_INST_NEW (cfg, sig_arg, OP_ICONST); sig_arg->dreg = mono_alloc_ireg (cfg); sig_arg->inst_p0 = tmp_sig; MONO_ADD_INS (cfg->cbb, sig_arg); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, mips_sp, cinfo->sig_cookie.offset, sig_arg->dreg); } void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call) { MonoInst *in, *ins; MonoMethodSignature *sig; int i, n; CallInfo *cinfo; int is_virtual = 0; sig = call->signature; n = sig->param_count + sig->hasthis; cinfo = get_call_info (cfg->mempool, sig); if (cinfo->struct_ret) call->used_iregs |= 1 << cinfo->struct_ret; for (i = 0; i < n; ++i) { ArgInfo *ainfo = cinfo->args + i; MonoType *t; if (i >= sig->hasthis) t = sig->params [i - sig->hasthis]; else t = mono_get_int_type (); t = mini_get_underlying_type (t); if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Emit the signature cookie just before the implicit arguments */ emit_sig_cookie (cfg, call, cinfo); } if (is_virtual && i == 0) { /* the argument will be attached to the call instruction */ in = call->args [i]; call->used_iregs |= 1 << ainfo->reg; continue; } in = call->args [i]; if (ainfo->storage == ArgInIReg) { #if SIZEOF_REGISTER == 4 if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) { MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = MONO_LVREG_LS (in->dreg); MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + ls_word_idx, FALSE); MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = MONO_LVREG_MS (in->dreg); MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + ms_word_idx, FALSE); } else #endif if (!m_type_is_byref (t) && (t->type == MONO_TYPE_R4)) { int freg; #if PROMOTE_R4_TO_R8 /* ??? - convert to single first?
*/ MONO_INST_NEW (cfg, ins, OP_MIPS_CVTSD); ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); freg = ins->dreg; #else freg = in->dreg; #endif /* trying to load float value into int registers */ MONO_INST_NEW (cfg, ins, OP_MIPS_MFC1S); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = freg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE); } else if (!m_type_is_byref (t) && (t->type == MONO_TYPE_R8)) { /* trying to load float value into int registers */ MONO_INST_NEW (cfg, ins, OP_MIPS_MFC1D); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE); } else { MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE); } } else if (ainfo->storage == ArgStructByAddr) { MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->opcode = OP_OUTARG_VT; ins->sreg1 = in->dreg; ins->klass = in->klass; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); } else if (ainfo->storage == ArgStructByVal) { /* this is further handled in mono_arch_emit_outarg_vt () */ MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->opcode = OP_OUTARG_VT; ins->sreg1 = in->dreg; ins->klass = in->klass; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); } else if (ainfo->storage == ArgOnStack) { if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, mips_sp, ainfo->offset, in->dreg); } else if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) { if (t->type == MONO_TYPE_R8) MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, mips_sp, ainfo->offset, in->dreg); else MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, mips_sp, ainfo->offset, in->dreg); } else { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, mips_sp, ainfo->offset, in->dreg); } } else if (ainfo->storage == ArgInFReg) { if (t->type == MONO_TYPE_VALUETYPE) { /* this is further handled in mono_arch_emit_outarg_vt () */ MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->opcode = OP_OUTARG_VT; ins->sreg1 = in->dreg; ins->klass = in->klass; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); cfg->flags |= MONO_CFG_HAS_FPOUT; } else { int dreg = mono_alloc_freg (cfg); if (ainfo->size == 4) { MONO_EMIT_NEW_UNALU (cfg, OP_MIPS_CVTSD, dreg, in->dreg); } else { MONO_INST_NEW (cfg, ins, OP_FMOVE); ins->dreg = dreg; ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); } mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE); cfg->flags |= MONO_CFG_HAS_FPOUT; } } else { g_assert_not_reached (); } } /* Handle the case where there are no implicit arguments */ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) emit_sig_cookie (cfg, call, cinfo); if (cinfo->struct_ret) { MonoInst *vtarg; MONO_INST_NEW (cfg, vtarg, OP_MOVE); vtarg->sreg1 = call->vret_var->dreg; vtarg->dreg = mono_alloc_preg (cfg); MONO_ADD_INS (cfg->cbb, vtarg); 
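		/* Illustrative note, not in the original source: cinfo->struct_ret
		 * names the register that must carry the address of the
		 * caller-allocated return buffer (mips_a0, judging by the
		 * vret_addr setup earlier in this file); the OP_MOVE above stages
		 * vret_var's address so it can be bound to that register below. */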
mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->struct_ret, FALSE); } #if 0 /* * Reverse the call->out_args list. */ { MonoInst *prev = NULL, *list = call->out_args, *next; while (list) { next = list->next; list->next = prev; prev = list; list = next; } call->out_args = prev; } #endif call->stack_usage = cinfo->stack_usage; cfg->param_area = MAX (cfg->param_area, cinfo->stack_usage); #if _MIPS_SIM == _ABIO32 /* a0-a3 always present */ cfg->param_area = MAX (cfg->param_area, 4 * SIZEOF_REGISTER); #endif cfg->param_area = (cfg->param_area + MIPS_STACK_ALIGNMENT - 1) & ~(MIPS_STACK_ALIGNMENT - 1); cfg->flags |= MONO_CFG_HAS_CALLS; /* * should set more info in call, such as the stack space * used by the args that needs to be added back to esp */ } void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src) { MonoCallInst *call = (MonoCallInst*)ins->inst_p0; ArgInfo *ainfo = ins->inst_p1; int ovf_size = ainfo->vtsize; int doffset = ainfo->offset; int i, soffset, dreg; if (ainfo->storage == ArgStructByVal) { #if 0 if (cfg->verbose_level > 0) { char* nm = mono_method_full_name (cfg->method, TRUE); g_print ("Method %s outarg_vt struct doffset=%d ainfo->size=%d ovf_size=%d\n", nm, doffset, ainfo->size, ovf_size); g_free (nm); } #endif soffset = 0; for (i = 0; i < ainfo->size; ++i) { dreg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset); mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE); soffset += SIZEOF_REGISTER; } if (ovf_size != 0) { mini_emit_memcpy (cfg, mips_sp, doffset, src->dreg, soffset, ovf_size * sizeof (target_mgreg_t), TARGET_SIZEOF_VOID_P); } } else if (ainfo->storage == ArgInFReg) { int tmpr = mono_alloc_freg (cfg); if (ainfo->size == 4) MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, tmpr, src->dreg, 0); else MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmpr, src->dreg, 0); dreg = mono_alloc_freg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, dreg, tmpr); mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE); } else { MonoInst *vtcopy = mono_compile_create_var (cfg, m_class_get_byval_arg (src->klass), OP_LOCAL); MonoInst *load; guint32 size; /* FIXME: alignment? 
*/ if (call->signature->pinvoke && !call->signature->marshalling_disabled) { size = mono_type_native_stack_size (m_class_get_byval_arg (src->klass), NULL); vtcopy->backend.is_pinvoke = 1; } else { size = mini_type_stack_size (m_class_get_byval_arg (src->klass), NULL); } if (size > 0) g_assert (ovf_size > 0); EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype); mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, TARGET_SIZEOF_VOID_P); if (ainfo->offset) MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, mips_at, ainfo->offset, load->dreg); else mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg, FALSE); } } void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val) { MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret); if (!m_type_is_byref (ret)) { #if (SIZEOF_REGISTER == 4) if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) { MonoInst *ins; MONO_INST_NEW (cfg, ins, OP_SETLRET); ins->sreg1 = MONO_LVREG_LS (val->dreg); ins->sreg2 = MONO_LVREG_MS (val->dreg); MONO_ADD_INS (cfg->cbb, ins); return; } #endif if (ret->type == MONO_TYPE_R8) { MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg); return; } if (ret->type == MONO_TYPE_R4) { MONO_EMIT_NEW_UNALU (cfg, OP_MIPS_CVTSD, cfg->ret->dreg, val->dreg); return; } } MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg); } void mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *n, *last_ins = NULL; if (cfg->verbose_level > 2) g_print ("Basic block %d peephole pass 1\n", bb->block_num); ins = bb->code; MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) { if (cfg->verbose_level > 2) mono_print_ins_index (0, ins); switch (ins->opcode) { #if 0 case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: /* * OP_IADD reg2, reg1, const1 * OP_LOAD_MEMBASE const2(reg2), reg3 * -> * OP_LOAD_MEMBASE (const1+const2)(reg1), reg3 */ if (last_ins && (last_ins->opcode == OP_IADD_IMM || last_ins->opcode == OP_ADD_IMM) && (last_ins->dreg == ins->inst_basereg) && (last_ins->sreg1 != last_ins->dreg)){ int const1 = last_ins->inst_imm; int const2 = ins->inst_offset; if (mips_is_imm16 (const1 + const2)) { ins->inst_basereg = last_ins->sreg1; ins->inst_offset = const1 + const2; } } break; #endif } last_ins = ins; ins = ins->next; } bb->last_ins = last_ins; } void mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *n, *last_ins = NULL; ins = bb->code; MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) { MonoInst *last_ins = ins->prev; switch (ins->opcode) { case OP_MUL_IMM: /* remove unnecessary multiplication with 1 */ if (ins->inst_imm == 1) { if (ins->dreg != ins->sreg1) { ins->opcode = OP_MOVE; } else { MONO_DELETE_INS (bb, ins); continue; } } else if (ins->inst_imm > 0) { int power2 = mono_is_power_of_two (ins->inst_imm); if (power2 > 0) { ins->opcode = OP_SHL_IMM; ins->inst_imm = power2; } } break; case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: /* * OP_STORE_MEMBASE_REG reg, offset(basereg) * OP_LOAD_MEMBASE offset(basereg), reg */ if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG || last_ins->opcode == OP_STORE_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { if (ins->dreg == last_ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } else { //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++); ins->opcode = OP_MOVE; ins->sreg1 = last_ins->sreg1; } break; } /* * Note: reg1 must be different from the basereg in the second load * 
OP_LOAD_MEMBASE offset(basereg), reg1 * OP_LOAD_MEMBASE offset(basereg), reg2 * --> * OP_LOAD_MEMBASE offset(basereg), reg1 * OP_MOVE reg1, reg2 */ if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE || last_ins->opcode == OP_LOAD_MEMBASE) && ins->inst_basereg != last_ins->dreg && ins->inst_basereg == last_ins->inst_basereg && ins->inst_offset == last_ins->inst_offset) { if (ins->dreg == last_ins->dreg) { MONO_DELETE_INS (bb, ins); continue; } else { ins->opcode = OP_MOVE; ins->sreg1 = last_ins->dreg; } //g_assert_not_reached (); break; } #if 0 /* * OP_STORE_MEMBASE_IMM imm, offset(basereg) * OP_LOAD_MEMBASE offset(basereg), reg * --> * OP_STORE_MEMBASE_IMM imm, offset(basereg) * OP_ICONST reg, imm */ if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM || last_ins->opcode == OP_STORE_MEMBASE_IMM) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++); ins->opcode = OP_ICONST; ins->inst_c0 = last_ins->inst_imm; g_assert_not_reached (); // check this rule break; } #endif break; case OP_LOADU1_MEMBASE: case OP_LOADI1_MEMBASE: if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1; ins->sreg1 = last_ins->sreg1; } break; case OP_LOADU2_MEMBASE: case OP_LOADI2_MEMBASE: if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2; ins->sreg1 = last_ins->sreg1; } break; case OP_ICONV_TO_I4: case OP_ICONV_TO_U4: case OP_MOVE: ins->opcode = OP_MOVE; /* * OP_MOVE reg, reg */ if (ins->dreg == ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } /* * OP_MOVE sreg, dreg * OP_MOVE dreg, sreg */ if (last_ins && last_ins->opcode == OP_MOVE && ins->sreg1 == last_ins->dreg && ins->dreg == last_ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } break; } last_ins = ins; ins = ins->next; } bb->last_ins = last_ins; } void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins) { int tmp1 = -1; int tmp2 = -1; int tmp3 = -1; int tmp4 = -1; int tmp5 = -1; switch (ins->opcode) { case OP_LADD: tmp1 = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+1, ins->sreg1+1, ins->sreg2+1); MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->dreg+1, ins->sreg1+1); MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+2, ins->sreg1+2, ins->sreg2+2); MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+2, ins->dreg+2, tmp1); NULLIFY_INS(ins); break; case OP_LADD_IMM: tmp1 = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, ins->dreg+1, ins->sreg1+1, ins_get_l_low (ins)); MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->dreg+1, ins->sreg1+1); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, ins->dreg+2, ins->sreg1+2, ins_get_l_high (ins)); MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+2, ins->dreg+2, tmp1); NULLIFY_INS(ins); break; case OP_LSUB: tmp1 = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+1, ins->sreg1+1, ins->sreg2+1); MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->sreg1+1, ins->dreg+1); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->sreg1+2, ins->sreg2+2); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->dreg+2, tmp1); NULLIFY_INS(ins); break; case OP_LSUB_IMM: tmp1 = 
mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, ins->dreg+1, ins->sreg1+1, ins_get_l_low (ins)); MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->sreg1+1, ins->dreg+1); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, ins->dreg+2, ins->sreg1+2, ins_get_l_high (ins)); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->dreg+2, tmp1); NULLIFY_INS(ins); break; case OP_LNEG: tmp1 = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+1, mips_zero, ins->sreg1+1); MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, mips_zero, ins->dreg+1); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, mips_zero, ins->sreg1+2); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->dreg+2, tmp1); NULLIFY_INS(ins); break; case OP_LADD_OVF: tmp1 = mono_alloc_ireg (cfg); tmp2 = mono_alloc_ireg (cfg); tmp3 = mono_alloc_ireg (cfg); tmp4 = mono_alloc_ireg (cfg); tmp5 = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+1, ins->sreg1+1, ins->sreg2+1); /* tmp1 holds the carry from the low 32-bit to the high 32-bits */ MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp5, ins->dreg+1, ins->sreg1+1); /* add the high 32-bits, and add in the carry from the low 32-bits */ MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+2, ins->sreg1+2, ins->sreg2+2); MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+2, tmp5, ins->dreg+2); /* Overflow happens if * neg + neg = pos or * pos + pos = neg * XOR of the high bits returns 0 if the signs match * XOR of that with the high bit of the result return 1 if overflow. */ /* tmp1 = 0 if the signs of the two inputs match, 1 otherwise */ MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp1, ins->sreg1+2, ins->sreg2+2); /* set tmp2 = 0 if bit31 of results matches is different than the operands */ MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp2, ins->dreg+2, ins->sreg2+2); MONO_EMIT_NEW_UNALU (cfg, OP_INOT, tmp2, tmp2); /* OR(tmp1, tmp2) = 0 if both conditions are true */ MONO_EMIT_NEW_BIALU (cfg, OP_IOR, tmp3, tmp2, tmp1); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, tmp4, tmp3, 31); /* Now, if (tmp4 == 0) then overflow */ MONO_EMIT_NEW_COMPARE_EXC (cfg, EQ, tmp4, mips_zero, "OverflowException"); NULLIFY_INS(ins); break; case OP_LADD_OVF_UN: tmp1 = mono_alloc_ireg (cfg); tmp2 = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+1, ins->sreg1+1, ins->sreg2+1); MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->dreg+1, ins->sreg1+1); MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+2, ins->sreg1+2, ins->sreg2+2); MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+2, tmp1, ins->dreg+2); MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp2, ins->dreg+2, ins->sreg1+2); MONO_EMIT_NEW_COMPARE_EXC (cfg, NE_UN, tmp2, mips_zero, "OverflowException"); NULLIFY_INS(ins); break; case OP_LSUB_OVF: tmp1 = mono_alloc_ireg (cfg); tmp2 = mono_alloc_ireg (cfg); tmp3 = mono_alloc_ireg (cfg); tmp4 = mono_alloc_ireg (cfg); tmp5 = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+1, ins->sreg1+1, ins->sreg2+1); MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp5, ins->sreg1+1, ins->dreg+1); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->sreg1+2, ins->sreg2+2); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->dreg+2, tmp5); /* Overflow happens if * neg - pos = pos or * pos - neg = neg * XOR of bit31 of the lhs & rhs = 1 if the signs are different * * tmp1 = (lhs ^ rhs) * tmp2 = (lhs ^ result) * if ((tmp1 < 0) & (tmp2 < 0)) then overflow */ MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp1, ins->sreg1+2, ins->sreg2+2); MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp2, ins->sreg1+2, ins->dreg+2); MONO_EMIT_NEW_BIALU (cfg, 
OP_IAND, tmp3, tmp2, tmp1); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, tmp4, tmp3, 31); /* Now, if (tmp4 == 1) then overflow */ MONO_EMIT_NEW_COMPARE_EXC (cfg, NE_UN, tmp4, mips_zero, "OverflowException"); NULLIFY_INS(ins); break; case OP_LSUB_OVF_UN: tmp1 = mono_alloc_ireg (cfg); tmp2 = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+1, ins->sreg1+1, ins->sreg2+1); MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->sreg1+1, ins->dreg+1); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->sreg1+2, ins->sreg2+2); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->dreg+2, tmp1); MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp2, ins->sreg1+2, ins->dreg+2); MONO_EMIT_NEW_COMPARE_EXC (cfg, NE_UN, tmp2, mips_zero, "OverflowException"); NULLIFY_INS(ins); break; case OP_LCONV_TO_OVF_I4_2: tmp1 = mono_alloc_ireg (cfg); /* Overflows if reg2 != sign extension of reg1 */ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, tmp1, ins->sreg1, 31); MONO_EMIT_NEW_COMPARE_EXC (cfg, NE_UN, ins->sreg2, tmp1, "OverflowException"); MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ins->dreg, ins->sreg1); NULLIFY_INS(ins); break; default: break; } } void mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins) { int tmp1 = -1; int tmp2 = -1; int tmp3 = -1; int tmp4 = -1; int tmp5 = -1; switch (ins->opcode) { case OP_IADD_OVF: tmp1 = mono_alloc_ireg (cfg); tmp2 = mono_alloc_ireg (cfg); tmp3 = mono_alloc_ireg (cfg); tmp4 = mono_alloc_ireg (cfg); tmp5 = mono_alloc_ireg (cfg); /* add the operands */ MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg, ins->sreg1, ins->sreg2); /* Overflow happens if * neg + neg = pos or * pos + pos = neg * * (bit31s of operands match) AND (bit31 of operand != bit31 of result) * XOR of the high bit returns 0 if the signs match * XOR of that with the high bit of the result return 1 if overflow. 
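	 *
	 * Worked example (added for illustration): 0x7fffffff + 0x00000001
	 * yields 0x80000000; the operand sign bits match (both clear) but
	 * differ from the result's sign bit (set), so the test below
	 * raises OverflowException.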
	 */
		/* tmp1 = 0 if the signs of the two inputs match, 1 otherwise */
		MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp1, ins->sreg1, ins->sreg2);

		/* bit31 of tmp2 is set if the sign of the result differs from the sign of sreg2 */
		MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp2, ins->dreg, ins->sreg2);
		MONO_EMIT_NEW_UNALU (cfg, OP_INOT, tmp3, tmp2);

		/* OR(tmp1, tmp2) = 0 if both conditions are true */
		MONO_EMIT_NEW_BIALU (cfg, OP_IOR, tmp4, tmp3, tmp1);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, tmp5, tmp4, 31);

		/* Now, if (tmp5 == 0) then overflow */
		MONO_EMIT_NEW_COMPARE_EXC (cfg, EQ, tmp5, mips_zero, "OverflowException");

		/* Make decompose and method-to-ir.c happy, last insn writes dreg */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ins->dreg, ins->dreg);
		NULLIFY_INS(ins);
		break;
	case OP_IADD_OVF_UN:
		tmp1 = mono_alloc_ireg (cfg);

		MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg, ins->sreg1, ins->sreg2);
		MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->dreg, ins->sreg1);
		MONO_EMIT_NEW_COMPARE_EXC (cfg, NE_UN, tmp1, mips_zero, "OverflowException");

		/* Make decompose and method-to-ir.c happy, last insn writes dreg */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ins->dreg, ins->dreg);
		NULLIFY_INS(ins);
		break;
	case OP_ISUB_OVF:
		tmp1 = mono_alloc_ireg (cfg);
		tmp2 = mono_alloc_ireg (cfg);
		tmp3 = mono_alloc_ireg (cfg);
		tmp4 = mono_alloc_ireg (cfg);
		tmp5 = mono_alloc_ireg (cfg);

		/* add the operands */
		MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg, ins->sreg1, ins->sreg2);

		/* Overflow happens if
		 *	neg - pos = pos    or
		 *	pos - neg = neg
		 * XOR of bit31 of the lhs & rhs = 1 if the signs are different
		 *
		 * tmp1 = (lhs ^ rhs)
		 * tmp2 = (lhs ^ result)
		 * if ((tmp1 < 0) & (tmp2 < 0)) then overflow
		 */

		/* tmp3 = 1 if the signs of the two inputs differ */
		MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp1, ins->sreg1, ins->sreg2);
		MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp2, ins->sreg1, ins->dreg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MIPS_SLTI, tmp3, tmp1, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MIPS_SLTI, tmp4, tmp2, 0);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, tmp5, tmp4, tmp3);
		MONO_EMIT_NEW_COMPARE_EXC (cfg, NE_UN, tmp5, mips_zero, "OverflowException");

		/* Make decompose and method-to-ir.c happy, last insn writes dreg */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ins->dreg, ins->dreg);
		NULLIFY_INS(ins);
		break;
	case OP_ISUB_OVF_UN:
		tmp1 = mono_alloc_ireg (cfg);

		MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg, ins->sreg1, ins->sreg2);
		MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->sreg1, ins->dreg);
		MONO_EMIT_NEW_COMPARE_EXC (cfg, NE_UN, tmp1, mips_zero, "OverflowException");

		/* Make decompose and method-to-ir.c happy, last insn writes dreg */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ins->dreg, ins->dreg);
		NULLIFY_INS(ins);
		break;
	}
}

static int
map_to_reg_reg_op (int op)
{
	switch (op) {
	case OP_ADD_IMM: return OP_IADD;
	case OP_SUB_IMM: return OP_ISUB;
	case OP_AND_IMM: return OP_IAND;
	case OP_COMPARE_IMM: return OP_COMPARE;
	case OP_ICOMPARE_IMM: return OP_ICOMPARE;
	case OP_LCOMPARE_IMM: return OP_LCOMPARE;
	case OP_ADDCC_IMM: return OP_IADDCC;
	case OP_ADC_IMM: return OP_IADC;
	case OP_SUBCC_IMM: return OP_ISUBCC;
	case OP_SBB_IMM: return OP_ISBB;
	case OP_OR_IMM: return OP_IOR;
	case OP_XOR_IMM: return OP_IXOR;
	case OP_MUL_IMM: return OP_IMUL;
	case OP_LOAD_MEMBASE: return OP_LOAD_MEMINDEX;
	case OP_LOADI4_MEMBASE: return OP_LOADI4_MEMINDEX;
	case OP_LOADU4_MEMBASE: return OP_LOADU4_MEMINDEX;
	case OP_LOADU1_MEMBASE: return OP_LOADU1_MEMINDEX;
	case OP_LOADI2_MEMBASE: return OP_LOADI2_MEMINDEX;
	case OP_LOADU2_MEMBASE: return OP_LOADU2_MEMINDEX;
	case OP_LOADI1_MEMBASE: return OP_LOADI1_MEMINDEX;
	case
OP_LOADR4_MEMBASE: return OP_LOADR4_MEMINDEX; case OP_LOADR8_MEMBASE: return OP_LOADR8_MEMINDEX; case OP_STOREI1_MEMBASE_REG: return OP_STOREI1_MEMINDEX; case OP_STOREI2_MEMBASE_REG: return OP_STOREI2_MEMINDEX; case OP_STOREI4_MEMBASE_REG: return OP_STOREI4_MEMINDEX; case OP_STORE_MEMBASE_REG: return OP_STORE_MEMINDEX; case OP_STORER4_MEMBASE_REG: return OP_STORER4_MEMINDEX; case OP_STORER8_MEMBASE_REG: return OP_STORER8_MEMINDEX; case OP_STORE_MEMBASE_IMM: return OP_STORE_MEMBASE_REG; case OP_STOREI1_MEMBASE_IMM: return OP_STOREI1_MEMBASE_REG; case OP_STOREI2_MEMBASE_IMM: return OP_STOREI2_MEMBASE_REG; case OP_STOREI4_MEMBASE_IMM: return OP_STOREI4_MEMBASE_REG; case OP_STOREI8_MEMBASE_IMM: return OP_STOREI8_MEMBASE_REG; } if (mono_op_imm_to_op (op) == -1) g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (op)); return mono_op_imm_to_op (op); } static int map_to_mips_op (int op) { switch (op) { case OP_FBEQ: return OP_MIPS_FBEQ; case OP_FBGE: return OP_MIPS_FBGE; case OP_FBGT: return OP_MIPS_FBGT; case OP_FBLE: return OP_MIPS_FBLE; case OP_FBLT: return OP_MIPS_FBLT; case OP_FBNE_UN: return OP_MIPS_FBNE; case OP_FBGE_UN: return OP_MIPS_FBGE_UN; case OP_FBGT_UN: return OP_MIPS_FBGT_UN; case OP_FBLE_UN: return OP_MIPS_FBLE_UN; case OP_FBLT_UN: return OP_MIPS_FBLT_UN; case OP_FCEQ: case OP_FCGT: case OP_FCGT_UN: case OP_FCLT: case OP_FCLT_UN: default: g_warning ("unknown opcode %s in %s()\n", mono_inst_name (op), __FUNCTION__); g_assert_not_reached (); } } #define NEW_INS(cfg,after,dest,op) do { \ MONO_INST_NEW((cfg), (dest), (op)); \ mono_bblock_insert_after_ins (bb, (after), (dest)); \ } while (0) #define INS(pos,op,_dreg,_sreg1,_sreg2) do { \ MonoInst *temp; \ MONO_INST_NEW(cfg, temp, (op)); \ mono_bblock_insert_after_ins (bb, (pos), temp); \ temp->dreg = (_dreg); \ temp->sreg1 = (_sreg1); \ temp->sreg2 = (_sreg2); \ pos = temp; \ } while (0) #define INS_IMM(pos,op,_dreg,_sreg1,_imm) do { \ MonoInst *temp; \ MONO_INST_NEW(cfg, temp, (op)); \ mono_bblock_insert_after_ins (bb, (pos), temp); \ temp->dreg = (_dreg); \ temp->sreg1 = (_sreg1); \ temp->inst_c0 = (_imm); \ pos = temp; \ } while (0) /* * Remove from the instruction list the instructions that can't be * represented with very simple instructions with no register * requirements. 
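 *
 * A sketch of the effect (not original text): OP_IADD_IMM whose
 * immediate does not fit the signed 16-bit addiu field is rewritten
 * below into OP_ICONST loading the constant into a fresh vreg,
 * followed by a register-register OP_IADD.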
*/ void mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *next, *temp, *last_ins = NULL; int imm; #if 1 if (cfg->verbose_level > 2) { int idx = 0; g_print ("BASIC BLOCK %d (before lowering)\n", bb->block_num); MONO_BB_FOR_EACH_INS (bb, ins) { mono_print_ins_index (idx++, ins); } } #endif MONO_BB_FOR_EACH_INS (bb, ins) { loop_start: switch (ins->opcode) { case OP_COMPARE: case OP_ICOMPARE: case OP_LCOMPARE: next = ins->next; /* Branch opts can eliminate the branch */ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) { NULLIFY_INS(ins); break; } break; case OP_COMPARE_IMM: case OP_ICOMPARE_IMM: case OP_LCOMPARE_IMM: next = ins->next; /* Branch opts can eliminate the branch */ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) { NULLIFY_INS(ins); break; } if (ins->inst_imm) { NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; last_ins = temp; } else { ins->sreg2 = mips_zero; } if (ins->opcode == OP_COMPARE_IMM) ins->opcode = OP_COMPARE; else if (ins->opcode == OP_ICOMPARE_IMM) ins->opcode = OP_ICOMPARE; else if (ins->opcode == OP_LCOMPARE_IMM) ins->opcode = OP_LCOMPARE; goto loop_start; case OP_IDIV_UN_IMM: case OP_IDIV_IMM: case OP_IREM_IMM: case OP_IREM_UN_IMM: NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; if (ins->opcode == OP_IDIV_IMM) ins->opcode = OP_IDIV; else if (ins->opcode == OP_IREM_IMM) ins->opcode = OP_IREM; else if (ins->opcode == OP_IDIV_UN_IMM) ins->opcode = OP_IDIV_UN; else if (ins->opcode == OP_IREM_UN_IMM) ins->opcode = OP_IREM_UN; last_ins = temp; /* handle rem separately */ goto loop_start; #if 0 case OP_AND_IMM: case OP_OR_IMM: case OP_XOR_IMM: if ((ins->inst_imm & 0xffff0000) && (ins->inst_imm & 0xffff)) { NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; #endif case OP_AND_IMM: case OP_IAND_IMM: case OP_OR_IMM: case OP_IOR_IMM: case OP_XOR_IMM: case OP_IXOR_IMM: /* unsigned 16 bit immediate */ if (ins->inst_imm & 0xffff0000) { NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; case OP_IADD_IMM: case OP_ADD_IMM: case OP_ADDCC_IMM: /* signed 16 bit immediate */ if (!mips_is_imm16 (ins->inst_imm)) { NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; case OP_SUB_IMM: case OP_ISUB_IMM: if (!mips_is_imm16 (-ins->inst_imm)) { NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; case OP_MUL_IMM: case OP_IMUL_IMM: if (ins->inst_imm == 1) { ins->opcode = OP_MOVE; break; } if (ins->inst_imm == 0) { ins->opcode = OP_ICONST; ins->inst_c0 = 0; break; } imm = (ins->inst_imm > 0) ? 
mono_is_power_of_two (ins->inst_imm) : -1; if (imm > 0) { ins->opcode = OP_SHL_IMM; ins->inst_imm = imm; break; } NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_LOCALLOC_IMM: NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg1 = temp->dreg; ins->opcode = OP_LOCALLOC; break; case OP_LOADR4_MEMBASE: case OP_STORER4_MEMBASE_REG: /* we can do two things: load the immed in a register * and use an indexed load, or see if the immed can be * represented as an ad_imm + a load with a smaller offset * that fits. We just do the first for now, optimize later. */ if (mips_is_imm16 (ins->inst_offset)) break; NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_STORE_MEMBASE_IMM: case OP_STOREI1_MEMBASE_IMM: case OP_STOREI2_MEMBASE_IMM: case OP_STOREI4_MEMBASE_IMM: case OP_STOREI8_MEMBASE_IMM: if (!ins->inst_imm) { ins->sreg1 = mips_zero; ins->opcode = map_to_reg_reg_op (ins->opcode); } else { NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg1 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); last_ins = temp; goto loop_start; /* make it handle the possibly big ins->inst_offset */ } break; case OP_FCOMPARE: next = ins->next; /* Branch opts can eliminate the branch */ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) { NULLIFY_INS(ins); break; } g_assert(next); /* * remap compare/branch and compare/set * to MIPS specific opcodes. */ next->opcode = map_to_mips_op (next->opcode); next->sreg1 = ins->sreg1; next->sreg2 = ins->sreg2; NULLIFY_INS(ins); break; #if 0 case OP_R8CONST: case OP_R4CONST: NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = (guint32)ins->inst_p0; temp->dreg = mono_alloc_ireg (cfg); ins->inst_basereg = temp->dreg; ins->inst_offset = 0; ins->opcode = ins->opcode == OP_R4CONST? 
OP_LOADR4_MEMBASE: OP_LOADR8_MEMBASE; last_ins = temp; /* make it handle the possibly big ins->inst_offset * later optimize to use lis + load_membase */ goto loop_start; #endif case OP_IBEQ: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_BEQ, last_ins->sreg1, last_ins->sreg2); NULLIFY_INS(last_ins); break; case OP_IBNE_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_BNE, last_ins->sreg1, last_ins->sreg2); NULLIFY_INS(last_ins); break; case OP_IBGE: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLT, last_ins->sreg1, last_ins->sreg2); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BEQ, last_ins->dreg, mips_zero); break; case OP_IBGE_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLTU, last_ins->sreg1, last_ins->sreg2); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BEQ, last_ins->dreg, mips_zero); break; case OP_IBLT: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLT, last_ins->sreg1, last_ins->sreg2); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BNE, last_ins->dreg, mips_zero); break; case OP_IBLT_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLTU, last_ins->sreg1, last_ins->sreg2); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BNE, last_ins->dreg, mips_zero); break; case OP_IBLE: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLT, last_ins->sreg2, last_ins->sreg1); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BEQ, last_ins->dreg, mips_zero); break; case OP_IBLE_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLTU, last_ins->sreg2, last_ins->sreg1); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BEQ, last_ins->dreg, mips_zero); break; case OP_IBGT: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLT, last_ins->sreg2, last_ins->sreg1); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BNE, last_ins->dreg, mips_zero); break; case OP_IBGT_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLTU, last_ins->sreg2, last_ins->sreg1); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BNE, last_ins->dreg, mips_zero); break; case OP_CEQ: case OP_ICEQ: g_assert (ins_is_compare(last_ins)); last_ins->opcode = OP_IXOR; last_ins->dreg = mono_alloc_ireg(cfg); INS_REWRITE_IMM(ins, OP_MIPS_SLTIU, last_ins->dreg, 1); break; case OP_CLT: case OP_ICLT: INS_REWRITE(ins, OP_MIPS_SLT, last_ins->sreg1, last_ins->sreg2); NULLIFY_INS(last_ins); break; case OP_CLT_UN: case OP_ICLT_UN: INS_REWRITE(ins, OP_MIPS_SLTU, last_ins->sreg1, last_ins->sreg2); NULLIFY_INS(last_ins); break; case OP_CGT: case OP_ICGT: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_SLT, last_ins->sreg2, last_ins->sreg1); MONO_DELETE_INS(bb, last_ins); break; case OP_CGT_UN: case OP_ICGT_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_SLTU, last_ins->sreg2, last_ins->sreg1); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_EQ: case OP_COND_EXC_IEQ: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_EQ, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_GE: case OP_COND_EXC_IGE: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_GE, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_GT: case OP_COND_EXC_IGT: g_assert (ins_is_compare(last_ins)); 
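			/* A note on the pattern used by these OP_COND_EXC_* cases
			   (added for clarity): the preceding compare, asserted
			   above, is fused into a MIPS-specific conditional
			   exception op and the compare itself is deleted. */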
INS_REWRITE(ins, OP_MIPS_COND_EXC_GT, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_LE: case OP_COND_EXC_ILE: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_LE, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_LT: case OP_COND_EXC_ILT: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_LT, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_NE_UN: case OP_COND_EXC_INE_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_NE_UN, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_GE_UN: case OP_COND_EXC_IGE_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_GE_UN, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_GT_UN: case OP_COND_EXC_IGT_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_GT_UN, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_LE_UN: case OP_COND_EXC_ILE_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_LE_UN, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_LT_UN: case OP_COND_EXC_ILT_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_LT_UN, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_OV: case OP_COND_EXC_IOV: { int tmp1, tmp2, tmp3, tmp4, tmp5; MonoInst *pos = last_ins; /* Overflow happens if * neg + neg = pos or * pos + pos = neg * * (bit31s of operands match) AND (bit31 of operand * != bit31 of result) * XOR of the high bit returns 0 if the signs match * XOR of that with the high bit of the result return 1 * if overflow. */ g_assert (last_ins->opcode == OP_IADC); tmp1 = mono_alloc_ireg (cfg); tmp2 = mono_alloc_ireg (cfg); tmp3 = mono_alloc_ireg (cfg); tmp4 = mono_alloc_ireg (cfg); tmp5 = mono_alloc_ireg (cfg); /* tmp1 = 0 if the signs of the two inputs match, else 1 */ INS (pos, OP_IXOR, tmp1, last_ins->sreg1, last_ins->sreg2); /* set tmp2 = 0 if bit31 of results matches is different than the operands */ INS (pos, OP_IXOR, tmp2, last_ins->dreg, last_ins->sreg2); INS (pos, OP_INOT, tmp3, tmp2, -1); /* OR(tmp1, tmp2) = 0 if both conditions are true */ INS (pos, OP_IOR, tmp4, tmp3, tmp1); INS_IMM (pos, OP_SHR_IMM, tmp5, tmp4, 31); /* Now, if (tmp5 == 0) then overflow */ INS_REWRITE(ins, OP_MIPS_COND_EXC_EQ, tmp5, mips_zero); ins->dreg = -1; break; } case OP_COND_EXC_NO: case OP_COND_EXC_INO: g_assert_not_reached (); break; case OP_COND_EXC_C: case OP_COND_EXC_IC: g_assert_not_reached (); break; case OP_COND_EXC_NC: case OP_COND_EXC_INC: g_assert_not_reached (); break; } last_ins = ins; } bb->last_ins = last_ins; bb->max_vreg = cfg->next_vreg; #if 1 if (cfg->verbose_level > 2) { int idx = 0; g_print ("BASIC BLOCK %d (after lowering)\n", bb->block_num); MONO_BB_FOR_EACH_INS (bb, ins) { mono_print_ins_index (idx++, ins); } } #endif } static guchar* emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed) { /* sreg is a float, dreg is an integer reg. 
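	   (A sketch of the sequence emitted below, added for illustration:
	   trunc.w.d into mips_ftemp, mfc1 into dreg, then a mask or a
	   shift pair for sub-word sizes; an unsigned 1-byte result, for
	   example, is masked with andi 0xff.)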
mips_at is used as scratch */ #if 1 mips_truncwd (code, mips_ftemp, sreg); #else mips_cvtwd (code, mips_ftemp, sreg); #endif mips_mfc1 (code, dreg, mips_ftemp); if (!is_signed) { if (size == 1) mips_andi (code, dreg, dreg, 0xff); else if (size == 2) { mips_sll (code, dreg, dreg, 16); mips_srl (code, dreg, dreg, 16); } } else { if (size == 1) { mips_sll (code, dreg, dreg, 24); mips_sra (code, dreg, dreg, 24); } else if (size == 2) { mips_sll (code, dreg, dreg, 16); mips_sra (code, dreg, dreg, 16); } } return code; } /* * emit_load_volatile_arguments: * * Load volatile arguments from the stack to the original input registers. * Required before a tailcall. */ static guint8 * emit_load_volatile_arguments(MonoCompile *cfg, guint8 *code) { MonoMethod *method = cfg->method; MonoMethodSignature *sig; MonoInst *inst; CallInfo *cinfo; int i; sig = mono_method_signature_internal (method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; if (cinfo->struct_ret) { ArgInfo *ainfo = &cinfo->ret; inst = cfg->vret_addr; mips_lw (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); } for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; inst = cfg->args [i]; if (inst->opcode == OP_REGVAR) { if (ainfo->storage == ArgInIReg) MIPS_MOVE (code, ainfo->reg, inst->dreg); else if (ainfo->storage == ArgInFReg) g_assert_not_reached(); else if (ainfo->storage == ArgOnStack) { /* do nothing */ } else g_assert_not_reached (); } else { if (ainfo->storage == ArgInIReg) { g_assert (mips_is_imm16 (inst->inst_offset)); switch (ainfo->size) { case 1: mips_lb (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); break; case 2: mips_lh (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); break; case 0: /* XXX */ case 4: mips_lw (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); break; case 8: mips_lw (code, ainfo->reg, inst->inst_basereg, inst->inst_offset + ls_word_offset); mips_lw (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + ms_word_offset); break; default: g_assert_not_reached (); break; } } else if (ainfo->storage == ArgOnStack) { /* do nothing */ } else if (ainfo->storage == ArgInFReg) { g_assert (mips_is_imm16 (inst->inst_offset)); if (ainfo->size == 8) { #if _MIPS_SIM == _ABIO32 mips_lwc1 (code, ainfo->reg, inst->inst_basereg, inst->inst_offset + ls_word_offset); mips_lwc1 (code, ainfo->reg+1, inst->inst_basereg, inst->inst_offset + ms_word_offset); #elif _MIPS_SIM == _ABIN32 mips_ldc1 (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); #endif } else if (ainfo->size == 4) mips_lwc1 (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); else g_assert_not_reached (); } else if (ainfo->storage == ArgStructByVal) { int i; int doffset = inst->inst_offset; g_assert (mips_is_imm16 (inst->inst_offset)); g_assert (mips_is_imm16 (inst->inst_offset + ainfo->size * sizeof (target_mgreg_t))); for (i = 0; i < ainfo->size; ++i) { mips_lw (code, ainfo->reg + i, inst->inst_basereg, doffset); doffset += SIZEOF_REGISTER; } } else if (ainfo->storage == ArgStructByAddr) { g_assert (mips_is_imm16 (inst->inst_offset)); mips_lw (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); } else g_assert_not_reached (); } } return code; } static guint8* emit_reserve_param_area (MonoCompile *cfg, guint8 *code) { int size = cfg->param_area; size += MONO_ARCH_FRAME_ALIGNMENT - 1; size &= -MONO_ARCH_FRAME_ALIGNMENT; if (!size) return code; #if 0 ppc_lwz (code, ppc_r0, 0, ppc_sp); if (ppc_is_imm16 (-size)) { 
ppc_stwu (code, ppc_r0, -size, ppc_sp); } else { ppc_load (code, ppc_r12, -size); ppc_stwux (code, ppc_r0, ppc_sp, ppc_r12); } #endif return code; } static guint8* emit_unreserve_param_area (MonoCompile *cfg, guint8 *code) { int size = cfg->param_area; size += MONO_ARCH_FRAME_ALIGNMENT - 1; size &= -MONO_ARCH_FRAME_ALIGNMENT; if (!size) return code; #if 0 ppc_lwz (code, ppc_r0, 0, ppc_sp); if (ppc_is_imm16 (size)) { ppc_stwu (code, ppc_r0, size, ppc_sp); } else { ppc_load (code, ppc_r12, size); ppc_stwux (code, ppc_r0, ppc_sp, ppc_r12); } #endif return code; } void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins; MonoCallInst *call; guint8 *code = cfg->native_code + cfg->code_len; MonoInst *last_ins = NULL; int max_len, cpos; int ins_cnt = 0; /* we don't align basic blocks of loops on mips */ if (cfg->verbose_level > 2) g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset); cpos = bb->max_offset; MONO_BB_FOR_EACH_INS (bb, ins) { const guint offset = code - cfg->native_code; set_code_cursor (cfg, code); max_len = ins_get_size (ins->opcode); code = realloc_code (cfg, max_len); mono_debug_record_line_number (cfg, ins, offset); if (cfg->verbose_level > 2) { g_print (" @ 0x%x\t", offset); mono_print_ins_index (ins_cnt++, ins); } /* Check for virtual regs that snuck by */ g_assert ((ins->dreg >= -1) && (ins->dreg < 32)); switch (ins->opcode) { case OP_RELAXED_NOP: case OP_NOP: case OP_DUMMY_USE: case OP_DUMMY_ICONST: case OP_DUMMY_I8CONST: case OP_DUMMY_R8CONST: case OP_DUMMY_R4CONST: case OP_NOT_REACHED: case OP_NOT_NULL: break; case OP_IL_SEQ_POINT: mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); break; case OP_SEQ_POINT: { if (ins->flags & MONO_INST_SINGLE_STEP_LOC) { guint32 addr = (guint32)ss_trigger_page; mips_load_const (code, mips_t9, addr); mips_lw (code, mips_t9, mips_t9, 0); } mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); /* * A placeholder for a possible breakpoint inserted by * mono_arch_set_breakpoint (). 
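	 * (Sketch of the patching scheme, inferred from the line below: the
	 * three nops reserve space for the mips_load_const () + mips_lw
	 * pair that the breakpoint patcher writes over them.)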
*/ /* mips_load_const () + mips_lw */ mips_nop (code); mips_nop (code); mips_nop (code); break; } case OP_BIGMUL: mips_mult (code, ins->sreg1, ins->sreg2); mips_mflo (code, ins->dreg); mips_mfhi (code, ins->dreg+1); break; case OP_BIGMUL_UN: mips_multu (code, ins->sreg1, ins->sreg2); mips_mflo (code, ins->dreg); mips_mfhi (code, ins->dreg+1); break; case OP_MEMORY_BARRIER: mips_sync (code, 0); break; case OP_STOREI1_MEMBASE_IMM: mips_load_const (code, mips_temp, ins->inst_imm); if (mips_is_imm16 (ins->inst_offset)) { mips_sb (code, mips_temp, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_sb (code, mips_temp, mips_at, ins->inst_destbasereg); } break; case OP_STOREI2_MEMBASE_IMM: mips_load_const (code, mips_temp, ins->inst_imm); if (mips_is_imm16 (ins->inst_offset)) { mips_sh (code, mips_temp, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_sh (code, mips_temp, mips_at, ins->inst_destbasereg); } break; case OP_STOREI8_MEMBASE_IMM: mips_load_const (code, mips_temp, ins->inst_imm); if (mips_is_imm16 (ins->inst_offset)) { mips_sd (code, mips_temp, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_sd (code, mips_temp, mips_at, ins->inst_destbasereg); } break; case OP_STORE_MEMBASE_IMM: case OP_STOREI4_MEMBASE_IMM: mips_load_const (code, mips_temp, ins->inst_imm); if (mips_is_imm16 (ins->inst_offset)) { mips_sw (code, mips_temp, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_sw (code, mips_temp, mips_at, ins->inst_destbasereg); } break; case OP_STOREI1_MEMBASE_REG: if (mips_is_imm16 (ins->inst_offset)) { mips_sb (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_destbasereg); mips_sb (code, ins->sreg1, mips_at, 0); } break; case OP_STOREI2_MEMBASE_REG: if (mips_is_imm16 (ins->inst_offset)) { mips_sh (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_destbasereg); mips_sh (code, ins->sreg1, mips_at, 0); } break; case OP_STORE_MEMBASE_REG: case OP_STOREI4_MEMBASE_REG: if (mips_is_imm16 (ins->inst_offset)) { mips_sw (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_destbasereg); mips_sw (code, ins->sreg1, mips_at, 0); } break; case OP_STOREI8_MEMBASE_REG: if (mips_is_imm16 (ins->inst_offset)) { mips_sd (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_destbasereg); mips_sd (code, ins->sreg1, mips_at, 0); } break; case OP_LOADU4_MEM: g_assert_not_reached (); //x86_mov_reg_imm (code, ins->dreg, ins->inst_p0); //x86_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4); break; case OP_LOADI8_MEMBASE: if (mips_is_imm16 (ins->inst_offset)) { mips_ld (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_basereg); mips_ld (code, ins->dreg, mips_at, 0); } break; case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: case OP_LOADU4_MEMBASE: g_assert (ins->dreg != -1); if (mips_is_imm16 (ins->inst_offset)) { mips_lw (code, 
ins->dreg, ins->inst_basereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_basereg); mips_lw (code, ins->dreg, mips_at, 0); } break; case OP_LOADI1_MEMBASE: if (mips_is_imm16 (ins->inst_offset)) { mips_lb (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_basereg); mips_lb (code, ins->dreg, mips_at, 0); } break; case OP_LOADU1_MEMBASE: if (mips_is_imm16 (ins->inst_offset)) { mips_lbu (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_basereg); mips_lbu (code, ins->dreg, mips_at, 0); } break; case OP_LOADI2_MEMBASE: if (mips_is_imm16 (ins->inst_offset)) { mips_lh (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_basereg); mips_lh (code, ins->dreg, mips_at, 0); } break; case OP_LOADU2_MEMBASE: if (mips_is_imm16 (ins->inst_offset)) { mips_lhu (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_basereg); mips_lhu (code, ins->dreg, mips_at, 0); } break; case OP_ICONV_TO_I1: mips_sll (code, mips_at, ins->sreg1, 24); mips_sra (code, ins->dreg, mips_at, 24); break; case OP_ICONV_TO_I2: mips_sll (code, mips_at, ins->sreg1, 16); mips_sra (code, ins->dreg, mips_at, 16); break; case OP_ICONV_TO_U1: mips_andi (code, ins->dreg, ins->sreg1, 0xff); break; case OP_ICONV_TO_U2: mips_sll (code, mips_at, ins->sreg1, 16); mips_srl (code, ins->dreg, mips_at, 16); break; case OP_MIPS_SLT: mips_slt (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_MIPS_SLTI: g_assert (mips_is_imm16 (ins->inst_imm)); mips_slti (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_MIPS_SLTU: mips_sltu (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_MIPS_SLTIU: g_assert (mips_is_imm16 (ins->inst_imm)); mips_sltiu (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_BREAK: /* * gdb does not like encountering the hw breakpoint ins in the debugged code. * So instead of emitting a trap, we emit a call a C function and place a * breakpoint there. 
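	 * (Illustrative detail, inferred from the code below: the
	 * 0x1f1f1f1f constant loaded into mips_t9 is a placeholder that the
	 * MONO_PATCH_INFO_JIT_ICALL_ID patch replaces with the real address
	 * of mono_break before execution.)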
*/ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break)); mips_load (code, mips_t9, 0x1f1f1f1f); mips_jalr (code, mips_t9, mips_ra); mips_nop (code); break; case OP_IADD: mips_addu (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_LADD: mips_daddu (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_ADD_IMM: case OP_IADD_IMM: g_assert (mips_is_imm16 (ins->inst_imm)); mips_addiu (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_LADD_IMM: g_assert (mips_is_imm16 (ins->inst_imm)); mips_daddiu (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_ISUB: mips_subu (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_LSUB: mips_dsubu (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_ISUB_IMM: case OP_SUB_IMM: // we add the negated value g_assert (mips_is_imm16 (-ins->inst_imm)); mips_addiu (code, ins->dreg, ins->sreg1, -ins->inst_imm); break; case OP_LSUB_IMM: // we add the negated value g_assert (mips_is_imm16 (-ins->inst_imm)); mips_daddiu (code, ins->dreg, ins->sreg1, -ins->inst_imm); break; case OP_IAND: case OP_LAND: mips_and (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_AND_IMM: case OP_IAND_IMM: case OP_LAND_IMM: g_assert (!(ins->inst_imm & 0xffff0000)); mips_andi (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_IDIV: case OP_IREM: { guint32 *divisor_is_m1; guint32 *dividend_is_minvalue; guint32 *divisor_is_zero; mips_load_const (code, mips_at, -1); divisor_is_m1 = (guint32 *)(void *)code; mips_bne (code, ins->sreg2, mips_at, 0); mips_lui (code, mips_at, mips_zero, 0x8000); dividend_is_minvalue = (guint32 *)(void *)code; mips_bne (code, ins->sreg1, mips_at, 0); mips_nop (code); /* Divide Int32.MinValue by -1 -- throw exception */ EMIT_SYSTEM_EXCEPTION_NAME("OverflowException"); mips_patch (divisor_is_m1, (guint32)code); mips_patch (dividend_is_minvalue, (guint32)code); /* Put divide in branch delay slot (NOT YET) */ divisor_is_zero = (guint32 *)(void *)code; mips_bne (code, ins->sreg2, mips_zero, 0); mips_nop (code); /* Divide by zero -- throw exception */ EMIT_SYSTEM_EXCEPTION_NAME("DivideByZeroException"); mips_patch (divisor_is_zero, (guint32)code); mips_div (code, ins->sreg1, ins->sreg2); if (ins->opcode == OP_IDIV) mips_mflo (code, ins->dreg); else mips_mfhi (code, ins->dreg); break; } case OP_IDIV_UN: case OP_IREM_UN: { guint32 *divisor_is_zero = (guint32 *)(void *)code; /* Put divide in branch delay slot (NOT YET) */ mips_bne (code, ins->sreg2, mips_zero, 0); mips_nop (code); /* Divide by zero -- throw exception */ EMIT_SYSTEM_EXCEPTION_NAME("DivideByZeroException"); mips_patch (divisor_is_zero, (guint32)code); mips_divu (code, ins->sreg1, ins->sreg2); if (ins->opcode == OP_IDIV_UN) mips_mflo (code, ins->dreg); else mips_mfhi (code, ins->dreg); break; } case OP_DIV_IMM: g_assert_not_reached (); #if 0 ppc_load (code, ppc_r12, ins->inst_imm); ppc_divwod (code, ins->dreg, ins->sreg1, ppc_r12); ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<14)); /* FIXME: use OverflowException for 0x80000000/-1 */ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException"); #endif g_assert_not_reached(); break; case OP_REM_IMM: g_assert_not_reached (); case OP_IOR: mips_or (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_OR_IMM: case OP_IOR_IMM: g_assert (!(ins->inst_imm & 0xffff0000)); mips_ori (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_IXOR: mips_xor (code, ins->dreg, ins->sreg1, 
ins->sreg2); break; case OP_XOR_IMM: case OP_IXOR_IMM: /* unsigned 16-bit immediate */ g_assert (!(ins->inst_imm & 0xffff0000)); mips_xori (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_ISHL: mips_sllv (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SHL_IMM: case OP_ISHL_IMM: mips_sll (code, ins->dreg, ins->sreg1, ins->inst_imm & 0x1f); break; case OP_ISHR: mips_srav (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_LSHR: mips_dsrav (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SHR_IMM: case OP_ISHR_IMM: mips_sra (code, ins->dreg, ins->sreg1, ins->inst_imm & 0x1f); break; case OP_LSHR_IMM: mips_dsra (code, ins->dreg, ins->sreg1, ins->inst_imm & 0x3f); break; case OP_SHR_UN_IMM: case OP_ISHR_UN_IMM: mips_srl (code, ins->dreg, ins->sreg1, ins->inst_imm & 0x1f); break; case OP_LSHR_UN_IMM: mips_dsrl (code, ins->dreg, ins->sreg1, ins->inst_imm & 0x3f); break; case OP_ISHR_UN: mips_srlv (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_LSHR_UN: mips_dsrlv (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_INOT: case OP_LNOT: mips_nor (code, ins->dreg, mips_zero, ins->sreg1); break; case OP_INEG: mips_subu (code, ins->dreg, mips_zero, ins->sreg1); break; case OP_LNEG: mips_dsubu (code, ins->dreg, mips_zero, ins->sreg1); break; case OP_IMUL: #if USE_MUL mips_mul (code, ins->dreg, ins->sreg1, ins->sreg2); #else mips_mult (code, ins->sreg1, ins->sreg2); mips_mflo (code, ins->dreg); mips_nop (code); mips_nop (code); #endif break; #if SIZEOF_REGISTER == 8 case OP_LMUL: mips_dmult (code, ins->sreg1, ins->sreg2); mips_mflo (code, ins->dreg); break; #endif case OP_IMUL_OVF: { guint32 *patch; mips_mult (code, ins->sreg1, ins->sreg2); mips_mflo (code, ins->dreg); mips_mfhi (code, mips_at); mips_nop (code); mips_nop (code); mips_sra (code, mips_temp, ins->dreg, 31); patch = (guint32 *)(void *)code; mips_beq (code, mips_temp, mips_at, 0); mips_nop (code); EMIT_SYSTEM_EXCEPTION_NAME("OverflowException"); mips_patch (patch, (guint32)code); break; } case OP_IMUL_OVF_UN: { guint32 *patch; mips_mult (code, ins->sreg1, ins->sreg2); mips_mflo (code, ins->dreg); mips_mfhi (code, mips_at); mips_nop (code); mips_nop (code); patch = (guint32 *)(void *)code; mips_beq (code, mips_at, mips_zero, 0); mips_nop (code); EMIT_SYSTEM_EXCEPTION_NAME("OverflowException"); mips_patch (patch, (guint32)code); break; } case OP_ICONST: mips_load_const (code, ins->dreg, ins->inst_c0); break; #if SIZEOF_REGISTER == 8 case OP_I8CONST: mips_load_const (code, ins->dreg, ins->inst_c0); break; #endif case OP_AOTCONST: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0); mips_load (code, ins->dreg, 0); break; case OP_MIPS_MTC1S: mips_mtc1 (code, ins->dreg, ins->sreg1); break; case OP_MIPS_MTC1S_2: mips_mtc1 (code, ins->dreg, ins->sreg1); mips_mtc1 (code, ins->dreg+1, ins->sreg2); break; case OP_MIPS_MFC1S: mips_mfc1 (code, ins->dreg, ins->sreg1); break; case OP_MIPS_MTC1D: mips_dmtc1 (code, ins->dreg, ins->sreg1); break; case OP_MIPS_MFC1D: #if 0 mips_dmfc1 (code, ins->dreg, ins->sreg1); #else mips_mfc1 (code, ins->dreg, ins->sreg1 + ls_word_idx); mips_mfc1 (code, ins->dreg+1, ins->sreg1 + ms_word_idx); #endif break; case OP_ICONV_TO_I4: case OP_ICONV_TO_U4: case OP_MOVE: if (ins->dreg != ins->sreg1) MIPS_MOVE (code, ins->dreg, ins->sreg1); break; #if SIZEOF_REGISTER == 8 case OP_ZEXT_I4: mips_dsll (code, ins->dreg, ins->sreg1, 32); mips_dsrl (code, ins->dreg, ins->dreg, 32); break; case OP_SEXT_I4: mips_dsll (code, ins->dreg, ins->sreg1, 32); mips_dsra 
(code, ins->dreg, ins->dreg, 32); break; #endif case OP_SETLRET: { int lsreg = mips_v0 + ls_word_idx; int msreg = mips_v0 + ms_word_idx; /* Get sreg1 into lsreg, sreg2 into msreg */ if (ins->sreg1 == msreg) { if (ins->sreg1 != mips_at) MIPS_MOVE (code, mips_at, ins->sreg1); if (ins->sreg2 != msreg) MIPS_MOVE (code, msreg, ins->sreg2); MIPS_MOVE (code, lsreg, mips_at); } else { if (ins->sreg2 != msreg) MIPS_MOVE (code, msreg, ins->sreg2); if (ins->sreg1 != lsreg) MIPS_MOVE (code, lsreg, ins->sreg1); } break; } case OP_FMOVE: if (ins->dreg != ins->sreg1) { mips_fmovd (code, ins->dreg, ins->sreg1); } break; case OP_MOVE_F_TO_I4: mips_cvtsd (code, mips_ftemp, ins->sreg1); mips_mfc1 (code, ins->dreg, mips_ftemp); break; case OP_MOVE_I4_TO_F: mips_mtc1 (code, ins->dreg, ins->sreg1); mips_cvtds (code, ins->dreg, ins->dreg); break; case OP_MIPS_CVTSD: /* Convert from double to float and leave it there */ mips_cvtsd (code, ins->dreg, ins->sreg1); break; case OP_FCONV_TO_R4: #if 0 mips_cvtsd (code, ins->dreg, ins->sreg1); #else /* Just a move, no precision change */ if (ins->dreg != ins->sreg1) { mips_fmovd (code, ins->dreg, ins->sreg1); } #endif break; case OP_CHECK_THIS: /* ensure ins->sreg1 is not NULL */ mips_lw (code, mips_zero, ins->sreg1, 0); break; case OP_ARGLIST: { g_assert (mips_is_imm16 (cfg->sig_cookie)); mips_lw (code, mips_at, cfg->frame_reg, cfg->sig_cookie); mips_sw (code, mips_at, ins->sreg1, 0); break; } case OP_FCALL: case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: case OP_CALL: case OP_FCALL_REG: case OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: case OP_FCALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: call = (MonoCallInst*)ins; switch (ins->opcode) { case OP_FCALL: case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: case OP_CALL: mono_call_add_patch_info (cfg, call, offset); if (ins->flags & MONO_INST_HAS_METHOD) { mips_load (code, mips_t9, call->method); } else { mips_load (code, mips_t9, call->fptr); } mips_jalr (code, mips_t9, mips_ra); mips_nop (code); break; case OP_FCALL_REG: case OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: MIPS_MOVE (code, mips_t9, ins->sreg1); mips_jalr (code, mips_t9, mips_ra); mips_nop (code); break; case OP_FCALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: mips_lw (code, mips_t9, ins->sreg1, ins->inst_offset); mips_jalr (code, mips_t9, mips_ra); mips_nop (code); break; } #if PROMOTE_R4_TO_R8 /* returned an FP R4 (single), promote to R8 (double) in place */ switch (ins->opcode) { case OP_FCALL: case OP_FCALL_REG: case OP_FCALL_MEMBASE: if (call->signature->ret->type == MONO_TYPE_R4) mips_cvtds (code, mips_f0, mips_f0); break; default: break; } #endif break; case OP_LOCALLOC: { int area_offset = cfg->param_area; /* Round up ins->sreg1, mips_at ends up holding size */ mips_addiu (code, mips_at, ins->sreg1, 31); mips_addiu (code, mips_temp, mips_zero, ~31); mips_and (code, mips_at, mips_at, mips_temp); mips_subu (code, mips_sp, mips_sp, mips_at); g_assert (mips_is_imm16 (area_offset)); mips_addiu (code, ins->dreg, mips_sp, area_offset); if (ins->flags & MONO_INST_INIT) { guint32 *buf; buf = (guint32*)(void*)code; mips_beq (code, mips_at, mips_zero, 0); mips_nop (code); mips_move (code, mips_temp, ins->dreg); mips_sb (code, mips_zero, mips_temp, 0); mips_addiu (code, 
mips_at, mips_at, -1); mips_bne (code, mips_at, mips_zero, -3); mips_addiu (code, mips_temp, mips_temp, 1); mips_patch (buf, (guint32)code); } break; } case OP_THROW: { gpointer addr = mono_arch_get_throw_exception(NULL, FALSE); mips_move (code, mips_a0, ins->sreg1); mips_call (code, mips_t9, addr); mips_break (code, 0xfc); break; } case OP_RETHROW: { gpointer addr = mono_arch_get_rethrow_exception(NULL, FALSE); mips_move (code, mips_a0, ins->sreg1); mips_call (code, mips_t9, addr); mips_break (code, 0xfb); break; } case OP_START_HANDLER: { /* * The START_HANDLER instruction marks the beginning of * a handler block. It is called using a call * instruction, so mips_ra contains the return address. * Since the handler executes in the same stack frame * as the method itself, we can't use save/restore to * save the return address. Instead, we save it into * a dedicated variable. */ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); g_assert (spvar->inst_basereg != mips_sp); code = emit_reserve_param_area (cfg, code); if (mips_is_imm16 (spvar->inst_offset)) { mips_sw (code, mips_ra, spvar->inst_basereg, spvar->inst_offset); } else { mips_load_const (code, mips_at, spvar->inst_offset); mips_addu (code, mips_at, mips_at, spvar->inst_basereg); mips_sw (code, mips_ra, mips_at, 0); } break; } case OP_ENDFILTER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); g_assert (spvar->inst_basereg != mips_sp); code = emit_unreserve_param_area (cfg, code); if (ins->sreg1 != mips_v0) MIPS_MOVE (code, mips_v0, ins->sreg1); if (mips_is_imm16 (spvar->inst_offset)) { mips_lw (code, mips_ra, spvar->inst_basereg, spvar->inst_offset); } else { mips_load_const (code, mips_at, spvar->inst_offset); mips_addu (code, mips_at, mips_at, spvar->inst_basereg); mips_lw (code, mips_ra, mips_at, 0); } mips_jr (code, mips_ra); mips_nop (code); break; } case OP_ENDFINALLY: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); g_assert (spvar->inst_basereg != mips_sp); code = emit_unreserve_param_area (cfg, code); mips_lw (code, mips_t9, spvar->inst_basereg, spvar->inst_offset); mips_jalr (code, mips_t9, mips_ra); mips_nop (code); break; } case OP_CALL_HANDLER: mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb); mips_lui (code, mips_t9, mips_zero, 0); mips_addiu (code, mips_t9, mips_t9, 0); mips_jalr (code, mips_t9, mips_ra); mips_nop (code); /*FIXME should it be before the NOP or not? 
Does MIPS has a delay slot like sparc?*/ for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev) mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb); break; case OP_LABEL: ins->inst_c0 = code - cfg->native_code; break; case OP_BR: mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb); if (cfg->arch.long_branch) { mips_lui (code, mips_at, mips_zero, 0); mips_addiu (code, mips_at, mips_at, 0); mips_jr (code, mips_at); mips_nop (code); } else { mips_beq (code, mips_zero, mips_zero, 0); mips_nop (code); } break; case OP_BR_REG: mips_jr (code, ins->sreg1); mips_nop (code); break; case OP_SWITCH: { int i; max_len += 4 * GPOINTER_TO_INT (ins->klass); code = realloc_code (cfg, max_len); g_assert (ins->sreg1 != -1); mips_sll (code, mips_at, ins->sreg1, 2); if (1 || !(cfg->flags & MONO_CFG_HAS_CALLS)) MIPS_MOVE (code, mips_t8, mips_ra); mips_bgezal (code, mips_zero, 1); /* bal */ mips_nop (code); mips_addu (code, mips_t9, mips_ra, mips_at); /* Table is 16 or 20 bytes from target of bal above */ if (1 || !(cfg->flags & MONO_CFG_HAS_CALLS)) { MIPS_MOVE (code, mips_ra, mips_t8); mips_lw (code, mips_t9, mips_t9, 20); } else mips_lw (code, mips_t9, mips_t9, 16); mips_jalr (code, mips_t9, mips_t8); mips_nop (code); for (i = 0; i < GPOINTER_TO_INT (ins->klass); ++i) mips_emit32 (code, 0xfefefefe); break; } case OP_CEQ: case OP_ICEQ: mips_addiu (code, ins->dreg, mips_zero, 1); mips_beq (code, mips_at, mips_zero, 2); mips_nop (code); MIPS_MOVE (code, ins->dreg, mips_zero); break; case OP_CLT: case OP_CLT_UN: case OP_ICLT: case OP_ICLT_UN: mips_addiu (code, ins->dreg, mips_zero, 1); mips_bltz (code, mips_at, 2); mips_nop (code); MIPS_MOVE (code, ins->dreg, mips_zero); break; case OP_CGT: case OP_CGT_UN: case OP_ICGT: case OP_ICGT_UN: mips_addiu (code, ins->dreg, mips_zero, 1); mips_bgtz (code, mips_at, 2); mips_nop (code); MIPS_MOVE (code, ins->dreg, mips_zero); break; case OP_MIPS_COND_EXC_EQ: case OP_MIPS_COND_EXC_GE: case OP_MIPS_COND_EXC_GT: case OP_MIPS_COND_EXC_LE: case OP_MIPS_COND_EXC_LT: case OP_MIPS_COND_EXC_NE_UN: case OP_MIPS_COND_EXC_GE_UN: case OP_MIPS_COND_EXC_GT_UN: case OP_MIPS_COND_EXC_LE_UN: case OP_MIPS_COND_EXC_LT_UN: case OP_MIPS_COND_EXC_OV: case OP_MIPS_COND_EXC_NO: case OP_MIPS_COND_EXC_C: case OP_MIPS_COND_EXC_NC: case OP_MIPS_COND_EXC_IEQ: case OP_MIPS_COND_EXC_IGE: case OP_MIPS_COND_EXC_IGT: case OP_MIPS_COND_EXC_ILE: case OP_MIPS_COND_EXC_ILT: case OP_MIPS_COND_EXC_INE_UN: case OP_MIPS_COND_EXC_IGE_UN: case OP_MIPS_COND_EXC_IGT_UN: case OP_MIPS_COND_EXC_ILE_UN: case OP_MIPS_COND_EXC_ILT_UN: case OP_MIPS_COND_EXC_IOV: case OP_MIPS_COND_EXC_INO: case OP_MIPS_COND_EXC_IC: case OP_MIPS_COND_EXC_INC: { guint32 *skip; guint32 *throw; /* If the condition is true, raise the exception */ /* need to reverse test to skip around exception raising */ /* For the moment, branch around a branch to avoid reversing the tests. 
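			   The emitted shape is roughly:

			       b<cond>  ..., raise      # taken when the exception must be raised
			       nop
			       b        done            # otherwise skip the raise sequence
			       nop
			   raise:
			       <load exception token; call throw_corlib_exception>
			   done:

			   Both branch targets start out as 0 and are fixed up with
			   mips_patch () once the raise sequence has been emitted.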
*/ /* Remember, an unpatched branch to 0 branches to the delay slot */ switch (ins->opcode) { case OP_MIPS_COND_EXC_EQ: throw = (guint32 *)(void *)code; mips_beq (code, ins->sreg1, ins->sreg2, 0); mips_nop (code); break; case OP_MIPS_COND_EXC_NE_UN: throw = (guint32 *)(void *)code; mips_bne (code, ins->sreg1, ins->sreg2, 0); mips_nop (code); break; case OP_MIPS_COND_EXC_LE_UN: mips_sltu (code, mips_at, ins->sreg2, ins->sreg1); throw = (guint32 *)(void *)code; mips_beq (code, mips_at, mips_zero, 0); mips_nop (code); break; case OP_MIPS_COND_EXC_GT: mips_slt (code, mips_at, ins->sreg2, ins->sreg1); throw = (guint32 *)(void *)code; mips_bne (code, mips_at, mips_zero, 0); mips_nop (code); break; case OP_MIPS_COND_EXC_GT_UN: mips_sltu (code, mips_at, ins->sreg2, ins->sreg1); throw = (guint32 *)(void *)code; mips_bne (code, mips_at, mips_zero, 0); mips_nop (code); break; case OP_MIPS_COND_EXC_LT: mips_slt (code, mips_at, ins->sreg1, ins->sreg2); throw = (guint32 *)(void *)code; mips_bne (code, mips_at, mips_zero, 0); mips_nop (code); break; case OP_MIPS_COND_EXC_LT_UN: mips_sltu (code, mips_at, ins->sreg1, ins->sreg2); throw = (guint32 *)(void *)code; mips_bne (code, mips_at, mips_zero, 0); mips_nop (code); break; default: /* Not yet implemented */ g_warning ("NYI conditional exception %s\n", mono_inst_name (ins->opcode)); g_assert_not_reached (); } skip = (guint32 *)(void *)code; mips_beq (code, mips_zero, mips_zero, 0); mips_nop (code); mips_patch (throw, (guint32)code); code = mips_emit_exc_by_name (code, ins->inst_p1); mips_patch (skip, (guint32)code); cfg->bb_exit->max_offset += 24; break; } case OP_MIPS_BEQ: case OP_MIPS_BNE: case OP_MIPS_BGEZ: case OP_MIPS_BGTZ: case OP_MIPS_BLEZ: case OP_MIPS_BLTZ: code = mips_emit_cond_branch (cfg, code, ins->opcode, ins); break; /* floating point opcodes */ case OP_R8CONST: #if 0 if (((guint32)ins->inst_p0) & (1 << 15)) mips_lui (code, mips_at, mips_zero, (((guint32)ins->inst_p0)>>16)+1); else mips_lui (code, mips_at, mips_zero, (((guint32)ins->inst_p0)>>16)); mips_ldc1 (code, ins->dreg, mips_at, ((guint32)ins->inst_p0) & 0xffff); #else mips_load_const (code, mips_at, ins->inst_p0); mips_lwc1 (code, ins->dreg, mips_at, ls_word_offset); mips_lwc1 (code, ins->dreg+1, mips_at, ms_word_offset); #endif break; case OP_R4CONST: if (((guint32)ins->inst_p0) & (1 << 15)) mips_lui (code, mips_at, mips_zero, (((guint32)ins->inst_p0)>>16)+1); else mips_lui (code, mips_at, mips_zero, (((guint32)ins->inst_p0)>>16)); mips_lwc1 (code, ins->dreg, mips_at, ((guint32)ins->inst_p0) & 0xffff); #if PROMOTE_R4_TO_R8 mips_cvtds (code, ins->dreg, ins->dreg); #endif break; case OP_STORER8_MEMBASE_REG: if (mips_is_imm16 (ins->inst_offset)) { #if _MIPS_SIM == _ABIO32 mips_swc1 (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset + ls_word_offset); mips_swc1 (code, ins->sreg1+1, ins->inst_destbasereg, ins->inst_offset + ms_word_offset); #elif _MIPS_SIM == _ABIN32 mips_sdc1 (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); #endif } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_destbasereg); mips_swc1 (code, ins->sreg1, mips_at, ls_word_offset); mips_swc1 (code, ins->sreg1+1, mips_at, ms_word_offset); } break; case OP_LOADR8_MEMBASE: if (mips_is_imm16 (ins->inst_offset)) { #if _MIPS_SIM == _ABIO32 mips_lwc1 (code, ins->dreg, ins->inst_basereg, ins->inst_offset + ls_word_offset); mips_lwc1 (code, ins->dreg+1, ins->inst_basereg, ins->inst_offset + ms_word_offset); #elif _MIPS_SIM == _ABIN32 mips_ldc1 (code, 
ins->dreg, ins->inst_basereg, ins->inst_offset); #endif } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_basereg); mips_lwc1 (code, ins->dreg, mips_at, ls_word_offset); mips_lwc1 (code, ins->dreg+1, mips_at, ms_word_offset); } break; case OP_STORER4_MEMBASE_REG: g_assert (mips_is_imm16 (ins->inst_offset)); #if PROMOTE_R4_TO_R8 /* Need to convert ins->sreg1 to single-precision first */ mips_cvtsd (code, mips_ftemp, ins->sreg1); mips_swc1 (code, mips_ftemp, ins->inst_destbasereg, ins->inst_offset); #else mips_swc1 (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); #endif break; case OP_MIPS_LWC1: g_assert (mips_is_imm16 (ins->inst_offset)); mips_lwc1 (code, ins->dreg, ins->inst_basereg, ins->inst_offset); break; case OP_LOADR4_MEMBASE: g_assert (mips_is_imm16 (ins->inst_offset)); mips_lwc1 (code, ins->dreg, ins->inst_basereg, ins->inst_offset); #if PROMOTE_R4_TO_R8 /* Convert to double precision in place */ mips_cvtds (code, ins->dreg, ins->dreg); #endif break; case OP_LOADR4_MEMINDEX: mips_addu (code, mips_at, ins->inst_basereg, ins->sreg2); mips_lwc1 (code, ins->dreg, mips_at, 0); break; case OP_LOADR8_MEMINDEX: mips_addu (code, mips_at, ins->inst_basereg, ins->sreg2); #if _MIPS_SIM == _ABIO32 mips_lwc1 (code, ins->dreg, mips_at, ls_word_offset); mips_lwc1 (code, ins->dreg+1, mips_at, ms_word_offset); #elif _MIPS_SIM == _ABIN32 mips_ldc1 (code, ins->dreg, mips_at, 0); #endif break; case OP_STORER4_MEMINDEX: mips_addu (code, mips_at, ins->inst_destbasereg, ins->sreg2); #if PROMOTE_R4_TO_R8 /* Need to convert ins->sreg1 to single-precision first */ mips_cvtsd (code, mips_ftemp, ins->sreg1); mips_swc1 (code, mips_ftemp, mips_at, 0); #else mips_swc1 (code, ins->sreg1, mips_at, 0); #endif break; case OP_STORER8_MEMINDEX: mips_addu (code, mips_at, ins->inst_destbasereg, ins->sreg2); #if _MIPS_SIM == _ABIO32 mips_swc1 (code, ins->sreg1, mips_at, ls_word_offset); mips_swc1 (code, ins->sreg1+1, mips_at, ms_word_offset); #elif _MIPS_SIM == _ABIN32 mips_sdc1 (code, ins->sreg1, mips_at, 0); #endif break; case OP_ICONV_TO_R_UN: { static const guint64 adjust_val = 0x41F0000000000000ULL; /* convert unsigned int to double */ mips_mtc1 (code, mips_ftemp, ins->sreg1); mips_bgez (code, ins->sreg1, 5); mips_cvtdw (code, ins->dreg, mips_ftemp); mips_load (code, mips_at, (guint32) &adjust_val); mips_ldc1 (code, mips_ftemp, mips_at, 0); mips_faddd (code, ins->dreg, ins->dreg, mips_ftemp); /* target is here */ break; } case OP_ICONV_TO_R4: mips_mtc1 (code, mips_ftemp, ins->sreg1); mips_cvtsw (code, ins->dreg, mips_ftemp); mips_cvtds (code, ins->dreg, ins->dreg); break; case OP_ICONV_TO_R8: mips_mtc1 (code, mips_ftemp, ins->sreg1); mips_cvtdw (code, ins->dreg, mips_ftemp); break; case OP_FCONV_TO_I1: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE); break; case OP_FCONV_TO_U1: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE); break; case OP_FCONV_TO_I2: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE); break; case OP_FCONV_TO_U2: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE); break; case OP_FCONV_TO_I4: case OP_FCONV_TO_I: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE); break; case OP_FCONV_TO_U4: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE); break; case OP_SQRT: mips_fsqrtd (code, ins->dreg, ins->sreg1); break; case OP_FADD: mips_faddd (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FSUB: mips_fsubd (code, 
ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FMUL: mips_fmuld (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FDIV: mips_fdivd (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FNEG: mips_fnegd (code, ins->dreg, ins->sreg1); break; case OP_FCEQ: mips_fcmpd (code, MIPS_FPU_EQ, ins->sreg1, ins->sreg2); mips_addiu (code, ins->dreg, mips_zero, 1); mips_fbtrue (code, 2); mips_nop (code); MIPS_MOVE (code, ins->dreg, mips_zero); break; case OP_FCLT: mips_fcmpd (code, MIPS_FPU_LT, ins->sreg1, ins->sreg2); mips_addiu (code, ins->dreg, mips_zero, 1); mips_fbtrue (code, 2); mips_nop (code); MIPS_MOVE (code, ins->dreg, mips_zero); break; case OP_FCLT_UN: /* Less than, or Unordered */ mips_fcmpd (code, MIPS_FPU_ULT, ins->sreg1, ins->sreg2); mips_addiu (code, ins->dreg, mips_zero, 1); mips_fbtrue (code, 2); mips_nop (code); MIPS_MOVE (code, ins->dreg, mips_zero); break; case OP_FCGT: mips_fcmpd (code, MIPS_FPU_ULE, ins->sreg1, ins->sreg2); MIPS_MOVE (code, ins->dreg, mips_zero); mips_fbtrue (code, 2); mips_nop (code); mips_addiu (code, ins->dreg, mips_zero, 1); break; case OP_FCGT_UN: /* Greater than, or Unordered */ mips_fcmpd (code, MIPS_FPU_OLE, ins->sreg1, ins->sreg2); MIPS_MOVE (code, ins->dreg, mips_zero); mips_fbtrue (code, 2); mips_nop (code); mips_addiu (code, ins->dreg, mips_zero, 1); break; case OP_MIPS_FBEQ: case OP_MIPS_FBNE: case OP_MIPS_FBLT: case OP_MIPS_FBLT_UN: case OP_MIPS_FBGT: case OP_MIPS_FBGT_UN: case OP_MIPS_FBGE: case OP_MIPS_FBGE_UN: case OP_MIPS_FBLE: case OP_MIPS_FBLE_UN: { int cond = 0; gboolean is_true = TRUE, is_ordered = FALSE; guint32 *buf = NULL; switch (ins->opcode) { case OP_MIPS_FBEQ: cond = MIPS_FPU_EQ; is_true = TRUE; break; case OP_MIPS_FBNE: cond = MIPS_FPU_EQ; is_true = FALSE; break; case OP_MIPS_FBLT: cond = MIPS_FPU_LT; is_true = TRUE; is_ordered = TRUE; break; case OP_MIPS_FBLT_UN: cond = MIPS_FPU_ULT; is_true = TRUE; break; case OP_MIPS_FBGT: cond = MIPS_FPU_LE; is_true = FALSE; is_ordered = TRUE; break; case OP_MIPS_FBGT_UN: cond = MIPS_FPU_OLE; is_true = FALSE; break; case OP_MIPS_FBGE: cond = MIPS_FPU_LT; is_true = FALSE; is_ordered = TRUE; break; case OP_MIPS_FBGE_UN: cond = MIPS_FPU_OLT; is_true = FALSE; break; case OP_MIPS_FBLE: cond = MIPS_FPU_OLE; is_true = TRUE; is_ordered = TRUE; break; case OP_MIPS_FBLE_UN: cond = MIPS_FPU_ULE; is_true = TRUE; break; default: g_assert_not_reached (); } if (is_ordered) { /* Skip the check if unordered */ mips_fcmpd (code, MIPS_FPU_UN, ins->sreg1, ins->sreg2); mips_nop (code); buf = (guint32*)code; mips_fbtrue (code, 0); mips_nop (code); } mips_fcmpd (code, cond, ins->sreg1, ins->sreg2); mips_nop (code); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); if (is_true) mips_fbtrue (code, 0); else mips_fbfalse (code, 0); mips_nop (code); if (is_ordered) mips_patch (buf, (guint32)code); break; } case OP_CKFINITE: { guint32 *branch_patch; mips_mfc1 (code, mips_at, ins->sreg1+1); mips_srl (code, mips_at, mips_at, 16+4); mips_andi (code, mips_at, mips_at, 2047); mips_addiu (code, mips_at, mips_at, -2047); branch_patch = (guint32 *)(void *)code; mips_bne (code, mips_at, mips_zero, 0); mips_nop (code); EMIT_SYSTEM_EXCEPTION_NAME("OverflowException"); mips_patch (branch_patch, (guint32)code); mips_fmovd (code, ins->dreg, ins->sreg1); break; } case OP_JUMP_TABLE: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_c1, ins->inst_p0); mips_load (code, ins->dreg, 0x0f0f0f0f); break; case OP_LIVERANGE_START: { if (cfg->verbose_level > 1) printf ("R%d 
START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
			MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
			break;
		}
		case OP_LIVERANGE_END: {
			if (cfg->verbose_level > 1)
				printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
			MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
			break;
		}
		case OP_GC_SAFE_POINT:
			break;
		default:
			g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
			g_assert_not_reached ();
		}

		if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
			g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
				   mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
			g_assert_not_reached ();
		}

		cpos += max_len;

		last_ins = ins;
	}

	set_code_cursor (cfg, code);
}

void
mono_arch_register_lowlevel_calls (void)
{
}

void
mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
	unsigned char *ip = ji->ip.i + code;

	switch (ji->type) {
	case MONO_PATCH_INFO_IP:
		patch_lui_addiu ((guint32 *)(void *)ip, (guint32)ip);
		break;
	case MONO_PATCH_INFO_SWITCH: {
		gpointer *table = (gpointer *)ji->data.table->table;
		int i;

		patch_lui_addiu ((guint32 *)(void *)ip, (guint32)table);

		for (i = 0; i < ji->data.table->table_size; i++) {
			table [i] = (int)ji->data.table->table [i] + code;
		}
		break;
	}
	case MONO_PATCH_INFO_METHODCONST:
	case MONO_PATCH_INFO_CLASS:
	case MONO_PATCH_INFO_IMAGE:
	case MONO_PATCH_INFO_FIELD:
	case MONO_PATCH_INFO_VTABLE:
	case MONO_PATCH_INFO_IID:
	case MONO_PATCH_INFO_SFLDA:
	case MONO_PATCH_INFO_LDSTR:
	case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
	case MONO_PATCH_INFO_LDTOKEN:
	case MONO_PATCH_INFO_R4:
	case MONO_PATCH_INFO_R8:
		/* from OP_AOTCONST : lui + addiu */
		patch_lui_addiu ((guint32 *)(void *)ip, (guint32)target);
		break;
#if 0
	case MONO_PATCH_INFO_EXC_NAME:
		g_assert_not_reached ();
		*((gconstpointer *)(void *)(ip + 1)) = target;
		break;
#endif
	case MONO_PATCH_INFO_NONE:
		/* everything is dealt with at epilog output time */
		break;
	default:
		mips_patch ((guint32 *)(void *)ip, (guint32)target);
		break;
	}
}

void
mips_adjust_stackframe(MonoCompile *cfg)
{
	MonoBasicBlock *bb;
	int delta, threshold, i;
	MonoMethodSignature *sig;
	int ra_offset;

	if (cfg->stack_offset == cfg->arch.local_alloc_offset)
		return;

	/* adjust cfg->stack_offset to account for down-spilling */
	cfg->stack_offset += SIZEOF_REGISTER;

	/* re-align cfg->stack_offset if needed (due to var spilling) */
	cfg->stack_offset = (cfg->stack_offset + MIPS_STACK_ALIGNMENT - 1) & ~(MIPS_STACK_ALIGNMENT - 1);
	delta = cfg->stack_offset - cfg->arch.local_alloc_offset;
	if (cfg->verbose_level > 2) {
		g_print ("mips_adjust_stackframe:\n");
		g_print ("\tspillvars allocated 0x%x -> 0x%x\n", cfg->arch.local_alloc_offset, cfg->stack_offset);
	}
	threshold = cfg->arch.local_alloc_offset;
	ra_offset = cfg->stack_offset - sizeof(gpointer);
	if (cfg->verbose_level > 2) {
		g_print ("\tra_offset %d/0x%x delta %d/0x%x\n", ra_offset, ra_offset, delta, delta);
	}
	sig = mono_method_signature_internal (cfg->method);
	if (sig && sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
		cfg->vret_addr->inst_offset += delta;
	}
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		MonoInst *inst = cfg->args [i];

		inst->inst_offset += delta;
	}

	/*
	 * loads and stores based off the frame reg that (used to) lie
	 * above the spill var area need to be increased by 'delta'
	 * to make room for the spill vars.
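	 * For example, with delta == 8 a load that used to be at
	 * fp + threshold + 4 is rewritten to fp + threshold + 12, while spill
	 * references (tagged below with negative offsets) are left where the
	 * spill allocator placed them.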
*/ /* Need to find loads and stores to adjust that * are above where the spillvars were inserted, but * which are not the spillvar references themselves. * * Idea - since all offsets from fp are positive, make * spillvar offsets negative to begin with so we can spot * them here. */ #if 1 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { int ins_cnt = 0; MonoInst *ins; if (cfg->verbose_level > 2) { g_print ("BASIC BLOCK %d:\n", bb->block_num); } MONO_BB_FOR_EACH_INS (bb, ins) { int adj_c0 = 0; int adj_imm = 0; if (cfg->verbose_level > 2) { mono_print_ins_index (ins_cnt, ins); } /* The == mips_sp tests catch FP spills */ if (MONO_IS_LOAD_MEMBASE(ins) && ((ins->inst_basereg == mips_fp) || (ins->inst_basereg == mips_sp))) { switch (ins->opcode) { case OP_LOADI8_MEMBASE: case OP_LOADR8_MEMBASE: adj_c0 = 8; break; default: adj_c0 = 4; break; } } else if (MONO_IS_STORE_MEMBASE(ins) && ((ins->dreg == mips_fp) || (ins->dreg == mips_sp))) { switch (ins->opcode) { case OP_STOREI8_MEMBASE_REG: case OP_STORER8_MEMBASE_REG: case OP_STOREI8_MEMBASE_IMM: adj_c0 = 8; break; default: adj_c0 = 4; break; } } if (((ins->opcode == OP_ADD_IMM) || (ins->opcode == OP_IADD_IMM)) && (ins->sreg1 == cfg->frame_reg)) adj_imm = 1; if (adj_c0) { if (ins->inst_c0 >= threshold) { ins->inst_c0 += delta; if (cfg->verbose_level > 2) { g_print ("adj"); mono_print_ins_index (ins_cnt, ins); } } else if (ins->inst_c0 < 0) { /* Adj_c0 holds the size of the datatype. */ ins->inst_c0 = - ins->inst_c0 - adj_c0; if (cfg->verbose_level > 2) { g_print ("spill"); mono_print_ins_index (ins_cnt, ins); } } g_assert (ins->inst_c0 != ra_offset); } if (adj_imm) { if (ins->inst_imm >= threshold) { ins->inst_imm += delta; if (cfg->verbose_level > 2) { g_print ("adj"); mono_print_ins_index (ins_cnt, ins); } } g_assert (ins->inst_c0 != ra_offset); } ++ins_cnt; } } #endif } /* * Stack frame layout: * * ------------------- sp + cfg->stack_usage + cfg->param_area * param area incoming * ------------------- sp + cfg->stack_usage + MIPS_STACK_PARAM_OFFSET * a0-a3 incoming * ------------------- sp + cfg->stack_usage * ra * ------------------- sp + cfg->stack_usage-4 * spilled regs * ------------------- sp + * MonoLMF structure optional * ------------------- sp + cfg->arch.lmf_offset * saved registers s0-s8 * ------------------- sp + cfg->arch.iregs_offset * locals * ------------------- sp + cfg->param_area * param area outgoing * ------------------- sp + MIPS_STACK_PARAM_OFFSET * a0-a3 outgoing * ------------------- sp * red zone */ guint8 * mono_arch_emit_prolog (MonoCompile *cfg) { MonoMethod *method = cfg->method; MonoMethodSignature *sig; MonoInst *inst; int alloc_size, pos, i, max_offset; int alloc2_size = 0; guint8 *code; CallInfo *cinfo; guint32 iregs_to_save = 0; #if SAVE_FP_REGS guint32 fregs_to_save = 0; #endif /* lmf_offset is the offset of the LMF from our stack pointer. */ guint32 lmf_offset = cfg->arch.lmf_offset; int cfa_offset = 0; MonoBasicBlock *bb; sig = mono_method_signature_internal (method); cfg->code_size = 768 + sig->param_count * 20; code = cfg->native_code = g_malloc (cfg->code_size); /* * compute max_offset in order to use short forward jumps. */ max_offset = 0; for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins = bb->code; bb->max_offset = max_offset; MONO_BB_FOR_EACH_INS (bb, ins) max_offset += ins_get_size (ins->opcode); } if (max_offset > 0xffff) cfg->arch.long_branch = TRUE; /* * Currently, fp points to the bottom of the frame on MIPS, unlike other platforms. 
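	 * (Positive offsets from the frame reg therefore walk upward through
	 * the locals toward the incoming arguments at the top of the frame.)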
* This means that we have to adjust the offsets inside instructions which reference * arguments received on the stack, since the initial offset doesn't take into * account spill slots. */ mips_adjust_stackframe (cfg); /* Offset between current sp and the CFA */ cfa_offset = 0; mono_emit_unwind_op_def_cfa (cfg, code, mips_sp, cfa_offset); /* stack_offset should not be changed here. */ alloc_size = cfg->stack_offset; cfg->stack_usage = alloc_size; iregs_to_save = (cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS); #if SAVE_FP_REGS #if 0 fregs_to_save = (cfg->used_float_regs & MONO_ARCH_CALLEE_SAVED_FREGS); #else fregs_to_save = MONO_ARCH_CALLEE_SAVED_FREGS; fregs_to_save |= (fregs_to_save << 1); #endif #endif /* If the stack size is too big, save 1024 bytes to start with * so the prologue can use imm16(reg) addressing, then allocate * the rest of the frame. */ if (alloc_size > ((1 << 15) - 1024)) { alloc2_size = alloc_size - 1024; alloc_size = 1024; } if (alloc_size) { g_assert (mips_is_imm16 (-alloc_size)); mips_addiu (code, mips_sp, mips_sp, -alloc_size); cfa_offset = alloc_size; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); } if ((cfg->flags & MONO_CFG_HAS_CALLS) || ALWAYS_SAVE_RA) { int offset = alloc_size + MIPS_RET_ADDR_OFFSET; if (mips_is_imm16(offset)) mips_sw (code, mips_ra, mips_sp, offset); else { g_assert_not_reached (); } /* sp = cfa - cfa_offset, so sp + offset = cfa - cfa_offset + offset */ mono_emit_unwind_op_offset (cfg, code, mips_ra, offset - cfa_offset); } /* XXX - optimize this later to not save all regs if LMF constructed */ pos = cfg->arch.iregs_offset - alloc2_size; if (iregs_to_save) { /* save used registers in own stack frame (at pos) */ for (i = MONO_MAX_IREGS-1; i >= 0; --i) { if (iregs_to_save & (1 << i)) { g_assert (pos < (int)(cfg->stack_usage - sizeof(gpointer))); g_assert (mips_is_imm16(pos)); MIPS_SW (code, i, mips_sp, pos); mono_emit_unwind_op_offset (cfg, code, i, pos - cfa_offset); pos += SIZEOF_REGISTER; } } } // FIXME: Don't save registers twice if there is an LMF // s8 has to be special cased since it is overwritten with the updated value // below if (method->save_lmf) { for (i = MONO_MAX_IREGS-1; i >= 0; --i) { int offset = lmf_offset + G_STRUCT_OFFSET(MonoLMF, iregs[i]); g_assert (mips_is_imm16(offset)); if (MIPS_LMF_IREGMASK & (1 << i)) MIPS_SW (code, i, mips_sp, offset); } } #if SAVE_FP_REGS /* Save float registers */ if (fregs_to_save) { for (i = MONO_MAX_FREGS-1; i >= 0; --i) { if (fregs_to_save & (1 << i)) { g_assert (pos < cfg->stack_usage - MIPS_STACK_ALIGNMENT); g_assert (mips_is_imm16(pos)); mips_swc1 (code, i, mips_sp, pos); pos += sizeof (gulong); } } } if (method->save_lmf) { for (i = MONO_MAX_FREGS-1; i >= 0; --i) { int offset = lmf_offset + G_STRUCT_OFFSET(MonoLMF, fregs[i]); g_assert (mips_is_imm16(offset)); mips_swc1 (code, i, mips_sp, offset); } } #endif if (cfg->frame_reg != mips_sp) { MIPS_MOVE (code, cfg->frame_reg, mips_sp); mono_emit_unwind_op_def_cfa (cfg, code, cfg->frame_reg, cfa_offset); if (method->save_lmf) { int offset = lmf_offset + G_STRUCT_OFFSET(MonoLMF, iregs[cfg->frame_reg]); g_assert (mips_is_imm16(offset)); MIPS_SW (code, cfg->frame_reg, mips_sp, offset); } } /* store runtime generic context */ if (cfg->rgctx_var) { MonoInst *ins = cfg->rgctx_var; g_assert (ins->opcode == OP_REGOFFSET); g_assert (mips_is_imm16 (ins->inst_offset)); mips_sw (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset); } /* load arguments allocated to register from the stack */ pos = 0; if (!cfg->arch.cinfo) 
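		/* the incoming-argument layout is computed once and cached */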
cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; if (MONO_TYPE_ISSTRUCT (sig->ret)) { ArgInfo *ainfo = &cinfo->ret; inst = cfg->vret_addr; if (inst->opcode == OP_REGVAR) MIPS_MOVE (code, inst->dreg, ainfo->reg); else if (mips_is_imm16 (inst->inst_offset)) { mips_sw (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); } else { mips_load_const (code, mips_at, inst->inst_offset); mips_addu (code, mips_at, mips_at, inst->inst_basereg); mips_sw (code, ainfo->reg, mips_at, 0); } } if (sig->call_convention == MONO_CALL_VARARG) { ArgInfo *cookie = &cinfo->sig_cookie; int offset = alloc_size + cookie->offset; /* Save the sig cookie address */ g_assert (cookie->storage == ArgOnStack); g_assert (mips_is_imm16(offset)); mips_addi (code, mips_at, cfg->frame_reg, offset); mips_sw (code, mips_at, cfg->frame_reg, cfg->sig_cookie - alloc2_size); } /* Keep this in sync with emit_load_volatile_arguments */ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; inst = cfg->args [pos]; if (cfg->verbose_level > 2) g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage); if (inst->opcode == OP_REGVAR) { /* Argument ends up in a register */ if (ainfo->storage == ArgInIReg) MIPS_MOVE (code, inst->dreg, ainfo->reg); else if (ainfo->storage == ArgInFReg) { g_assert_not_reached(); #if 0 ppc_fmr (code, inst->dreg, ainfo->reg); #endif } else if (ainfo->storage == ArgOnStack) { int offset = cfg->stack_usage + ainfo->offset; g_assert (mips_is_imm16(offset)); mips_lw (code, inst->dreg, mips_sp, offset); } else g_assert_not_reached (); if (cfg->verbose_level > 2) g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg)); } else { /* Argument ends up on the stack */ if (ainfo->storage == ArgInIReg) { int basereg_offset; /* Incoming parameters should be above this frame */ if (cfg->verbose_level > 2) g_print ("stack slot at %d of %d+%d\n", inst->inst_offset, alloc_size, alloc2_size); /* g_assert (inst->inst_offset >= alloc_size); */ g_assert (inst->inst_basereg == cfg->frame_reg); basereg_offset = inst->inst_offset - alloc2_size; g_assert (mips_is_imm16 (basereg_offset)); switch (ainfo->size) { case 1: mips_sb (code, ainfo->reg, inst->inst_basereg, basereg_offset); break; case 2: mips_sh (code, ainfo->reg, inst->inst_basereg, basereg_offset); break; case 0: /* XXX */ case 4: mips_sw (code, ainfo->reg, inst->inst_basereg, basereg_offset); break; case 8: #if (SIZEOF_REGISTER == 4) mips_sw (code, ainfo->reg, inst->inst_basereg, basereg_offset + ls_word_offset); mips_sw (code, ainfo->reg + 1, inst->inst_basereg, basereg_offset + ms_word_offset); #elif (SIZEOF_REGISTER == 8) mips_sd (code, ainfo->reg, inst->inst_basereg, basereg_offset); #endif break; default: g_assert_not_reached (); break; } } else if (ainfo->storage == ArgOnStack) { /* * Argument comes in on the stack, and ends up on the stack * 1 and 2 byte args are passed as 32-bit quantities, but used as * 8 and 16 bit quantities. Shorten them in place. 
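				 * (The 32-bit slot is re-read below and only its low byte or
				 * halfword is stored back at the same offset.)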
*/ g_assert (mips_is_imm16 (inst->inst_offset)); switch (ainfo->size) { case 1: mips_lw (code, mips_at, inst->inst_basereg, inst->inst_offset); mips_sb (code, mips_at, inst->inst_basereg, inst->inst_offset); break; case 2: mips_lw (code, mips_at, inst->inst_basereg, inst->inst_offset); mips_sh (code, mips_at, inst->inst_basereg, inst->inst_offset); break; case 0: /* XXX */ case 4: case 8: break; default: g_assert_not_reached (); } } else if (ainfo->storage == ArgInFReg) { g_assert (mips_is_imm16 (inst->inst_offset)); g_assert (mips_is_imm16 (inst->inst_offset+4)); if (ainfo->size == 8) { #if _MIPS_SIM == _ABIO32 mips_swc1 (code, ainfo->reg, inst->inst_basereg, inst->inst_offset + ls_word_offset); mips_swc1 (code, ainfo->reg+1, inst->inst_basereg, inst->inst_offset + ms_word_offset); #elif _MIPS_SIM == _ABIN32 mips_sdc1 (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); #endif } else if (ainfo->size == 4) mips_swc1 (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); else g_assert_not_reached (); } else if (ainfo->storage == ArgStructByVal) { int i; int doffset = inst->inst_offset; g_assert (mips_is_imm16 (inst->inst_offset)); g_assert (mips_is_imm16 (inst->inst_offset + ainfo->size * sizeof (target_mgreg_t))); /* Push the argument registers into their stack slots */ for (i = 0; i < ainfo->size; ++i) { g_assert (mips_is_imm16(doffset)); MIPS_SW (code, ainfo->reg + i, inst->inst_basereg, doffset); doffset += SIZEOF_REGISTER; } } else if (ainfo->storage == ArgStructByAddr) { g_assert (mips_is_imm16 (inst->inst_offset)); /* FIXME: handle overrun! with struct sizes not multiple of 4 */ code = emit_memcpy (code, ainfo->vtsize * sizeof (target_mgreg_t), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0); } else g_assert_not_reached (); } pos++; } if (method->save_lmf) { mips_load_const (code, mips_at, MIPS_LMF_MAGIC1); mips_sw (code, mips_at, mips_sp, lmf_offset + G_STRUCT_OFFSET(MonoLMF, magic)); /* This can/will clobber the a0-a3 registers */ mips_call (code, mips_t9, (gpointer)mono_get_lmf_addr); /* mips_v0 is the result from mono_get_lmf_addr () (MonoLMF **) */ g_assert (mips_is_imm16(lmf_offset + G_STRUCT_OFFSET(MonoLMF, lmf_addr))); mips_sw (code, mips_v0, mips_sp, lmf_offset + G_STRUCT_OFFSET(MonoLMF, lmf_addr)); /* new_lmf->previous_lmf = *lmf_addr */ mips_lw (code, mips_at, mips_v0, 0); g_assert (mips_is_imm16(lmf_offset + G_STRUCT_OFFSET(MonoLMF, previous_lmf))); mips_sw (code, mips_at, mips_sp, lmf_offset + G_STRUCT_OFFSET(MonoLMF, previous_lmf)); /* *(lmf_addr) = sp + lmf_offset */ g_assert (mips_is_imm16(lmf_offset)); mips_addiu (code, mips_at, mips_sp, lmf_offset); mips_sw (code, mips_at, mips_v0, 0); /* save method info */ mips_load_const (code, mips_at, method); g_assert (mips_is_imm16(lmf_offset + G_STRUCT_OFFSET(MonoLMF, method))); mips_sw (code, mips_at, mips_sp, lmf_offset + G_STRUCT_OFFSET(MonoLMF, method)); /* save the current IP */ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL); mips_load_const (code, mips_at, 0x01010101); mips_sw (code, mips_at, mips_sp, lmf_offset + G_STRUCT_OFFSET(MonoLMF, eip)); } if (alloc2_size) { if (mips_is_imm16 (-alloc2_size)) { mips_addu (code, mips_sp, mips_sp, -alloc2_size); } else { mips_load_const (code, mips_at, -alloc2_size); mips_addu (code, mips_sp, mips_sp, mips_at); } alloc_size += alloc2_size; cfa_offset += alloc2_size; if (cfg->frame_reg != mips_sp) MIPS_MOVE (code, cfg->frame_reg, mips_sp); else mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); } set_code_cursor (cfg, code); 
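	/*
	 * For a small frame the code emitted above looks roughly like the
	 * following (offsets are illustrative only):
	 *
	 *	addiu	sp, sp, -32	# allocate the frame
	 *	sw	ra, 28(sp)	# save the return address
	 *	sw	s0, 16(sp)	# save the used callee-saved registers
	 *	sw	a0, 24(sp)	# home register arguments that live on the stack
	 */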
return code; } guint8 * mono_arch_emit_epilog_sub (MonoCompile *cfg) { guint8 *code = NULL; MonoMethod *method = cfg->method; int i; int max_epilog_size = 16 + 20*4; int alloc2_size = 0; guint32 iregs_to_restore; #if SAVE_FP_REGS guint32 fregs_to_restore; #endif if (cfg->method->save_lmf) max_epilog_size += 128; realloc_code (cfg, max_epilog_size); code = cfg->native_code + cfg->code_len; if (cfg->frame_reg != mips_sp) { MIPS_MOVE (code, mips_sp, cfg->frame_reg); } /* If the stack frame is really large, deconstruct it in two steps */ if (cfg->stack_usage > ((1 << 15) - 1024)) { alloc2_size = cfg->stack_usage - 1024; /* partially deconstruct the stack */ mips_load_const (code, mips_at, alloc2_size); mips_addu (code, mips_sp, mips_sp, mips_at); } int pos = cfg->arch.iregs_offset - alloc2_size; iregs_to_restore = (cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS); if (iregs_to_restore) { for (i = MONO_MAX_IREGS-1; i >= 0; --i) { if (iregs_to_restore & (1 << i)) { g_assert (mips_is_imm16(pos)); MIPS_LW (code, i, mips_sp, pos); pos += SIZEOF_REGISTER; } } } #if SAVE_FP_REGS #if 0 fregs_to_restore = (cfg->used_float_regs & MONO_ARCH_CALLEE_SAVED_FREGS); #else fregs_to_restore = MONO_ARCH_CALLEE_SAVED_FREGS; fregs_to_restore |= (fregs_to_restore << 1); #endif if (fregs_to_restore) { for (i = MONO_MAX_FREGS-1; i >= 0; --i) { if (fregs_to_restore & (1 << i)) { g_assert (pos < cfg->stack_usage - MIPS_STACK_ALIGNMENT); g_assert (mips_is_imm16(pos)); mips_lwc1 (code, i, mips_sp, pos); pos += FREG_SIZE } } } #endif /* Unlink the LMF if necessary */ if (method->save_lmf) { int lmf_offset = cfg->arch.lmf_offset; /* t0 = current_lmf->previous_lmf */ g_assert (mips_is_imm16(lmf_offset + G_STRUCT_OFFSET(MonoLMF, previous_lmf))); mips_lw (code, mips_temp, mips_sp, lmf_offset + G_STRUCT_OFFSET(MonoLMF, previous_lmf)); /* t1 = lmf_addr */ g_assert (mips_is_imm16(lmf_offset + G_STRUCT_OFFSET(MonoLMF, lmf_addr))); mips_lw (code, mips_t1, mips_sp, lmf_offset + G_STRUCT_OFFSET(MonoLMF, lmf_addr)); /* (*lmf_addr) = previous_lmf */ mips_sw (code, mips_temp, mips_t1, 0); } #if 0 /* Restore the fp */ mips_lw (code, mips_fp, mips_sp, cfg->stack_usage + MIPS_FP_ADDR_OFFSET); #endif /* Restore ra */ if ((cfg->flags & MONO_CFG_HAS_CALLS) || ALWAYS_SAVE_RA) { g_assert (mips_is_imm16(cfg->stack_usage - alloc2_size + MIPS_RET_ADDR_OFFSET)); mips_lw (code, mips_ra, mips_sp, cfg->stack_usage - alloc2_size + MIPS_RET_ADDR_OFFSET); } /* Restore the stack pointer */ g_assert (mips_is_imm16(cfg->stack_usage - alloc2_size)); mips_addiu (code, mips_sp, mips_sp, cfg->stack_usage - alloc2_size); /* Caller will emit either return or tail-call sequence */ set_code_cursor (cfg, code); return (code); } void mono_arch_emit_epilog (MonoCompile *cfg) { guint8 *code = mono_arch_emit_epilog_sub (cfg); mips_jr (code, mips_ra); mips_nop (code); set_code_cursor (cfg, code); } /* remove once throw_exception_by_name is eliminated */ #if 0 static int exception_id_by_name (const char *name) { if (strcmp (name, "IndexOutOfRangeException") == 0) return MONO_EXC_INDEX_OUT_OF_RANGE; if (strcmp (name, "OverflowException") == 0) return MONO_EXC_OVERFLOW; if (strcmp (name, "ArithmeticException") == 0) return MONO_EXC_ARITHMETIC; if (strcmp (name, "DivideByZeroException") == 0) return MONO_EXC_DIVIDE_BY_ZERO; if (strcmp (name, "InvalidCastException") == 0) return MONO_EXC_INVALID_CAST; if (strcmp (name, "NullReferenceException") == 0) return MONO_EXC_NULL_REF; if (strcmp (name, "ArrayTypeMismatchException") == 0) return MONO_EXC_ARRAY_TYPE_MISMATCH; if 
(strcmp (name, "ArgumentException") == 0) return MONO_EXC_ARGUMENT; g_error ("Unknown intrinsic exception %s\n", name); return 0; } #endif void mono_arch_emit_exceptions (MonoCompile *cfg) { #if 0 MonoJumpInfo *patch_info; int i; guint8 *code; const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL}; guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0}; int max_epilog_size = 50; /* count the number of exception infos */ /* * make sure we have enough space for exceptions * 24 is the simulated call to throw_exception_by_name */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { #if 0 if (patch_info->type == MONO_PATCH_INFO_EXC) { i = exception_id_by_name (patch_info->data.target); g_assert (i < MONO_EXC_INTRINS_NUM); if (!exc_throw_found [i]) { max_epilog_size += 12; exc_throw_found [i] = TRUE; } } #endif } code = realloc_code (cfg, max_epilog_size); /* add code to raise exceptions */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { switch (patch_info->type) { case MONO_PATCH_INFO_EXC: { g_assert_not_reached(); break; } default: /* do nothing */ break; } } set_code_cursor (cfg, code); #endif } void mono_arch_finish_init (void) { } void mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg) { int this_dreg = mips_a0; if (vt_reg != -1) this_dreg = mips_a1; /* add the this argument */ if (this_reg != -1) { MonoInst *this_ins; MONO_INST_NEW (cfg, this_ins, OP_MOVE); this_ins->type = this_type; this_ins->sreg1 = this_reg; this_ins->dreg = mono_alloc_ireg (cfg); mono_bblock_add_inst (cfg->cbb, this_ins); mono_call_inst_add_outarg_reg (cfg, inst, this_ins->dreg, this_dreg, FALSE); } if (vt_reg != -1) { MonoInst *vtarg; MONO_INST_NEW (cfg, vtarg, OP_MOVE); vtarg->type = STACK_MP; vtarg->sreg1 = vt_reg; vtarg->dreg = mono_alloc_ireg (cfg); mono_bblock_add_inst (cfg->cbb, vtarg); mono_call_inst_add_outarg_reg (cfg, inst, vtarg->dreg, mips_a0, FALSE); } } MonoInst* mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins = NULL; return ins; } MonoInst* mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { return NULL; } host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg) { return ctx->sc_regs [reg]; } host_mgreg_t* mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg) { return &ctx->sc_regs [reg]; } #define ENABLE_WRONG_METHOD_CHECK 0 #define MIPS_LOAD_SEQUENCE_LENGTH 8 #define CMP_SIZE (MIPS_LOAD_SEQUENCE_LENGTH + 4) #define BR_SIZE 8 #define LOADSTORE_SIZE 4 #define JUMP_IMM_SIZE 16 #define JUMP_IMM32_SIZE (MIPS_LOAD_SEQUENCE_LENGTH + 8) #define LOAD_CONST_SIZE 8 #define JUMP_JR_SIZE 8 gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp) { int i; int size = 0; guint8 *code, *start, *patch; for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->is_equals) { if (item->check_target_idx) { item->chunk_size += LOAD_CONST_SIZE + BR_SIZE + JUMP_JR_SIZE; if (item->has_target_code) item->chunk_size += LOAD_CONST_SIZE; else item->chunk_size += LOADSTORE_SIZE; } else { if (fail_tramp) { item->chunk_size += LOAD_CONST_SIZE + BR_SIZE + JUMP_IMM32_SIZE + LOADSTORE_SIZE + JUMP_IMM32_SIZE; if (!item->has_target_code) item->chunk_size += LOADSTORE_SIZE; } else { item->chunk_size += LOADSTORE_SIZE + JUMP_JR_SIZE; #if ENABLE_WRONG_METHOD_CHECK 
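					/* size of the wrong-method check; the emission code
					   below still uses ppc_* macros inherited from
					   mini-ppc.c and would need porting before the check
					   can be enabled */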
item->chunk_size += CMP_SIZE + BR_SIZE + 4; #endif } } } else { item->chunk_size += CMP_SIZE + BR_SIZE; imt_entries [item->check_target_idx]->compare_done = TRUE; } size += item->chunk_size; } /* the initial load of the vtable address */ size += MIPS_LOAD_SEQUENCE_LENGTH; if (fail_tramp) { code = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, size); } else { MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass); code = mono_mem_manager_code_reserve (mem_manager, size); } start = code; /* t7 points to the vtable */ mips_load_const (code, mips_t7, (gsize)(& (vtable->vtable [0]))); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; item->code_target = code; if (item->is_equals) { if (item->check_target_idx) { mips_load_const (code, mips_temp, (gsize)item->key); item->jmp_code = code; mips_bne (code, mips_temp, MONO_ARCH_IMT_REG, 0); mips_nop (code); if (item->has_target_code) { mips_load_const (code, mips_t9, item->value.target_code); } else { mips_lw (code, mips_t9, mips_t7, (sizeof (target_mgreg_t) * item->value.vtable_slot)); } mips_jr (code, mips_t9); mips_nop (code); } else { if (fail_tramp) { mips_load_const (code, mips_temp, (gsize)item->key); patch = code; mips_bne (code, mips_temp, MONO_ARCH_IMT_REG, 0); mips_nop (code); if (item->has_target_code) { mips_load_const (code, mips_t9, item->value.target_code); } else { g_assert (vtable); mips_load_const (code, mips_at, & (vtable->vtable [item->value.vtable_slot])); mips_lw (code, mips_t9, mips_at, 0); } mips_jr (code, mips_t9); mips_nop (code); mips_patch ((guint32 *)(void *)patch, (guint32)code); mips_load_const (code, mips_t9, fail_tramp); mips_jr (code, mips_t9); mips_nop (code); } else { /* enable the commented code to assert on wrong method */ #if ENABLE_WRONG_METHOD_CHECK ppc_load (code, ppc_r0, (guint32)item->key); ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0); patch = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); #endif mips_lw (code, mips_t9, mips_t7, (sizeof (target_mgreg_t) * item->value.vtable_slot)); mips_jr (code, mips_t9); mips_nop (code); #if ENABLE_WRONG_METHOD_CHECK ppc_patch (patch, code); ppc_break (code); #endif } } } else { mips_load_const (code, mips_temp, (gulong)item->key); mips_slt (code, mips_temp, MONO_ARCH_IMT_REG, mips_temp); item->jmp_code = code; mips_beq (code, mips_temp, mips_zero, 0); mips_nop (code); } } /* patch the branches to get to the target items */ for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->jmp_code && item->check_target_idx) { mips_patch ((guint32 *)item->jmp_code, (guint32)imt_entries [item->check_target_idx]->code_target); } } if (!fail_tramp) UnlockedAdd (&mono_stats.imt_trampolines_size, code - start); g_assert (code - start <= size); mono_arch_flush_icache (start, size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL)); mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), NULL); return start; } MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code) { return (MonoMethod*) regs [MONO_ARCH_IMT_REG]; } MonoVTable* mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code) { return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG]; } /* Soft Debug support */ #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED /* * mono_arch_set_breakpoint: * * See mini-amd64.c for docs. 
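 *
 * On MIPS the breakpoint site is patched to load from bp_trigger_page;
 * that page is kept unreadable, so executing a patched site faults and the
 * fault is reported as a breakpoint event.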
*/ void mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip; guint32 addr = (guint32)bp_trigger_page; mips_load_const (code, mips_t9, addr); mips_lw (code, mips_t9, mips_t9, 0); mono_arch_flush_icache (ip, code - ip); } /* * mono_arch_clear_breakpoint: * * See mini-amd64.c for docs. */ void mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip; mips_nop (code); mips_nop (code); mips_nop (code); mono_arch_flush_icache (ip, code - ip); } /* * mono_arch_start_single_stepping: * * See mini-amd64.c for docs. */ void mono_arch_start_single_stepping (void) { mono_mprotect (ss_trigger_page, mono_pagesize (), 0); } /* * mono_arch_stop_single_stepping: * * See mini-amd64.c for docs. */ void mono_arch_stop_single_stepping (void) { mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ); } /* * mono_arch_is_single_step_event: * * See mini-amd64.c for docs. */ gboolean mono_arch_is_single_step_event (void *info, void *sigctx) { siginfo_t* sinfo = (siginfo_t*) info; /* Sometimes the address is off by 4 */ if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128) return TRUE; else return FALSE; } /* * mono_arch_is_breakpoint_event: * * See mini-amd64.c for docs. */ gboolean mono_arch_is_breakpoint_event (void *info, void *sigctx) { siginfo_t* sinfo = (siginfo_t*) info; /* Sometimes the address is off by 4 */ if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128) return TRUE; else return FALSE; } /* * mono_arch_skip_breakpoint: * * See mini-amd64.c for docs. */ void mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji) { MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4); } /* * mono_arch_skip_single_step: * * See mini-amd64.c for docs. */ void mono_arch_skip_single_step (MonoContext *ctx) { MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4); } /* * mono_arch_get_seq_point_info: * * See mini-amd64.c for docs. */ SeqPointInfo* mono_arch_get_seq_point_info (guint8 *code) { NOT_IMPLEMENTED; return NULL; } #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */ gboolean mono_arch_opcode_supported (int opcode) { return FALSE; } gboolean mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_) { return FALSE; } gpointer mono_arch_load_function (MonoJitICallId jit_icall_id) { return NULL; } GSList* mono_arch_get_cie_program (void) { NOT_IMPLEMENTED; return NULL; }
/**
 * \file
 * MIPS backend for the Mono code generator
 *
 * Authors:
 * Mark Mason ([email protected])
 *
 * Based on mini-ppc.c by
 * Paolo Molaro ([email protected])
 * Dietmar Maurer ([email protected])
 *
 * (C) 2006 Broadcom
 * (C) 2003 Ximian, Inc.
 */
#include "mini.h"
#include <string.h>
#include <asm/cachectl.h>

#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-hwcap.h>
#include <mono/utils/unlocked.h>

#include <mono/arch/mips/mips-codegen.h>

#include "mini-mips.h"
#include "cpu-mips.h"
#include "ir-emit.h"
#include "aot-runtime.h"
#include "mini-runtime.h"
#include "mono/utils/mono-tls-inline.h"

#define SAVE_FP_REGS		0

#define ALWAYS_SAVE_RA		1	/* call-handler & switch currently clobber ra */

#define PROMOTE_R4_TO_R8	1	/* promote single values in registers to doubles */
#define USE_MUL			0	/* use mul instead of mult/mflo for multiply;
					   remember to update cpu-mips.md if you change this */

/* Emit a call sequence to 'v', using 'D' as a scratch register if necessary */
#define mips_call(c,D,v) do {	\
		guint32 _target = (guint32)(v); \
		if (1 || ((v) == NULL) || ((_target & 0xfc000000) != (((guint32)(c)) & 0xfc000000))) { \
			mips_load_const (c, D, _target); \
			mips_jalr (c, D, mips_ra); \
		} \
		else { \
			mips_jumpl (c, _target >> 2); \
		} \
		mips_nop (c); \
	} while (0)

enum {
	TLS_MODE_DETECT,
	TLS_MODE_FAILED,
	TLS_MODE_LTHREADS,
	TLS_MODE_NPTL
};

/* This mutex protects architecture specific caches */
#define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
#define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
static mono_mutex_t mini_arch_mutex;

/* Whether the host is little-endian */
static int little_endian;
/* Index of ls word/register */
static int ls_word_idx;
/* Index of ms word/register */
static int ms_word_idx;
/* Same for offsets */
static int ls_word_offset;
static int ms_word_offset;

/*
 * The code generated for sequence points reads from this location, which is
 * made read-only when single stepping is enabled.
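 * mono_arch_start_single_stepping () removes the read permission, so the
 * emitted load faults and mono_arch_is_single_step_event () recognizes the
 * faulting address as a single-step event.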
*/ static gpointer ss_trigger_page; /* Enabled breakpoints read from this trigger page */ static gpointer bp_trigger_page; #undef DEBUG #define DEBUG(a) if (cfg->verbose_level > 1) a #undef DEBUG #define DEBUG(a) a #undef DEBUG #define DEBUG(a) #define EMIT_SYSTEM_EXCEPTION_NAME(exc_name) \ do { \ code = mips_emit_exc_by_name (code, exc_name); \ cfg->bb_exit->max_offset += 16; \ } while (0) #define MONO_EMIT_NEW_LOAD_R8(cfg,dr,addr) do { \ MonoInst *inst; \ MONO_INST_NEW ((cfg), (inst), OP_R8CONST); \ inst->type = STACK_R8; \ inst->dreg = (dr); \ inst->inst_p0 = (void*)(addr); \ mono_bblock_add_inst (cfg->cbb, inst); \ } while (0) #define ins_is_compare(ins) ((ins) && (((ins)->opcode == OP_COMPARE) \ || ((ins)->opcode == OP_ICOMPARE) \ || ((ins)->opcode == OP_LCOMPARE))) #define ins_is_compare_imm(ins) ((ins) && (((ins)->opcode == OP_COMPARE_IMM) \ || ((ins)->opcode == OP_ICOMPARE_IMM) \ || ((ins)->opcode == OP_LCOMPARE_IMM))) #define INS_REWRITE(ins, op, _s1, _s2) do { \ int s1 = _s1; \ int s2 = _s2; \ ins->opcode = (op); \ ins->sreg1 = (s1); \ ins->sreg2 = (s2); \ } while (0); #define INS_REWRITE_IMM(ins, op, _s1, _imm) do { \ int s1 = _s1; \ ins->opcode = (op); \ ins->sreg1 = (s1); \ ins->inst_imm = (_imm); \ } while (0); typedef struct InstList InstList; struct InstList { InstList *prev; InstList *next; MonoInst *data; }; typedef enum { ArgInIReg, ArgOnStack, ArgInFReg, ArgStructByVal, ArgStructByAddr } ArgStorage; typedef struct { gint32 offset; guint16 vtsize; /* in param area */ guint8 reg; ArgStorage storage; guint8 size : 4; /* 1, 2, 4, 8, or regs used by ArgStructByVal */ } ArgInfo; struct CallInfo { int nargs; int gr; int fr; gboolean gr_passed; gboolean on_stack; gboolean vtype_retaddr; int stack_size; guint32 stack_usage; guint32 struct_ret; ArgInfo ret; ArgInfo sig_cookie; ArgInfo args [1]; }; void patch_lui_addiu(guint32 *ip, guint32 val); static guint8 *mono_arch_emit_epilog_sub (MonoCompile *cfg); guint8 *mips_emit_cond_branch (MonoCompile *cfg, guint8 *code, int op, MonoInst *ins); void mips_adjust_stackframe(MonoCompile *cfg); void mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg); MonoInst *mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args); /* Not defined in asm/cachectl.h */ int cacheflush(char *addr, int nbytes, int cache); void mono_arch_flush_icache (guint8 *code, gint size) { /* Linux/MIPS specific */ cacheflush ((char*)code, size, BCACHE); } void mono_arch_flush_register_windows (void) { } gboolean mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm) { return TRUE; } static guint8 * mips_emit_exc_by_name(guint8 *code, const char *name) { gpointer addr; MonoClass *exc_class; exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", name); mips_load_const (code, mips_a0, m_class_get_type_token (exc_class)); addr = mono_get_throw_corlib_exception (); mips_call (code, mips_t9, addr); return code; } guint8 * mips_emit_load_const (guint8 *code, int dreg, target_mgreg_t v) { if (mips_is_imm16 (v)) mips_addiu (code, dreg, mips_zero, ((guint32)v) & 0xffff); else { #if SIZEOF_REGISTER == 8 if (v != (long) v) { /* v is not a sign-extended 32-bit value */ mips_lui (code, dreg, mips_zero, (guint32)((v >> (32+16)) & 0xffff)); mips_ori (code, dreg, dreg, (guint32)((v >> (32)) & 0xffff)); mips_dsll (code, dreg, dreg, 16); mips_ori (code, dreg, dreg, (guint32)((v >> (16)) & 0xffff)); mips_dsll (code, dreg, dreg, 16); mips_ori (code, 
dreg, dreg, (guint32)(v & 0xffff)); return code; } #endif if (((guint32)v) & (1 << 15)) { mips_lui (code, dreg, mips_zero, (((guint32)v)>>16)+1); } else { mips_lui (code, dreg, mips_zero, (((guint32)v)>>16)); } if (((guint32)v) & 0xffff) mips_addiu (code, dreg, dreg, ((guint32)v) & 0xffff); } return code; } guint8 * mips_emit_cond_branch (MonoCompile *cfg, guint8 *code, int op, MonoInst *ins) { g_assert (ins); if (cfg->arch.long_branch) { int br_offset = 5; /* Invert test and emit branch around jump */ switch (op) { case OP_MIPS_BEQ: mips_bne (code, ins->sreg1, ins->sreg2, br_offset); mips_nop (code); break; case OP_MIPS_BNE: mips_beq (code, ins->sreg1, ins->sreg2, br_offset); mips_nop (code); break; case OP_MIPS_BGEZ: mips_bltz (code, ins->sreg1, br_offset); mips_nop (code); break; case OP_MIPS_BGTZ: mips_blez (code, ins->sreg1, br_offset); mips_nop (code); break; case OP_MIPS_BLEZ: mips_bgtz (code, ins->sreg1, br_offset); mips_nop (code); break; case OP_MIPS_BLTZ: mips_bgez (code, ins->sreg1, br_offset); mips_nop (code); break; default: g_assert_not_reached (); } mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); mips_lui (code, mips_at, mips_zero, 0); mips_addiu (code, mips_at, mips_at, 0); mips_jr (code, mips_at); mips_nop (code); } else { mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); switch (op) { case OP_MIPS_BEQ: mips_beq (code, ins->sreg1, ins->sreg2, 0); mips_nop (code); break; case OP_MIPS_BNE: mips_bne (code, ins->sreg1, ins->sreg2, 0); mips_nop (code); break; case OP_MIPS_BGEZ: mips_bgez (code, ins->sreg1, 0); mips_nop (code); break; case OP_MIPS_BGTZ: mips_bgtz (code, ins->sreg1, 0); mips_nop (code); break; case OP_MIPS_BLEZ: mips_blez (code, ins->sreg1, 0); mips_nop (code); break; case OP_MIPS_BLTZ: mips_bltz (code, ins->sreg1, 0); mips_nop (code); break; default: g_assert_not_reached (); } } return (code); } /* XXX - big-endian dependent? 
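   (patch_lui_addiu () selects the 16-bit half of each instruction word via
   MINI_LS_WORD_IDX, which is presumably defined from the target byte order,
   so both endiannesses should be covered.)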
*/ void patch_lui_addiu(guint32 *ip, guint32 val) { guint16 *__lui_addiu = (guint16*)(void *)(ip); #if 0 printf ("patch_lui_addiu ip=0x%08x (0x%08x, 0x%08x) to point to 0x%08x\n", ip, ((guint32 *)ip)[0], ((guint32 *)ip)[1], val); fflush (stdout); #endif if (((guint32)(val)) & (1 << 15)) __lui_addiu [MINI_LS_WORD_IDX] = ((((guint32)(val)) >> 16) & 0xffff) + 1; else __lui_addiu [MINI_LS_WORD_IDX] = (((guint32)(val)) >> 16) & 0xffff; __lui_addiu [MINI_LS_WORD_IDX + 2] = ((guint32)(val)) & 0xffff; mono_arch_flush_icache ((guint8 *)ip, 8); } guint32 trap_target; void mips_patch (guint32 *code, guint32 target) { guint32 ins = *code; guint32 op = ins >> 26; guint32 diff, offset; g_assert (trap_target != target); //printf ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target); switch (op) { case 0x00: /* jr ra */ if (ins == 0x3e00008) break; g_assert_not_reached (); break; case 0x02: /* j */ case 0x03: /* jal */ g_assert (!(target & 0x03)); g_assert ((target & 0xfc000000) == (((guint32)code) & 0xfc000000)); ins = (ins & 0xfc000000) | (((target) >> 2) & 0x03ffffff); *code = ins; mono_arch_flush_icache ((guint8 *)code, 4); break; case 0x01: /* BLTZ */ case 0x04: /* BEQ */ case 0x05: /* BNE */ case 0x06: /* BLEZ */ case 0x07: /* BGTZ */ case 0x11: /* bc1t */ diff = target - (guint32)(code + 1); g_assert (((diff & 0x0003ffff) == diff) || ((diff | 0xfffc0000) == diff)); g_assert (!(diff & 0x03)); offset = ((gint32)diff) >> 2; if (((int)offset) != ((int)(short)offset)) g_assert (((int)offset) == ((int)(short)offset)); ins = (ins & 0xffff0000) | (offset & 0x0000ffff); *code = ins; mono_arch_flush_icache ((guint8 *)code, 4); break; case 0x0f: /* LUI / ADDIU pair */ g_assert ((code[1] >> 26) == 0x9); patch_lui_addiu (code, target); mono_arch_flush_icache ((guint8 *)code, 8); break; default: printf ("unknown op 0x%02x (0x%08x) @ %p\n", op, ins, code); g_assert_not_reached (); } } static void mono_arch_compute_omit_fp (MonoCompile *cfg); const char* mono_arch_regname (int reg) { #if _MIPS_SIM == _ABIO32 static const char * rnames[] = { "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra" }; #elif _MIPS_SIM == _ABIN32 static const char * rnames[] = { "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra" }; #endif if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } const char* mono_arch_fregname (int reg) { static const char * rnames[] = { "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } /* this function overwrites at */ static guint8* emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset) { /* XXX write a loop, not an unrolled loop */ while (size > 0) { mips_lw (code, mips_at, sreg, soffset); mips_sw (code, mips_at, dreg, doffset); size -= 4; soffset += 4; doffset += 4; } return code; } /* * mono_arch_get_argument_info: * @csig: a method signature * @param_count: the number of parameters to consider * @arg_info: an array to store the result infos * * Gathers information on parameters such as size, alignment and * padding. 
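 * (Entry 0 covers the implicit this/vret slots; entry k + 1 describes
 * parameter k.)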
arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the activation frame.
 */
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
	int k, frame_size = 0;
	guint32 size, align, pad;
	int offset = 0;

	if (MONO_TYPE_ISSTRUCT (csig->ret)) {
		frame_size += sizeof (target_mgreg_t);
		offset += 4;
	}

	arg_info [0].offset = offset;

	if (csig->hasthis) {
		frame_size += sizeof (target_mgreg_t);
		offset += 4;
	}

	arg_info [0].size = frame_size;

	for (k = 0; k < param_count; k++) {
		size = mini_type_stack_size_full (csig->params [k], &align, csig->pinvoke && !csig->marshalling_disabled);

		/* ignore alignment for now */
		align = 1;

		frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
		arg_info [k].pad = pad;
		frame_size += size;
		arg_info [k + 1].pad = 0;
		arg_info [k + 1].size = size;
		offset += pad;
		arg_info [k + 1].offset = offset;
		offset += size;
	}

	align = MONO_ARCH_FRAME_ALIGNMENT;
	frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
	arg_info [k].pad = pad;

	return frame_size;
}

/* The delegate object plus 3 params */
#define MAX_ARCH_DELEGATE_PARAMS (4 - 1)

static guint8*
get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, gboolean param_count)
{
	guint8 *code, *start;

	if (has_target) {
		start = code = mono_global_codeman_reserve (16);

		/* Replace the this argument with the target */
		mips_lw (code, mips_temp, mips_a0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
		mips_lw (code, mips_a0, mips_a0, MONO_STRUCT_OFFSET (MonoDelegate, target));
		mips_jr (code, mips_temp);
		mips_nop (code);

		g_assert ((code - start) <= 16);

		mono_arch_flush_icache (start, 16);
		MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
	} else {
		int size, i;

		size = 16 + param_count * 4;
		start = code = mono_global_codeman_reserve (size);

		mips_lw (code, mips_temp, mips_a0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
		/* slide down the arguments */
		for (i = 0; i < param_count; ++i) {
			mips_move (code, mips_a0 + i, mips_a0 + i + 1);
		}
		mips_jr (code, mips_temp);
		mips_nop (code);

		g_assert ((code - start) <= size);

		mono_arch_flush_icache (start, size);
		MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
	}

	if (has_target) {
		*info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, NULL);
	} else {
		char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
		*info = mono_tramp_info_create (name, start, code - start, NULL, NULL);
		g_free (name);
	}

	return start;
}

/*
 * mono_arch_get_delegate_invoke_impls:
 *
 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
 * trampolines.
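 *
 * Concretely (per get_delegate_invoke_impl () above), that is one
 * has_target variant plus one target_<n> variant for each arity from 0
 * up to MAX_ARCH_DELEGATE_PARAMS:
 *
 *   delegate_invoke_impl_has_target
 *   delegate_invoke_impl_target_0 ... delegate_invoke_impl_target_3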
*/ GSList* mono_arch_get_delegate_invoke_impls (void) { GSList *res = NULL; MonoTrampInfo *info; int i; get_delegate_invoke_impl (&info, TRUE, 0); res = g_slist_prepend (res, info); for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) { get_delegate_invoke_impl (&info, FALSE, i); res = g_slist_prepend (res, info); } return res; } gpointer mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target) { guint8 *code, *start; /* FIXME: Support more cases */ if (MONO_TYPE_ISSTRUCT (sig->ret)) return NULL; if (has_target) { static guint8* cached = NULL; mono_mini_arch_lock (); if (cached) { mono_mini_arch_unlock (); return cached; } if (mono_ee_features.use_aot_trampolines) { start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target"); } else { MonoTrampInfo *info; start = get_delegate_invoke_impl (&info, TRUE, 0); mono_tramp_info_register (info, NULL); } cached = start; mono_mini_arch_unlock (); return cached; } else { static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL}; int i; if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS) return NULL; for (i = 0; i < sig->param_count; ++i) if (!mono_is_regsize_var (sig->params [i])) return NULL; mono_mini_arch_lock (); code = cache [sig->param_count]; if (code) { mono_mini_arch_unlock (); return code; } if (mono_ee_features.use_aot_trampolines) { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count); start = mono_aot_get_trampoline (name); g_free (name); } else { MonoTrampInfo *info; start = get_delegate_invoke_impl (&info, FALSE, sig->param_count); mono_tramp_info_register (info, NULL); } cache [sig->param_count] = start; mono_mini_arch_unlock (); return start; } return NULL; } gpointer mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg) { return NULL; } gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code) { g_assert(regs); return (gpointer)regs [mips_a0]; } /* * Initialize the cpu to execute managed code. */ void mono_arch_cpu_init (void) { #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN little_endian = 1; ls_word_idx = 0; ms_word_idx = 1; #else ls_word_idx = 1; ms_word_idx = 0; #endif ls_word_offset = ls_word_idx * 4; ms_word_offset = ms_word_idx * 4; } /* * Initialize architecture specific code. */ void mono_arch_init (void) { mono_os_mutex_init_recursive (&mini_arch_mutex); ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER); bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER); mono_mprotect (bp_trigger_page, mono_pagesize (), 0); } /* * Cleanup architecture specific code. */ void mono_arch_cleanup (void) { mono_os_mutex_destroy (&mini_arch_mutex); } gboolean mono_arch_have_fast_tls (void) { return FALSE; } /* * This function returns the optimizations supported on this cpu. 
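 * (As the body below shows, no MIPS-specific optimizations are enabled
 * yet: *exclude_mask is cleared and an empty mask is returned.)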
*/ guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask) { guint32 opts = 0; /* no mips-specific optimizations yet */ *exclude_mask = 0; return opts; } GList * mono_arch_get_allocatable_int_vars (MonoCompile *cfg) { GList *vars = NULL; int i; for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); /* unused vars */ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos) continue; if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG)) continue; /* we can only allocate 32 bit values */ if (mono_is_regsize_var (ins->inst_vtype)) { g_assert (MONO_VARINFO (cfg, i)->reg == -1); g_assert (i == vmv->idx); vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE); } } return vars; } GList * mono_arch_get_global_int_regs (MonoCompile *cfg) { GList *regs = NULL; regs = g_list_prepend (regs, (gpointer)mips_s0); regs = g_list_prepend (regs, (gpointer)mips_s1); regs = g_list_prepend (regs, (gpointer)mips_s2); regs = g_list_prepend (regs, (gpointer)mips_s3); regs = g_list_prepend (regs, (gpointer)mips_s4); //regs = g_list_prepend (regs, (gpointer)mips_s5); regs = g_list_prepend (regs, (gpointer)mips_s6); regs = g_list_prepend (regs, (gpointer)mips_s7); return regs; } /* * mono_arch_regalloc_cost: * * Return the cost, in number of memory references, of the action of * allocating the variable VMV into a register during global register * allocation. */ guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv) { /* FIXME: */ return 2; } static void args_onto_stack (CallInfo *info) { g_assert (!info->on_stack); g_assert (info->stack_size <= MIPS_STACK_PARAM_OFFSET); info->on_stack = TRUE; info->stack_size = MIPS_STACK_PARAM_OFFSET; } #if _MIPS_SIM == _ABIO32 /* * O32 calling convention version */ static void add_int32_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack && info->gr > MIPS_LAST_ARG_REG) args_onto_stack (info); /* Now, place the argument */ if (info->on_stack) { ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; } else { ainfo->storage = ArgInIReg; ainfo->reg = info->gr; info->gr += 1; info->gr_passed = TRUE; } info->stack_size += 4; } static void add_int64_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack && info->gr+1 > MIPS_LAST_ARG_REG) args_onto_stack (info); /* Now, place the argument */ if (info->on_stack) { g_assert (info->stack_size % 4 == 0); info->stack_size += (info->stack_size % 8); ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; } else { // info->gr must be a0 or a2 info->gr += (info->gr - MIPS_FIRST_ARG_REG) % 2; g_assert(info->gr <= MIPS_LAST_ARG_REG); ainfo->storage = ArgInIReg; ainfo->reg = info->gr; info->gr += 2; info->gr_passed = TRUE; } info->stack_size += 8; } static void add_float32_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack && info->gr > MIPS_LAST_ARG_REG) args_onto_stack (info); /* Now, place the argument */ if (info->on_stack) { ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; } else { /* Only use FP regs for args if no int args passed yet */ if (!info->gr_passed && info->fr <= MIPS_LAST_FPARG_REG) { ainfo->storage = ArgInFReg; ainfo->reg = info->fr; /* Even though it's a 
single-precision float, it takes up two FP regs */ info->fr += 2; /* FP and GP slots do not overlap */ info->gr += 1; } else { /* Passing single-precision float arg in a GP register * such as: func (0, 1.0, 2, 3); * In this case, only one 'gr' register is consumed. */ ainfo->storage = ArgInIReg; ainfo->reg = info->gr; info->gr += 1; info->gr_passed = TRUE; } } info->stack_size += 4; } static void add_float64_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack && info->gr+1 > MIPS_LAST_ARG_REG) args_onto_stack (info); /* Now, place the argument */ if (info->on_stack) { g_assert(info->stack_size % 4 == 0); info->stack_size += (info->stack_size % 8); ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; } else { /* Only use FP regs for args if no int args passed yet */ if (!info->gr_passed && info->fr <= MIPS_LAST_FPARG_REG) { ainfo->storage = ArgInFReg; ainfo->reg = info->fr; info->fr += 2; /* FP and GP slots do not overlap */ info->gr += 2; } else { // info->gr must be a0 or a2 info->gr += (info->gr - MIPS_FIRST_ARG_REG) % 2; g_assert(info->gr <= MIPS_LAST_ARG_REG); ainfo->storage = ArgInIReg; ainfo->reg = info->gr; info->gr += 2; info->gr_passed = TRUE; } } info->stack_size += 8; } #elif _MIPS_SIM == _ABIN32 /* * N32 calling convention version */ static void add_int32_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack && info->gr > MIPS_LAST_ARG_REG) args_onto_stack (info); /* Now, place the argument */ if (info->on_stack) { ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; info->stack_size += SIZEOF_REGISTER; } else { ainfo->storage = ArgInIReg; ainfo->reg = info->gr; info->gr += 1; info->gr_passed = TRUE; } } static void add_int64_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack && info->gr > MIPS_LAST_ARG_REG) args_onto_stack (info); /* Now, place the argument */ if (info->on_stack) { g_assert (info->stack_size % 4 == 0); info->stack_size += (info->stack_size % 8); ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; info->stack_size += SIZEOF_REGISTER; } else { g_assert (info->gr <= MIPS_LAST_ARG_REG); ainfo->storage = ArgInIReg; ainfo->reg = info->gr; info->gr += 1; info->gr_passed = TRUE; } } static void add_float32_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack) { if (info->gr > MIPS_LAST_ARG_REG) args_onto_stack (info); else if (info->fr > MIPS_LAST_FPARG_REG) args_onto_stack (info); } /* Now, place the argument */ if (info->on_stack) { ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; info->stack_size += FREG_SIZE; } else { ainfo->storage = ArgInFReg; ainfo->reg = info->fr; info->fr += 1; /* FP and GP slots do not overlap */ info->gr += 1; } } static void add_float64_arg (CallInfo *info, ArgInfo *ainfo) { /* First, see if we need to drop onto the stack */ if (!info->on_stack) { if (info->gr > MIPS_LAST_ARG_REG) args_onto_stack (info); else if (info->fr > MIPS_LAST_FPARG_REG) args_onto_stack (info); } /* Now, place the argument */ if (info->on_stack) { g_assert(info->stack_size % 4 == 0); info->stack_size += (info->stack_size % 8); ainfo->storage = ArgOnStack; ainfo->reg = mips_sp; /* in the caller */ ainfo->offset = info->stack_size; 
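		/* Record the slot before bumping stack_size: offset must point
		 * at the base of this argument's stack slot. */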
info->stack_size += FREG_SIZE; } else { ainfo->storage = ArgInFReg; ainfo->reg = info->fr; info->fr += 1; /* FP and GP slots do not overlap */ info->gr += 1; } } #endif static CallInfo* get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { guint i; int n = sig->hasthis + sig->param_count; int pstart; MonoType* simpletype; CallInfo *cinfo; gboolean is_pinvoke = sig->pinvoke; if (mp) cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n)); else cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n)); cinfo->fr = MIPS_FIRST_FPARG_REG; cinfo->gr = MIPS_FIRST_ARG_REG; cinfo->stack_size = 0; DEBUG(printf("calculate_sizes\n")); cinfo->vtype_retaddr = MONO_TYPE_ISSTRUCT (sig->ret) ? TRUE : FALSE; pstart = 0; n = 0; #if 0 /* handle returning a struct */ if (MONO_TYPE_ISSTRUCT (sig->ret)) { cinfo->struct_ret = cinfo->gr; add_int32_arg (cinfo, &cinfo->ret); } if (sig->hasthis) { add_int32_arg (cinfo, cinfo->args + n); n++; } #else /* * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after * the first argument, allowing 'this' to be always passed in the first arg reg. * Also do this if the first argument is a reference type, since virtual calls * are sometimes made using calli without sig->hasthis set, like in the delegate * invoke wrappers. */ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) { if (sig->hasthis) { add_int32_arg (cinfo, cinfo->args + n); n ++; } else { add_int32_arg (cinfo, cinfo->args + sig->hasthis); pstart = 1; n ++; } add_int32_arg (cinfo, &cinfo->ret); cinfo->struct_ret = cinfo->ret.reg; } else { /* this */ if (sig->hasthis) { add_int32_arg (cinfo, cinfo->args + n); n ++; } if (cinfo->vtype_retaddr) { add_int32_arg (cinfo, &cinfo->ret); cinfo->struct_ret = cinfo->ret.reg; } } #endif DEBUG(printf("params: %d\n", sig->param_count)); for (i = pstart; i < sig->param_count; ++i) { if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ args_onto_stack (cinfo); /* Emit the signature cookie just before the implicit arguments */ add_int32_arg (cinfo, &cinfo->sig_cookie); } DEBUG(printf("param %d: ", i)); simpletype = mini_get_underlying_type (sig->params [i]); switch (simpletype->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: DEBUG(printf("1 byte\n")); cinfo->args [n].size = 1; add_int32_arg (cinfo, &cinfo->args[n]); n++; break; case MONO_TYPE_I2: case MONO_TYPE_U2: DEBUG(printf("2 bytes\n")); cinfo->args [n].size = 2; add_int32_arg (cinfo, &cinfo->args[n]); n++; break; case MONO_TYPE_I4: case MONO_TYPE_U4: DEBUG(printf("4 bytes\n")); cinfo->args [n].size = 4; add_int32_arg (cinfo, &cinfo->args[n]); n++; break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: cinfo->args [n].size = sizeof (target_mgreg_t); add_int32_arg (cinfo, &cinfo->args[n]); n++; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (simpletype)) { cinfo->args [n].size = sizeof (target_mgreg_t); add_int32_arg (cinfo, &cinfo->args[n]); n++; break; } /* Fall through */ case MONO_TYPE_TYPEDBYREF: case MONO_TYPE_VALUETYPE: { int j; int nwords = 0; int has_offset = FALSE; ArgInfo dummy_arg; gint size, alignment; MonoClass *klass; if (simpletype->type == MONO_TYPE_TYPEDBYREF) { size = MONO_ABI_SIZEOF (MonoTypedRef); alignment = sizeof (target_mgreg_t); } else { klass = 
mono_class_from_mono_type_internal (sig->params [i]); if (sig->pinvoke && !sig->marshalling_disabled) size = mono_class_native_size (klass, NULL); else size = mono_class_value_size (klass, NULL); alignment = mono_class_min_align (klass); } #if MIPS_PASS_STRUCTS_BY_VALUE /* Need to do alignment if struct contains long or double */ if (alignment > 4) { /* Drop onto stack *before* looking at stack_size, if required. */ if (!cinfo->on_stack && cinfo->gr > MIPS_LAST_ARG_REG) args_onto_stack (cinfo); if (cinfo->stack_size & (alignment - 1)) { add_int32_arg (cinfo, &dummy_arg); } g_assert (!(cinfo->stack_size & (alignment - 1))); } #if 0 g_printf ("valuetype struct size=%d offset=%d align=%d\n", mono_class_native_size (sig->params [i]->data.klass, NULL), cinfo->stack_size, alignment); #endif nwords = (size + sizeof (target_mgreg_t) -1 ) / sizeof (target_mgreg_t); g_assert (cinfo->args [n].size == 0); g_assert (cinfo->args [n].vtsize == 0); for (j = 0; j < nwords; ++j) { if (j == 0) { add_int32_arg (cinfo, &cinfo->args [n]); if (cinfo->on_stack) has_offset = TRUE; } else { add_int32_arg (cinfo, &dummy_arg); if (!has_offset && cinfo->on_stack) { cinfo->args [n].offset = dummy_arg.offset; has_offset = TRUE; } } if (cinfo->on_stack) cinfo->args [n].vtsize += 1; else cinfo->args [n].size += 1; } //g_printf ("\tstack_size=%d vtsize=%d\n", cinfo->args [n].size, cinfo->args[n].vtsize); cinfo->args [n].storage = ArgStructByVal; #else add_int32_arg (cinfo, &cinfo->args[n]); cinfo->args [n].storage = ArgStructByAddr; #endif n++; break; } case MONO_TYPE_U8: case MONO_TYPE_I8: DEBUG(printf("8 bytes\n")); cinfo->args [n].size = 8; add_int64_arg (cinfo, &cinfo->args[n]); n++; break; case MONO_TYPE_R4: DEBUG(printf("R4\n")); cinfo->args [n].size = 4; add_float32_arg (cinfo, &cinfo->args[n]); n++; break; case MONO_TYPE_R8: DEBUG(printf("R8\n")); cinfo->args [n].size = 8; add_float64_arg (cinfo, &cinfo->args[n]); n++; break; default: g_error ("Can't trampoline 0x%x", sig->params [i]->type); } } /* Handle the case where there are no implicit arguments */ if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ args_onto_stack (cinfo); /* Emit the signature cookie just before the implicit arguments */ add_int32_arg (cinfo, &cinfo->sig_cookie); } { simpletype = mini_get_underlying_type (sig->ret); switch (simpletype->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: cinfo->ret.reg = mips_v0; break; case MONO_TYPE_U8: case MONO_TYPE_I8: cinfo->ret.reg = mips_v0; break; case MONO_TYPE_R4: case MONO_TYPE_R8: cinfo->ret.reg = mips_f0; cinfo->ret.storage = ArgInFReg; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (simpletype)) { cinfo->ret.reg = mips_v0; break; } break; case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: break; case MONO_TYPE_VOID: break; default: g_error ("Can't handle as return value 0x%x", sig->ret->type); } } /* align stack size to 16 */ cinfo->stack_size = (cinfo->stack_size + MIPS_STACK_ALIGNMENT - 1) & ~(MIPS_STACK_ALIGNMENT - 1); cinfo->stack_usage = cinfo->stack_size; return cinfo; } static gboolean debug_omit_fp (void) { #if 0 return mono_debug_count (); #else return TRUE; #endif } /** * mono_arch_compute_omit_fp: * Determine whether the frame pointer can be eliminated. 
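 *
 * Per the checks below, fp is kept whenever the method saves an LMF,
 * uses alloca, has exception clauses, or uses the managed vararg call
 * convention; otherwise omission defaults to on.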
*/ static void mono_arch_compute_omit_fp (MonoCompile *cfg) { MonoMethodSignature *sig; MonoMethodHeader *header; int i, locals_size; CallInfo *cinfo; if (cfg->arch.omit_fp_computed) return; header = cfg->header; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; /* * FIXME: Remove some of the restrictions. */ cfg->arch.omit_fp = TRUE; cfg->arch.omit_fp_computed = TRUE; if (cfg->disable_omit_fp) cfg->arch.omit_fp = FALSE; if (!debug_omit_fp ()) cfg->arch.omit_fp = FALSE; if (cfg->method->save_lmf) cfg->arch.omit_fp = FALSE; if (cfg->flags & MONO_CFG_HAS_ALLOCA) cfg->arch.omit_fp = FALSE; if (header->num_clauses) cfg->arch.omit_fp = FALSE; if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) cfg->arch.omit_fp = FALSE; /* * On MIPS, fp points to the bottom of the frame, so it can be eliminated even if * there are stack arguments. */ /* if (cinfo->stack_usage) cfg->arch.omit_fp = FALSE; */ locals_size = 0; for (i = cfg->locals_start; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; int ialign; locals_size += mono_type_size (ins->inst_vtype, &ialign); } //printf ("D: %s %d\n", cfg->method->name, cfg->arch.omit_fp); } /* * Set var information according to the calling convention. mips version. * The locals var stuff should most likely be split in another method. */ void mono_arch_allocate_vars (MonoCompile *cfg) { MonoMethodSignature *sig; MonoMethodHeader *header; MonoInst *inst; int i, offset, size, align, curinst; int frame_reg = mips_sp; guint32 iregs_to_save = 0; #if SAVE_FP_REGS guint32 fregs_to_restore; #endif CallInfo *cinfo; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; mono_arch_compute_omit_fp (cfg); /* spill down, we'll fix it in a separate pass */ // cfg->flags |= MONO_CFG_HAS_SPILLUP; /* this is bug #60332: remove when #59509 is fixed, so no weird vararg * call convs needs to be handled this way. */ if (cfg->flags & MONO_CFG_HAS_VARARGS) cfg->param_area = MAX (cfg->param_area, sizeof (target_mgreg_t)*8); /* gtk-sharp and other broken code will dllimport vararg functions even with * non-varargs signatures. Since there is little hope people will get this right * we assume they won't. 
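 * (Hypothetical illustration: a binding that dllimports printf with a
 * fixed signature still gets the full 8-slot parameter area reserved by
 * the wrapper check below.)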
*/ if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) cfg->param_area = MAX (cfg->param_area, sizeof (target_mgreg_t)*8); /* a0-a3 always present */ cfg->param_area = MAX (cfg->param_area, MIPS_STACK_PARAM_OFFSET); header = cfg->header; if (cfg->arch.omit_fp) frame_reg = mips_sp; else frame_reg = mips_fp; cfg->frame_reg = frame_reg; if (frame_reg != mips_sp) { cfg->used_int_regs |= 1 << frame_reg; } offset = 0; curinst = 0; if (!MONO_TYPE_ISSTRUCT (sig->ret)) { /* FIXME: handle long and FP values */ switch (mini_get_underlying_type (sig->ret)->type) { case MONO_TYPE_VOID: break; case MONO_TYPE_R4: case MONO_TYPE_R8: cfg->ret->opcode = OP_REGVAR; cfg->ret->inst_c0 = cfg->ret->dreg = mips_f0; break; default: cfg->ret->opcode = OP_REGVAR; cfg->ret->inst_c0 = mips_v0; break; } } /* Space for outgoing parameters, including a0-a3 */ offset += cfg->param_area; /* Now handle the local variables */ curinst = cfg->locals_start; for (i = curinst; i < cfg->num_varinfo; ++i) { inst = cfg->varinfo [i]; if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR) continue; /* inst->backend.is_pinvoke indicates native sized value types, this is used by the * pinvoke wrappers when they call functions returning structure */ if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) size = mono_class_native_size (mono_class_from_mono_type_internal (inst->inst_vtype), (unsigned int *) &align); else size = mono_type_size (inst->inst_vtype, &align); offset += align - 1; offset &= ~(align - 1); inst->inst_offset = offset; inst->opcode = OP_REGOFFSET; inst->inst_basereg = frame_reg; offset += size; // g_print ("allocating local %d to %d\n", i, inst->inst_offset); } /* Space for LMF (if needed) */ if (cfg->method->save_lmf) { /* align the offset to 16 bytes */ offset = (offset + MIPS_STACK_ALIGNMENT - 1) & ~(MIPS_STACK_ALIGNMENT - 1); cfg->arch.lmf_offset = offset; offset += sizeof (MonoLMF); } if (sig->call_convention == MONO_CALL_VARARG) { size = 4; align = 4; /* Allocate a local slot to hold the sig cookie address */ offset += align - 1; offset &= ~(align - 1); cfg->sig_cookie = offset; offset += size; } offset += SIZEOF_REGISTER - 1; offset &= ~(SIZEOF_REGISTER - 1); /* Space for saved registers */ cfg->arch.iregs_offset = offset; iregs_to_save = (cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS); if (iregs_to_save) { for (i = MONO_MAX_IREGS-1; i >= 0; --i) { if (iregs_to_save & (1 << i)) { offset += SIZEOF_REGISTER; } } } /* saved float registers */ #if SAVE_FP_REGS fregs_to_restore = (cfg->used_float_regs & MONO_ARCH_CALLEE_SAVED_FREGS); if (fregs_to_restore) { for (i = MONO_MAX_FREGS-1; i >= 0; --i) { if (fregs_to_restore & (1 << i)) { offset += sizeof(double); } } } #endif #if _MIPS_SIM == _ABIO32 /* Now add space for saving the ra */ offset += TARGET_SIZEOF_VOID_P; /* change sign? 
*/ offset = (offset + MIPS_STACK_ALIGNMENT - 1) & ~(MIPS_STACK_ALIGNMENT - 1); cfg->stack_offset = offset; cfg->arch.local_alloc_offset = cfg->stack_offset; #endif /* * Now allocate stack slots for the int arg regs (a0 - a3) * On MIPS o32, these are just above the incoming stack pointer * Even if the arg has been assigned to a regvar, it gets a stack slot */ /* Return struct-by-value results in a hidden first argument */ if (MONO_TYPE_ISSTRUCT (sig->ret)) { cfg->vret_addr->opcode = OP_REGOFFSET; cfg->vret_addr->inst_c0 = mips_a0; cfg->vret_addr->inst_offset = offset; cfg->vret_addr->inst_basereg = frame_reg; offset += SIZEOF_REGISTER; } for (i = 0; i < sig->param_count + sig->hasthis; ++i) { inst = cfg->args [i]; if (inst->opcode != OP_REGVAR) { MonoType *arg_type; if (sig->hasthis && (i == 0)) arg_type = mono_get_object_type (); else arg_type = sig->params [i - sig->hasthis]; inst->opcode = OP_REGOFFSET; size = mono_type_size (arg_type, &align); if (size < SIZEOF_REGISTER) { size = SIZEOF_REGISTER; align = SIZEOF_REGISTER; } inst->inst_basereg = frame_reg; offset = (offset + align - 1) & ~(align - 1); inst->inst_offset = offset; offset += size; if (cfg->verbose_level > 1) printf ("allocating param %d to fp[%d]\n", i, inst->inst_offset); } else { #if _MIPS_SIM == _ABIO32 /* o32: Even a0-a3 get stack slots */ size = SIZEOF_REGISTER; align = SIZEOF_REGISTER; inst->inst_basereg = frame_reg; offset = (offset + align - 1) & ~(align - 1); inst->inst_offset = offset; offset += size; if (cfg->verbose_level > 1) printf ("allocating param %d to fp[%d]\n", i, inst->inst_offset); #endif } } #if _MIPS_SIM == _ABIN32 /* Now add space for saving the ra */ offset += TARGET_SIZEOF_VOID_P; /* change sign? */ offset = (offset + MIPS_STACK_ALIGNMENT - 1) & ~(MIPS_STACK_ALIGNMENT - 1); cfg->stack_offset = offset; cfg->arch.local_alloc_offset = cfg->stack_offset; #endif } void mono_arch_create_vars (MonoCompile *cfg) { MonoMethodSignature *sig; sig = mono_method_signature_internal (cfg->method); if (MONO_TYPE_ISSTRUCT (sig->ret)) { cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG); if (G_UNLIKELY (cfg->verbose_level > 1)) { printf ("vret_addr = "); mono_print_ins (cfg->vret_addr); } } } /* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode, * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info */ /* * take the arguments and generate the arch-specific * instructions to properly call the function in call. * This includes pushing, moving arguments to the right register * etc. * Issue: who does the spilling if needed, and when? */ static void emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo) { MonoMethodSignature *tmp_sig; MonoInst *sig_arg; if (MONO_IS_TAILCALL_OPCODE (call)) NOT_IMPLEMENTED; /* FIXME: Add support for signature tokens to AOT */ cfg->disable_aot = TRUE; /* * mono_ArgIterator_Setup assumes the signature cookie is * passed first and all the arguments which were before it are * passed on the stack after the signature. So compensate by * passing a different signature. 
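 *
 * Hedged illustration: for foo (int a, __arglist) called as
 * foo (1, 2.0, 3), sentinelpos is 1, so tmp_sig below keeps only the
 * trailing { double, int } part of the signature and the cookie is
 * stored at cinfo->sig_cookie.offset relative to sp.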
*/
	tmp_sig = mono_metadata_signature_dup (call->signature);
	tmp_sig->param_count -= call->signature->sentinelpos;
	tmp_sig->sentinelpos = 0;
	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

	MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
	sig_arg->dreg = mono_alloc_ireg (cfg);
	sig_arg->inst_p0 = tmp_sig;
	MONO_ADD_INS (cfg->cbb, sig_arg);

	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, mips_sp, cinfo->sig_cookie.offset, sig_arg->dreg);
}

void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
	MonoInst *in, *ins;
	MonoMethodSignature *sig;
	int i, n;
	CallInfo *cinfo;
	int is_virtual = 0;

	sig = call->signature;
	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg->mempool, sig);
	if (cinfo->struct_ret)
		call->used_iregs |= 1 << cinfo->struct_ret;

	for (i = 0; i < n; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		MonoType *t;

		if (i >= sig->hasthis)
			t = sig->params [i - sig->hasthis];
		else
			t = mono_get_int_type ();
		t = mini_get_underlying_type (t);

		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Emit the signature cookie just before the implicit arguments */
			emit_sig_cookie (cfg, call, cinfo);
		}

		if (is_virtual && i == 0) {
			/* the argument will be attached to the call instruction */
			in = call->args [i];
			call->used_iregs |= 1 << ainfo->reg;
			continue;
		}
		in = call->args [i];
		if (ainfo->storage == ArgInIReg) {
#if SIZEOF_REGISTER == 4
			if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = MONO_LVREG_LS (in->dreg);
				MONO_ADD_INS (cfg->cbb, ins);
				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + ls_word_idx, FALSE);

				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = MONO_LVREG_MS (in->dreg);
				MONO_ADD_INS (cfg->cbb, ins);
				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + ms_word_idx, FALSE);
			} else
#endif
			if (!m_type_is_byref (t) && (t->type == MONO_TYPE_R4)) {
				int freg;

#if PROMOTE_R4_TO_R8
				/* ??? - convert to single first?
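				 * (As the sequence below stands, when
				 * PROMOTE_R4_TO_R8 is set the R4 value is widened
				 * via OP_MIPS_CVTSD before OP_MIPS_MFC1S moves the
				 * FP bits into the integer argument register.)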
*/ MONO_INST_NEW (cfg, ins, OP_MIPS_CVTSD); ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); freg = ins->dreg; #else freg = in->dreg; #endif /* trying to load float value into int registers */ MONO_INST_NEW (cfg, ins, OP_MIPS_MFC1S); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = freg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE); } else if (!m_type_is_byref (t) && (t->type == MONO_TYPE_R8)) { /* trying to load float value into int registers */ MONO_INST_NEW (cfg, ins, OP_MIPS_MFC1D); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE); } else { MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE); } } else if (ainfo->storage == ArgStructByAddr) { MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->opcode = OP_OUTARG_VT; ins->sreg1 = in->dreg; ins->klass = in->klass; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); } else if (ainfo->storage == ArgStructByVal) { /* this is further handled in mono_arch_emit_outarg_vt () */ MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->opcode = OP_OUTARG_VT; ins->sreg1 = in->dreg; ins->klass = in->klass; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); } else if (ainfo->storage == ArgOnStack) { if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, mips_sp, ainfo->offset, in->dreg); } else if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) { if (t->type == MONO_TYPE_R8) MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, mips_sp, ainfo->offset, in->dreg); else MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, mips_sp, ainfo->offset, in->dreg); } else { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, mips_sp, ainfo->offset, in->dreg); } } else if (ainfo->storage == ArgInFReg) { if (t->type == MONO_TYPE_VALUETYPE) { /* this is further handled in mono_arch_emit_outarg_vt () */ MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->opcode = OP_OUTARG_VT; ins->sreg1 = in->dreg; ins->klass = in->klass; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); cfg->flags |= MONO_CFG_HAS_FPOUT; } else { int dreg = mono_alloc_freg (cfg); if (ainfo->size == 4) { MONO_EMIT_NEW_UNALU (cfg, OP_MIPS_CVTSD, dreg, in->dreg); } else { MONO_INST_NEW (cfg, ins, OP_FMOVE); ins->dreg = dreg; ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); } mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE); cfg->flags |= MONO_CFG_HAS_FPOUT; } } else { g_assert_not_reached (); } } /* Handle the case where there are no implicit arguments */ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) emit_sig_cookie (cfg, call, cinfo); if (cinfo->struct_ret) { MonoInst *vtarg; MONO_INST_NEW (cfg, vtarg, OP_MOVE); vtarg->sreg1 = call->vret_var->dreg; vtarg->dreg = mono_alloc_preg (cfg); MONO_ADD_INS (cfg->cbb, vtarg); 
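		/* The hidden valuetype-return address moved above travels in
		 * whichever register get_call_info () recorded in
		 * cinfo->struct_ret. */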
mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->struct_ret, FALSE); } #if 0 /* * Reverse the call->out_args list. */ { MonoInst *prev = NULL, *list = call->out_args, *next; while (list) { next = list->next; list->next = prev; prev = list; list = next; } call->out_args = prev; } #endif call->stack_usage = cinfo->stack_usage; cfg->param_area = MAX (cfg->param_area, cinfo->stack_usage); #if _MIPS_SIM == _ABIO32 /* a0-a3 always present */ cfg->param_area = MAX (cfg->param_area, 4 * SIZEOF_REGISTER); #endif cfg->param_area = (cfg->param_area + MIPS_STACK_ALIGNMENT - 1) & ~(MIPS_STACK_ALIGNMENT - 1); cfg->flags |= MONO_CFG_HAS_CALLS; /* * should set more info in call, such as the stack space * used by the args that needs to be added back to esp */ } void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src) { MonoCallInst *call = (MonoCallInst*)ins->inst_p0; ArgInfo *ainfo = ins->inst_p1; int ovf_size = ainfo->vtsize; int doffset = ainfo->offset; int i, soffset, dreg; if (ainfo->storage == ArgStructByVal) { #if 0 if (cfg->verbose_level > 0) { char* nm = mono_method_full_name (cfg->method, TRUE); g_print ("Method %s outarg_vt struct doffset=%d ainfo->size=%d ovf_size=%d\n", nm, doffset, ainfo->size, ovf_size); g_free (nm); } #endif soffset = 0; for (i = 0; i < ainfo->size; ++i) { dreg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset); mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE); soffset += SIZEOF_REGISTER; } if (ovf_size != 0) { mini_emit_memcpy (cfg, mips_sp, doffset, src->dreg, soffset, ovf_size * sizeof (target_mgreg_t), TARGET_SIZEOF_VOID_P); } } else if (ainfo->storage == ArgInFReg) { int tmpr = mono_alloc_freg (cfg); if (ainfo->size == 4) MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, tmpr, src->dreg, 0); else MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmpr, src->dreg, 0); dreg = mono_alloc_freg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, dreg, tmpr); mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE); } else { MonoInst *vtcopy = mono_compile_create_var (cfg, m_class_get_byval_arg (src->klass), OP_LOCAL); MonoInst *load; guint32 size; /* FIXME: alignment? 
*/ if (call->signature->pinvoke && !call->signature->marshalling_disabled) { size = mono_type_native_stack_size (m_class_get_byval_arg (src->klass), NULL); vtcopy->backend.is_pinvoke = 1; } else { size = mini_type_stack_size (m_class_get_byval_arg (src->klass), NULL); } if (size > 0) g_assert (ovf_size > 0); EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype); mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, TARGET_SIZEOF_VOID_P); if (ainfo->offset) MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, mips_at, ainfo->offset, load->dreg); else mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg, FALSE); } } void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val) { MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret); if (!m_type_is_byref (ret)) { #if (SIZEOF_REGISTER == 4) if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) { MonoInst *ins; MONO_INST_NEW (cfg, ins, OP_SETLRET); ins->sreg1 = MONO_LVREG_LS (val->dreg); ins->sreg2 = MONO_LVREG_MS (val->dreg); MONO_ADD_INS (cfg->cbb, ins); return; } #endif if (ret->type == MONO_TYPE_R8) { MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg); return; } if (ret->type == MONO_TYPE_R4) { MONO_EMIT_NEW_UNALU (cfg, OP_MIPS_CVTSD, cfg->ret->dreg, val->dreg); return; } } MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg); } void mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *n, *last_ins = NULL; if (cfg->verbose_level > 2) g_print ("Basic block %d peephole pass 1\n", bb->block_num); ins = bb->code; MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) { if (cfg->verbose_level > 2) mono_print_ins_index (0, ins); switch (ins->opcode) { #if 0 case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: /* * OP_IADD reg2, reg1, const1 * OP_LOAD_MEMBASE const2(reg2), reg3 * -> * OP_LOAD_MEMBASE (const1+const2)(reg1), reg3 */ if (last_ins && (last_ins->opcode == OP_IADD_IMM || last_ins->opcode == OP_ADD_IMM) && (last_ins->dreg == ins->inst_basereg) && (last_ins->sreg1 != last_ins->dreg)){ int const1 = last_ins->inst_imm; int const2 = ins->inst_offset; if (mips_is_imm16 (const1 + const2)) { ins->inst_basereg = last_ins->sreg1; ins->inst_offset = const1 + const2; } } break; #endif } last_ins = ins; ins = ins->next; } bb->last_ins = last_ins; } void mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *n, *last_ins = NULL; ins = bb->code; MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) { MonoInst *last_ins = ins->prev; switch (ins->opcode) { case OP_MUL_IMM: /* remove unnecessary multiplication with 1 */ if (ins->inst_imm == 1) { if (ins->dreg != ins->sreg1) { ins->opcode = OP_MOVE; } else { MONO_DELETE_INS (bb, ins); continue; } } else if (ins->inst_imm > 0) { int power2 = mono_is_power_of_two (ins->inst_imm); if (power2 > 0) { ins->opcode = OP_SHL_IMM; ins->inst_imm = power2; } } break; case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: /* * OP_STORE_MEMBASE_REG reg, offset(basereg) * OP_LOAD_MEMBASE offset(basereg), reg */ if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG || last_ins->opcode == OP_STORE_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { if (ins->dreg == last_ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } else { //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++); ins->opcode = OP_MOVE; ins->sreg1 = last_ins->sreg1; } break; } /* * Note: reg1 must be different from the basereg in the second load * 
OP_LOAD_MEMBASE offset(basereg), reg1 * OP_LOAD_MEMBASE offset(basereg), reg2 * --> * OP_LOAD_MEMBASE offset(basereg), reg1 * OP_MOVE reg1, reg2 */ if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE || last_ins->opcode == OP_LOAD_MEMBASE) && ins->inst_basereg != last_ins->dreg && ins->inst_basereg == last_ins->inst_basereg && ins->inst_offset == last_ins->inst_offset) { if (ins->dreg == last_ins->dreg) { MONO_DELETE_INS (bb, ins); continue; } else { ins->opcode = OP_MOVE; ins->sreg1 = last_ins->dreg; } //g_assert_not_reached (); break; } #if 0 /* * OP_STORE_MEMBASE_IMM imm, offset(basereg) * OP_LOAD_MEMBASE offset(basereg), reg * --> * OP_STORE_MEMBASE_IMM imm, offset(basereg) * OP_ICONST reg, imm */ if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM || last_ins->opcode == OP_STORE_MEMBASE_IMM) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++); ins->opcode = OP_ICONST; ins->inst_c0 = last_ins->inst_imm; g_assert_not_reached (); // check this rule break; } #endif break; case OP_LOADU1_MEMBASE: case OP_LOADI1_MEMBASE: if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1; ins->sreg1 = last_ins->sreg1; } break; case OP_LOADU2_MEMBASE: case OP_LOADI2_MEMBASE: if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2; ins->sreg1 = last_ins->sreg1; } break; case OP_ICONV_TO_I4: case OP_ICONV_TO_U4: case OP_MOVE: ins->opcode = OP_MOVE; /* * OP_MOVE reg, reg */ if (ins->dreg == ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } /* * OP_MOVE sreg, dreg * OP_MOVE dreg, sreg */ if (last_ins && last_ins->opcode == OP_MOVE && ins->sreg1 == last_ins->dreg && ins->dreg == last_ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } break; } last_ins = ins; ins = ins->next; } bb->last_ins = last_ins; } void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins) { int tmp1 = -1; int tmp2 = -1; int tmp3 = -1; int tmp4 = -1; int tmp5 = -1; switch (ins->opcode) { case OP_LADD: tmp1 = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+1, ins->sreg1+1, ins->sreg2+1); MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->dreg+1, ins->sreg1+1); MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+2, ins->sreg1+2, ins->sreg2+2); MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+2, ins->dreg+2, tmp1); NULLIFY_INS(ins); break; case OP_LADD_IMM: tmp1 = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, ins->dreg+1, ins->sreg1+1, ins_get_l_low (ins)); MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->dreg+1, ins->sreg1+1); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, ins->dreg+2, ins->sreg1+2, ins_get_l_high (ins)); MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+2, ins->dreg+2, tmp1); NULLIFY_INS(ins); break; case OP_LSUB: tmp1 = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+1, ins->sreg1+1, ins->sreg2+1); MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->sreg1+1, ins->dreg+1); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->sreg1+2, ins->sreg2+2); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->dreg+2, tmp1); NULLIFY_INS(ins); break; case OP_LSUB_IMM: tmp1 = 
mono_alloc_ireg (cfg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, ins->dreg+1, ins->sreg1+1, ins_get_l_low (ins));
		MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->sreg1+1, ins->dreg+1);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, ins->dreg+2, ins->sreg1+2, ins_get_l_high (ins));
		MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->dreg+2, tmp1);
		NULLIFY_INS(ins);
		break;
	case OP_LNEG:
		tmp1 = mono_alloc_ireg (cfg);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+1, mips_zero, ins->sreg1+1);
		MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, mips_zero, ins->dreg+1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, mips_zero, ins->sreg1+2);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->dreg+2, tmp1);
		NULLIFY_INS(ins);
		break;
	case OP_LADD_OVF:
		tmp1 = mono_alloc_ireg (cfg);
		tmp2 = mono_alloc_ireg (cfg);
		tmp3 = mono_alloc_ireg (cfg);
		tmp4 = mono_alloc_ireg (cfg);
		tmp5 = mono_alloc_ireg (cfg);

		MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+1, ins->sreg1+1, ins->sreg2+1);

		/* tmp5 holds the carry from the low 32 bits to the high 32 bits */
		MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp5, ins->dreg+1, ins->sreg1+1);

		/* add the high 32-bits, and add in the carry from the low 32-bits */
		MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+2, ins->sreg1+2, ins->sreg2+2);
		MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+2, tmp5, ins->dreg+2);

		/* Overflow happens if
		 *	neg + neg = pos    or
		 *	pos + pos = neg
		 * XOR of the high bits returns 0 if the signs match
		 * XOR of that with the high bit of the result returns 1 on overflow.
		 */

		/* tmp1 = 0 if the signs of the two inputs match, 1 otherwise */
		MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp1, ins->sreg1+2, ins->sreg2+2);

		/* set bit31 of tmp2 to 0 if bit31 of the result differs from bit31 of the operands */
		MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp2, ins->dreg+2, ins->sreg2+2);
		MONO_EMIT_NEW_UNALU (cfg, OP_INOT, tmp2, tmp2);

		/* OR(tmp1, tmp2) = 0 if both conditions are true */
		MONO_EMIT_NEW_BIALU (cfg, OP_IOR, tmp3, tmp2, tmp1);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, tmp4, tmp3, 31);

		/* Now, if (tmp4 == 0) then overflow */
		MONO_EMIT_NEW_COMPARE_EXC (cfg, EQ, tmp4, mips_zero, "OverflowException");
		NULLIFY_INS(ins);
		break;
	case OP_LADD_OVF_UN:
		tmp1 = mono_alloc_ireg (cfg);
		tmp2 = mono_alloc_ireg (cfg);

		MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+1, ins->sreg1+1, ins->sreg2+1);
		MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->dreg+1, ins->sreg1+1);
		MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+2, ins->sreg1+2, ins->sreg2+2);
		MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg+2, tmp1, ins->dreg+2);
		MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp2, ins->dreg+2, ins->sreg1+2);
		MONO_EMIT_NEW_COMPARE_EXC (cfg, NE_UN, tmp2, mips_zero, "OverflowException");
		NULLIFY_INS(ins);
		break;
	case OP_LSUB_OVF:
		tmp1 = mono_alloc_ireg (cfg);
		tmp2 = mono_alloc_ireg (cfg);
		tmp3 = mono_alloc_ireg (cfg);
		tmp4 = mono_alloc_ireg (cfg);
		tmp5 = mono_alloc_ireg (cfg);

		MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+1, ins->sreg1+1, ins->sreg2+1);
		MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp5, ins->sreg1+1, ins->dreg+1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->sreg1+2, ins->sreg2+2);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->dreg+2, tmp5);

		/* Overflow happens if
		 *	neg - pos = pos    or
		 *	pos - neg = neg
		 * XOR of bit31 of the lhs & rhs = 1 if the signs are different
		 *
		 * tmp1 = (lhs ^ rhs)
		 * tmp2 = (lhs ^ result)
		 * if ((tmp1 < 0) & (tmp2 < 0)) then overflow
		 */
		MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp1, ins->sreg1+2, ins->sreg2+2);
		MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp2, ins->sreg1+2, ins->dreg+2);
		MONO_EMIT_NEW_BIALU (cfg,
OP_IAND, tmp3, tmp2, tmp1); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, tmp4, tmp3, 31); /* Now, if (tmp4 == 1) then overflow */ MONO_EMIT_NEW_COMPARE_EXC (cfg, NE_UN, tmp4, mips_zero, "OverflowException"); NULLIFY_INS(ins); break; case OP_LSUB_OVF_UN: tmp1 = mono_alloc_ireg (cfg); tmp2 = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+1, ins->sreg1+1, ins->sreg2+1); MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->sreg1+1, ins->dreg+1); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->sreg1+2, ins->sreg2+2); MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg+2, ins->dreg+2, tmp1); MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp2, ins->sreg1+2, ins->dreg+2); MONO_EMIT_NEW_COMPARE_EXC (cfg, NE_UN, tmp2, mips_zero, "OverflowException"); NULLIFY_INS(ins); break; case OP_LCONV_TO_OVF_I4_2: tmp1 = mono_alloc_ireg (cfg); /* Overflows if reg2 != sign extension of reg1 */ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, tmp1, ins->sreg1, 31); MONO_EMIT_NEW_COMPARE_EXC (cfg, NE_UN, ins->sreg2, tmp1, "OverflowException"); MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ins->dreg, ins->sreg1); NULLIFY_INS(ins); break; default: break; } } void mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins) { int tmp1 = -1; int tmp2 = -1; int tmp3 = -1; int tmp4 = -1; int tmp5 = -1; switch (ins->opcode) { case OP_IADD_OVF: tmp1 = mono_alloc_ireg (cfg); tmp2 = mono_alloc_ireg (cfg); tmp3 = mono_alloc_ireg (cfg); tmp4 = mono_alloc_ireg (cfg); tmp5 = mono_alloc_ireg (cfg); /* add the operands */ MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg, ins->sreg1, ins->sreg2); /* Overflow happens if * neg + neg = pos or * pos + pos = neg * * (bit31s of operands match) AND (bit31 of operand != bit31 of result) * XOR of the high bit returns 0 if the signs match * XOR of that with the high bit of the result return 1 if overflow. 
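	 *
	 * Worked example (a sketch): 0x7fffffff + 1 = 0x80000000.
	 * sreg1 ^ sreg2 has bit 31 clear (the input signs match), while
	 * ~(dreg ^ sreg2) also has bit 31 clear (the result sign flipped),
	 * so the OR of the two shifted right by 31 yields 0 and the
	 * EQ-with-zero check below raises OverflowException.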
*/

		/* tmp1 = 0 if the signs of the two inputs match, 1 otherwise */
		MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp1, ins->sreg1, ins->sreg2);

		/* set bit31 of tmp2 to 0 if bit31 of the result differs from bit31 of the operands */
		MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp2, ins->dreg, ins->sreg2);
		MONO_EMIT_NEW_UNALU (cfg, OP_INOT, tmp3, tmp2);

		/* OR(tmp1, tmp2) = 0 if both conditions are true */
		MONO_EMIT_NEW_BIALU (cfg, OP_IOR, tmp4, tmp3, tmp1);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, tmp5, tmp4, 31);

		/* Now, if (tmp5 == 0) then overflow */
		MONO_EMIT_NEW_COMPARE_EXC (cfg, EQ, tmp5, mips_zero, "OverflowException");

		/* Make decompose and method-to-ir.c happy, last insn writes dreg */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ins->dreg, ins->dreg);
		NULLIFY_INS(ins);
		break;
	case OP_IADD_OVF_UN:
		tmp1 = mono_alloc_ireg (cfg);

		MONO_EMIT_NEW_BIALU (cfg, OP_IADD, ins->dreg, ins->sreg1, ins->sreg2);
		MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->dreg, ins->sreg1);
		MONO_EMIT_NEW_COMPARE_EXC (cfg, NE_UN, tmp1, mips_zero, "OverflowException");

		/* Make decompose and method-to-ir.c happy, last insn writes dreg */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ins->dreg, ins->dreg);
		NULLIFY_INS(ins);
		break;
	case OP_ISUB_OVF:
		tmp1 = mono_alloc_ireg (cfg);
		tmp2 = mono_alloc_ireg (cfg);
		tmp3 = mono_alloc_ireg (cfg);
		tmp4 = mono_alloc_ireg (cfg);
		tmp5 = mono_alloc_ireg (cfg);

		/* subtract the operands */
		MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg, ins->sreg1, ins->sreg2);

		/* Overflow happens if
		 *	neg - pos = pos    or
		 *	pos - neg = neg
		 * XOR of bit31 of the lhs & rhs = 1 if the signs are different
		 *
		 * tmp1 = (lhs ^ rhs)
		 * tmp2 = (lhs ^ result)
		 * if ((tmp1 < 0) & (tmp2 < 0)) then overflow
		 */

		/* tmp3 = 1 if the signs of the two inputs differ */
		MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp1, ins->sreg1, ins->sreg2);
		MONO_EMIT_NEW_BIALU (cfg, OP_IXOR, tmp2, ins->sreg1, ins->dreg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MIPS_SLTI, tmp3, tmp1, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MIPS_SLTI, tmp4, tmp2, 0);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, tmp5, tmp4, tmp3);
		MONO_EMIT_NEW_COMPARE_EXC (cfg, NE_UN, tmp5, mips_zero, "OverflowException");

		/* Make decompose and method-to-ir.c happy, last insn writes dreg */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ins->dreg, ins->dreg);
		NULLIFY_INS(ins);
		break;
	case OP_ISUB_OVF_UN:
		tmp1 = mono_alloc_ireg (cfg);

		MONO_EMIT_NEW_BIALU (cfg, OP_ISUB, ins->dreg, ins->sreg1, ins->sreg2);
		MONO_EMIT_NEW_BIALU (cfg, OP_MIPS_SLTU, tmp1, ins->sreg1, ins->dreg);
		MONO_EMIT_NEW_COMPARE_EXC (cfg, NE_UN, tmp1, mips_zero, "OverflowException");

		/* Make decompose and method-to-ir.c happy, last insn writes dreg */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ins->dreg, ins->dreg);
		NULLIFY_INS(ins);
		break;
	}
}

static int
map_to_reg_reg_op (int op)
{
	switch (op) {
	case OP_ADD_IMM: return OP_IADD;
	case OP_SUB_IMM: return OP_ISUB;
	case OP_AND_IMM: return OP_IAND;
	case OP_COMPARE_IMM: return OP_COMPARE;
	case OP_ICOMPARE_IMM: return OP_ICOMPARE;
	case OP_LCOMPARE_IMM: return OP_LCOMPARE;
	case OP_ADDCC_IMM: return OP_IADDCC;
	case OP_ADC_IMM: return OP_IADC;
	case OP_SUBCC_IMM: return OP_ISUBCC;
	case OP_SBB_IMM: return OP_ISBB;
	case OP_OR_IMM: return OP_IOR;
	case OP_XOR_IMM: return OP_IXOR;
	case OP_MUL_IMM: return OP_IMUL;
	case OP_LOAD_MEMBASE: return OP_LOAD_MEMINDEX;
	case OP_LOADI4_MEMBASE: return OP_LOADI4_MEMINDEX;
	case OP_LOADU4_MEMBASE: return OP_LOADU4_MEMINDEX;
	case OP_LOADU1_MEMBASE: return OP_LOADU1_MEMINDEX;
	case OP_LOADI2_MEMBASE: return OP_LOADI2_MEMINDEX;
	case OP_LOADU2_MEMBASE: return OP_LOADU2_MEMINDEX;
	case OP_LOADI1_MEMBASE: return OP_LOADI1_MEMINDEX;
	case
OP_LOADR4_MEMBASE: return OP_LOADR4_MEMINDEX; case OP_LOADR8_MEMBASE: return OP_LOADR8_MEMINDEX; case OP_STOREI1_MEMBASE_REG: return OP_STOREI1_MEMINDEX; case OP_STOREI2_MEMBASE_REG: return OP_STOREI2_MEMINDEX; case OP_STOREI4_MEMBASE_REG: return OP_STOREI4_MEMINDEX; case OP_STORE_MEMBASE_REG: return OP_STORE_MEMINDEX; case OP_STORER4_MEMBASE_REG: return OP_STORER4_MEMINDEX; case OP_STORER8_MEMBASE_REG: return OP_STORER8_MEMINDEX; case OP_STORE_MEMBASE_IMM: return OP_STORE_MEMBASE_REG; case OP_STOREI1_MEMBASE_IMM: return OP_STOREI1_MEMBASE_REG; case OP_STOREI2_MEMBASE_IMM: return OP_STOREI2_MEMBASE_REG; case OP_STOREI4_MEMBASE_IMM: return OP_STOREI4_MEMBASE_REG; case OP_STOREI8_MEMBASE_IMM: return OP_STOREI8_MEMBASE_REG; } if (mono_op_imm_to_op (op) == -1) g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (op)); return mono_op_imm_to_op (op); } static int map_to_mips_op (int op) { switch (op) { case OP_FBEQ: return OP_MIPS_FBEQ; case OP_FBGE: return OP_MIPS_FBGE; case OP_FBGT: return OP_MIPS_FBGT; case OP_FBLE: return OP_MIPS_FBLE; case OP_FBLT: return OP_MIPS_FBLT; case OP_FBNE_UN: return OP_MIPS_FBNE; case OP_FBGE_UN: return OP_MIPS_FBGE_UN; case OP_FBGT_UN: return OP_MIPS_FBGT_UN; case OP_FBLE_UN: return OP_MIPS_FBLE_UN; case OP_FBLT_UN: return OP_MIPS_FBLT_UN; case OP_FCEQ: case OP_FCGT: case OP_FCGT_UN: case OP_FCLT: case OP_FCLT_UN: default: g_warning ("unknown opcode %s in %s()\n", mono_inst_name (op), __FUNCTION__); g_assert_not_reached (); } } #define NEW_INS(cfg,after,dest,op) do { \ MONO_INST_NEW((cfg), (dest), (op)); \ mono_bblock_insert_after_ins (bb, (after), (dest)); \ } while (0) #define INS(pos,op,_dreg,_sreg1,_sreg2) do { \ MonoInst *temp; \ MONO_INST_NEW(cfg, temp, (op)); \ mono_bblock_insert_after_ins (bb, (pos), temp); \ temp->dreg = (_dreg); \ temp->sreg1 = (_sreg1); \ temp->sreg2 = (_sreg2); \ pos = temp; \ } while (0) #define INS_IMM(pos,op,_dreg,_sreg1,_imm) do { \ MonoInst *temp; \ MONO_INST_NEW(cfg, temp, (op)); \ mono_bblock_insert_after_ins (bb, (pos), temp); \ temp->dreg = (_dreg); \ temp->sreg1 = (_sreg1); \ temp->inst_c0 = (_imm); \ pos = temp; \ } while (0) /* * Remove from the instruction list the instructions that can't be * represented with very simple instructions with no register * requirements. 
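 *
 * Illustrative example: OP_ADD_IMM dreg, sreg1, 0x12345 cannot encode
 * its immediate in the signed 16 bits of addiu, so it is rewritten
 * below as
 *
 *   OP_ICONST temp, 0x12345
 *   OP_IADD   dreg, sreg1, temp
 *
 * via map_to_reg_reg_op ().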
*/ void mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *next, *temp, *last_ins = NULL; int imm; #if 1 if (cfg->verbose_level > 2) { int idx = 0; g_print ("BASIC BLOCK %d (before lowering)\n", bb->block_num); MONO_BB_FOR_EACH_INS (bb, ins) { mono_print_ins_index (idx++, ins); } } #endif MONO_BB_FOR_EACH_INS (bb, ins) { loop_start: switch (ins->opcode) { case OP_COMPARE: case OP_ICOMPARE: case OP_LCOMPARE: next = ins->next; /* Branch opts can eliminate the branch */ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) { NULLIFY_INS(ins); break; } break; case OP_COMPARE_IMM: case OP_ICOMPARE_IMM: case OP_LCOMPARE_IMM: next = ins->next; /* Branch opts can eliminate the branch */ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) { NULLIFY_INS(ins); break; } if (ins->inst_imm) { NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; last_ins = temp; } else { ins->sreg2 = mips_zero; } if (ins->opcode == OP_COMPARE_IMM) ins->opcode = OP_COMPARE; else if (ins->opcode == OP_ICOMPARE_IMM) ins->opcode = OP_ICOMPARE; else if (ins->opcode == OP_LCOMPARE_IMM) ins->opcode = OP_LCOMPARE; goto loop_start; case OP_IDIV_UN_IMM: case OP_IDIV_IMM: case OP_IREM_IMM: case OP_IREM_UN_IMM: NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; if (ins->opcode == OP_IDIV_IMM) ins->opcode = OP_IDIV; else if (ins->opcode == OP_IREM_IMM) ins->opcode = OP_IREM; else if (ins->opcode == OP_IDIV_UN_IMM) ins->opcode = OP_IDIV_UN; else if (ins->opcode == OP_IREM_UN_IMM) ins->opcode = OP_IREM_UN; last_ins = temp; /* handle rem separately */ goto loop_start; #if 0 case OP_AND_IMM: case OP_OR_IMM: case OP_XOR_IMM: if ((ins->inst_imm & 0xffff0000) && (ins->inst_imm & 0xffff)) { NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; #endif case OP_AND_IMM: case OP_IAND_IMM: case OP_OR_IMM: case OP_IOR_IMM: case OP_XOR_IMM: case OP_IXOR_IMM: /* unsigned 16 bit immediate */ if (ins->inst_imm & 0xffff0000) { NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; case OP_IADD_IMM: case OP_ADD_IMM: case OP_ADDCC_IMM: /* signed 16 bit immediate */ if (!mips_is_imm16 (ins->inst_imm)) { NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; case OP_SUB_IMM: case OP_ISUB_IMM: if (!mips_is_imm16 (-ins->inst_imm)) { NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; case OP_MUL_IMM: case OP_IMUL_IMM: if (ins->inst_imm == 1) { ins->opcode = OP_MOVE; break; } if (ins->inst_imm == 0) { ins->opcode = OP_ICONST; ins->inst_c0 = 0; break; } imm = (ins->inst_imm > 0) ? 
mono_is_power_of_two (ins->inst_imm) : -1; if (imm > 0) { ins->opcode = OP_SHL_IMM; ins->inst_imm = imm; break; } NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_LOCALLOC_IMM: NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg1 = temp->dreg; ins->opcode = OP_LOCALLOC; break; case OP_LOADR4_MEMBASE: case OP_STORER4_MEMBASE_REG: /* we can do two things: load the immed in a register * and use an indexed load, or see if the immed can be * represented as an ad_imm + a load with a smaller offset * that fits. We just do the first for now, optimize later. */ if (mips_is_imm16 (ins->inst_offset)) break; NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_STORE_MEMBASE_IMM: case OP_STOREI1_MEMBASE_IMM: case OP_STOREI2_MEMBASE_IMM: case OP_STOREI4_MEMBASE_IMM: case OP_STOREI8_MEMBASE_IMM: if (!ins->inst_imm) { ins->sreg1 = mips_zero; ins->opcode = map_to_reg_reg_op (ins->opcode); } else { NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg1 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); last_ins = temp; goto loop_start; /* make it handle the possibly big ins->inst_offset */ } break; case OP_FCOMPARE: next = ins->next; /* Branch opts can eliminate the branch */ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) { NULLIFY_INS(ins); break; } g_assert(next); /* * remap compare/branch and compare/set * to MIPS specific opcodes. */ next->opcode = map_to_mips_op (next->opcode); next->sreg1 = ins->sreg1; next->sreg2 = ins->sreg2; NULLIFY_INS(ins); break; #if 0 case OP_R8CONST: case OP_R4CONST: NEW_INS (cfg, last_ins, temp, OP_ICONST); temp->inst_c0 = (guint32)ins->inst_p0; temp->dreg = mono_alloc_ireg (cfg); ins->inst_basereg = temp->dreg; ins->inst_offset = 0; ins->opcode = ins->opcode == OP_R4CONST? 
OP_LOADR4_MEMBASE: OP_LOADR8_MEMBASE; last_ins = temp; /* make it handle the possibly big ins->inst_offset * later optimize to use lis + load_membase */ goto loop_start; #endif case OP_IBEQ: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_BEQ, last_ins->sreg1, last_ins->sreg2); NULLIFY_INS(last_ins); break; case OP_IBNE_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_BNE, last_ins->sreg1, last_ins->sreg2); NULLIFY_INS(last_ins); break; case OP_IBGE: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLT, last_ins->sreg1, last_ins->sreg2); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BEQ, last_ins->dreg, mips_zero); break; case OP_IBGE_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLTU, last_ins->sreg1, last_ins->sreg2); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BEQ, last_ins->dreg, mips_zero); break; case OP_IBLT: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLT, last_ins->sreg1, last_ins->sreg2); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BNE, last_ins->dreg, mips_zero); break; case OP_IBLT_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLTU, last_ins->sreg1, last_ins->sreg2); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BNE, last_ins->dreg, mips_zero); break; case OP_IBLE: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLT, last_ins->sreg2, last_ins->sreg1); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BEQ, last_ins->dreg, mips_zero); break; case OP_IBLE_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLTU, last_ins->sreg2, last_ins->sreg1); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BEQ, last_ins->dreg, mips_zero); break; case OP_IBGT: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLT, last_ins->sreg2, last_ins->sreg1); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BNE, last_ins->dreg, mips_zero); break; case OP_IBGT_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(last_ins, OP_MIPS_SLTU, last_ins->sreg2, last_ins->sreg1); last_ins->dreg = mono_alloc_ireg (cfg); INS_REWRITE(ins, OP_MIPS_BNE, last_ins->dreg, mips_zero); break; case OP_CEQ: case OP_ICEQ: g_assert (ins_is_compare(last_ins)); last_ins->opcode = OP_IXOR; last_ins->dreg = mono_alloc_ireg(cfg); INS_REWRITE_IMM(ins, OP_MIPS_SLTIU, last_ins->dreg, 1); break; case OP_CLT: case OP_ICLT: INS_REWRITE(ins, OP_MIPS_SLT, last_ins->sreg1, last_ins->sreg2); NULLIFY_INS(last_ins); break; case OP_CLT_UN: case OP_ICLT_UN: INS_REWRITE(ins, OP_MIPS_SLTU, last_ins->sreg1, last_ins->sreg2); NULLIFY_INS(last_ins); break; case OP_CGT: case OP_ICGT: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_SLT, last_ins->sreg2, last_ins->sreg1); MONO_DELETE_INS(bb, last_ins); break; case OP_CGT_UN: case OP_ICGT_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_SLTU, last_ins->sreg2, last_ins->sreg1); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_EQ: case OP_COND_EXC_IEQ: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_EQ, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_GE: case OP_COND_EXC_IGE: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_GE, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_GT: case OP_COND_EXC_IGT: g_assert (ins_is_compare(last_ins)); 
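/* As with the other OP_COND_EXC_* cases: fold the preceding compare into a MIPS-specific conditional-exception opcode and delete the compare. */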
INS_REWRITE(ins, OP_MIPS_COND_EXC_GT, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_LE: case OP_COND_EXC_ILE: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_LE, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_LT: case OP_COND_EXC_ILT: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_LT, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_NE_UN: case OP_COND_EXC_INE_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_NE_UN, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_GE_UN: case OP_COND_EXC_IGE_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_GE_UN, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_GT_UN: case OP_COND_EXC_IGT_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_GT_UN, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_LE_UN: case OP_COND_EXC_ILE_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_LE_UN, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_LT_UN: case OP_COND_EXC_ILT_UN: g_assert (ins_is_compare(last_ins)); INS_REWRITE(ins, OP_MIPS_COND_EXC_LT_UN, last_ins->sreg1, last_ins->sreg2); MONO_DELETE_INS(bb, last_ins); break; case OP_COND_EXC_OV: case OP_COND_EXC_IOV: { int tmp1, tmp2, tmp3, tmp4, tmp5; MonoInst *pos = last_ins; /* Overflow happens if * neg + neg = pos or * pos + pos = neg * * (bit31s of operands match) AND (bit31 of operand * != bit31 of result) * XOR of the high bits returns 0 if the signs match * XOR of that with the high bit of the result returns 1 * if overflow occurred. */ g_assert (last_ins->opcode == OP_IADC); tmp1 = mono_alloc_ireg (cfg); tmp2 = mono_alloc_ireg (cfg); tmp3 = mono_alloc_ireg (cfg); tmp4 = mono_alloc_ireg (cfg); tmp5 = mono_alloc_ireg (cfg); /* bit31 of tmp1 = 0 if the signs of the two inputs match, else 1 */ INS (pos, OP_IXOR, tmp1, last_ins->sreg1, last_ins->sreg2); /* bit31 of tmp2 = 1 if the sign of the result differs from the sign of the operands */ INS (pos, OP_IXOR, tmp2, last_ins->dreg, last_ins->sreg2); INS (pos, OP_INOT, tmp3, tmp2, -1); /* bit31 of OR(tmp1, tmp3) = 0 only if both overflow conditions hold */ INS (pos, OP_IOR, tmp4, tmp3, tmp1); INS_IMM (pos, OP_SHR_IMM, tmp5, tmp4, 31); /* Now, if (tmp5 == 0) then overflow */ INS_REWRITE(ins, OP_MIPS_COND_EXC_EQ, tmp5, mips_zero); ins->dreg = -1; break; } case OP_COND_EXC_NO: case OP_COND_EXC_INO: g_assert_not_reached (); break; case OP_COND_EXC_C: case OP_COND_EXC_IC: g_assert_not_reached (); break; case OP_COND_EXC_NC: case OP_COND_EXC_INC: g_assert_not_reached (); break; } last_ins = ins; } bb->last_ins = last_ins; bb->max_vreg = cfg->next_vreg; #if 1 if (cfg->verbose_level > 2) { int idx = 0; g_print ("BASIC BLOCK %d (after lowering)\n", bb->block_num); MONO_BB_FOR_EACH_INS (bb, ins) { mono_print_ins_index (idx++, ins); } } #endif } static guchar* emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed) { /* sreg is a float, dreg is an integer reg.
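The double is truncated toward zero (mips_truncwd), moved to dreg with mips_mfc1, and then narrowed in place: unsigned sub-word sizes are masked, signed ones are sign-extended with a shift pair.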
mips_at is used as scratch */ #if 1 mips_truncwd (code, mips_ftemp, sreg); #else mips_cvtwd (code, mips_ftemp, sreg); #endif mips_mfc1 (code, dreg, mips_ftemp); if (!is_signed) { if (size == 1) mips_andi (code, dreg, dreg, 0xff); else if (size == 2) { mips_sll (code, dreg, dreg, 16); mips_srl (code, dreg, dreg, 16); } } else { if (size == 1) { mips_sll (code, dreg, dreg, 24); mips_sra (code, dreg, dreg, 24); } else if (size == 2) { mips_sll (code, dreg, dreg, 16); mips_sra (code, dreg, dreg, 16); } } return code; } /* * emit_load_volatile_arguments: * * Load volatile arguments from the stack to the original input registers. * Required before a tailcall. */ static guint8 * emit_load_volatile_arguments(MonoCompile *cfg, guint8 *code) { MonoMethod *method = cfg->method; MonoMethodSignature *sig; MonoInst *inst; CallInfo *cinfo; int i; sig = mono_method_signature_internal (method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; if (cinfo->struct_ret) { ArgInfo *ainfo = &cinfo->ret; inst = cfg->vret_addr; mips_lw (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); } for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; inst = cfg->args [i]; if (inst->opcode == OP_REGVAR) { if (ainfo->storage == ArgInIReg) MIPS_MOVE (code, ainfo->reg, inst->dreg); else if (ainfo->storage == ArgInFReg) g_assert_not_reached(); else if (ainfo->storage == ArgOnStack) { /* do nothing */ } else g_assert_not_reached (); } else { if (ainfo->storage == ArgInIReg) { g_assert (mips_is_imm16 (inst->inst_offset)); switch (ainfo->size) { case 1: mips_lb (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); break; case 2: mips_lh (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); break; case 0: /* XXX */ case 4: mips_lw (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); break; case 8: mips_lw (code, ainfo->reg, inst->inst_basereg, inst->inst_offset + ls_word_offset); mips_lw (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + ms_word_offset); break; default: g_assert_not_reached (); break; } } else if (ainfo->storage == ArgOnStack) { /* do nothing */ } else if (ainfo->storage == ArgInFReg) { g_assert (mips_is_imm16 (inst->inst_offset)); if (ainfo->size == 8) { #if _MIPS_SIM == _ABIO32 mips_lwc1 (code, ainfo->reg, inst->inst_basereg, inst->inst_offset + ls_word_offset); mips_lwc1 (code, ainfo->reg+1, inst->inst_basereg, inst->inst_offset + ms_word_offset); #elif _MIPS_SIM == _ABIN32 mips_ldc1 (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); #endif } else if (ainfo->size == 4) mips_lwc1 (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); else g_assert_not_reached (); } else if (ainfo->storage == ArgStructByVal) { int i; int doffset = inst->inst_offset; g_assert (mips_is_imm16 (inst->inst_offset)); g_assert (mips_is_imm16 (inst->inst_offset + ainfo->size * sizeof (target_mgreg_t))); for (i = 0; i < ainfo->size; ++i) { mips_lw (code, ainfo->reg + i, inst->inst_basereg, doffset); doffset += SIZEOF_REGISTER; } } else if (ainfo->storage == ArgStructByAddr) { g_assert (mips_is_imm16 (inst->inst_offset)); mips_lw (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); } else g_assert_not_reached (); } } return code; } static guint8* emit_reserve_param_area (MonoCompile *cfg, guint8 *code) { int size = cfg->param_area; size += MONO_ARCH_FRAME_ALIGNMENT - 1; size &= -MONO_ARCH_FRAME_ALIGNMENT; if (!size) return code; #if 0 ppc_lwz (code, ppc_r0, 0, ppc_sp); if (ppc_is_imm16 (-size)) { 
ppc_stwu (code, ppc_r0, -size, ppc_sp); } else { ppc_load (code, ppc_r12, -size); ppc_stwux (code, ppc_r0, ppc_sp, ppc_r12); } #endif return code; } static guint8* emit_unreserve_param_area (MonoCompile *cfg, guint8 *code) { int size = cfg->param_area; size += MONO_ARCH_FRAME_ALIGNMENT - 1; size &= -MONO_ARCH_FRAME_ALIGNMENT; if (!size) return code; #if 0 ppc_lwz (code, ppc_r0, 0, ppc_sp); if (ppc_is_imm16 (size)) { ppc_stwu (code, ppc_r0, size, ppc_sp); } else { ppc_load (code, ppc_r12, size); ppc_stwux (code, ppc_r0, ppc_sp, ppc_r12); } #endif return code; } void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins; MonoCallInst *call; guint8 *code = cfg->native_code + cfg->code_len; MonoInst *last_ins = NULL; int max_len, cpos; int ins_cnt = 0; /* we don't align basic blocks of loops on mips */ if (cfg->verbose_level > 2) g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset); cpos = bb->max_offset; MONO_BB_FOR_EACH_INS (bb, ins) { const guint offset = code - cfg->native_code; set_code_cursor (cfg, code); max_len = ins_get_size (ins->opcode); code = realloc_code (cfg, max_len); mono_debug_record_line_number (cfg, ins, offset); if (cfg->verbose_level > 2) { g_print (" @ 0x%x\t", offset); mono_print_ins_index (ins_cnt++, ins); } /* Check for virtual regs that snuck by */ g_assert ((ins->dreg >= -1) && (ins->dreg < 32)); switch (ins->opcode) { case OP_RELAXED_NOP: case OP_NOP: case OP_DUMMY_USE: case OP_DUMMY_ICONST: case OP_DUMMY_I8CONST: case OP_DUMMY_R8CONST: case OP_DUMMY_R4CONST: case OP_NOT_REACHED: case OP_NOT_NULL: break; case OP_IL_SEQ_POINT: mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); break; case OP_SEQ_POINT: { if (ins->flags & MONO_INST_SINGLE_STEP_LOC) { guint32 addr = (guint32)ss_trigger_page; mips_load_const (code, mips_t9, addr); mips_lw (code, mips_t9, mips_t9, 0); } mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); /* * A placeholder for a possible breakpoint inserted by * mono_arch_set_breakpoint (). 
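* Three NOPs are emitted below: room for the (up to) two instructions of * mips_load_const () plus the mips_lw that the breakpoint patch writes in.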
*/ /* mips_load_const () + mips_lw */ mips_nop (code); mips_nop (code); mips_nop (code); break; } case OP_BIGMUL: mips_mult (code, ins->sreg1, ins->sreg2); mips_mflo (code, ins->dreg); mips_mfhi (code, ins->dreg+1); break; case OP_BIGMUL_UN: mips_multu (code, ins->sreg1, ins->sreg2); mips_mflo (code, ins->dreg); mips_mfhi (code, ins->dreg+1); break; case OP_MEMORY_BARRIER: mips_sync (code, 0); break; case OP_STOREI1_MEMBASE_IMM: mips_load_const (code, mips_temp, ins->inst_imm); if (mips_is_imm16 (ins->inst_offset)) { mips_sb (code, mips_temp, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_sb (code, mips_temp, mips_at, ins->inst_destbasereg); } break; case OP_STOREI2_MEMBASE_IMM: mips_load_const (code, mips_temp, ins->inst_imm); if (mips_is_imm16 (ins->inst_offset)) { mips_sh (code, mips_temp, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_sh (code, mips_temp, mips_at, ins->inst_destbasereg); } break; case OP_STOREI8_MEMBASE_IMM: mips_load_const (code, mips_temp, ins->inst_imm); if (mips_is_imm16 (ins->inst_offset)) { mips_sd (code, mips_temp, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_sd (code, mips_temp, mips_at, ins->inst_destbasereg); } break; case OP_STORE_MEMBASE_IMM: case OP_STOREI4_MEMBASE_IMM: mips_load_const (code, mips_temp, ins->inst_imm); if (mips_is_imm16 (ins->inst_offset)) { mips_sw (code, mips_temp, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_sw (code, mips_temp, mips_at, ins->inst_destbasereg); } break; case OP_STOREI1_MEMBASE_REG: if (mips_is_imm16 (ins->inst_offset)) { mips_sb (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_destbasereg); mips_sb (code, ins->sreg1, mips_at, 0); } break; case OP_STOREI2_MEMBASE_REG: if (mips_is_imm16 (ins->inst_offset)) { mips_sh (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_destbasereg); mips_sh (code, ins->sreg1, mips_at, 0); } break; case OP_STORE_MEMBASE_REG: case OP_STOREI4_MEMBASE_REG: if (mips_is_imm16 (ins->inst_offset)) { mips_sw (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_destbasereg); mips_sw (code, ins->sreg1, mips_at, 0); } break; case OP_STOREI8_MEMBASE_REG: if (mips_is_imm16 (ins->inst_offset)) { mips_sd (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_destbasereg); mips_sd (code, ins->sreg1, mips_at, 0); } break; case OP_LOADU4_MEM: g_assert_not_reached (); //x86_mov_reg_imm (code, ins->dreg, ins->inst_p0); //x86_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4); break; case OP_LOADI8_MEMBASE: if (mips_is_imm16 (ins->inst_offset)) { mips_ld (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_basereg); mips_ld (code, ins->dreg, mips_at, 0); } break; case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: case OP_LOADU4_MEMBASE: g_assert (ins->dreg != -1); if (mips_is_imm16 (ins->inst_offset)) { mips_lw (code, 
ins->dreg, ins->inst_basereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_basereg); mips_lw (code, ins->dreg, mips_at, 0); } break; case OP_LOADI1_MEMBASE: if (mips_is_imm16 (ins->inst_offset)) { mips_lb (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_basereg); mips_lb (code, ins->dreg, mips_at, 0); } break; case OP_LOADU1_MEMBASE: if (mips_is_imm16 (ins->inst_offset)) { mips_lbu (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_basereg); mips_lbu (code, ins->dreg, mips_at, 0); } break; case OP_LOADI2_MEMBASE: if (mips_is_imm16 (ins->inst_offset)) { mips_lh (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_basereg); mips_lh (code, ins->dreg, mips_at, 0); } break; case OP_LOADU2_MEMBASE: if (mips_is_imm16 (ins->inst_offset)) { mips_lhu (code, ins->dreg, ins->inst_basereg, ins->inst_offset); } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_basereg); mips_lhu (code, ins->dreg, mips_at, 0); } break; case OP_ICONV_TO_I1: mips_sll (code, mips_at, ins->sreg1, 24); mips_sra (code, ins->dreg, mips_at, 24); break; case OP_ICONV_TO_I2: mips_sll (code, mips_at, ins->sreg1, 16); mips_sra (code, ins->dreg, mips_at, 16); break; case OP_ICONV_TO_U1: mips_andi (code, ins->dreg, ins->sreg1, 0xff); break; case OP_ICONV_TO_U2: mips_sll (code, mips_at, ins->sreg1, 16); mips_srl (code, ins->dreg, mips_at, 16); break; case OP_MIPS_SLT: mips_slt (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_MIPS_SLTI: g_assert (mips_is_imm16 (ins->inst_imm)); mips_slti (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_MIPS_SLTU: mips_sltu (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_MIPS_SLTIU: g_assert (mips_is_imm16 (ins->inst_imm)); mips_sltiu (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_BREAK: /* * gdb does not like encountering the hw breakpoint ins in the debugged code. * So instead of emitting a trap, we emit a call to a C function and place a * breakpoint there.
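* The 0x1f1f1f1f constant below is only a placeholder: the * MONO_PATCH_INFO_JIT_ICALL_ID entry recorded alongside it gets the load * patched to the real address of the mono_break icall.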
*/ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break)); mips_load (code, mips_t9, 0x1f1f1f1f); mips_jalr (code, mips_t9, mips_ra); mips_nop (code); break; case OP_IADD: mips_addu (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_LADD: mips_daddu (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_ADD_IMM: case OP_IADD_IMM: g_assert (mips_is_imm16 (ins->inst_imm)); mips_addiu (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_LADD_IMM: g_assert (mips_is_imm16 (ins->inst_imm)); mips_daddiu (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_ISUB: mips_subu (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_LSUB: mips_dsubu (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_ISUB_IMM: case OP_SUB_IMM: // we add the negated value g_assert (mips_is_imm16 (-ins->inst_imm)); mips_addiu (code, ins->dreg, ins->sreg1, -ins->inst_imm); break; case OP_LSUB_IMM: // we add the negated value g_assert (mips_is_imm16 (-ins->inst_imm)); mips_daddiu (code, ins->dreg, ins->sreg1, -ins->inst_imm); break; case OP_IAND: case OP_LAND: mips_and (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_AND_IMM: case OP_IAND_IMM: case OP_LAND_IMM: g_assert (!(ins->inst_imm & 0xffff0000)); mips_andi (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_IDIV: case OP_IREM: { guint32 *divisor_is_m1; guint32 *dividend_is_minvalue; guint32 *divisor_is_zero; mips_load_const (code, mips_at, -1); divisor_is_m1 = (guint32 *)(void *)code; mips_bne (code, ins->sreg2, mips_at, 0); mips_lui (code, mips_at, mips_zero, 0x8000); dividend_is_minvalue = (guint32 *)(void *)code; mips_bne (code, ins->sreg1, mips_at, 0); mips_nop (code); /* Divide Int32.MinValue by -1 -- throw exception */ EMIT_SYSTEM_EXCEPTION_NAME("OverflowException"); mips_patch (divisor_is_m1, (guint32)code); mips_patch (dividend_is_minvalue, (guint32)code); /* Put divide in branch delay slot (NOT YET) */ divisor_is_zero = (guint32 *)(void *)code; mips_bne (code, ins->sreg2, mips_zero, 0); mips_nop (code); /* Divide by zero -- throw exception */ EMIT_SYSTEM_EXCEPTION_NAME("DivideByZeroException"); mips_patch (divisor_is_zero, (guint32)code); mips_div (code, ins->sreg1, ins->sreg2); if (ins->opcode == OP_IDIV) mips_mflo (code, ins->dreg); else mips_mfhi (code, ins->dreg); break; } case OP_IDIV_UN: case OP_IREM_UN: { guint32 *divisor_is_zero = (guint32 *)(void *)code; /* Put divide in branch delay slot (NOT YET) */ mips_bne (code, ins->sreg2, mips_zero, 0); mips_nop (code); /* Divide by zero -- throw exception */ EMIT_SYSTEM_EXCEPTION_NAME("DivideByZeroException"); mips_patch (divisor_is_zero, (guint32)code); mips_divu (code, ins->sreg1, ins->sreg2); if (ins->opcode == OP_IDIV_UN) mips_mflo (code, ins->dreg); else mips_mfhi (code, ins->dreg); break; } case OP_DIV_IMM: g_assert_not_reached (); #if 0 ppc_load (code, ppc_r12, ins->inst_imm); ppc_divwod (code, ins->dreg, ins->sreg1, ppc_r12); ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<14)); /* FIXME: use OverflowException for 0x80000000/-1 */ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException"); #endif g_assert_not_reached(); break; case OP_REM_IMM: g_assert_not_reached (); case OP_IOR: mips_or (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_OR_IMM: case OP_IOR_IMM: g_assert (!(ins->inst_imm & 0xffff0000)); mips_ori (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_IXOR: mips_xor (code, ins->dreg, ins->sreg1, 
ins->sreg2); break; case OP_XOR_IMM: case OP_IXOR_IMM: /* unsigned 16-bit immediate */ g_assert (!(ins->inst_imm & 0xffff0000)); mips_xori (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_ISHL: mips_sllv (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SHL_IMM: case OP_ISHL_IMM: mips_sll (code, ins->dreg, ins->sreg1, ins->inst_imm & 0x1f); break; case OP_ISHR: mips_srav (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_LSHR: mips_dsrav (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SHR_IMM: case OP_ISHR_IMM: mips_sra (code, ins->dreg, ins->sreg1, ins->inst_imm & 0x1f); break; case OP_LSHR_IMM: mips_dsra (code, ins->dreg, ins->sreg1, ins->inst_imm & 0x3f); break; case OP_SHR_UN_IMM: case OP_ISHR_UN_IMM: mips_srl (code, ins->dreg, ins->sreg1, ins->inst_imm & 0x1f); break; case OP_LSHR_UN_IMM: mips_dsrl (code, ins->dreg, ins->sreg1, ins->inst_imm & 0x3f); break; case OP_ISHR_UN: mips_srlv (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_LSHR_UN: mips_dsrlv (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_INOT: case OP_LNOT: mips_nor (code, ins->dreg, mips_zero, ins->sreg1); break; case OP_INEG: mips_subu (code, ins->dreg, mips_zero, ins->sreg1); break; case OP_LNEG: mips_dsubu (code, ins->dreg, mips_zero, ins->sreg1); break; case OP_IMUL: #if USE_MUL mips_mul (code, ins->dreg, ins->sreg1, ins->sreg2); #else mips_mult (code, ins->sreg1, ins->sreg2); mips_mflo (code, ins->dreg); mips_nop (code); mips_nop (code); #endif break; #if SIZEOF_REGISTER == 8 case OP_LMUL: mips_dmult (code, ins->sreg1, ins->sreg2); mips_mflo (code, ins->dreg); break; #endif case OP_IMUL_OVF: { guint32 *patch; mips_mult (code, ins->sreg1, ins->sreg2); mips_mflo (code, ins->dreg); mips_mfhi (code, mips_at); mips_nop (code); mips_nop (code); mips_sra (code, mips_temp, ins->dreg, 31); patch = (guint32 *)(void *)code; mips_beq (code, mips_temp, mips_at, 0); mips_nop (code); EMIT_SYSTEM_EXCEPTION_NAME("OverflowException"); mips_patch (patch, (guint32)code); break; } case OP_IMUL_OVF_UN: { guint32 *patch; mips_mult (code, ins->sreg1, ins->sreg2); mips_mflo (code, ins->dreg); mips_mfhi (code, mips_at); mips_nop (code); mips_nop (code); patch = (guint32 *)(void *)code; mips_beq (code, mips_at, mips_zero, 0); mips_nop (code); EMIT_SYSTEM_EXCEPTION_NAME("OverflowException"); mips_patch (patch, (guint32)code); break; } case OP_ICONST: mips_load_const (code, ins->dreg, ins->inst_c0); break; #if SIZEOF_REGISTER == 8 case OP_I8CONST: mips_load_const (code, ins->dreg, ins->inst_c0); break; #endif case OP_AOTCONST: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0); mips_load (code, ins->dreg, 0); break; case OP_MIPS_MTC1S: mips_mtc1 (code, ins->dreg, ins->sreg1); break; case OP_MIPS_MTC1S_2: mips_mtc1 (code, ins->dreg, ins->sreg1); mips_mtc1 (code, ins->dreg+1, ins->sreg2); break; case OP_MIPS_MFC1S: mips_mfc1 (code, ins->dreg, ins->sreg1); break; case OP_MIPS_MTC1D: mips_dmtc1 (code, ins->dreg, ins->sreg1); break; case OP_MIPS_MFC1D: #if 0 mips_dmfc1 (code, ins->dreg, ins->sreg1); #else mips_mfc1 (code, ins->dreg, ins->sreg1 + ls_word_idx); mips_mfc1 (code, ins->dreg+1, ins->sreg1 + ms_word_idx); #endif break; case OP_ICONV_TO_I4: case OP_ICONV_TO_U4: case OP_MOVE: if (ins->dreg != ins->sreg1) MIPS_MOVE (code, ins->dreg, ins->sreg1); break; #if SIZEOF_REGISTER == 8 case OP_ZEXT_I4: mips_dsll (code, ins->dreg, ins->sreg1, 32); mips_dsrl (code, ins->dreg, ins->dreg, 32); break; case OP_SEXT_I4: mips_dsll (code, ins->dreg, ins->sreg1, 32); mips_dsra 
(code, ins->dreg, ins->dreg, 32); break; #endif case OP_SETLRET: { int lsreg = mips_v0 + ls_word_idx; int msreg = mips_v0 + ms_word_idx; /* Get sreg1 into lsreg, sreg2 into msreg */ if (ins->sreg1 == msreg) { if (ins->sreg1 != mips_at) MIPS_MOVE (code, mips_at, ins->sreg1); if (ins->sreg2 != msreg) MIPS_MOVE (code, msreg, ins->sreg2); MIPS_MOVE (code, lsreg, mips_at); } else { if (ins->sreg2 != msreg) MIPS_MOVE (code, msreg, ins->sreg2); if (ins->sreg1 != lsreg) MIPS_MOVE (code, lsreg, ins->sreg1); } break; } case OP_FMOVE: if (ins->dreg != ins->sreg1) { mips_fmovd (code, ins->dreg, ins->sreg1); } break; case OP_MOVE_F_TO_I4: mips_cvtsd (code, mips_ftemp, ins->sreg1); mips_mfc1 (code, ins->dreg, mips_ftemp); break; case OP_MOVE_I4_TO_F: mips_mtc1 (code, ins->dreg, ins->sreg1); mips_cvtds (code, ins->dreg, ins->dreg); break; case OP_MIPS_CVTSD: /* Convert from double to float and leave it there */ mips_cvtsd (code, ins->dreg, ins->sreg1); break; case OP_FCONV_TO_R4: #if 0 mips_cvtsd (code, ins->dreg, ins->sreg1); #else /* Just a move, no precision change */ if (ins->dreg != ins->sreg1) { mips_fmovd (code, ins->dreg, ins->sreg1); } #endif break; case OP_CHECK_THIS: /* ensure ins->sreg1 is not NULL */ mips_lw (code, mips_zero, ins->sreg1, 0); break; case OP_ARGLIST: { g_assert (mips_is_imm16 (cfg->sig_cookie)); mips_lw (code, mips_at, cfg->frame_reg, cfg->sig_cookie); mips_sw (code, mips_at, ins->sreg1, 0); break; } case OP_FCALL: case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: case OP_CALL: case OP_FCALL_REG: case OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: case OP_FCALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: call = (MonoCallInst*)ins; switch (ins->opcode) { case OP_FCALL: case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: case OP_CALL: mono_call_add_patch_info (cfg, call, offset); if (ins->flags & MONO_INST_HAS_METHOD) { mips_load (code, mips_t9, call->method); } else { mips_load (code, mips_t9, call->fptr); } mips_jalr (code, mips_t9, mips_ra); mips_nop (code); break; case OP_FCALL_REG: case OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: MIPS_MOVE (code, mips_t9, ins->sreg1); mips_jalr (code, mips_t9, mips_ra); mips_nop (code); break; case OP_FCALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: mips_lw (code, mips_t9, ins->sreg1, ins->inst_offset); mips_jalr (code, mips_t9, mips_ra); mips_nop (code); break; } #if PROMOTE_R4_TO_R8 /* returned an FP R4 (single), promote to R8 (double) in place */ switch (ins->opcode) { case OP_FCALL: case OP_FCALL_REG: case OP_FCALL_MEMBASE: if (call->signature->ret->type == MONO_TYPE_R4) mips_cvtds (code, mips_f0, mips_f0); break; default: break; } #endif break; case OP_LOCALLOC: { int area_offset = cfg->param_area; /* Round up ins->sreg1, mips_at ends up holding size */ mips_addiu (code, mips_at, ins->sreg1, 31); mips_addiu (code, mips_temp, mips_zero, ~31); mips_and (code, mips_at, mips_at, mips_temp); mips_subu (code, mips_sp, mips_sp, mips_at); g_assert (mips_is_imm16 (area_offset)); mips_addiu (code, ins->dreg, mips_sp, area_offset); if (ins->flags & MONO_INST_INIT) { guint32 *buf; buf = (guint32*)(void*)code; mips_beq (code, mips_at, mips_zero, 0); mips_nop (code); mips_move (code, mips_temp, ins->dreg); mips_sb (code, mips_zero, mips_temp, 0); mips_addiu (code, 
mips_at, mips_at, -1); mips_bne (code, mips_at, mips_zero, -3); mips_addiu (code, mips_temp, mips_temp, 1); mips_patch (buf, (guint32)code); } break; } case OP_THROW: { gpointer addr = mono_arch_get_throw_exception(NULL, FALSE); mips_move (code, mips_a0, ins->sreg1); mips_call (code, mips_t9, addr); mips_break (code, 0xfc); break; } case OP_RETHROW: { gpointer addr = mono_arch_get_rethrow_exception(NULL, FALSE); mips_move (code, mips_a0, ins->sreg1); mips_call (code, mips_t9, addr); mips_break (code, 0xfb); break; } case OP_START_HANDLER: { /* * The START_HANDLER instruction marks the beginning of * a handler block. It is called using a call * instruction, so mips_ra contains the return address. * Since the handler executes in the same stack frame * as the method itself, we can't use save/restore to * save the return address. Instead, we save it into * a dedicated variable. */ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); g_assert (spvar->inst_basereg != mips_sp); code = emit_reserve_param_area (cfg, code); if (mips_is_imm16 (spvar->inst_offset)) { mips_sw (code, mips_ra, spvar->inst_basereg, spvar->inst_offset); } else { mips_load_const (code, mips_at, spvar->inst_offset); mips_addu (code, mips_at, mips_at, spvar->inst_basereg); mips_sw (code, mips_ra, mips_at, 0); } break; } case OP_ENDFILTER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); g_assert (spvar->inst_basereg != mips_sp); code = emit_unreserve_param_area (cfg, code); if (ins->sreg1 != mips_v0) MIPS_MOVE (code, mips_v0, ins->sreg1); if (mips_is_imm16 (spvar->inst_offset)) { mips_lw (code, mips_ra, spvar->inst_basereg, spvar->inst_offset); } else { mips_load_const (code, mips_at, spvar->inst_offset); mips_addu (code, mips_at, mips_at, spvar->inst_basereg); mips_lw (code, mips_ra, mips_at, 0); } mips_jr (code, mips_ra); mips_nop (code); break; } case OP_ENDFINALLY: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); g_assert (spvar->inst_basereg != mips_sp); code = emit_unreserve_param_area (cfg, code); mips_lw (code, mips_t9, spvar->inst_basereg, spvar->inst_offset); mips_jalr (code, mips_t9, mips_ra); mips_nop (code); break; } case OP_CALL_HANDLER: mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb); mips_lui (code, mips_t9, mips_zero, 0); mips_addiu (code, mips_t9, mips_t9, 0); mips_jalr (code, mips_t9, mips_ra); mips_nop (code); /*FIXME should it be before the NOP or not? 
Does MIPS have a delay slot like SPARC?*/ for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev) mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb); break; case OP_LABEL: ins->inst_c0 = code - cfg->native_code; break; case OP_BR: mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb); if (cfg->arch.long_branch) { mips_lui (code, mips_at, mips_zero, 0); mips_addiu (code, mips_at, mips_at, 0); mips_jr (code, mips_at); mips_nop (code); } else { mips_beq (code, mips_zero, mips_zero, 0); mips_nop (code); } break; case OP_BR_REG: mips_jr (code, ins->sreg1); mips_nop (code); break; case OP_SWITCH: { int i; max_len += 4 * GPOINTER_TO_INT (ins->klass); code = realloc_code (cfg, max_len); g_assert (ins->sreg1 != -1); mips_sll (code, mips_at, ins->sreg1, 2); if (1 || !(cfg->flags & MONO_CFG_HAS_CALLS)) MIPS_MOVE (code, mips_t8, mips_ra); mips_bgezal (code, mips_zero, 1); /* bal */ mips_nop (code); mips_addu (code, mips_t9, mips_ra, mips_at); /* Table is 16 or 20 bytes from target of bal above */ if (1 || !(cfg->flags & MONO_CFG_HAS_CALLS)) { MIPS_MOVE (code, mips_ra, mips_t8); mips_lw (code, mips_t9, mips_t9, 20); } else mips_lw (code, mips_t9, mips_t9, 16); mips_jalr (code, mips_t9, mips_t8); mips_nop (code); for (i = 0; i < GPOINTER_TO_INT (ins->klass); ++i) mips_emit32 (code, 0xfefefefe); break; } case OP_CEQ: case OP_ICEQ: mips_addiu (code, ins->dreg, mips_zero, 1); mips_beq (code, mips_at, mips_zero, 2); mips_nop (code); MIPS_MOVE (code, ins->dreg, mips_zero); break; case OP_CLT: case OP_CLT_UN: case OP_ICLT: case OP_ICLT_UN: mips_addiu (code, ins->dreg, mips_zero, 1); mips_bltz (code, mips_at, 2); mips_nop (code); MIPS_MOVE (code, ins->dreg, mips_zero); break; case OP_CGT: case OP_CGT_UN: case OP_ICGT: case OP_ICGT_UN: mips_addiu (code, ins->dreg, mips_zero, 1); mips_bgtz (code, mips_at, 2); mips_nop (code); MIPS_MOVE (code, ins->dreg, mips_zero); break; case OP_MIPS_COND_EXC_EQ: case OP_MIPS_COND_EXC_GE: case OP_MIPS_COND_EXC_GT: case OP_MIPS_COND_EXC_LE: case OP_MIPS_COND_EXC_LT: case OP_MIPS_COND_EXC_NE_UN: case OP_MIPS_COND_EXC_GE_UN: case OP_MIPS_COND_EXC_GT_UN: case OP_MIPS_COND_EXC_LE_UN: case OP_MIPS_COND_EXC_LT_UN: case OP_MIPS_COND_EXC_OV: case OP_MIPS_COND_EXC_NO: case OP_MIPS_COND_EXC_C: case OP_MIPS_COND_EXC_NC: case OP_MIPS_COND_EXC_IEQ: case OP_MIPS_COND_EXC_IGE: case OP_MIPS_COND_EXC_IGT: case OP_MIPS_COND_EXC_ILE: case OP_MIPS_COND_EXC_ILT: case OP_MIPS_COND_EXC_INE_UN: case OP_MIPS_COND_EXC_IGE_UN: case OP_MIPS_COND_EXC_IGT_UN: case OP_MIPS_COND_EXC_ILE_UN: case OP_MIPS_COND_EXC_ILT_UN: case OP_MIPS_COND_EXC_IOV: case OP_MIPS_COND_EXC_INO: case OP_MIPS_COND_EXC_IC: case OP_MIPS_COND_EXC_INC: { guint32 *skip; guint32 *throw; /* If the condition is true, raise the exception */ /* need to reverse test to skip around exception raising */ /* For the moment, branch around a branch to avoid reversing the tests.
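That is: each case below emits a branch that is taken when the exception condition holds, targeting the throw sequence, followed by an unconditional branch that skips the throw on the normal path.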
*/ /* Remember, an unpatched branch to 0 branches to the delay slot */ switch (ins->opcode) { case OP_MIPS_COND_EXC_EQ: throw = (guint32 *)(void *)code; mips_beq (code, ins->sreg1, ins->sreg2, 0); mips_nop (code); break; case OP_MIPS_COND_EXC_NE_UN: throw = (guint32 *)(void *)code; mips_bne (code, ins->sreg1, ins->sreg2, 0); mips_nop (code); break; case OP_MIPS_COND_EXC_LE_UN: mips_sltu (code, mips_at, ins->sreg2, ins->sreg1); throw = (guint32 *)(void *)code; mips_beq (code, mips_at, mips_zero, 0); mips_nop (code); break; case OP_MIPS_COND_EXC_GT: mips_slt (code, mips_at, ins->sreg2, ins->sreg1); throw = (guint32 *)(void *)code; mips_bne (code, mips_at, mips_zero, 0); mips_nop (code); break; case OP_MIPS_COND_EXC_GT_UN: mips_sltu (code, mips_at, ins->sreg2, ins->sreg1); throw = (guint32 *)(void *)code; mips_bne (code, mips_at, mips_zero, 0); mips_nop (code); break; case OP_MIPS_COND_EXC_LT: mips_slt (code, mips_at, ins->sreg1, ins->sreg2); throw = (guint32 *)(void *)code; mips_bne (code, mips_at, mips_zero, 0); mips_nop (code); break; case OP_MIPS_COND_EXC_LT_UN: mips_sltu (code, mips_at, ins->sreg1, ins->sreg2); throw = (guint32 *)(void *)code; mips_bne (code, mips_at, mips_zero, 0); mips_nop (code); break; default: /* Not yet implemented */ g_warning ("NYI conditional exception %s\n", mono_inst_name (ins->opcode)); g_assert_not_reached (); } skip = (guint32 *)(void *)code; mips_beq (code, mips_zero, mips_zero, 0); mips_nop (code); mips_patch (throw, (guint32)code); code = mips_emit_exc_by_name (code, ins->inst_p1); mips_patch (skip, (guint32)code); cfg->bb_exit->max_offset += 24; break; } case OP_MIPS_BEQ: case OP_MIPS_BNE: case OP_MIPS_BGEZ: case OP_MIPS_BGTZ: case OP_MIPS_BLEZ: case OP_MIPS_BLTZ: code = mips_emit_cond_branch (cfg, code, ins->opcode, ins); break; /* floating point opcodes */ case OP_R8CONST: #if 0 if (((guint32)ins->inst_p0) & (1 << 15)) mips_lui (code, mips_at, mips_zero, (((guint32)ins->inst_p0)>>16)+1); else mips_lui (code, mips_at, mips_zero, (((guint32)ins->inst_p0)>>16)); mips_ldc1 (code, ins->dreg, mips_at, ((guint32)ins->inst_p0) & 0xffff); #else mips_load_const (code, mips_at, ins->inst_p0); mips_lwc1 (code, ins->dreg, mips_at, ls_word_offset); mips_lwc1 (code, ins->dreg+1, mips_at, ms_word_offset); #endif break; case OP_R4CONST: if (((guint32)ins->inst_p0) & (1 << 15)) mips_lui (code, mips_at, mips_zero, (((guint32)ins->inst_p0)>>16)+1); else mips_lui (code, mips_at, mips_zero, (((guint32)ins->inst_p0)>>16)); mips_lwc1 (code, ins->dreg, mips_at, ((guint32)ins->inst_p0) & 0xffff); #if PROMOTE_R4_TO_R8 mips_cvtds (code, ins->dreg, ins->dreg); #endif break; case OP_STORER8_MEMBASE_REG: if (mips_is_imm16 (ins->inst_offset)) { #if _MIPS_SIM == _ABIO32 mips_swc1 (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset + ls_word_offset); mips_swc1 (code, ins->sreg1+1, ins->inst_destbasereg, ins->inst_offset + ms_word_offset); #elif _MIPS_SIM == _ABIN32 mips_sdc1 (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); #endif } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_destbasereg); mips_swc1 (code, ins->sreg1, mips_at, ls_word_offset); mips_swc1 (code, ins->sreg1+1, mips_at, ms_word_offset); } break; case OP_LOADR8_MEMBASE: if (mips_is_imm16 (ins->inst_offset)) { #if _MIPS_SIM == _ABIO32 mips_lwc1 (code, ins->dreg, ins->inst_basereg, ins->inst_offset + ls_word_offset); mips_lwc1 (code, ins->dreg+1, ins->inst_basereg, ins->inst_offset + ms_word_offset); #elif _MIPS_SIM == _ABIN32 mips_ldc1 (code, 
ins->dreg, ins->inst_basereg, ins->inst_offset); #endif } else { mips_load_const (code, mips_at, ins->inst_offset); mips_addu (code, mips_at, mips_at, ins->inst_basereg); mips_lwc1 (code, ins->dreg, mips_at, ls_word_offset); mips_lwc1 (code, ins->dreg+1, mips_at, ms_word_offset); } break; case OP_STORER4_MEMBASE_REG: g_assert (mips_is_imm16 (ins->inst_offset)); #if PROMOTE_R4_TO_R8 /* Need to convert ins->sreg1 to single-precision first */ mips_cvtsd (code, mips_ftemp, ins->sreg1); mips_swc1 (code, mips_ftemp, ins->inst_destbasereg, ins->inst_offset); #else mips_swc1 (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); #endif break; case OP_MIPS_LWC1: g_assert (mips_is_imm16 (ins->inst_offset)); mips_lwc1 (code, ins->dreg, ins->inst_basereg, ins->inst_offset); break; case OP_LOADR4_MEMBASE: g_assert (mips_is_imm16 (ins->inst_offset)); mips_lwc1 (code, ins->dreg, ins->inst_basereg, ins->inst_offset); #if PROMOTE_R4_TO_R8 /* Convert to double precision in place */ mips_cvtds (code, ins->dreg, ins->dreg); #endif break; case OP_LOADR4_MEMINDEX: mips_addu (code, mips_at, ins->inst_basereg, ins->sreg2); mips_lwc1 (code, ins->dreg, mips_at, 0); break; case OP_LOADR8_MEMINDEX: mips_addu (code, mips_at, ins->inst_basereg, ins->sreg2); #if _MIPS_SIM == _ABIO32 mips_lwc1 (code, ins->dreg, mips_at, ls_word_offset); mips_lwc1 (code, ins->dreg+1, mips_at, ms_word_offset); #elif _MIPS_SIM == _ABIN32 mips_ldc1 (code, ins->dreg, mips_at, 0); #endif break; case OP_STORER4_MEMINDEX: mips_addu (code, mips_at, ins->inst_destbasereg, ins->sreg2); #if PROMOTE_R4_TO_R8 /* Need to convert ins->sreg1 to single-precision first */ mips_cvtsd (code, mips_ftemp, ins->sreg1); mips_swc1 (code, mips_ftemp, mips_at, 0); #else mips_swc1 (code, ins->sreg1, mips_at, 0); #endif break; case OP_STORER8_MEMINDEX: mips_addu (code, mips_at, ins->inst_destbasereg, ins->sreg2); #if _MIPS_SIM == _ABIO32 mips_swc1 (code, ins->sreg1, mips_at, ls_word_offset); mips_swc1 (code, ins->sreg1+1, mips_at, ms_word_offset); #elif _MIPS_SIM == _ABIN32 mips_sdc1 (code, ins->sreg1, mips_at, 0); #endif break; case OP_ICONV_TO_R_UN: { static const guint64 adjust_val = 0x41F0000000000000ULL; /* convert unsigned int to double */ mips_mtc1 (code, mips_ftemp, ins->sreg1); mips_bgez (code, ins->sreg1, 5); mips_cvtdw (code, ins->dreg, mips_ftemp); mips_load (code, mips_at, (guint32) &adjust_val); mips_ldc1 (code, mips_ftemp, mips_at, 0); mips_faddd (code, ins->dreg, ins->dreg, mips_ftemp); /* target is here */ break; } case OP_ICONV_TO_R4: mips_mtc1 (code, mips_ftemp, ins->sreg1); mips_cvtsw (code, ins->dreg, mips_ftemp); mips_cvtds (code, ins->dreg, ins->dreg); break; case OP_ICONV_TO_R8: mips_mtc1 (code, mips_ftemp, ins->sreg1); mips_cvtdw (code, ins->dreg, mips_ftemp); break; case OP_FCONV_TO_I1: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE); break; case OP_FCONV_TO_U1: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE); break; case OP_FCONV_TO_I2: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE); break; case OP_FCONV_TO_U2: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE); break; case OP_FCONV_TO_I4: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE); break; case OP_FCONV_TO_U4: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE); break; case OP_SQRT: mips_fsqrtd (code, ins->dreg, ins->sreg1); break; case OP_FADD: mips_faddd (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FSUB: mips_fsubd (code, ins->dreg, ins->sreg1, 
ins->sreg2); break; case OP_FMUL: mips_fmuld (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FDIV: mips_fdivd (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FNEG: mips_fnegd (code, ins->dreg, ins->sreg1); break; case OP_FCEQ: mips_fcmpd (code, MIPS_FPU_EQ, ins->sreg1, ins->sreg2); mips_addiu (code, ins->dreg, mips_zero, 1); mips_fbtrue (code, 2); mips_nop (code); MIPS_MOVE (code, ins->dreg, mips_zero); break; case OP_FCLT: mips_fcmpd (code, MIPS_FPU_LT, ins->sreg1, ins->sreg2); mips_addiu (code, ins->dreg, mips_zero, 1); mips_fbtrue (code, 2); mips_nop (code); MIPS_MOVE (code, ins->dreg, mips_zero); break; case OP_FCLT_UN: /* Less than, or Unordered */ mips_fcmpd (code, MIPS_FPU_ULT, ins->sreg1, ins->sreg2); mips_addiu (code, ins->dreg, mips_zero, 1); mips_fbtrue (code, 2); mips_nop (code); MIPS_MOVE (code, ins->dreg, mips_zero); break; case OP_FCGT: mips_fcmpd (code, MIPS_FPU_ULE, ins->sreg1, ins->sreg2); MIPS_MOVE (code, ins->dreg, mips_zero); mips_fbtrue (code, 2); mips_nop (code); mips_addiu (code, ins->dreg, mips_zero, 1); break; case OP_FCGT_UN: /* Greater than, or Unordered */ mips_fcmpd (code, MIPS_FPU_OLE, ins->sreg1, ins->sreg2); MIPS_MOVE (code, ins->dreg, mips_zero); mips_fbtrue (code, 2); mips_nop (code); mips_addiu (code, ins->dreg, mips_zero, 1); break; case OP_MIPS_FBEQ: case OP_MIPS_FBNE: case OP_MIPS_FBLT: case OP_MIPS_FBLT_UN: case OP_MIPS_FBGT: case OP_MIPS_FBGT_UN: case OP_MIPS_FBGE: case OP_MIPS_FBGE_UN: case OP_MIPS_FBLE: case OP_MIPS_FBLE_UN: { int cond = 0; gboolean is_true = TRUE, is_ordered = FALSE; guint32 *buf = NULL; switch (ins->opcode) { case OP_MIPS_FBEQ: cond = MIPS_FPU_EQ; is_true = TRUE; break; case OP_MIPS_FBNE: cond = MIPS_FPU_EQ; is_true = FALSE; break; case OP_MIPS_FBLT: cond = MIPS_FPU_LT; is_true = TRUE; is_ordered = TRUE; break; case OP_MIPS_FBLT_UN: cond = MIPS_FPU_ULT; is_true = TRUE; break; case OP_MIPS_FBGT: cond = MIPS_FPU_LE; is_true = FALSE; is_ordered = TRUE; break; case OP_MIPS_FBGT_UN: cond = MIPS_FPU_OLE; is_true = FALSE; break; case OP_MIPS_FBGE: cond = MIPS_FPU_LT; is_true = FALSE; is_ordered = TRUE; break; case OP_MIPS_FBGE_UN: cond = MIPS_FPU_OLT; is_true = FALSE; break; case OP_MIPS_FBLE: cond = MIPS_FPU_OLE; is_true = TRUE; is_ordered = TRUE; break; case OP_MIPS_FBLE_UN: cond = MIPS_FPU_ULE; is_true = TRUE; break; default: g_assert_not_reached (); } if (is_ordered) { /* Skip the check if unordered */ mips_fcmpd (code, MIPS_FPU_UN, ins->sreg1, ins->sreg2); mips_nop (code); buf = (guint32*)code; mips_fbtrue (code, 0); mips_nop (code); } mips_fcmpd (code, cond, ins->sreg1, ins->sreg2); mips_nop (code); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); if (is_true) mips_fbtrue (code, 0); else mips_fbfalse (code, 0); mips_nop (code); if (is_ordered) mips_patch (buf, (guint32)code); break; } case OP_CKFINITE: { guint32 *branch_patch; mips_mfc1 (code, mips_at, ins->sreg1+1); mips_srl (code, mips_at, mips_at, 16+4); mips_andi (code, mips_at, mips_at, 2047); mips_addiu (code, mips_at, mips_at, -2047); branch_patch = (guint32 *)(void *)code; mips_bne (code, mips_at, mips_zero, 0); mips_nop (code); EMIT_SYSTEM_EXCEPTION_NAME("OverflowException"); mips_patch (branch_patch, (guint32)code); mips_fmovd (code, ins->dreg, ins->sreg1); break; } case OP_JUMP_TABLE: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_c1, ins->inst_p0); mips_load (code, ins->dreg, 0x0f0f0f0f); break; case OP_LIVERANGE_START: { if (cfg->verbose_level > 1) printf ("R%d START=0x%x\n", MONO_VARINFO 
(cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code; break; } case OP_LIVERANGE_END: { if (cfg->verbose_level > 1) printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code; break; } case OP_GC_SAFE_POINT: break; default: g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__); g_assert_not_reached (); } if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) { g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)", mono_inst_name (ins->opcode), max_len, (int)(code - cfg->native_code - offset)); g_assert_not_reached (); } cpos += max_len; last_ins = ins; } set_code_cursor (cfg, code); } void mono_arch_register_lowlevel_calls (void) { } void mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target) { unsigned char *ip = ji->ip.i + code; switch (ji->type) { case MONO_PATCH_INFO_IP: patch_lui_addiu ((guint32 *)(void *)ip, (guint32)ip); break; case MONO_PATCH_INFO_SWITCH: { gpointer *table = (gpointer *)ji->data.table->table; int i; patch_lui_addiu ((guint32 *)(void *)ip, (guint32)table); for (i = 0; i < ji->data.table->table_size; i++) { table [i] = (int)ji->data.table->table [i] + code; } break; } case MONO_PATCH_INFO_METHODCONST: case MONO_PATCH_INFO_CLASS: case MONO_PATCH_INFO_IMAGE: case MONO_PATCH_INFO_FIELD: case MONO_PATCH_INFO_VTABLE: case MONO_PATCH_INFO_IID: case MONO_PATCH_INFO_SFLDA: case MONO_PATCH_INFO_LDSTR: case MONO_PATCH_INFO_TYPE_FROM_HANDLE: case MONO_PATCH_INFO_LDTOKEN: case MONO_PATCH_INFO_R4: case MONO_PATCH_INFO_R8: /* from OP_AOTCONST : lui + addiu */ patch_lui_addiu ((guint32 *)(void *)ip, (guint32)target); break; #if 0 case MONO_PATCH_INFO_EXC_NAME: g_assert_not_reached (); *((gconstpointer *)(void *)(ip + 1)) = target; break; #endif case MONO_PATCH_INFO_NONE: /* everything is dealt with at epilog output time */ break; default: mips_patch ((guint32 *)(void *)ip, (guint32)target); break; } } void mips_adjust_stackframe(MonoCompile *cfg) { MonoBasicBlock *bb; int delta, threshold, i; MonoMethodSignature *sig; int ra_offset; if (cfg->stack_offset == cfg->arch.local_alloc_offset) return; /* adjust cfg->stack_offset to account for down-spilling */ cfg->stack_offset += SIZEOF_REGISTER; /* re-align cfg->stack_offset if needed (due to var spilling) */ cfg->stack_offset = (cfg->stack_offset + MIPS_STACK_ALIGNMENT - 1) & ~(MIPS_STACK_ALIGNMENT - 1); delta = cfg->stack_offset - cfg->arch.local_alloc_offset; if (cfg->verbose_level > 2) { g_print ("mips_adjust_stackframe:\n"); g_print ("\tspillvars allocated 0x%x -> 0x%x\n", cfg->arch.local_alloc_offset, cfg->stack_offset); } threshold = cfg->arch.local_alloc_offset; ra_offset = cfg->stack_offset - sizeof(gpointer); if (cfg->verbose_level > 2) { g_print ("\tra_offset %d/0x%x delta %d/0x%x\n", ra_offset, ra_offset, delta, delta); } sig = mono_method_signature_internal (cfg->method); if (sig && sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) { cfg->vret_addr->inst_offset += delta; } for (i = 0; i < sig->param_count + sig->hasthis; ++i) { MonoInst *inst = cfg->args [i]; inst->inst_offset += delta; } /* * loads and stores based off the frame reg that (used to) lie * above the spill var area need to be increased by 'delta' * to make room for the spill vars.
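* For example, an OP_LOADI4_MEMBASE off mips_fp whose offset is at or above the * old local_alloc_offset gets its offset bumped by 'delta' below, while negative * offsets mark the spill slot references themselves and are flipped back to * positive (less the access size).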
*/ /* Need to find loads and stores to adjust that * are above where the spillvars were inserted, but * which are not the spillvar references themselves. * * Idea - since all offsets from fp are positive, make * spillvar offsets negative to begin with so we can spot * them here. */ #if 1 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { int ins_cnt = 0; MonoInst *ins; if (cfg->verbose_level > 2) { g_print ("BASIC BLOCK %d:\n", bb->block_num); } MONO_BB_FOR_EACH_INS (bb, ins) { int adj_c0 = 0; int adj_imm = 0; if (cfg->verbose_level > 2) { mono_print_ins_index (ins_cnt, ins); } /* The == mips_sp tests catch FP spills */ if (MONO_IS_LOAD_MEMBASE(ins) && ((ins->inst_basereg == mips_fp) || (ins->inst_basereg == mips_sp))) { switch (ins->opcode) { case OP_LOADI8_MEMBASE: case OP_LOADR8_MEMBASE: adj_c0 = 8; break; default: adj_c0 = 4; break; } } else if (MONO_IS_STORE_MEMBASE(ins) && ((ins->dreg == mips_fp) || (ins->dreg == mips_sp))) { switch (ins->opcode) { case OP_STOREI8_MEMBASE_REG: case OP_STORER8_MEMBASE_REG: case OP_STOREI8_MEMBASE_IMM: adj_c0 = 8; break; default: adj_c0 = 4; break; } } if (((ins->opcode == OP_ADD_IMM) || (ins->opcode == OP_IADD_IMM)) && (ins->sreg1 == cfg->frame_reg)) adj_imm = 1; if (adj_c0) { if (ins->inst_c0 >= threshold) { ins->inst_c0 += delta; if (cfg->verbose_level > 2) { g_print ("adj"); mono_print_ins_index (ins_cnt, ins); } } else if (ins->inst_c0 < 0) { /* Adj_c0 holds the size of the datatype. */ ins->inst_c0 = - ins->inst_c0 - adj_c0; if (cfg->verbose_level > 2) { g_print ("spill"); mono_print_ins_index (ins_cnt, ins); } } g_assert (ins->inst_c0 != ra_offset); } if (adj_imm) { if (ins->inst_imm >= threshold) { ins->inst_imm += delta; if (cfg->verbose_level > 2) { g_print ("adj"); mono_print_ins_index (ins_cnt, ins); } } g_assert (ins->inst_c0 != ra_offset); } ++ins_cnt; } } #endif } /* * Stack frame layout: * * ------------------- sp + cfg->stack_usage + cfg->param_area * param area incoming * ------------------- sp + cfg->stack_usage + MIPS_STACK_PARAM_OFFSET * a0-a3 incoming * ------------------- sp + cfg->stack_usage * ra * ------------------- sp + cfg->stack_usage-4 * spilled regs * ------------------- sp + * MonoLMF structure optional * ------------------- sp + cfg->arch.lmf_offset * saved registers s0-s8 * ------------------- sp + cfg->arch.iregs_offset * locals * ------------------- sp + cfg->param_area * param area outgoing * ------------------- sp + MIPS_STACK_PARAM_OFFSET * a0-a3 outgoing * ------------------- sp * red zone */ guint8 * mono_arch_emit_prolog (MonoCompile *cfg) { MonoMethod *method = cfg->method; MonoMethodSignature *sig; MonoInst *inst; int alloc_size, pos, i, max_offset; int alloc2_size = 0; guint8 *code; CallInfo *cinfo; guint32 iregs_to_save = 0; #if SAVE_FP_REGS guint32 fregs_to_save = 0; #endif /* lmf_offset is the offset of the LMF from our stack pointer. */ guint32 lmf_offset = cfg->arch.lmf_offset; int cfa_offset = 0; MonoBasicBlock *bb; sig = mono_method_signature_internal (method); cfg->code_size = 768 + sig->param_count * 20; code = cfg->native_code = g_malloc (cfg->code_size); /* * compute max_offset in order to use short forward jumps. */ max_offset = 0; for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins = bb->code; bb->max_offset = max_offset; MONO_BB_FOR_EACH_INS (bb, ins) max_offset += ins_get_size (ins->opcode); } if (max_offset > 0xffff) cfg->arch.long_branch = TRUE; /* * Currently, fp points to the bottom of the frame on MIPS, unlike other platforms. 
* This means that we have to adjust the offsets inside instructions which reference * arguments received on the stack, since the initial offset doesn't take into * account spill slots. */ mips_adjust_stackframe (cfg); /* Offset between current sp and the CFA */ cfa_offset = 0; mono_emit_unwind_op_def_cfa (cfg, code, mips_sp, cfa_offset); /* stack_offset should not be changed here. */ alloc_size = cfg->stack_offset; cfg->stack_usage = alloc_size; iregs_to_save = (cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS); #if SAVE_FP_REGS #if 0 fregs_to_save = (cfg->used_float_regs & MONO_ARCH_CALLEE_SAVED_FREGS); #else fregs_to_save = MONO_ARCH_CALLEE_SAVED_FREGS; fregs_to_save |= (fregs_to_save << 1); #endif #endif /* If the stack size is too big, save 1024 bytes to start with * so the prologue can use imm16(reg) addressing, then allocate * the rest of the frame. */ if (alloc_size > ((1 << 15) - 1024)) { alloc2_size = alloc_size - 1024; alloc_size = 1024; } if (alloc_size) { g_assert (mips_is_imm16 (-alloc_size)); mips_addiu (code, mips_sp, mips_sp, -alloc_size); cfa_offset = alloc_size; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); } if ((cfg->flags & MONO_CFG_HAS_CALLS) || ALWAYS_SAVE_RA) { int offset = alloc_size + MIPS_RET_ADDR_OFFSET; if (mips_is_imm16(offset)) mips_sw (code, mips_ra, mips_sp, offset); else { g_assert_not_reached (); } /* sp = cfa - cfa_offset, so sp + offset = cfa - cfa_offset + offset */ mono_emit_unwind_op_offset (cfg, code, mips_ra, offset - cfa_offset); } /* XXX - optimize this later to not save all regs if LMF constructed */ pos = cfg->arch.iregs_offset - alloc2_size; if (iregs_to_save) { /* save used registers in own stack frame (at pos) */ for (i = MONO_MAX_IREGS-1; i >= 0; --i) { if (iregs_to_save & (1 << i)) { g_assert (pos < (int)(cfg->stack_usage - sizeof(gpointer))); g_assert (mips_is_imm16(pos)); MIPS_SW (code, i, mips_sp, pos); mono_emit_unwind_op_offset (cfg, code, i, pos - cfa_offset); pos += SIZEOF_REGISTER; } } } // FIXME: Don't save registers twice if there is an LMF // s8 has to be special cased since it is overwritten with the updated value // below if (method->save_lmf) { for (i = MONO_MAX_IREGS-1; i >= 0; --i) { int offset = lmf_offset + G_STRUCT_OFFSET(MonoLMF, iregs[i]); g_assert (mips_is_imm16(offset)); if (MIPS_LMF_IREGMASK & (1 << i)) MIPS_SW (code, i, mips_sp, offset); } } #if SAVE_FP_REGS /* Save float registers */ if (fregs_to_save) { for (i = MONO_MAX_FREGS-1; i >= 0; --i) { if (fregs_to_save & (1 << i)) { g_assert (pos < cfg->stack_usage - MIPS_STACK_ALIGNMENT); g_assert (mips_is_imm16(pos)); mips_swc1 (code, i, mips_sp, pos); pos += sizeof (gulong); } } } if (method->save_lmf) { for (i = MONO_MAX_FREGS-1; i >= 0; --i) { int offset = lmf_offset + G_STRUCT_OFFSET(MonoLMF, fregs[i]); g_assert (mips_is_imm16(offset)); mips_swc1 (code, i, mips_sp, offset); } } #endif if (cfg->frame_reg != mips_sp) { MIPS_MOVE (code, cfg->frame_reg, mips_sp); mono_emit_unwind_op_def_cfa (cfg, code, cfg->frame_reg, cfa_offset); if (method->save_lmf) { int offset = lmf_offset + G_STRUCT_OFFSET(MonoLMF, iregs[cfg->frame_reg]); g_assert (mips_is_imm16(offset)); MIPS_SW (code, cfg->frame_reg, mips_sp, offset); } } /* store runtime generic context */ if (cfg->rgctx_var) { MonoInst *ins = cfg->rgctx_var; g_assert (ins->opcode == OP_REGOFFSET); g_assert (mips_is_imm16 (ins->inst_offset)); mips_sw (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset); } /* load arguments allocated to register from the stack */ pos = 0; if (!cfg->arch.cinfo) 
cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; if (MONO_TYPE_ISSTRUCT (sig->ret)) { ArgInfo *ainfo = &cinfo->ret; inst = cfg->vret_addr; if (inst->opcode == OP_REGVAR) MIPS_MOVE (code, inst->dreg, ainfo->reg); else if (mips_is_imm16 (inst->inst_offset)) { mips_sw (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); } else { mips_load_const (code, mips_at, inst->inst_offset); mips_addu (code, mips_at, mips_at, inst->inst_basereg); mips_sw (code, ainfo->reg, mips_at, 0); } } if (sig->call_convention == MONO_CALL_VARARG) { ArgInfo *cookie = &cinfo->sig_cookie; int offset = alloc_size + cookie->offset; /* Save the sig cookie address */ g_assert (cookie->storage == ArgOnStack); g_assert (mips_is_imm16(offset)); mips_addi (code, mips_at, cfg->frame_reg, offset); mips_sw (code, mips_at, cfg->frame_reg, cfg->sig_cookie - alloc2_size); } /* Keep this in sync with emit_load_volatile_arguments */ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; inst = cfg->args [pos]; if (cfg->verbose_level > 2) g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage); if (inst->opcode == OP_REGVAR) { /* Argument ends up in a register */ if (ainfo->storage == ArgInIReg) MIPS_MOVE (code, inst->dreg, ainfo->reg); else if (ainfo->storage == ArgInFReg) { g_assert_not_reached(); #if 0 ppc_fmr (code, inst->dreg, ainfo->reg); #endif } else if (ainfo->storage == ArgOnStack) { int offset = cfg->stack_usage + ainfo->offset; g_assert (mips_is_imm16(offset)); mips_lw (code, inst->dreg, mips_sp, offset); } else g_assert_not_reached (); if (cfg->verbose_level > 2) g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg)); } else { /* Argument ends up on the stack */ if (ainfo->storage == ArgInIReg) { int basereg_offset; /* Incoming parameters should be above this frame */ if (cfg->verbose_level > 2) g_print ("stack slot at %d of %d+%d\n", inst->inst_offset, alloc_size, alloc2_size); /* g_assert (inst->inst_offset >= alloc_size); */ g_assert (inst->inst_basereg == cfg->frame_reg); basereg_offset = inst->inst_offset - alloc2_size; g_assert (mips_is_imm16 (basereg_offset)); switch (ainfo->size) { case 1: mips_sb (code, ainfo->reg, inst->inst_basereg, basereg_offset); break; case 2: mips_sh (code, ainfo->reg, inst->inst_basereg, basereg_offset); break; case 0: /* XXX */ case 4: mips_sw (code, ainfo->reg, inst->inst_basereg, basereg_offset); break; case 8: #if (SIZEOF_REGISTER == 4) mips_sw (code, ainfo->reg, inst->inst_basereg, basereg_offset + ls_word_offset); mips_sw (code, ainfo->reg + 1, inst->inst_basereg, basereg_offset + ms_word_offset); #elif (SIZEOF_REGISTER == 8) mips_sd (code, ainfo->reg, inst->inst_basereg, basereg_offset); #endif break; default: g_assert_not_reached (); break; } } else if (ainfo->storage == ArgOnStack) { /* * Argument comes in on the stack, and ends up on the stack * 1 and 2 byte args are passed as 32-bit quantities, but used as * 8 and 16 bit quantities. Shorten them in place. 
*/ g_assert (mips_is_imm16 (inst->inst_offset)); switch (ainfo->size) { case 1: mips_lw (code, mips_at, inst->inst_basereg, inst->inst_offset); mips_sb (code, mips_at, inst->inst_basereg, inst->inst_offset); break; case 2: mips_lw (code, mips_at, inst->inst_basereg, inst->inst_offset); mips_sh (code, mips_at, inst->inst_basereg, inst->inst_offset); break; case 0: /* XXX */ case 4: case 8: break; default: g_assert_not_reached (); } } else if (ainfo->storage == ArgInFReg) { g_assert (mips_is_imm16 (inst->inst_offset)); g_assert (mips_is_imm16 (inst->inst_offset+4)); if (ainfo->size == 8) { #if _MIPS_SIM == _ABIO32 mips_swc1 (code, ainfo->reg, inst->inst_basereg, inst->inst_offset + ls_word_offset); mips_swc1 (code, ainfo->reg+1, inst->inst_basereg, inst->inst_offset + ms_word_offset); #elif _MIPS_SIM == _ABIN32 mips_sdc1 (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); #endif } else if (ainfo->size == 4) mips_swc1 (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); else g_assert_not_reached (); } else if (ainfo->storage == ArgStructByVal) { int i; int doffset = inst->inst_offset; g_assert (mips_is_imm16 (inst->inst_offset)); g_assert (mips_is_imm16 (inst->inst_offset + ainfo->size * sizeof (target_mgreg_t))); /* Push the argument registers into their stack slots */ for (i = 0; i < ainfo->size; ++i) { g_assert (mips_is_imm16(doffset)); MIPS_SW (code, ainfo->reg + i, inst->inst_basereg, doffset); doffset += SIZEOF_REGISTER; } } else if (ainfo->storage == ArgStructByAddr) { g_assert (mips_is_imm16 (inst->inst_offset)); /* FIXME: handle overrun! with struct sizes not multiple of 4 */ code = emit_memcpy (code, ainfo->vtsize * sizeof (target_mgreg_t), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0); } else g_assert_not_reached (); } pos++; } if (method->save_lmf) { mips_load_const (code, mips_at, MIPS_LMF_MAGIC1); mips_sw (code, mips_at, mips_sp, lmf_offset + G_STRUCT_OFFSET(MonoLMF, magic)); /* This can/will clobber the a0-a3 registers */ mips_call (code, mips_t9, (gpointer)mono_get_lmf_addr); /* mips_v0 is the result from mono_get_lmf_addr () (MonoLMF **) */ g_assert (mips_is_imm16(lmf_offset + G_STRUCT_OFFSET(MonoLMF, lmf_addr))); mips_sw (code, mips_v0, mips_sp, lmf_offset + G_STRUCT_OFFSET(MonoLMF, lmf_addr)); /* new_lmf->previous_lmf = *lmf_addr */ mips_lw (code, mips_at, mips_v0, 0); g_assert (mips_is_imm16(lmf_offset + G_STRUCT_OFFSET(MonoLMF, previous_lmf))); mips_sw (code, mips_at, mips_sp, lmf_offset + G_STRUCT_OFFSET(MonoLMF, previous_lmf)); /* *(lmf_addr) = sp + lmf_offset */ g_assert (mips_is_imm16(lmf_offset)); mips_addiu (code, mips_at, mips_sp, lmf_offset); mips_sw (code, mips_at, mips_v0, 0); /* save method info */ mips_load_const (code, mips_at, method); g_assert (mips_is_imm16(lmf_offset + G_STRUCT_OFFSET(MonoLMF, method))); mips_sw (code, mips_at, mips_sp, lmf_offset + G_STRUCT_OFFSET(MonoLMF, method)); /* save the current IP */ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL); mips_load_const (code, mips_at, 0x01010101); mips_sw (code, mips_at, mips_sp, lmf_offset + G_STRUCT_OFFSET(MonoLMF, eip)); } if (alloc2_size) { if (mips_is_imm16 (-alloc2_size)) { mips_addu (code, mips_sp, mips_sp, -alloc2_size); } else { mips_load_const (code, mips_at, -alloc2_size); mips_addu (code, mips_sp, mips_sp, mips_at); } alloc_size += alloc2_size; cfa_offset += alloc2_size; if (cfg->frame_reg != mips_sp) MIPS_MOVE (code, cfg->frame_reg, mips_sp); else mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); } set_code_cursor (cfg, code); 
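/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * for cfg->stack_offset == 0x9000 (larger than 2^15 - 1024) the prologue
 * above is emitted in two steps so the register saves stay reachable with
 * imm16(reg) addressing:
 *
 *     addiu  sp,sp,-1024       # alloc_size: partial frame, saves go here
 *     ...  save ra / callee-saved iregs / LMF  ...
 *     li     at,-0x8c00        # alloc2_size = 0x9000 - 1024
 *     addu   sp,sp,at          # complete the frame
 *
 * mono_arch_emit_epilog_sub () performs the same split in reverse when it
 * deconstructs the frame.
 */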
return code; } guint8 * mono_arch_emit_epilog_sub (MonoCompile *cfg) { guint8 *code = NULL; MonoMethod *method = cfg->method; int i; int max_epilog_size = 16 + 20*4; int alloc2_size = 0; guint32 iregs_to_restore; #if SAVE_FP_REGS guint32 fregs_to_restore; #endif if (cfg->method->save_lmf) max_epilog_size += 128; realloc_code (cfg, max_epilog_size); code = cfg->native_code + cfg->code_len; if (cfg->frame_reg != mips_sp) { MIPS_MOVE (code, mips_sp, cfg->frame_reg); } /* If the stack frame is really large, deconstruct it in two steps */ if (cfg->stack_usage > ((1 << 15) - 1024)) { alloc2_size = cfg->stack_usage - 1024; /* partially deconstruct the stack */ mips_load_const (code, mips_at, alloc2_size); mips_addu (code, mips_sp, mips_sp, mips_at); } int pos = cfg->arch.iregs_offset - alloc2_size; iregs_to_restore = (cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS); if (iregs_to_restore) { for (i = MONO_MAX_IREGS-1; i >= 0; --i) { if (iregs_to_restore & (1 << i)) { g_assert (mips_is_imm16(pos)); MIPS_LW (code, i, mips_sp, pos); pos += SIZEOF_REGISTER; } } } #if SAVE_FP_REGS #if 0 fregs_to_restore = (cfg->used_float_regs & MONO_ARCH_CALLEE_SAVED_FREGS); #else fregs_to_restore = MONO_ARCH_CALLEE_SAVED_FREGS; fregs_to_restore |= (fregs_to_restore << 1); #endif if (fregs_to_restore) { for (i = MONO_MAX_FREGS-1; i >= 0; --i) { if (fregs_to_restore & (1 << i)) { g_assert (pos < cfg->stack_usage - MIPS_STACK_ALIGNMENT); g_assert (mips_is_imm16(pos)); mips_lwc1 (code, i, mips_sp, pos); pos += FREG_SIZE; } } } #endif /* Unlink the LMF if necessary */ if (method->save_lmf) { int lmf_offset = cfg->arch.lmf_offset; /* t0 = current_lmf->previous_lmf */ g_assert (mips_is_imm16(lmf_offset + G_STRUCT_OFFSET(MonoLMF, previous_lmf))); mips_lw (code, mips_temp, mips_sp, lmf_offset + G_STRUCT_OFFSET(MonoLMF, previous_lmf)); /* t1 = lmf_addr */ g_assert (mips_is_imm16(lmf_offset + G_STRUCT_OFFSET(MonoLMF, lmf_addr))); mips_lw (code, mips_t1, mips_sp, lmf_offset + G_STRUCT_OFFSET(MonoLMF, lmf_addr)); /* (*lmf_addr) = previous_lmf */ mips_sw (code, mips_temp, mips_t1, 0); } #if 0 /* Restore the fp */ mips_lw (code, mips_fp, mips_sp, cfg->stack_usage + MIPS_FP_ADDR_OFFSET); #endif /* Restore ra */ if ((cfg->flags & MONO_CFG_HAS_CALLS) || ALWAYS_SAVE_RA) { g_assert (mips_is_imm16(cfg->stack_usage - alloc2_size + MIPS_RET_ADDR_OFFSET)); mips_lw (code, mips_ra, mips_sp, cfg->stack_usage - alloc2_size + MIPS_RET_ADDR_OFFSET); } /* Restore the stack pointer */ g_assert (mips_is_imm16(cfg->stack_usage - alloc2_size)); mips_addiu (code, mips_sp, mips_sp, cfg->stack_usage - alloc2_size); /* Caller will emit either return or tail-call sequence */ set_code_cursor (cfg, code); return (code); } void mono_arch_emit_epilog (MonoCompile *cfg) { guint8 *code = mono_arch_emit_epilog_sub (cfg); mips_jr (code, mips_ra); mips_nop (code); set_code_cursor (cfg, code); } /* remove once throw_exception_by_name is eliminated */ #if 0 static int exception_id_by_name (const char *name) { if (strcmp (name, "IndexOutOfRangeException") == 0) return MONO_EXC_INDEX_OUT_OF_RANGE; if (strcmp (name, "OverflowException") == 0) return MONO_EXC_OVERFLOW; if (strcmp (name, "ArithmeticException") == 0) return MONO_EXC_ARITHMETIC; if (strcmp (name, "DivideByZeroException") == 0) return MONO_EXC_DIVIDE_BY_ZERO; if (strcmp (name, "InvalidCastException") == 0) return MONO_EXC_INVALID_CAST; if (strcmp (name, "NullReferenceException") == 0) return MONO_EXC_NULL_REF; if (strcmp (name, "ArrayTypeMismatchException") == 0) return MONO_EXC_ARRAY_TYPE_MISMATCH; if
(strcmp (name, "ArgumentException") == 0) return MONO_EXC_ARGUMENT; g_error ("Unknown intrinsic exception %s\n", name); return 0; } #endif void mono_arch_emit_exceptions (MonoCompile *cfg) { #if 0 MonoJumpInfo *patch_info; int i; guint8 *code; const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL}; guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0}; int max_epilog_size = 50; /* count the number of exception infos */ /* * make sure we have enough space for exceptions * 24 is the simulated call to throw_exception_by_name */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { #if 0 if (patch_info->type == MONO_PATCH_INFO_EXC) { i = exception_id_by_name (patch_info->data.target); g_assert (i < MONO_EXC_INTRINS_NUM); if (!exc_throw_found [i]) { max_epilog_size += 12; exc_throw_found [i] = TRUE; } } #endif } code = realloc_code (cfg, max_epilog_size); /* add code to raise exceptions */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { switch (patch_info->type) { case MONO_PATCH_INFO_EXC: { g_assert_not_reached(); break; } default: /* do nothing */ break; } } set_code_cursor (cfg, code); #endif } void mono_arch_finish_init (void) { } void mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg) { int this_dreg = mips_a0; if (vt_reg != -1) this_dreg = mips_a1; /* add the this argument */ if (this_reg != -1) { MonoInst *this_ins; MONO_INST_NEW (cfg, this_ins, OP_MOVE); this_ins->type = this_type; this_ins->sreg1 = this_reg; this_ins->dreg = mono_alloc_ireg (cfg); mono_bblock_add_inst (cfg->cbb, this_ins); mono_call_inst_add_outarg_reg (cfg, inst, this_ins->dreg, this_dreg, FALSE); } if (vt_reg != -1) { MonoInst *vtarg; MONO_INST_NEW (cfg, vtarg, OP_MOVE); vtarg->type = STACK_MP; vtarg->sreg1 = vt_reg; vtarg->dreg = mono_alloc_ireg (cfg); mono_bblock_add_inst (cfg->cbb, vtarg); mono_call_inst_add_outarg_reg (cfg, inst, vtarg->dreg, mips_a0, FALSE); } } MonoInst* mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins = NULL; return ins; } MonoInst* mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { return NULL; } host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg) { return ctx->sc_regs [reg]; } host_mgreg_t* mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg) { return &ctx->sc_regs [reg]; } #define ENABLE_WRONG_METHOD_CHECK 0 #define MIPS_LOAD_SEQUENCE_LENGTH 8 #define CMP_SIZE (MIPS_LOAD_SEQUENCE_LENGTH + 4) #define BR_SIZE 8 #define LOADSTORE_SIZE 4 #define JUMP_IMM_SIZE 16 #define JUMP_IMM32_SIZE (MIPS_LOAD_SEQUENCE_LENGTH + 8) #define LOAD_CONST_SIZE 8 #define JUMP_JR_SIZE 8 gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp) { int i; int size = 0; guint8 *code, *start, *patch; for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->is_equals) { if (item->check_target_idx) { item->chunk_size += LOAD_CONST_SIZE + BR_SIZE + JUMP_JR_SIZE; if (item->has_target_code) item->chunk_size += LOAD_CONST_SIZE; else item->chunk_size += LOADSTORE_SIZE; } else { if (fail_tramp) { item->chunk_size += LOAD_CONST_SIZE + BR_SIZE + JUMP_IMM32_SIZE + LOADSTORE_SIZE + JUMP_IMM32_SIZE; if (!item->has_target_code) item->chunk_size += LOADSTORE_SIZE; } else { item->chunk_size += LOADSTORE_SIZE + JUMP_JR_SIZE; #if ENABLE_WRONG_METHOD_CHECK 
item->chunk_size += CMP_SIZE + BR_SIZE + 4; #endif } } } else { item->chunk_size += CMP_SIZE + BR_SIZE; imt_entries [item->check_target_idx]->compare_done = TRUE; } size += item->chunk_size; } /* the initial load of the vtable address */ size += MIPS_LOAD_SEQUENCE_LENGTH; if (fail_tramp) { code = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, size); } else { MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass); code = mono_mem_manager_code_reserve (mem_manager, size); } start = code; /* t7 points to the vtable */ mips_load_const (code, mips_t7, (gsize)(& (vtable->vtable [0]))); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; item->code_target = code; if (item->is_equals) { if (item->check_target_idx) { mips_load_const (code, mips_temp, (gsize)item->key); item->jmp_code = code; mips_bne (code, mips_temp, MONO_ARCH_IMT_REG, 0); mips_nop (code); if (item->has_target_code) { mips_load_const (code, mips_t9, item->value.target_code); } else { mips_lw (code, mips_t9, mips_t7, (sizeof (target_mgreg_t) * item->value.vtable_slot)); } mips_jr (code, mips_t9); mips_nop (code); } else { if (fail_tramp) { mips_load_const (code, mips_temp, (gsize)item->key); patch = code; mips_bne (code, mips_temp, MONO_ARCH_IMT_REG, 0); mips_nop (code); if (item->has_target_code) { mips_load_const (code, mips_t9, item->value.target_code); } else { g_assert (vtable); mips_load_const (code, mips_at, & (vtable->vtable [item->value.vtable_slot])); mips_lw (code, mips_t9, mips_at, 0); } mips_jr (code, mips_t9); mips_nop (code); mips_patch ((guint32 *)(void *)patch, (guint32)code); mips_load_const (code, mips_t9, fail_tramp); mips_jr (code, mips_t9); mips_nop (code); } else { /* enable the commented code to assert on wrong method */ #if ENABLE_WRONG_METHOD_CHECK ppc_load (code, ppc_r0, (guint32)item->key); ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0); patch = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); #endif mips_lw (code, mips_t9, mips_t7, (sizeof (target_mgreg_t) * item->value.vtable_slot)); mips_jr (code, mips_t9); mips_nop (code); #if ENABLE_WRONG_METHOD_CHECK ppc_patch (patch, code); ppc_break (code); #endif } } } else { mips_load_const (code, mips_temp, (gulong)item->key); mips_slt (code, mips_temp, MONO_ARCH_IMT_REG, mips_temp); item->jmp_code = code; mips_beq (code, mips_temp, mips_zero, 0); mips_nop (code); } } /* patch the branches to get to the target items */ for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->jmp_code && item->check_target_idx) { mips_patch ((guint32 *)item->jmp_code, (guint32)imt_entries [item->check_target_idx]->code_target); } } if (!fail_tramp) UnlockedAdd (&mono_stats.imt_trampolines_size, code - start); g_assert (code - start <= size); mono_arch_flush_icache (start, size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL)); mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), NULL); return start; } MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code) { return (MonoMethod*) regs [MONO_ARCH_IMT_REG]; } MonoVTable* mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code) { return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG]; } /* Soft Debug support */ #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED /* * mono_arch_set_breakpoint: * * See mini-amd64.c for docs. 
*/ void mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip; guint32 addr = (guint32)bp_trigger_page; mips_load_const (code, mips_t9, addr); mips_lw (code, mips_t9, mips_t9, 0); mono_arch_flush_icache (ip, code - ip); } /* * mono_arch_clear_breakpoint: * * See mini-amd64.c for docs. */ void mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip; mips_nop (code); mips_nop (code); mips_nop (code); mono_arch_flush_icache (ip, code - ip); } /* * mono_arch_start_single_stepping: * * See mini-amd64.c for docs. */ void mono_arch_start_single_stepping (void) { mono_mprotect (ss_trigger_page, mono_pagesize (), 0); } /* * mono_arch_stop_single_stepping: * * See mini-amd64.c for docs. */ void mono_arch_stop_single_stepping (void) { mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ); } /* * mono_arch_is_single_step_event: * * See mini-amd64.c for docs. */ gboolean mono_arch_is_single_step_event (void *info, void *sigctx) { siginfo_t* sinfo = (siginfo_t*) info; /* Sometimes the address is off by 4 */ if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128) return TRUE; else return FALSE; } /* * mono_arch_is_breakpoint_event: * * See mini-amd64.c for docs. */ gboolean mono_arch_is_breakpoint_event (void *info, void *sigctx) { siginfo_t* sinfo = (siginfo_t*) info; /* Sometimes the address is off by 4 */ if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128) return TRUE; else return FALSE; } /* * mono_arch_skip_breakpoint: * * See mini-amd64.c for docs. */ void mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji) { MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4); } /* * mono_arch_skip_single_step: * * See mini-amd64.c for docs. */ void mono_arch_skip_single_step (MonoContext *ctx) { MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4); } /* * mono_arch_get_seq_point_info: * * See mini-amd64.c for docs. */ SeqPointInfo* mono_arch_get_seq_point_info (guint8 *code) { NOT_IMPLEMENTED; return NULL; } #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */ gboolean mono_arch_opcode_supported (int opcode) { return FALSE; } gboolean mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_) { return FALSE; } gpointer mono_arch_load_function (MonoJitICallId jit_icall_id) { return NULL; } GSList* mono_arch_get_cie_program (void) { NOT_IMPLEMENTED; return NULL; }
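/*
 * Editor's note (illustrative, not part of the original source): the soft
 * debug support earlier in this file is driven by the two mmap'd trigger
 * pages.  A sequence point compiles to a load from ss_trigger_page; while
 * single stepping is off the page is readable and the load is harmless,
 * and mono_arch_start_single_stepping () revokes all access so the same
 * load faults, with mono_arch_is_single_step_event () recognising the
 * faulting address.  mono_arch_set_breakpoint () works the other way
 * around: it patches the nop-filled breakpoint site with a load from the
 * protected bp_trigger_page, and mono_arch_clear_breakpoint () restores
 * the nops.
 */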
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/mini-ppc.c
/** * \file * PowerPC backend for the Mono code generator * * Authors: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * Andreas Faerber <[email protected]> * * (C) 2003 Ximian, Inc. * (C) 2007-2008 Andreas Faerber */ #include "mini.h" #include <string.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/utils/mono-proclib.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-hwcap.h> #include <mono/utils/unlocked.h> #include "mono/utils/mono-tls-inline.h" #include "mini-ppc.h" #ifdef TARGET_POWERPC64 #include "cpu-ppc64.h" #else #include "cpu-ppc.h" #endif #include "ir-emit.h" #include "aot-runtime.h" #include "mini-runtime.h" #ifdef __APPLE__ #include <sys/sysctl.h> #endif #ifdef __linux__ #include <unistd.h> #endif #ifdef _AIX #include <sys/systemcfg.h> #endif static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math") static GENERATE_TRY_GET_CLASS_WITH_CACHE (mathf, "System", "MathF") #define FORCE_INDIR_CALL 1 enum { TLS_MODE_DETECT, TLS_MODE_FAILED, TLS_MODE_LTHREADS, TLS_MODE_NPTL, TLS_MODE_DARWIN_G4, TLS_MODE_DARWIN_G5 }; /* cpu_hw_caps contains the flags defined below */ static int cpu_hw_caps = 0; static int cachelinesize = 0; static int cachelineinc = 0; enum { PPC_ICACHE_SNOOP = 1 << 0, PPC_MULTIPLE_LS_UNITS = 1 << 1, PPC_SMP_CAPABLE = 1 << 2, PPC_ISA_2X = 1 << 3, PPC_ISA_64 = 1 << 4, PPC_MOVE_FPR_GPR = 1 << 5, PPC_ISA_2_03 = 1 << 6, PPC_HW_CAP_END }; #define BREAKPOINT_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4) /* This mutex protects architecture specific caches */ #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex) #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex) static mono_mutex_t mini_arch_mutex; /* * The code generated for sequence points reads from this location, which is * made read-only when single stepping is enabled. 
*/ static gpointer ss_trigger_page; /* Enabled breakpoints read from this trigger page */ static gpointer bp_trigger_page; #define MONO_EMIT_NEW_LOAD_R8(cfg,dr,addr) do { \ MonoInst *inst; \ MONO_INST_NEW ((cfg), (inst), OP_R8CONST); \ inst->type = STACK_R8; \ inst->dreg = (dr); \ inst->inst_p0 = (void*)(addr); \ mono_bblock_add_inst (cfg->cbb, inst); \ } while (0) const char* mono_arch_regname (int reg) { static const char rnames[][4] = { "r0", "sp", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } const char* mono_arch_fregname (int reg) { static const char rnames[][4] = { "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } /* this function overwrites r0, r11, r12 */ static guint8* emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset) { /* unrolled, use the counter in big */ if (size > sizeof (target_mgreg_t) * 5) { long shifted = size / TARGET_SIZEOF_VOID_P; guint8 *copy_loop_start, *copy_loop_jump; ppc_load (code, ppc_r0, shifted); ppc_mtctr (code, ppc_r0); //g_assert (sreg == ppc_r12); ppc_addi (code, ppc_r11, dreg, (doffset - sizeof (target_mgreg_t))); ppc_addi (code, ppc_r12, sreg, (soffset - sizeof (target_mgreg_t))); copy_loop_start = code; ppc_ldptr_update (code, ppc_r0, (unsigned int)sizeof (target_mgreg_t), ppc_r12); ppc_stptr_update (code, ppc_r0, (unsigned int)sizeof (target_mgreg_t), ppc_r11); copy_loop_jump = code; ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0); ppc_patch (copy_loop_jump, copy_loop_start); size -= shifted * sizeof (target_mgreg_t); doffset = soffset = 0; dreg = ppc_r11; } #ifdef __mono_ppc64__ /* if the hardware has multiple load/store units and the move is long enough to use more than one register, use load/load/store/store to execute 2 instructions per cycle.
*/ if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r11) && (sreg != ppc_r11)) { while (size >= 16) { ppc_ldptr (code, ppc_r0, soffset, sreg); ppc_ldptr (code, ppc_r11, soffset+8, sreg); ppc_stptr (code, ppc_r0, doffset, dreg); ppc_stptr (code, ppc_r11, doffset+8, dreg); size -= 16; soffset += 16; doffset += 16; } } while (size >= 8) { ppc_ldr (code, ppc_r0, soffset, sreg); ppc_str (code, ppc_r0, doffset, dreg); size -= 8; soffset += 8; doffset += 8; } #else if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r11) && (sreg != ppc_r11)) { while (size >= 8) { ppc_lwz (code, ppc_r0, soffset, sreg); ppc_lwz (code, ppc_r11, soffset+4, sreg); ppc_stw (code, ppc_r0, doffset, dreg); ppc_stw (code, ppc_r11, doffset+4, dreg); size -= 8; soffset += 8; doffset += 8; } } #endif while (size >= 4) { ppc_lwz (code, ppc_r0, soffset, sreg); ppc_stw (code, ppc_r0, doffset, dreg); size -= 4; soffset += 4; doffset += 4; } while (size >= 2) { ppc_lhz (code, ppc_r0, soffset, sreg); ppc_sth (code, ppc_r0, doffset, dreg); size -= 2; soffset += 2; doffset += 2; } while (size >= 1) { ppc_lbz (code, ppc_r0, soffset, sreg); ppc_stb (code, ppc_r0, doffset, dreg); size -= 1; soffset += 1; doffset += 1; } return code; } /* * mono_arch_get_argument_info: * @csig: a method signature * @param_count: the number of parameters to consider * @arg_info: an array to store the result infos * * Gathers information on parameters such as size, alignment and * padding. arg_info should be large enough to hold param_count + 1 entries. * * Returns the size of the activation frame. */ int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) { #ifdef __mono_ppc64__ NOT_IMPLEMENTED; return -1; #else int k, frame_size = 0; int size, align, pad; int offset = 8; if (MONO_TYPE_ISSTRUCT (csig->ret)) { frame_size += sizeof (target_mgreg_t); offset += 4; } arg_info [0].offset = offset; if (csig->hasthis) { frame_size += sizeof (target_mgreg_t); offset += 4; } arg_info [0].size = frame_size; for (k = 0; k < param_count; k++) { if (csig->pinvoke && !csig->marshalling_disabled) size = mono_type_native_stack_size (csig->params [k], (guint32*)&align); else size = mini_type_stack_size (csig->params [k], &align); /* ignore alignment for now */ align = 1; frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; frame_size += size; arg_info [k + 1].pad = 0; arg_info [k + 1].size = size; offset += pad; arg_info [k + 1].offset = offset; offset += size; } align = MONO_ARCH_FRAME_ALIGNMENT; frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; return frame_size; #endif } #ifdef __mono_ppc64__ static gboolean is_load_sequence (guint32 *seq) { return ppc_opcode (seq [0]) == 15 && /* lis */ ppc_opcode (seq [1]) == 24 && /* ori */ ppc_opcode (seq [2]) == 30 && /* sldi */ ppc_opcode (seq [3]) == 25 && /* oris */ ppc_opcode (seq [4]) == 24; /* ori */ } #define ppc_load_get_dest(l) (((l)>>21) & 0x1f) #define ppc_load_get_off(l) ((gint16)((l) & 0xffff)) #endif /* ld || lwz */ #define ppc_is_load_op(opcode) (ppc_opcode ((opcode)) == 58 || ppc_opcode ((opcode)) == 32) /* code must point to the blrl */ gboolean mono_ppc_is_direct_call_sequence (guint32 *code) { #ifdef __mono_ppc64__ g_assert(*code == 0x4e800021 || *code == 0x4e800020 || *code == 0x4e800420); /* the thunk-less direct call sequence: lis/ori/sldi/oris/ori/mtlr/blrl */ if (ppc_opcode (code [-1]) == 31) { /* mtlr */ if (ppc_is_load_op (code [-2]) && ppc_is_load_op
(code [-3])) { /* ld/ld */ if (!is_load_sequence (&code [-8])) return FALSE; /* one of the loads must be "ld r2,8(rX)" or "ld r2,4(rX) for ilp32 */ return (ppc_load_get_dest (code [-2]) == ppc_r2 && ppc_load_get_off (code [-2]) == sizeof (target_mgreg_t)) || (ppc_load_get_dest (code [-3]) == ppc_r2 && ppc_load_get_off (code [-3]) == sizeof (target_mgreg_t)); } if (ppc_opcode (code [-2]) == 24 && ppc_opcode (code [-3]) == 31) /* mr/nop */ return is_load_sequence (&code [-8]); else return is_load_sequence (&code [-6]); } return FALSE; #else g_assert(*code == 0x4e800021); /* the thunk-less direct call sequence: lis/ori/mtlr/blrl */ return ppc_opcode (code [-1]) == 31 && ppc_opcode (code [-2]) == 24 && ppc_opcode (code [-3]) == 15; #endif } #define MAX_ARCH_DELEGATE_PARAMS 7 static guint8* get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, guint32 param_count, gboolean aot) { guint8 *code, *start; if (has_target) { int size = MONO_PPC_32_64_CASE (32, 32) + PPC_FTNPTR_SIZE; start = code = mono_global_codeman_reserve (size); if (!aot) code = mono_ppc_create_pre_code_ftnptr (code); /* Replace the this argument with the target */ ppc_ldptr (code, ppc_r0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3); #ifdef PPC_USES_FUNCTION_DESCRIPTOR /* it's a function descriptor */ /* Can't use ldptr as it doesn't work with r0 */ ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0); #endif ppc_mtctr (code, ppc_r0); ppc_ldptr (code, ppc_r3, MONO_STRUCT_OFFSET (MonoDelegate, target), ppc_r3); ppc_bcctr (code, PPC_BR_ALWAYS, 0); g_assert ((code - start) <= size); mono_arch_flush_icache (start, size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); } else { int size, i; size = MONO_PPC_32_64_CASE (32, 32) + param_count * 4 + PPC_FTNPTR_SIZE; start = code = mono_global_codeman_reserve (size); if (!aot) code = mono_ppc_create_pre_code_ftnptr (code); ppc_ldptr (code, ppc_r0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3); #ifdef PPC_USES_FUNCTION_DESCRIPTOR /* it's a function descriptor */ ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0); #endif ppc_mtctr (code, ppc_r0); /* slide down the arguments */ for (i = 0; i < param_count; ++i) { ppc_mr (code, (ppc_r3 + i), (ppc_r3 + i + 1)); } ppc_bcctr (code, PPC_BR_ALWAYS, 0); g_assert ((code - start) <= size); mono_arch_flush_icache (start, size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); } if (has_target) { *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, NULL); } else { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count); *info = mono_tramp_info_create (name, start, code - start, NULL, NULL); g_free (name); } return start; } GSList* mono_arch_get_delegate_invoke_impls (void) { GSList *res = NULL; MonoTrampInfo *info; int i; get_delegate_invoke_impl (&info, TRUE, 0, TRUE); res = g_slist_prepend (res, info); for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) { get_delegate_invoke_impl (&info, FALSE, i, TRUE); res = g_slist_prepend (res, info); } return res; } gpointer mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target) { guint8 *code, *start; /* FIXME: Support more cases */ if (MONO_TYPE_ISSTRUCT (sig->ret)) return NULL; if (has_target) { static guint8* cached = NULL; if (cached) return cached; if (mono_ee_features.use_aot_trampolines) { start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target"); } else { 
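/*
 * Editor's note (illustrative, not part of the original source): on this
 * non-AOT path the thunk is generated at runtime by
 * get_delegate_invoke_impl () and registered through
 * mono_tramp_info_register () so unwind info and diagnostics can find it;
 * the pointer is then published to the static cache below behind
 * mono_memory_barrier (), which makes the lazy initialization safe for
 * racing callers (at worst the thunk is generated more than once).
 */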
MonoTrampInfo *info; start = get_delegate_invoke_impl (&info, TRUE, 0, FALSE); mono_tramp_info_register (info, NULL); } mono_memory_barrier (); cached = start; } else { static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL}; int i; if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS) return NULL; for (i = 0; i < sig->param_count; ++i) if (!mono_is_regsize_var (sig->params [i])) return NULL; code = cache [sig->param_count]; if (code) return code; if (mono_ee_features.use_aot_trampolines) { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count); start = (guint8*)mono_aot_get_trampoline (name); g_free (name); } else { MonoTrampInfo *info; start = get_delegate_invoke_impl (&info, FALSE, sig->param_count, FALSE); mono_tramp_info_register (info, NULL); } mono_memory_barrier (); cache [sig->param_count] = start; } return start; } gpointer mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg) { return NULL; } gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *r, guint8 *code) { return (gpointer)(gsize)r [ppc_r3]; } typedef struct { long int type; long int value; } AuxVec; #define MAX_AUX_ENTRIES 128 /* * PPC_FEATURE_POWER4, PPC_FEATURE_POWER5, PPC_FEATURE_POWER5_PLUS, PPC_FEATURE_CELL, * PPC_FEATURE_PA6T, PPC_FEATURE_ARCH_2_05 are considered supporting 2X ISA features */ #define ISA_2X (0x00080000 | 0x00040000 | 0x00020000 | 0x00010000 | 0x00000800 | 0x00001000) /* define PPC_FEATURE_64 HWCAP for 64-bit category. */ #define ISA_64 0x40000000 /* define PPC_FEATURE_POWER6_EXT HWCAP for power6x mffgpr/mftgpr instructions. */ #define ISA_MOVE_FPR_GPR 0x00000200 /* * Initialize the cpu to execute managed code. */ void mono_arch_cpu_init (void) { } /* * Initialize architecture specific code. */ void mono_arch_init (void) { #if defined(MONO_CROSS_COMPILE) #elif defined(__APPLE__) int mib [3]; size_t len = sizeof (cachelinesize); mib [0] = CTL_HW; mib [1] = HW_CACHELINE; if (sysctl (mib, 2, &cachelinesize, &len, NULL, 0) == -1) { perror ("sysctl"); cachelinesize = 128; } else { cachelineinc = cachelinesize; } #elif defined(__linux__) AuxVec vec [MAX_AUX_ENTRIES]; int i, vec_entries = 0; /* sadly this will work only with 2.6 kernels... */ FILE* f = fopen ("/proc/self/auxv", "rb"); if (f) { vec_entries = fread (&vec, sizeof (AuxVec), MAX_AUX_ENTRIES, f); fclose (f); } for (i = 0; i < vec_entries; i++) { int type = vec [i].type; if (type == 19) { /* AT_DCACHEBSIZE */ cachelinesize = vec [i].value; continue; } } #elif defined(G_COMPILER_CODEWARRIOR) cachelinesize = 32; cachelineinc = 32; #elif defined(_AIX) /* FIXME: use block instead? 
*/ cachelinesize = _system_configuration.icache_line; cachelineinc = _system_configuration.icache_line; #else //#error Need a way to get cache line size #endif if (mono_hwcap_ppc_has_icache_snoop) cpu_hw_caps |= PPC_ICACHE_SNOOP; if (mono_hwcap_ppc_is_isa_2x) cpu_hw_caps |= PPC_ISA_2X; if (mono_hwcap_ppc_is_isa_2_03) cpu_hw_caps |= PPC_ISA_2_03; if (mono_hwcap_ppc_is_isa_64) cpu_hw_caps |= PPC_ISA_64; if (mono_hwcap_ppc_has_move_fpr_gpr) cpu_hw_caps |= PPC_MOVE_FPR_GPR; if (mono_hwcap_ppc_has_multiple_ls_units) cpu_hw_caps |= PPC_MULTIPLE_LS_UNITS; if (!cachelinesize) cachelinesize = 32; if (!cachelineinc) cachelineinc = cachelinesize; if (mono_cpu_count () > 1) cpu_hw_caps |= PPC_SMP_CAPABLE; mono_os_mutex_init_recursive (&mini_arch_mutex); ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER); bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER); mono_mprotect (bp_trigger_page, mono_pagesize (), 0); // FIXME: Fix partial sharing for power and remove this mono_set_partial_sharing_supported (FALSE); } /* * Cleanup architecture specific code. */ void mono_arch_cleanup (void) { mono_os_mutex_destroy (&mini_arch_mutex); } gboolean mono_arch_have_fast_tls (void) { return FALSE; } /* * This function returns the optimizations supported on this cpu. */ guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask) { guint32 opts = 0; /* no ppc-specific optimizations yet */ *exclude_mask = 0; return opts; } #ifdef __mono_ppc64__ #define CASE_PPC32(c) #define CASE_PPC64(c) case c: #else #define CASE_PPC32(c) case c: #define CASE_PPC64(c) #endif static gboolean is_regsize_var (MonoType *t) { if (m_type_is_byref (t)) return TRUE; t = mini_get_underlying_type (t); switch (t->type) { case MONO_TYPE_I4: case MONO_TYPE_U4: CASE_PPC64 (MONO_TYPE_I8) CASE_PPC64 (MONO_TYPE_U8) case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: return TRUE; case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_CLASS: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: return TRUE; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (t)) return TRUE; return FALSE; case MONO_TYPE_VALUETYPE: return FALSE; } return FALSE; } #ifndef DISABLE_JIT GList * mono_arch_get_allocatable_int_vars (MonoCompile *cfg) { GList *vars = NULL; int i; for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); /* unused vars */ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos) continue; if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG)) continue; /* we can only allocate 32 bit values */ if (is_regsize_var (ins->inst_vtype)) { g_assert (MONO_VARINFO (cfg, i)->reg == -1); g_assert (i == vmv->idx); vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE); } } return vars; } #endif /* ifndef DISABLE_JIT */ GList * mono_arch_get_global_int_regs (MonoCompile *cfg) { GList *regs = NULL; int i, top = 32; if (cfg->frame_reg != ppc_sp) top = 31; /* ppc_r13 is used by the system on PPC EABI */ for (i = 14; i < top; ++i) { /* * Reserve r29 for holding the vtable address for virtual calls in AOT mode, * since the trampolines can clobber r12. 
*/ if (!(cfg->compile_aot && i == 29)) regs = g_list_prepend (regs, GUINT_TO_POINTER (i)); } return regs; } /* * mono_arch_regalloc_cost: * * Return the cost, in number of memory references, of the action of * allocating the variable VMV into a register during global register * allocation. */ guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv) { /* FIXME: */ return 2; } void mono_arch_flush_icache (guint8 *code, gint size) { #ifdef MONO_CROSS_COMPILE /* do nothing */ #else register guint8 *p; guint8 *endp, *start; p = start = code; endp = p + size; start = (guint8*)((gsize)start & ~(cachelinesize - 1)); /* use dcbf for smp support, later optimize for UP, see pem._64bit.d20030611.pdf page 211 */ #if defined(G_COMPILER_CODEWARRIOR) if (cpu_hw_caps & PPC_SMP_CAPABLE) { for (p = start; p < endp; p += cachelineinc) { asm { dcbf 0, p }; } } else { for (p = start; p < endp; p += cachelineinc) { asm { dcbst 0, p }; } } asm { sync }; p = code; for (p = start; p < endp; p += cachelineinc) { asm { icbi 0, p sync } } asm { sync isync } #else /* For POWER5/6 with ICACHE_SNOOPing only one icbi in the range is required. * The sync is required to ensure that the store queue is completely empty. * While the icbi performs no cache operations, icbi/isync is required to * kill local prefetch. */ if (cpu_hw_caps & PPC_ICACHE_SNOOP) { asm ("sync"); asm ("icbi 0,%0;" : : "r"(code) : "memory"); asm ("isync"); return; } /* use dcbf for smp support, see pem._64bit.d20030611.pdf page 211 */ if (cpu_hw_caps & PPC_SMP_CAPABLE) { for (p = start; p < endp; p += cachelineinc) { asm ("dcbf 0,%0;" : : "r"(p) : "memory"); } } else { for (p = start; p < endp; p += cachelineinc) { asm ("dcbst 0,%0;" : : "r"(p) : "memory"); } } asm ("sync"); p = code; for (p = start; p < endp; p += cachelineinc) { /* for ISA2.0+ implementations we should not need any extra sync between the * icbi instructions. Both the 2.0 PEM and the PowerISA-2.05 say this. * So I am not sure which chip had this problem but it's not an issue on * any of the ISA V2 chips. */ if (cpu_hw_caps & PPC_ISA_2X) asm ("icbi 0,%0;" : : "r"(p) : "memory"); else asm ("icbi 0,%0; sync;" : : "r"(p) : "memory"); } if (!(cpu_hw_caps & PPC_ISA_2X)) asm ("sync"); asm ("isync"); #endif #endif } void mono_arch_flush_register_windows (void) { } #ifdef __APPLE__ #define ALWAYS_ON_STACK(s) s #define FP_ALSO_IN_REG(s) s #else #ifdef __mono_ppc64__ #define ALWAYS_ON_STACK(s) s #define FP_ALSO_IN_REG(s) s #else #define ALWAYS_ON_STACK(s) #define FP_ALSO_IN_REG(s) #endif #define ALIGN_DOUBLES #endif enum { RegTypeGeneral, RegTypeBase, RegTypeFP, RegTypeStructByVal, RegTypeStructByAddr, RegTypeFPStructByVal, // For the v2 ABI, floats should be passed in FRs instead of GRs. Only valid for ABI v2!
}; typedef struct { gint32 offset; guint32 vtsize; /* in param area */ guint8 reg; guint8 vtregs; /* number of registers used to pass a RegTypeStructByVal/RegTypeFPStructByVal */ guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */ guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal/RegTypeFPStructByVal */ guint8 bytes : 4; /* size in bytes - only valid for RegTypeStructByVal/RegTypeFPStructByVal if the struct fits in one word, otherwise it's 0*/ } ArgInfo; struct CallInfo { int nargs; guint32 stack_usage; guint32 struct_ret; ArgInfo ret; ArgInfo sig_cookie; gboolean vtype_retaddr; int vret_arg_index; ArgInfo args [1]; }; #define DEBUG(a) #if PPC_RETURN_SMALL_FLOAT_STRUCTS_IN_FR_REGS // // Test if a structure is composed entirely of float fields or entirely of double fields and has at most // PPC_MOST_FLOAT_STRUCT_MEMBERS_TO_RETURN_VIA_REGISTERS members. // If this is true the structure can be returned directly via float registers instead of by a hidden parameter // pointing to where the return value should be stored. // This is as per the ELF ABI v2. // static gboolean is_float_struct_returnable_via_regs (MonoType *type, int* member_cnt, int* member_size) { int local_member_cnt, local_member_size; if (!member_cnt) { member_cnt = &local_member_cnt; } if (!member_size) { member_size = &local_member_size; } gboolean is_all_floats = mini_type_is_hfa(type, member_cnt, member_size); return is_all_floats && (*member_cnt <= PPC_MOST_FLOAT_STRUCT_MEMBERS_TO_RETURN_VIA_REGISTERS); } #else #define is_float_struct_returnable_via_regs(a,b,c) (FALSE) #endif #if PPC_RETURN_SMALL_STRUCTS_IN_REGS // // Test if a structure is no larger than 2 doublewords (PPC_LARGEST_STRUCT_SIZE_TO_RETURN_VIA_REGISTERS) and is // completely composed of fields all of basic types. // If this is true the structure can be returned directly via registers r3/r4 instead of by a hidden parameter // pointing to where the return value should be stored. // This is as per the ELF ABI v2. // static gboolean is_struct_returnable_via_regs (MonoClass *klass, gboolean is_pinvoke) { gboolean has_a_field = FALSE; int size = 0; if (klass) { gpointer iter = NULL; MonoClassField *f; if (is_pinvoke) size = mono_type_native_stack_size (m_class_get_byval_arg (klass), 0); else size = mini_type_stack_size (m_class_get_byval_arg (klass), 0); if (size == 0) return TRUE; if (size > PPC_LARGEST_STRUCT_SIZE_TO_RETURN_VIA_REGISTERS) return FALSE; while ((f = mono_class_get_fields_internal (klass, &iter))) { if (!(f->type->attrs & FIELD_ATTRIBUTE_STATIC)) { // TBD: Is there a better way to check for the basic types?
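/*
 * Editor's note (illustrative, not part of the original source): the
 * checks below accept field types in the MONO_TYPE_BOOLEAN..MONO_TYPE_R8
 * range (bool, char, the integer types, float and double) and recurse
 * into nested value types, so a hypothetical
 *
 *     struct Pair { gint32 a; struct { gint16 b; float c; } inner; };
 *
 * is still returnable in r3/r4, provided its total size does not exceed
 * PPC_LARGEST_STRUCT_SIZE_TO_RETURN_VIA_REGISTERS.
 */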
if (m_type_is_byref (f->type)) { return FALSE; } else if ((f->type->type >= MONO_TYPE_BOOLEAN) && (f->type->type <= MONO_TYPE_R8)) { has_a_field = TRUE; } else if (MONO_TYPE_ISSTRUCT (f->type)) { MonoClass *klass = mono_class_from_mono_type_internal (f->type); if (is_struct_returnable_via_regs(klass, is_pinvoke)) { has_a_field = TRUE; } else { return FALSE; } } else { return FALSE; } } } } return has_a_field; } #else #define is_struct_returnable_via_regs(a,b) (FALSE) #endif static void inline add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple) { #ifdef __mono_ppc64__ g_assert (simple); #endif if (simple) { if (*gr >= 3 + PPC_NUM_REG_ARGS) { ainfo->offset = PPC_STACK_PARAM_OFFSET + *stack_size; ainfo->reg = ppc_sp; /* in the caller */ ainfo->regtype = RegTypeBase; *stack_size += sizeof (target_mgreg_t); } else { ALWAYS_ON_STACK (*stack_size += sizeof (target_mgreg_t)); ainfo->reg = *gr; } } else { if (*gr >= 3 + PPC_NUM_REG_ARGS - 1) { #ifdef ALIGN_DOUBLES //*stack_size += (*stack_size % 8); #endif ainfo->offset = PPC_STACK_PARAM_OFFSET + *stack_size; ainfo->reg = ppc_sp; /* in the caller */ ainfo->regtype = RegTypeBase; *stack_size += 8; } else { #ifdef ALIGN_DOUBLES if (!((*gr) & 1)) (*gr) ++; #endif ALWAYS_ON_STACK (*stack_size += 8); ainfo->reg = *gr; } (*gr) ++; } (*gr) ++; } #if defined(__APPLE__) || (defined(__mono_ppc64__) && !PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS) static gboolean has_only_a_r48_field (MonoClass *klass) { gpointer iter; MonoClassField *f; gboolean have_field = FALSE; iter = NULL; while ((f = mono_class_get_fields_internal (klass, &iter))) { if (!(f->type->attrs & FIELD_ATTRIBUTE_STATIC)) { if (have_field) return FALSE; if (!m_type_is_byref (f->type) && (f->type->type == MONO_TYPE_R4 || f->type->type == MONO_TYPE_R8)) have_field = TRUE; else return FALSE; } } return have_field; } #endif static CallInfo* get_call_info (MonoMethodSignature *sig) { guint i, fr, gr, pstart; int n = sig->hasthis + sig->param_count; MonoType *simpletype; guint32 stack_size = 0; CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n); gboolean is_pinvoke = sig->pinvoke; fr = PPC_FIRST_FPARG_REG; gr = PPC_FIRST_ARG_REG; if (mini_type_is_vtype (sig->ret)) { cinfo->vtype_retaddr = TRUE; } pstart = 0; n = 0; /* * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after * the first argument, allowing 'this' to be always passed in the first arg reg. * Also do this if the first argument is a reference type, since virtual calls * are sometimes made using calli without sig->hasthis set, like in the delegate * invoke wrappers. 
*/ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) { if (sig->hasthis) { add_general (&gr, &stack_size, cinfo->args + 0, TRUE); n ++; } else { add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE); pstart = 1; n ++; } add_general (&gr, &stack_size, &cinfo->ret, TRUE); cinfo->struct_ret = cinfo->ret.reg; cinfo->vret_arg_index = 1; } else { /* this */ if (sig->hasthis) { add_general (&gr, &stack_size, cinfo->args + 0, TRUE); n ++; } if (cinfo->vtype_retaddr) { add_general (&gr, &stack_size, &cinfo->ret, TRUE); cinfo->struct_ret = cinfo->ret.reg; } } DEBUG(printf("params: %d\n", sig->param_count)); for (i = pstart; i < sig->param_count; ++i) { if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ gr = PPC_LAST_ARG_REG + 1; /* FIXME: don't we have to set fr, too? */ /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE); } DEBUG(printf("param %d: ", i)); if (m_type_is_byref (sig->params [i])) { DEBUG(printf("byref\n")); add_general (&gr, &stack_size, cinfo->args + n, TRUE); n++; continue; } simpletype = mini_get_underlying_type (sig->params [i]); switch (simpletype->type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: cinfo->args [n].size = 1; add_general (&gr, &stack_size, cinfo->args + n, TRUE); n++; break; case MONO_TYPE_CHAR: case MONO_TYPE_I2: case MONO_TYPE_U2: cinfo->args [n].size = 2; add_general (&gr, &stack_size, cinfo->args + n, TRUE); n++; break; case MONO_TYPE_I4: case MONO_TYPE_U4: cinfo->args [n].size = 4; add_general (&gr, &stack_size, cinfo->args + n, TRUE); n++; break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: cinfo->args [n].size = sizeof (target_mgreg_t); add_general (&gr, &stack_size, cinfo->args + n, TRUE); n++; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (simpletype)) { cinfo->args [n].size = sizeof (target_mgreg_t); add_general (&gr, &stack_size, cinfo->args + n, TRUE); n++; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: { gint size; MonoClass *klass = mono_class_from_mono_type_internal (sig->params [i]); if (simpletype->type == MONO_TYPE_TYPEDBYREF) size = MONO_ABI_SIZEOF (MonoTypedRef); else if (sig->pinvoke && !sig->marshalling_disabled) size = mono_class_native_size (klass, NULL); else size = mono_class_value_size (klass, NULL); #if defined(__APPLE__) || (defined(__mono_ppc64__) && !PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS) if ((size == 4 || size == 8) && has_only_a_r48_field (klass)) { cinfo->args [n].size = size; /* It was 7, now it is 8 in LinuxPPC */ if (fr <= PPC_LAST_FPARG_REG) { cinfo->args [n].regtype = RegTypeFP; cinfo->args [n].reg = fr; fr ++; FP_ALSO_IN_REG (gr ++); #if !defined(__mono_ppc64__) if (size == 8) FP_ALSO_IN_REG (gr ++); #endif ALWAYS_ON_STACK (stack_size += size); } else { cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size; cinfo->args [n].regtype = RegTypeBase; cinfo->args [n].reg = ppc_sp; /* in the caller*/ stack_size += 8; } n++; break; } #endif DEBUG(printf ("load %d bytes struct\n", mono_class_native_size (sig->params [i]->data.klass, NULL))); #if PPC_PASS_STRUCTS_BY_VALUE { int align_size = 
size; int nregs = 0; int rest = PPC_LAST_ARG_REG - gr + 1; int n_in_regs = 0; #if PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS int mbr_cnt = 0; int mbr_size = 0; gboolean is_all_floats = is_float_struct_returnable_via_regs (sig->params [i], &mbr_cnt, &mbr_size); if (is_all_floats) { rest = PPC_LAST_FPARG_REG - fr + 1; } // Pass small (<= 8 member) structures entirely made up of either float or double members // in FR registers. There have to be at least mbr_cnt registers left. if (is_all_floats && (rest >= mbr_cnt)) { nregs = mbr_cnt; n_in_regs = MIN (rest, nregs); cinfo->args [n].regtype = RegTypeFPStructByVal; cinfo->args [n].vtregs = n_in_regs; cinfo->args [n].size = mbr_size; cinfo->args [n].vtsize = nregs - n_in_regs; cinfo->args [n].reg = fr; fr += n_in_regs; if (mbr_size == 4) { // floats FP_ALSO_IN_REG (gr += (n_in_regs+1)/2); } else { // doubles FP_ALSO_IN_REG (gr += (n_in_regs)); } } else #endif { align_size += (sizeof (target_mgreg_t) - 1); align_size &= ~(sizeof (target_mgreg_t) - 1); nregs = (align_size + sizeof (target_mgreg_t) -1 ) / sizeof (target_mgreg_t); n_in_regs = MIN (rest, nregs); if (n_in_regs < 0) n_in_regs = 0; #ifdef __APPLE__ /* FIXME: check this */ if (size >= 3 && size % 4 != 0) n_in_regs = 0; #endif cinfo->args [n].regtype = RegTypeStructByVal; cinfo->args [n].vtregs = n_in_regs; cinfo->args [n].size = n_in_regs; cinfo->args [n].vtsize = nregs - n_in_regs; cinfo->args [n].reg = gr; gr += n_in_regs; } #ifdef __mono_ppc64__ if (nregs == 1 && is_pinvoke) cinfo->args [n].bytes = size; else #endif cinfo->args [n].bytes = 0; cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size; /*g_print ("offset for arg %d at %d\n", n, PPC_STACK_PARAM_OFFSET + stack_size);*/ stack_size += nregs * sizeof (target_mgreg_t); } #else add_general (&gr, &stack_size, cinfo->args + n, TRUE); cinfo->args [n].regtype = RegTypeStructByAddr; cinfo->args [n].vtsize = size; #endif n++; break; } case MONO_TYPE_U8: case MONO_TYPE_I8: cinfo->args [n].size = 8; add_general (&gr, &stack_size, cinfo->args + n, SIZEOF_REGISTER == 8); n++; break; case MONO_TYPE_R4: cinfo->args [n].size = 4; /* It was 7, now it is 8 in LinuxPPC */ if (fr <= PPC_LAST_FPARG_REG // For non-native vararg calls the parms must go in storage && !(!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) ) { cinfo->args [n].regtype = RegTypeFP; cinfo->args [n].reg = fr; fr ++; FP_ALSO_IN_REG (gr ++); ALWAYS_ON_STACK (stack_size += SIZEOF_REGISTER); } else { cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size + MONO_PPC_32_64_CASE (0, 4); cinfo->args [n].regtype = RegTypeBase; cinfo->args [n].reg = ppc_sp; /* in the caller*/ stack_size += SIZEOF_REGISTER; } n++; break; case MONO_TYPE_R8: cinfo->args [n].size = 8; /* It was 7, now it is 8 in LinuxPPC */ if (fr <= PPC_LAST_FPARG_REG // For non-native vararg calls the parms must go in storage && !(!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) ) { cinfo->args [n].regtype = RegTypeFP; cinfo->args [n].reg = fr; fr ++; FP_ALSO_IN_REG (gr += sizeof (double) / SIZEOF_REGISTER); ALWAYS_ON_STACK (stack_size += 8); } else { cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size; cinfo->args [n].regtype = RegTypeBase; cinfo->args [n].reg = ppc_sp; /* in the caller*/ stack_size += 8; } n++; break; default: g_error ("Can't trampoline 0x%x", sig->params [i]->type); } } cinfo->nargs = n; if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ gr 
= PPC_LAST_ARG_REG + 1; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE); } { simpletype = mini_get_underlying_type (sig->ret); switch (simpletype->type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: case MONO_TYPE_STRING: cinfo->ret.reg = ppc_r3; break; case MONO_TYPE_U8: case MONO_TYPE_I8: cinfo->ret.reg = ppc_r3; break; case MONO_TYPE_R4: case MONO_TYPE_R8: cinfo->ret.reg = ppc_f1; cinfo->ret.regtype = RegTypeFP; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (simpletype)) { cinfo->ret.reg = ppc_r3; break; } break; case MONO_TYPE_VALUETYPE: break; case MONO_TYPE_TYPEDBYREF: case MONO_TYPE_VOID: break; default: g_error ("Can't handle as return value 0x%x", sig->ret->type); } } /* align stack size to 16 */ DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size)); stack_size = (stack_size + 15) & ~15; cinfo->stack_usage = stack_size; return cinfo; } #ifndef DISABLE_JIT gboolean mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_) { CallInfo *caller_info = get_call_info (caller_sig); CallInfo *callee_info = get_call_info (callee_sig); gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage) && IS_SUPPORTED_TAILCALL (memcmp (&callee_info->ret, &caller_info->ret, sizeof (caller_info->ret)) == 0); // FIXME ABIs vary as to whether this local is in the parameter area or not, // so this check might not be needed. for (int i = 0; res && i < callee_info->nargs; ++i) { res = IS_SUPPORTED_TAILCALL (callee_info->args [i].regtype != RegTypeStructByAddr); /* An address on the callee's stack is passed as the argument */ } g_free (caller_info); g_free (callee_info); return res; } #endif /* * Set var information according to the calling convention. ppc version. * The locals var stuff should most likely be split in another method. */ void mono_arch_allocate_vars (MonoCompile *m) { MonoMethodSignature *sig; MonoMethodHeader *header; MonoInst *inst; int i, offset, size, align, curinst; int frame_reg = ppc_sp; gint32 *offsets; guint32 locals_stack_size, locals_stack_align; m->flags |= MONO_CFG_HAS_SPILLUP; /* this is bug #60332: remove when #59509 is fixed, so no weird vararg * call convs need to be handled this way. */ if (m->flags & MONO_CFG_HAS_VARARGS) m->param_area = MAX (m->param_area, sizeof (target_mgreg_t)*8); /* gtk-sharp and other broken code will dllimport vararg functions even with * non-varargs signatures. Since there is little hope people will get this right * we assume they won't. */ if (m->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) m->param_area = MAX (m->param_area, sizeof (target_mgreg_t)*8); header = m->header; /* * We use the frame register also for any method that has * exception clauses. This way, when the handlers are called, * the code will reference local variables using the frame reg instead of * the stack pointer: if we had to restore the stack pointer, we'd * corrupt the method frames that are already on the stack (since * filters get called before stack unwinding happens) when the filter * code would call any method (this also applies to finally etc.).
*/ if ((m->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses) frame_reg = ppc_r31; m->frame_reg = frame_reg; if (frame_reg != ppc_sp) { m->used_int_regs |= 1 << frame_reg; } sig = mono_method_signature_internal (m->method); offset = 0; curinst = 0; if (MONO_TYPE_ISSTRUCT (sig->ret)) { m->ret->opcode = OP_REGVAR; m->ret->inst_c0 = m->ret->dreg = ppc_r3; } else { /* FIXME: handle long values? */ switch (mini_get_underlying_type (sig->ret)->type) { case MONO_TYPE_VOID: break; case MONO_TYPE_R4: case MONO_TYPE_R8: m->ret->opcode = OP_REGVAR; m->ret->inst_c0 = m->ret->dreg = ppc_f1; break; default: m->ret->opcode = OP_REGVAR; m->ret->inst_c0 = m->ret->dreg = ppc_r3; break; } } /* local vars are at a positive offset from the stack pointer */ /* * also note that if the function uses alloca, we use ppc_r31 * to point at the local variables. */ offset = PPC_MINIMAL_STACK_SIZE; /* linkage area */ /* align the offset to 16 bytes: not sure this is needed here */ //offset += 16 - 1; //offset &= ~(16 - 1); /* add parameter area size for called functions */ offset += m->param_area; offset += 16 - 1; offset &= ~(16 - 1); /* the MonoLMF structure is stored just below the stack pointer */ if (MONO_TYPE_ISSTRUCT (sig->ret)) { offset += sizeof(gpointer) - 1; offset &= ~(sizeof(gpointer) - 1); m->vret_addr->opcode = OP_REGOFFSET; m->vret_addr->inst_basereg = frame_reg; m->vret_addr->inst_offset = offset; if (G_UNLIKELY (m->verbose_level > 1)) { printf ("vret_addr ="); mono_print_ins (m->vret_addr); } offset += sizeof(gpointer); } offsets = mono_allocate_stack_slots (m, FALSE, &locals_stack_size, &locals_stack_align); if (locals_stack_align) { offset += (locals_stack_align - 1); offset &= ~(locals_stack_align - 1); } for (i = m->locals_start; i < m->num_varinfo; i++) { if (offsets [i] != -1) { MonoInst *inst = m->varinfo [i]; inst->opcode = OP_REGOFFSET; inst->inst_basereg = frame_reg; inst->inst_offset = offset + offsets [i]; /* g_print ("allocating local %d (%s) to %d\n", i, mono_type_get_name (inst->inst_vtype), inst->inst_offset); */ } } offset += locals_stack_size; curinst = 0; if (sig->hasthis) { inst = m->args [curinst]; if (inst->opcode != OP_REGVAR) { inst->opcode = OP_REGOFFSET; inst->inst_basereg = frame_reg; offset += sizeof (target_mgreg_t) - 1; offset &= ~(sizeof (target_mgreg_t) - 1); inst->inst_offset = offset; offset += sizeof (target_mgreg_t); } curinst++; } for (i = 0; i < sig->param_count; ++i) { inst = m->args [curinst]; if (inst->opcode != OP_REGVAR) { inst->opcode = OP_REGOFFSET; inst->inst_basereg = frame_reg; if (sig->pinvoke && !sig->marshalling_disabled) { size = mono_type_native_stack_size (sig->params [i], (guint32*)&align); inst->backend.is_pinvoke = 1; } else { size = mono_type_size (sig->params [i], &align); } if (MONO_TYPE_ISSTRUCT (sig->params [i]) && size < sizeof (target_mgreg_t)) size = align = sizeof (target_mgreg_t); /* * Use at least 4/8 byte alignment, since these might be passed in registers, and * they are saved using std in the prolog. */ align = sizeof (target_mgreg_t); offset += align - 1; offset &= ~(align - 1); inst->inst_offset = offset; offset += size; } curinst++; } /* some storage for fp conversions */ offset += 8 - 1; offset &= ~(8 - 1); m->arch.fp_conv_var_offset = offset; offset += 8; /* align the offset to 16 bytes */ offset += 16 - 1; offset &= ~(16 - 1); /* change sign? 
*/ m->stack_offset = offset; if (sig->call_convention == MONO_CALL_VARARG) { CallInfo *cinfo = get_call_info (m->method->signature); m->sig_cookie = cinfo->sig_cookie.offset; g_free(cinfo); } } void mono_arch_create_vars (MonoCompile *cfg) { MonoMethodSignature *sig = mono_method_signature_internal (cfg->method); if (MONO_TYPE_ISSTRUCT (sig->ret)) { cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG); } } /* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode, * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info */ static void emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo) { int sig_reg = mono_alloc_ireg (cfg); /* FIXME: Add support for signature tokens to AOT */ cfg->disable_aot = TRUE; MONO_EMIT_NEW_ICONST (cfg, sig_reg, (gulong)call->signature); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, cinfo->sig_cookie.offset, sig_reg); } void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call) { MonoInst *in, *ins; MonoMethodSignature *sig; int i, n; CallInfo *cinfo; sig = call->signature; n = sig->param_count + sig->hasthis; cinfo = get_call_info (sig); for (i = 0; i < n; ++i) { ArgInfo *ainfo = cinfo->args + i; MonoType *t; if (i >= sig->hasthis) t = sig->params [i - sig->hasthis]; else t = mono_get_int_type (); t = mini_get_underlying_type (t); if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) emit_sig_cookie (cfg, call, cinfo); in = call->args [i]; if (ainfo->regtype == RegTypeGeneral) { #ifndef __mono_ppc64__ if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) { MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = MONO_LVREG_LS (in->dreg); MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE); MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = MONO_LVREG_MS (in->dreg); MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE); } else #endif { MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE); } } else if (ainfo->regtype == RegTypeStructByAddr) { MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->opcode = OP_OUTARG_VT; ins->sreg1 = in->dreg; ins->klass = in->klass; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); } else if (ainfo->regtype == RegTypeStructByVal) { /* this is further handled in mono_arch_emit_outarg_vt () */ MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->opcode = OP_OUTARG_VT; ins->sreg1 = in->dreg; ins->klass = in->klass; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); } else if (ainfo->regtype == RegTypeFPStructByVal) { /* this is further handled in mono_arch_emit_outarg_vt () */ MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->opcode = OP_OUTARG_VT; ins->sreg1 = in->dreg; ins->klass = in->klass; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); cfg->flags |= MONO_CFG_HAS_FPOUT; } else if (ainfo->regtype == RegTypeBase) { if 
(!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg); } else if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) { if (t->type == MONO_TYPE_R8) MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg); else MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg); } else { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg); } } else if (ainfo->regtype == RegTypeFP) { if (t->type == MONO_TYPE_VALUETYPE) { /* this is further handled in mono_arch_emit_outarg_vt () */ MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->opcode = OP_OUTARG_VT; ins->sreg1 = in->dreg; ins->klass = in->klass; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); cfg->flags |= MONO_CFG_HAS_FPOUT; } else { int dreg = mono_alloc_freg (cfg); if (ainfo->size == 4) { MONO_EMIT_NEW_UNALU (cfg, OP_FCONV_TO_R4, dreg, in->dreg); } else { MONO_INST_NEW (cfg, ins, OP_FMOVE); ins->dreg = dreg; ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); } mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE); cfg->flags |= MONO_CFG_HAS_FPOUT; } } else { g_assert_not_reached (); } } /* Emit the signature cookie in the case that there is no additional argument */ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) emit_sig_cookie (cfg, call, cinfo); if (cinfo->struct_ret) { MonoInst *vtarg; MONO_INST_NEW (cfg, vtarg, OP_MOVE); vtarg->sreg1 = call->vret_var->dreg; vtarg->dreg = mono_alloc_preg (cfg); MONO_ADD_INS (cfg->cbb, vtarg); mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->struct_ret, FALSE); } call->stack_usage = cinfo->stack_usage; cfg->param_area = MAX (PPC_MINIMAL_PARAM_AREA_SIZE, MAX (cfg->param_area, cinfo->stack_usage)); cfg->flags |= MONO_CFG_HAS_CALLS; g_free (cinfo); } #ifndef DISABLE_JIT void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src) { MonoCallInst *call = (MonoCallInst*)ins->inst_p0; ArgInfo *ainfo = (ArgInfo*)ins->inst_p1; int ovf_size = ainfo->vtsize; int doffset = ainfo->offset; int i, soffset, dreg; if (ainfo->regtype == RegTypeStructByVal) { #ifdef __APPLE__ guint32 size = 0; #endif soffset = 0; #ifdef __APPLE__ /* * Darwin pinvokes needs some special handling for 1 * and 2 byte arguments */ g_assert (ins->klass); if (call->signature->pinvoke && !call->signature->marshalling_disabled) size = mono_class_native_size (ins->klass, NULL); if (size == 2 || size == 1) { int tmpr = mono_alloc_ireg (cfg); if (size == 1) MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmpr, src->dreg, soffset); else MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmpr, src->dreg, soffset); dreg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, dreg, tmpr); mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, FALSE); } else #endif for (i = 0; i < ainfo->vtregs; ++i) { dreg = mono_alloc_ireg (cfg); #if G_BYTE_ORDER == G_BIG_ENDIAN int antipadding = 0; if (ainfo->bytes) { g_assert (i == 0); antipadding = sizeof (target_mgreg_t) - ainfo->bytes; } MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset); if (antipadding) MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, dreg, dreg, antipadding * 8); #else MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 
soffset); #endif mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE); soffset += sizeof (target_mgreg_t); } if (ovf_size != 0) mini_emit_memcpy (cfg, ppc_r1, doffset + soffset, src->dreg, soffset, ovf_size * sizeof (target_mgreg_t), TARGET_SIZEOF_VOID_P); } else if (ainfo->regtype == RegTypeFPStructByVal) { soffset = 0; for (i = 0; i < ainfo->vtregs; ++i) { int tmpr = mono_alloc_freg (cfg); if (ainfo->size == 4) MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, tmpr, src->dreg, soffset); else // ==8 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmpr, src->dreg, soffset); dreg = mono_alloc_freg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, dreg, tmpr); mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg+i, TRUE); soffset += ainfo->size; } if (ovf_size != 0) mini_emit_memcpy (cfg, ppc_r1, doffset + soffset, src->dreg, soffset, ovf_size * sizeof (target_mgreg_t), TARGET_SIZEOF_VOID_P); } else if (ainfo->regtype == RegTypeFP) { int tmpr = mono_alloc_freg (cfg); if (ainfo->size == 4) MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, tmpr, src->dreg, 0); else MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmpr, src->dreg, 0); dreg = mono_alloc_freg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, dreg, tmpr); mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE); } else { MonoInst *vtcopy = mono_compile_create_var (cfg, m_class_get_byval_arg (src->klass), OP_LOCAL); MonoInst *load; guint32 size; /* FIXME: alignment? */ if (call->signature->pinvoke && !call->signature->marshalling_disabled) { size = mono_type_native_stack_size (m_class_get_byval_arg (src->klass), NULL); vtcopy->backend.is_pinvoke = 1; } else { size = mini_type_stack_size (m_class_get_byval_arg (src->klass), NULL); } if (size > 0) g_assert (ovf_size > 0); EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype); mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, TARGET_SIZEOF_VOID_P); if (ainfo->offset) MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, ainfo->offset, load->dreg); else mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg, FALSE); } } void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val) { MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret); if (!m_type_is_byref (ret)) { #ifndef __mono_ppc64__ if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) { MonoInst *ins; MONO_INST_NEW (cfg, ins, OP_SETLRET); ins->sreg1 = MONO_LVREG_LS (val->dreg); ins->sreg2 = MONO_LVREG_MS (val->dreg); MONO_ADD_INS (cfg->cbb, ins); return; } #endif if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) { MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg); return; } } MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg); } gboolean mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm) { return TRUE; } #endif /* DISABLE_JIT */ /* * Conditional branches have a small offset, so if it is likely overflowed, * we do a branch to the end of the method (uncond branches have much larger * offsets) where we perform the conditional and jump back unconditionally. * It's slightly slower, since we add two uncond branches, but it's very simple * with the current patch implementation and such large methods are likely not * going to be perf critical anyway.
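* (bc encodes a 16-bit displacement, so conditional branches only reach about +/-32KB, while an unconditional b reaches +/-32MB; the 8K slack in the range check below allows for code emitted between the estimate and the final layout.)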
*/ typedef struct { union { MonoBasicBlock *bb; const char *exception; } data; guint32 ip_offset; guint16 b0_cond; guint16 b1_cond; } MonoOvfJump; #define EMIT_COND_BRANCH_FLAGS(ins,b0,b1) \ if (0 && ins->inst_true_bb->native_offset) { \ ppc_bc (code, (b0), (b1), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffff); \ } else { \ int br_disp = ins->inst_true_bb->max_offset - offset; \ if (!ppc_is_imm16 (br_disp + 8 * 1024) || !ppc_is_imm16 (br_disp - 8 * 1024)) { \ MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \ ovfj->data.bb = ins->inst_true_bb; \ ovfj->ip_offset = 0; \ ovfj->b0_cond = (b0); \ ovfj->b1_cond = (b1); \ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB_OVF, ovfj); \ ppc_b (code, 0); \ } else { \ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \ ppc_bc (code, (b0), (b1), 0); \ } \ } #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_b0_table [(cond)], branch_b1_table [(cond)]) /* emit an exception if the condition fails * * We assign the extra code used to throw the implicit exceptions * to cfg->bb_exit as far as the big branch handling is concerned */ #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(b0,b1,exc_name) \ do { \ int br_disp = cfg->bb_exit->max_offset - offset; \ if (!ppc_is_imm16 (br_disp + 1024) || !ppc_is_imm16 (br_disp - 1024)) { \ MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \ ovfj->data.exception = (exc_name); \ ovfj->ip_offset = code - cfg->native_code; \ ovfj->b0_cond = (b0); \ ovfj->b1_cond = (b1); \ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj); \ ppc_bl (code, 0); \ cfg->bb_exit->max_offset += 24; \ } else { \ mono_add_patch_info (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_EXC, exc_name); \ ppc_bcl (code, (b0), (b1), 0); \ } \ } while (0); #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_b0_table [(cond)], branch_b1_table [(cond)], (exc_name)) void mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb) { } static int normalize_opcode (int opcode) { switch (opcode) { #ifndef MONO_ARCH_ILP32 case MONO_PPC_32_64_CASE (OP_LOADI4_MEMBASE, OP_LOADI8_MEMBASE): return OP_LOAD_MEMBASE; case MONO_PPC_32_64_CASE (OP_LOADI4_MEMINDEX, OP_LOADI8_MEMINDEX): return OP_LOAD_MEMINDEX; case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_REG, OP_STOREI8_MEMBASE_REG): return OP_STORE_MEMBASE_REG; case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_IMM, OP_STOREI8_MEMBASE_IMM): return OP_STORE_MEMBASE_IMM; case MONO_PPC_32_64_CASE (OP_STOREI4_MEMINDEX, OP_STOREI8_MEMINDEX): return OP_STORE_MEMINDEX; #endif case MONO_PPC_32_64_CASE (OP_ISHR_IMM, OP_LSHR_IMM): return OP_SHR_IMM; case MONO_PPC_32_64_CASE (OP_ISHR_UN_IMM, OP_LSHR_UN_IMM): return OP_SHR_UN_IMM; default: return opcode; } } void mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *n, *last_ins = NULL; MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) { switch (normalize_opcode (ins->opcode)) { case OP_MUL_IMM: /* remove unnecessary multiplication with 1 */ if (ins->inst_imm == 1) { if (ins->dreg != ins->sreg1) { ins->opcode = OP_MOVE; } else { MONO_DELETE_INS (bb, ins); continue; } } else if (ins->inst_imm > 0) { int power2 = mono_is_power_of_two (ins->inst_imm); if (power2 > 0) { ins->opcode = OP_SHL_IMM; ins->inst_imm = power2; } } break; case OP_LOAD_MEMBASE: /* * OP_STORE_MEMBASE_REG reg, offset(basereg) * OP_LOAD_MEMBASE offset(basereg), reg */ if
(last_ins && normalize_opcode (last_ins->opcode) == OP_STORE_MEMBASE_REG && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { if (ins->dreg == last_ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } else { //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++); ins->opcode = OP_MOVE; ins->sreg1 = last_ins->sreg1; } /* * Note: reg1 must be different from the basereg in the second load * OP_LOAD_MEMBASE offset(basereg), reg1 * OP_LOAD_MEMBASE offset(basereg), reg2 * --> * OP_LOAD_MEMBASE offset(basereg), reg1 * OP_MOVE reg1, reg2 */ } else if (last_ins && normalize_opcode (last_ins->opcode) == OP_LOAD_MEMBASE && ins->inst_basereg != last_ins->dreg && ins->inst_basereg == last_ins->inst_basereg && ins->inst_offset == last_ins->inst_offset) { if (ins->dreg == last_ins->dreg) { MONO_DELETE_INS (bb, ins); continue; } else { ins->opcode = OP_MOVE; ins->sreg1 = last_ins->dreg; } //g_assert_not_reached (); #if 0 /* * OP_STORE_MEMBASE_IMM imm, offset(basereg) * OP_LOAD_MEMBASE offset(basereg), reg * --> * OP_STORE_MEMBASE_IMM imm, offset(basereg) * OP_ICONST reg, imm */ } else if (last_ins && normalize_opcode (last_ins->opcode) == OP_STORE_MEMBASE_IMM && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++); ins->opcode = OP_ICONST; ins->inst_c0 = last_ins->inst_imm; g_assert_not_reached (); // check this rule #endif } break; case OP_LOADU1_MEMBASE: case OP_LOADI1_MEMBASE: if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1; ins->sreg1 = last_ins->sreg1; } break; case OP_LOADU2_MEMBASE: case OP_LOADI2_MEMBASE: if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2; ins->sreg1 = last_ins->sreg1; } break; #ifdef __mono_ppc64__ case OP_LOADU4_MEMBASE: case OP_LOADI4_MEMBASE: if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { ins->opcode = (ins->opcode == OP_LOADI4_MEMBASE) ? 
OP_ICONV_TO_I4 : OP_ICONV_TO_U4; ins->sreg1 = last_ins->sreg1; } break; #endif case OP_MOVE: ins->opcode = OP_MOVE; /* * OP_MOVE reg, reg */ if (ins->dreg == ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } /* * OP_MOVE sreg, dreg * OP_MOVE dreg, sreg */ if (last_ins && last_ins->opcode == OP_MOVE && ins->sreg1 == last_ins->dreg && ins->dreg == last_ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } break; } last_ins = ins; ins = ins->next; } bb->last_ins = last_ins; } void mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins) { switch (ins->opcode) { case OP_ICONV_TO_R_UN: { // This value is OK as-is for both big and little endian because of how it is stored static const guint64 adjust_val = 0x4330000000000000ULL; int msw_reg = mono_alloc_ireg (cfg); int adj_reg = mono_alloc_freg (cfg); int tmp_reg = mono_alloc_freg (cfg); int basereg = ppc_sp; int offset = -8; MONO_EMIT_NEW_ICONST (cfg, msw_reg, 0x43300000); if (!ppc_is_imm16 (offset + 4)) { basereg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset); } #if G_BYTE_ORDER == G_BIG_ENDIAN MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, msw_reg); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, ins->sreg1); #else // For little endian the words are reversed MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, msw_reg); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, ins->sreg1); #endif MONO_EMIT_NEW_LOAD_R8 (cfg, adj_reg, &adjust_val); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmp_reg, basereg, offset); MONO_EMIT_NEW_BIALU (cfg, OP_FSUB, ins->dreg, tmp_reg, adj_reg); ins->opcode = OP_NOP; break; } #ifndef __mono_ppc64__ case OP_ICONV_TO_R4: case OP_ICONV_TO_R8: { /* If we have a PPC_FEATURE_64 machine we can avoid this and use the fcfid instruction. Otherwise on an old 32-bit chip we have to do this the hard way.
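The trick below: build the bit pattern of the double 2^52 + 2^31 + (sreg1 ^ 0x80000000) in memory (high word 0x43300000, low word the biased integer), reload it as a double, and subtract the constant 0x4330000080000000 reinterpreted as a double; the difference is exactly the signed 32-bit value.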
*/ if (!(cpu_hw_caps & PPC_ISA_64)) { /* FIXME: change precision for CEE_CONV_R4 */ static const guint64 adjust_val = 0x4330000080000000ULL; int msw_reg = mono_alloc_ireg (cfg); int xored = mono_alloc_ireg (cfg); int adj_reg = mono_alloc_freg (cfg); int tmp_reg = mono_alloc_freg (cfg); int basereg = ppc_sp; int offset = -8; if (!ppc_is_imm16 (offset + 4)) { basereg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset); } MONO_EMIT_NEW_ICONST (cfg, msw_reg, 0x43300000); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, msw_reg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_XOR_IMM, xored, ins->sreg1, 0x80000000); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, xored); MONO_EMIT_NEW_LOAD_R8 (cfg, adj_reg, (gpointer)&adjust_val); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmp_reg, basereg, offset); MONO_EMIT_NEW_BIALU (cfg, OP_FSUB, ins->dreg, tmp_reg, adj_reg); if (ins->opcode == OP_ICONV_TO_R4) MONO_EMIT_NEW_UNALU (cfg, OP_FCONV_TO_R4, ins->dreg, ins->dreg); ins->opcode = OP_NOP; } break; } #endif case OP_CKFINITE: { int msw_reg = mono_alloc_ireg (cfg); int basereg = ppc_sp; int offset = -8; if (!ppc_is_imm16 (offset + 4)) { basereg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset); } MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, basereg, offset, ins->sreg1); #if G_BYTE_ORDER == G_BIG_ENDIAN MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, msw_reg, basereg, offset); #else MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, msw_reg, basereg, offset+4); #endif MONO_EMIT_NEW_UNALU (cfg, OP_PPC_CHECK_FINITE, -1, msw_reg); MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1); ins->opcode = OP_NOP; break; } #ifdef __mono_ppc64__ case OP_IADD_OVF: case OP_IADD_OVF_UN: case OP_ISUB_OVF: { int shifted1_reg = mono_alloc_ireg (cfg); int shifted2_reg = mono_alloc_ireg (cfg); int result_shifted_reg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, shifted1_reg, ins->sreg1, 32); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, shifted2_reg, ins->sreg2, 32); MONO_EMIT_NEW_BIALU (cfg, ins->opcode, result_shifted_reg, shifted1_reg, shifted2_reg); if (ins->opcode == OP_IADD_OVF_UN) MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, ins->dreg, result_shifted_reg, 32); else MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, ins->dreg, result_shifted_reg, 32); ins->opcode = OP_NOP; break; } #endif default: break; } } void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins) { switch (ins->opcode) { case OP_LADD_OVF: /* ADC sets the condition code */ MONO_EMIT_NEW_BIALU (cfg, OP_ADDCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2)); MONO_EMIT_NEW_BIALU (cfg, OP_ADD_OVF_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS (ins->sreg2)); NULLIFY_INS (ins); break; case OP_LADD_OVF_UN: /* ADC sets the condition code */ MONO_EMIT_NEW_BIALU (cfg, OP_ADDCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2)); MONO_EMIT_NEW_BIALU (cfg, OP_ADD_OVF_UN_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS (ins->sreg2)); NULLIFY_INS (ins); break; case OP_LSUB_OVF: /* SBB sets the condition code */ MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2)); MONO_EMIT_NEW_BIALU (cfg, OP_SUB_OVF_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS 
(ins->sreg2)); NULLIFY_INS (ins); break; case OP_LSUB_OVF_UN: /* SBB sets the condition code */ MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2)); MONO_EMIT_NEW_BIALU (cfg, OP_SUB_OVF_UN_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS (ins->sreg2)); NULLIFY_INS (ins); break; case OP_LNEG: /* From gcc generated code */ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PPC_SUBFIC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), 0); MONO_EMIT_NEW_UNALU (cfg, OP_PPC_SUBFZE, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1)); NULLIFY_INS (ins); break; default: break; } } /* * the branch_b0_table should maintain the order of these * opcodes. case CEE_BEQ: case CEE_BGE: case CEE_BGT: case CEE_BLE: case CEE_BLT: case CEE_BNE_UN: case CEE_BGE_UN: case CEE_BGT_UN: case CEE_BLE_UN: case CEE_BLT_UN: */ static const guchar branch_b0_table [] = { PPC_BR_TRUE, PPC_BR_FALSE, PPC_BR_TRUE, PPC_BR_FALSE, PPC_BR_TRUE, PPC_BR_FALSE, PPC_BR_FALSE, PPC_BR_TRUE, PPC_BR_FALSE, PPC_BR_TRUE }; static const guchar branch_b1_table [] = { PPC_BR_EQ, PPC_BR_LT, PPC_BR_GT, PPC_BR_GT, PPC_BR_LT, PPC_BR_EQ, PPC_BR_LT, PPC_BR_GT, PPC_BR_GT, PPC_BR_LT }; #define NEW_INS(cfg,dest,op) do { \ MONO_INST_NEW((cfg), (dest), (op)); \ mono_bblock_insert_after_ins (bb, last_ins, (dest)); \ } while (0) static int map_to_reg_reg_op (int op) { switch (op) { case OP_ADD_IMM: return OP_IADD; case OP_SUB_IMM: return OP_ISUB; case OP_AND_IMM: return OP_IAND; case OP_COMPARE_IMM: return OP_COMPARE; case OP_ICOMPARE_IMM: return OP_ICOMPARE; case OP_LCOMPARE_IMM: return OP_LCOMPARE; case OP_ADDCC_IMM: return OP_IADDCC; case OP_ADC_IMM: return OP_IADC; case OP_SUBCC_IMM: return OP_ISUBCC; case OP_SBB_IMM: return OP_ISBB; case OP_OR_IMM: return OP_IOR; case OP_XOR_IMM: return OP_IXOR; case OP_MUL_IMM: return OP_IMUL; case OP_LMUL_IMM: return OP_LMUL; case OP_LOAD_MEMBASE: return OP_LOAD_MEMINDEX; case OP_LOADI4_MEMBASE: return OP_LOADI4_MEMINDEX; case OP_LOADU4_MEMBASE: return OP_LOADU4_MEMINDEX; case OP_LOADI8_MEMBASE: return OP_LOADI8_MEMINDEX; case OP_LOADU1_MEMBASE: return OP_LOADU1_MEMINDEX; case OP_LOADI2_MEMBASE: return OP_LOADI2_MEMINDEX; case OP_LOADU2_MEMBASE: return OP_LOADU2_MEMINDEX; case OP_LOADI1_MEMBASE: return OP_LOADI1_MEMINDEX; case OP_LOADR4_MEMBASE: return OP_LOADR4_MEMINDEX; case OP_LOADR8_MEMBASE: return OP_LOADR8_MEMINDEX; case OP_STOREI1_MEMBASE_REG: return OP_STOREI1_MEMINDEX; case OP_STOREI2_MEMBASE_REG: return OP_STOREI2_MEMINDEX; case OP_STOREI4_MEMBASE_REG: return OP_STOREI4_MEMINDEX; case OP_STOREI8_MEMBASE_REG: return OP_STOREI8_MEMINDEX; case OP_STORE_MEMBASE_REG: return OP_STORE_MEMINDEX; case OP_STORER4_MEMBASE_REG: return OP_STORER4_MEMINDEX; case OP_STORER8_MEMBASE_REG: return OP_STORER8_MEMINDEX; case OP_STORE_MEMBASE_IMM: return OP_STORE_MEMBASE_REG; case OP_STOREI1_MEMBASE_IMM: return OP_STOREI1_MEMBASE_REG; case OP_STOREI2_MEMBASE_IMM: return OP_STOREI2_MEMBASE_REG; case OP_STOREI4_MEMBASE_IMM: return OP_STOREI4_MEMBASE_REG; case OP_STOREI8_MEMBASE_IMM: return OP_STOREI8_MEMBASE_REG; } if (mono_op_imm_to_op (op) == -1) g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (op)); return mono_op_imm_to_op (op); } //#define map_to_reg_reg_op(op) (cfg->new_ir? 
mono_op_imm_to_op (op): map_to_reg_reg_op (op)) #define compare_opcode_is_unsigned(opcode) \ (((opcode) >= CEE_BNE_UN && (opcode) <= CEE_BLT_UN) || \ ((opcode) >= OP_IBNE_UN && (opcode) <= OP_IBLT_UN) || \ ((opcode) >= OP_LBNE_UN && (opcode) <= OP_LBLT_UN) || \ ((opcode) >= OP_COND_EXC_NE_UN && (opcode) <= OP_COND_EXC_LT_UN) || \ ((opcode) >= OP_COND_EXC_INE_UN && (opcode) <= OP_COND_EXC_ILT_UN) || \ ((opcode) == OP_CLT_UN || (opcode) == OP_CGT_UN || \ (opcode) == OP_ICLT_UN || (opcode) == OP_ICGT_UN || \ (opcode) == OP_LCLT_UN || (opcode) == OP_LCGT_UN)) /* * Remove from the instruction list the instructions that can't be * represented with very simple instructions with no register * requirements. */ void mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *next, *temp, *last_ins = NULL; int imm; MONO_BB_FOR_EACH_INS (bb, ins) { loop_start: switch (ins->opcode) { case OP_IDIV_UN_IMM: case OP_IDIV_IMM: case OP_IREM_IMM: case OP_IREM_UN_IMM: CASE_PPC64 (OP_LREM_IMM) { NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; if (ins->opcode == OP_IDIV_IMM) ins->opcode = OP_IDIV; else if (ins->opcode == OP_IREM_IMM) ins->opcode = OP_IREM; else if (ins->opcode == OP_IDIV_UN_IMM) ins->opcode = OP_IDIV_UN; else if (ins->opcode == OP_IREM_UN_IMM) ins->opcode = OP_IREM_UN; else if (ins->opcode == OP_LREM_IMM) ins->opcode = OP_LREM; last_ins = temp; /* handle rem separately */ goto loop_start; } case OP_IREM: case OP_IREM_UN: CASE_PPC64 (OP_LREM) CASE_PPC64 (OP_LREM_UN) { MonoInst *mul; /* we change a rem dest, src1, src2 to * div temp1, src1, src2 * mul temp2, temp1, src2 * sub dest, src1, temp2 */ if (ins->opcode == OP_IREM || ins->opcode == OP_IREM_UN) { NEW_INS (cfg, mul, OP_IMUL); NEW_INS (cfg, temp, ins->opcode == OP_IREM? OP_IDIV: OP_IDIV_UN); ins->opcode = OP_ISUB; } else { NEW_INS (cfg, mul, OP_LMUL); NEW_INS (cfg, temp, ins->opcode == OP_LREM? 
OP_LDIV: OP_LDIV_UN); ins->opcode = OP_LSUB; } temp->sreg1 = ins->sreg1; temp->sreg2 = ins->sreg2; temp->dreg = mono_alloc_ireg (cfg); mul->sreg1 = temp->dreg; mul->sreg2 = ins->sreg2; mul->dreg = mono_alloc_ireg (cfg); ins->sreg2 = mul->dreg; break; } case OP_IADD_IMM: CASE_PPC64 (OP_LADD_IMM) case OP_ADD_IMM: case OP_ADDCC_IMM: if (!ppc_is_imm16 (ins->inst_imm)) { NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; case OP_ISUB_IMM: CASE_PPC64 (OP_LSUB_IMM) case OP_SUB_IMM: if (!ppc_is_imm16 (-ins->inst_imm)) { NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; case OP_IAND_IMM: case OP_IOR_IMM: case OP_IXOR_IMM: case OP_LAND_IMM: case OP_LOR_IMM: case OP_LXOR_IMM: case OP_AND_IMM: case OP_OR_IMM: case OP_XOR_IMM: { gboolean is_imm = ((ins->inst_imm & 0xffff0000) && (ins->inst_imm & 0xffff)); #ifdef __mono_ppc64__ if (ins->inst_imm & 0xffffffff00000000ULL) is_imm = TRUE; #endif if (is_imm) { NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; } case OP_ISBB_IMM: case OP_IADC_IMM: case OP_SBB_IMM: case OP_SUBCC_IMM: case OP_ADC_IMM: NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_COMPARE_IMM: case OP_ICOMPARE_IMM: CASE_PPC64 (OP_LCOMPARE_IMM) next = ins->next; /* Branch opts can eliminate the branch */ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) { ins->opcode = OP_NOP; break; } g_assert(next); if (compare_opcode_is_unsigned (next->opcode)) { if (!ppc_is_uimm16 (ins->inst_imm)) { NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } } else { if (!ppc_is_imm16 (ins->inst_imm)) { NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } } break; case OP_IMUL_IMM: case OP_MUL_IMM: CASE_PPC64 (OP_LMUL_IMM) if (ins->inst_imm == 1) { ins->opcode = OP_MOVE; break; } if (ins->inst_imm == 0) { ins->opcode = OP_ICONST; ins->inst_c0 = 0; break; } imm = (ins->inst_imm > 0) ? 
mono_is_power_of_two (ins->inst_imm) : -1; if (imm > 0) { ins->opcode = OP_SHL_IMM; ins->inst_imm = imm; break; } if (!ppc_is_imm16 (ins->inst_imm)) { NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; case OP_LOCALLOC_IMM: NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg1 = temp->dreg; ins->opcode = OP_LOCALLOC; break; case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: CASE_PPC64 (OP_LOADI8_MEMBASE) case OP_LOADU4_MEMBASE: case OP_LOADI2_MEMBASE: case OP_LOADU2_MEMBASE: case OP_LOADI1_MEMBASE: case OP_LOADU1_MEMBASE: case OP_LOADR4_MEMBASE: case OP_LOADR8_MEMBASE: case OP_STORE_MEMBASE_REG: CASE_PPC64 (OP_STOREI8_MEMBASE_REG) case OP_STOREI4_MEMBASE_REG: case OP_STOREI2_MEMBASE_REG: case OP_STOREI1_MEMBASE_REG: case OP_STORER4_MEMBASE_REG: case OP_STORER8_MEMBASE_REG: /* we can do two things: load the immed in a register * and use an indexed load, or see if the immed can be * represented as an ad_imm + a load with a smaller offset * that fits. We just do the first for now, optimize later. */ if (ppc_is_imm16 (ins->inst_offset)) break; NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_STORE_MEMBASE_IMM: case OP_STOREI1_MEMBASE_IMM: case OP_STOREI2_MEMBASE_IMM: case OP_STOREI4_MEMBASE_IMM: CASE_PPC64 (OP_STOREI8_MEMBASE_IMM) NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg1 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); last_ins = temp; goto loop_start; /* make it handle the possibly big ins->inst_offset */ case OP_R8CONST: case OP_R4CONST: if (cfg->compile_aot) { /* Keep these in the aot case */ break; } NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = (gulong)ins->inst_p0; temp->dreg = mono_alloc_ireg (cfg); ins->inst_basereg = temp->dreg; ins->inst_offset = 0; ins->opcode = ins->opcode == OP_R4CONST? OP_LOADR4_MEMBASE: OP_LOADR8_MEMBASE; last_ins = temp; /* make it handle the possibly big ins->inst_offset * later optimize to use lis + load_membase */ goto loop_start; } last_ins = ins; } bb->last_ins = last_ins; bb->max_vreg = cfg->next_vreg; } static guchar* emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed) { long offset = cfg->arch.fp_conv_var_offset; long sub_offset; /* sreg is a float, dreg is an integer reg. 
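fctiwz/fctidz deposit the truncated integer in a float register only, so the value has to round-trip through the fp_conv_var_offset stack slot before it can be loaded into dreg.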
ppc_f0 is used as a scratch */ #ifdef __mono_ppc64__ if (size == 8) { ppc_fctidz (code, ppc_f0, sreg); sub_offset = 0; } else #endif { ppc_fctiwz (code, ppc_f0, sreg); sub_offset = 4; } if (ppc_is_imm16 (offset + sub_offset)) { ppc_stfd (code, ppc_f0, offset, cfg->frame_reg); if (size == 8) ppc_ldr (code, dreg, offset + sub_offset, cfg->frame_reg); else ppc_lwz (code, dreg, offset + sub_offset, cfg->frame_reg); } else { ppc_load (code, dreg, offset); ppc_add (code, dreg, dreg, cfg->frame_reg); ppc_stfd (code, ppc_f0, 0, dreg); if (size == 8) ppc_ldr (code, dreg, sub_offset, dreg); else ppc_lwz (code, dreg, sub_offset, dreg); } if (!is_signed) { if (size == 1) ppc_andid (code, dreg, dreg, 0xff); else if (size == 2) ppc_andid (code, dreg, dreg, 0xffff); #ifdef __mono_ppc64__ else if (size == 4) ppc_clrldi (code, dreg, dreg, 32); #endif } else { if (size == 1) ppc_extsb (code, dreg, dreg); else if (size == 2) ppc_extsh (code, dreg, dreg); #ifdef __mono_ppc64__ else if (size == 4) ppc_extsw (code, dreg, dreg); #endif } return code; } static void emit_thunk (guint8 *code, gconstpointer target) { guint8 *p = code; /* 2 instructions on 32bit, 5 instructions on 64bit */ ppc_load_sequence (code, ppc_r0, target); ppc_mtctr (code, ppc_r0); ppc_bcctr (code, PPC_BR_ALWAYS, 0); mono_arch_flush_icache (p, code - p); } static void handle_thunk (MonoCompile *cfg, guchar *code, const guchar *target) { MonoJitInfo *ji = NULL; MonoThunkJitInfo *info; guint8 *thunks, *p; int thunks_size; guint8 *orig_target; guint8 *target_thunk; if (cfg) { /* * This can be called multiple times during JITting, * save the current position in cfg->arch to avoid * doing a O(n^2) search. */ if (!cfg->arch.thunks) { cfg->arch.thunks = cfg->thunks; cfg->arch.thunks_size = cfg->thunk_area; } thunks = cfg->arch.thunks; thunks_size = cfg->arch.thunks_size; if (!thunks_size) { g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE)); g_assert_not_reached (); } g_assert (*(guint32*)thunks == 0); emit_thunk (thunks, target); ppc_patch (code, thunks); cfg->arch.thunks += THUNK_SIZE; cfg->arch.thunks_size -= THUNK_SIZE; } else { ji = mini_jit_info_table_find (code); g_assert (ji); info = mono_jit_info_get_thunk_info (ji); g_assert (info); thunks = (guint8 *) ji->code_start + info->thunks_offset; thunks_size = info->thunks_size; orig_target = mono_arch_get_call_target (code + 4); mono_mini_arch_lock (); target_thunk = NULL; if (orig_target >= thunks && orig_target < thunks + thunks_size) { /* The call already points to a thunk, because of trampolines etc. */ target_thunk = orig_target; } else { for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) { if (((guint32 *) p) [0] == 0) { /* Free entry */ target_thunk = p; break; } else { /* ppc64 requires 5 instructions, 32bit two instructions */ #ifdef __mono_ppc64__ const int const_load_size = 5; #else const int const_load_size = 2; #endif guint32 load [const_load_size]; guchar *templ = (guchar *) load; ppc_load_sequence (templ, ppc_r0, target); if (!memcmp (p, load, const_load_size)) { /* Thunk already points to target */ target_thunk = p; break; } } } } // g_print ("THUNK: %p %p %p\n", code, target, target_thunk); if (!target_thunk) { mono_mini_arch_unlock (); g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ?
mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE)); g_assert_not_reached (); } emit_thunk (target_thunk, target); ppc_patch (code, target_thunk); mono_mini_arch_unlock (); } } static void patch_ins (guint8 *code, guint32 ins) { *(guint32*)code = ins; mono_arch_flush_icache (code, 4); } static void ppc_patch_full (MonoCompile *cfg, guchar *code, const guchar *target, gboolean is_fd) { guint32 ins = *(guint32*)code; guint32 prim = ins >> 26; guint32 ovf; //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target); if (prim == 18) { // prefer relative branches, they are more position independent (e.g. for AOT compilation). gint diff = target - code; g_assert (!is_fd); if (diff >= 0){ if (diff <= 33554431){ ins = (18 << 26) | (diff) | (ins & 1); patch_ins (code, ins); return; } } else { /* diff between 0 and -33554432 */ if (diff >= -33554432){ ins = (18 << 26) | (diff & ~0xfc000000) | (ins & 1); patch_ins (code, ins); return; } } if ((glong)target >= 0){ if ((glong)target <= 33554431){ ins = (18 << 26) | ((gulong) target) | (ins & 1) | 2; patch_ins (code, ins); return; } } else { if ((glong)target >= -33554432){ ins = (18 << 26) | (((gulong)target) & ~0xfc000000) | (ins & 1) | 2; patch_ins (code, ins); return; } } handle_thunk (cfg, code, target); return; g_assert_not_reached (); } if (prim == 16) { g_assert (!is_fd); // absolute address if (ins & 2) { guint32 li = (gulong)target; ins = (ins & 0xffff0000) | (ins & 3); ovf = li & 0xffff0000; if (ovf != 0 && ovf != 0xffff0000) g_assert_not_reached (); li &= 0xffff; ins |= li; // FIXME: assert the top bits of li are 0 } else { gint diff = target - code; ins = (ins & 0xffff0000) | (ins & 3); ovf = diff & 0xffff0000; if (ovf != 0 && ovf != 0xffff0000) g_assert_not_reached (); diff &= 0xffff; ins |= diff; } patch_ins (code, ins); return; } if (prim == 15 || ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) { #ifdef __mono_ppc64__ guint32 *seq = (guint32*)code; guint32 *branch_ins; /* the trampoline code will try to patch the blrl, blr, bcctr */ if (ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) { branch_ins = seq; if (ppc_is_load_op (seq [-3]) || ppc_opcode (seq [-3]) == 31) /* ld || lwz || mr */ code -= 32; else code -= 24; } else { if (ppc_is_load_op (seq [5]) #ifdef PPC_USES_FUNCTION_DESCRIPTOR /* With function descs we need to do more careful matches. 
*/ || ppc_opcode (seq [5]) == 31 /* ld || lwz || mr */ #endif ) branch_ins = seq + 8; else branch_ins = seq + 6; } seq = (guint32*)code; /* this is the lis/ori/sldi/oris/ori/(ld/ld|mr/nop)/mtlr/blrl sequence */ g_assert (mono_ppc_is_direct_call_sequence (branch_ins)); if (ppc_is_load_op (seq [5])) { g_assert (ppc_is_load_op (seq [6])); if (!is_fd) { guint8 *buf = (guint8*)&seq [5]; ppc_mr (buf, PPC_CALL_REG, ppc_r12); ppc_nop (buf); } } else { if (is_fd) target = (const guchar*)mono_get_addr_from_ftnptr ((gpointer)target); } /* FIXME: make this thread safe */ #ifdef PPC_USES_FUNCTION_DESCRIPTOR /* FIXME: we're assuming we're using r12 here */ ppc_load_ptr_sequence (code, ppc_r12, target); #else ppc_load_ptr_sequence (code, PPC_CALL_REG, target); #endif mono_arch_flush_icache ((guint8*)seq, 28); #else guint32 *seq; /* the trampoline code will try to patch the blrl, blr, bcctr */ if (ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) { code -= 12; } /* this is the lis/ori/mtlr/blrl sequence */ seq = (guint32*)code; g_assert ((seq [0] >> 26) == 15); g_assert ((seq [1] >> 26) == 24); g_assert ((seq [2] >> 26) == 31); g_assert (seq [3] == 0x4e800021 || seq [3] == 0x4e800020 || seq [3] == 0x4e800420); /* FIXME: make this thread safe */ ppc_lis (code, PPC_CALL_REG, (guint32)(target) >> 16); ppc_ori (code, PPC_CALL_REG, PPC_CALL_REG, (guint32)(target) & 0xffff); mono_arch_flush_icache (code - 8, 8); #endif } else { g_assert_not_reached (); } // g_print ("patched with 0x%08x\n", ins); } void ppc_patch (guchar *code, const guchar *target) { ppc_patch_full (NULL, code, target, FALSE); } void mono_ppc_patch (guchar *code, const guchar *target) { ppc_patch (code, target); } static guint8* emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code) { switch (ins->opcode) { case OP_FCALL: case OP_FCALL_REG: case OP_FCALL_MEMBASE: if (ins->dreg != ppc_f1) ppc_fmr (code, ins->dreg, ppc_f1); break; } return code; } static guint8* emit_reserve_param_area (MonoCompile *cfg, guint8 *code) { long size = cfg->param_area; size += MONO_ARCH_FRAME_ALIGNMENT - 1; size &= -MONO_ARCH_FRAME_ALIGNMENT; if (!size) return code; ppc_ldptr (code, ppc_r0, 0, ppc_sp); if (ppc_is_imm16 (-size)) { ppc_stptr_update (code, ppc_r0, -size, ppc_sp); } else { ppc_load (code, ppc_r12, -size); ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12); } return code; } static guint8* emit_unreserve_param_area (MonoCompile *cfg, guint8 *code) { long size = cfg->param_area; size += MONO_ARCH_FRAME_ALIGNMENT - 1; size &= -MONO_ARCH_FRAME_ALIGNMENT; if (!size) return code; ppc_ldptr (code, ppc_r0, 0, ppc_sp); if (ppc_is_imm16 (size)) { ppc_stptr_update (code, ppc_r0, size, ppc_sp); } else { ppc_load (code, ppc_r12, size); ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12); } return code; } #define MASK_SHIFT_IMM(i) ((i) & MONO_PPC_32_64_CASE (0x1f, 0x3f)) #ifndef DISABLE_JIT void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *next; MonoCallInst *call; guint8 *code = cfg->native_code + cfg->code_len; MonoInst *last_ins = NULL; int max_len, cpos; int L; /* we don't align basic blocks of loops on ppc */ if (cfg->verbose_level > 2) g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset); cpos = bb->max_offset; MONO_BB_FOR_EACH_INS (bb, ins) { const guint offset = code - cfg->native_code; set_code_cursor (cfg, code); max_len = ins_get_size (ins->opcode); code = realloc_code (cfg, max_len); // if (ins->cil_code) // g_print ("cil code\n"); 
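/* ins_get_size () returns a conservative worst-case encoding length per opcode, and realloc_code () grows the buffer up front, so none of the emitters below can overrun it. */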
mono_debug_record_line_number (cfg, ins, offset); switch (normalize_opcode (ins->opcode)) { case OP_RELAXED_NOP: case OP_NOP: case OP_DUMMY_USE: case OP_DUMMY_ICONST: case OP_DUMMY_I8CONST: case OP_DUMMY_R8CONST: case OP_DUMMY_R4CONST: case OP_NOT_REACHED: case OP_NOT_NULL: break; case OP_IL_SEQ_POINT: mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); break; case OP_SEQ_POINT: { int i; if (cfg->compile_aot) NOT_IMPLEMENTED; /* * Read from the single stepping trigger page. This will cause a * SIGSEGV when single stepping is enabled. * We do this _before_ the breakpoint, so single stepping after * a breakpoint is hit will step to the next IL offset. */ if (ins->flags & MONO_INST_SINGLE_STEP_LOC) { ppc_load (code, ppc_r12, (gsize)ss_trigger_page); ppc_ldptr (code, ppc_r12, 0, ppc_r12); } mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); /* * A placeholder for a possible breakpoint inserted by * mono_arch_set_breakpoint (). */ for (i = 0; i < BREAKPOINT_SIZE / 4; ++i) ppc_nop (code); break; } case OP_BIGMUL: ppc_mullw (code, ppc_r0, ins->sreg1, ins->sreg2); ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2); ppc_mr (code, ppc_r4, ppc_r0); break; case OP_BIGMUL_UN: ppc_mullw (code, ppc_r0, ins->sreg1, ins->sreg2); ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2); ppc_mr (code, ppc_r4, ppc_r0); break; case OP_MEMORY_BARRIER: ppc_sync (code); break; case OP_STOREI1_MEMBASE_REG: if (ppc_is_imm16 (ins->inst_offset)) { ppc_stb (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); } else { if (ppc_is_imm32 (ins->inst_offset)) { ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset)); ppc_stb (code, ins->sreg1, ins->inst_offset, ppc_r11); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_stbx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0); } } break; case OP_STOREI2_MEMBASE_REG: if (ppc_is_imm16 (ins->inst_offset)) { ppc_sth (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); } else { if (ppc_is_imm32 (ins->inst_offset)) { ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset)); ppc_sth (code, ins->sreg1, ins->inst_offset, ppc_r11); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_sthx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0); } } break; case OP_STORE_MEMBASE_REG: if (ppc_is_imm16 (ins->inst_offset)) { ppc_stptr (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); } else { if (ppc_is_imm32 (ins->inst_offset)) { ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset)); ppc_stptr (code, ins->sreg1, ins->inst_offset, ppc_r11); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_stptr_indexed (code, ins->sreg1, ins->inst_destbasereg, ppc_r0); } } break; #ifdef MONO_ARCH_ILP32 case OP_STOREI8_MEMBASE_REG: if (ppc_is_imm16 (ins->inst_offset)) { ppc_str (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_str_indexed (code, ins->sreg1, ins->inst_destbasereg, ppc_r0); } break; #endif case OP_STOREI1_MEMINDEX: ppc_stbx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case OP_STOREI2_MEMINDEX: ppc_sthx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case OP_STORE_MEMINDEX: ppc_stptr_indexed (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case OP_LOADU4_MEM: g_assert_not_reached (); break; case OP_LOAD_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_ldptr (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) { 
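/* medium (32-bit) offsets: ppc_ha () yields the high half adjusted for the sign of the low 16 bits, so the addis plus the d-form displacement add up to the full offset */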
ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset)); ppc_ldptr (code, ins->dreg, ins->inst_offset, ins->dreg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0); } } break; case OP_LOADI4_MEMBASE: #ifdef __mono_ppc64__ if (ppc_is_imm16 (ins->inst_offset)) { ppc_lwa (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) { ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset)); ppc_lwa (code, ins->dreg, ins->inst_offset, ins->dreg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_lwax (code, ins->dreg, ins->inst_basereg, ppc_r0); } } break; #endif case OP_LOADU4_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_lwz (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) { ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset)); ppc_lwz (code, ins->dreg, ins->inst_offset, ins->dreg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_lwzx (code, ins->dreg, ins->inst_basereg, ppc_r0); } } break; case OP_LOADI1_MEMBASE: case OP_LOADU1_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_lbz (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) { ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset)); ppc_lbz (code, ins->dreg, ins->inst_offset, ins->dreg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_lbzx (code, ins->dreg, ins->inst_basereg, ppc_r0); } } if (ins->opcode == OP_LOADI1_MEMBASE) ppc_extsb (code, ins->dreg, ins->dreg); break; case OP_LOADU2_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_lhz (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) { ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset)); ppc_lhz (code, ins->dreg, ins->inst_offset, ins->dreg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_lhzx (code, ins->dreg, ins->inst_basereg, ppc_r0); } } break; case OP_LOADI2_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_lha (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) { ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset)); ppc_lha (code, ins->dreg, ins->inst_offset, ins->dreg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_lhax (code, ins->dreg, ins->inst_basereg, ppc_r0); } } break; #ifdef MONO_ARCH_ILP32 case OP_LOADI8_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_ldr (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_ldr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0); } break; #endif case OP_LOAD_MEMINDEX: ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADI4_MEMINDEX: #ifdef __mono_ppc64__ ppc_lwax (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; #endif case OP_LOADU4_MEMINDEX: ppc_lwzx (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADU2_MEMINDEX: ppc_lhzx (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADI2_MEMINDEX: ppc_lhax (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADU1_MEMINDEX: ppc_lbzx (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADI1_MEMINDEX: ppc_lbzx (code, ins->dreg, ins->inst_basereg, ins->sreg2); 
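/* the ISA has no sign-extending byte load, so zero-extend with lbzx and sign-extend explicitly */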
ppc_extsb (code, ins->dreg, ins->dreg); break; case OP_ICONV_TO_I1: CASE_PPC64 (OP_LCONV_TO_I1) ppc_extsb (code, ins->dreg, ins->sreg1); break; case OP_ICONV_TO_I2: CASE_PPC64 (OP_LCONV_TO_I2) ppc_extsh (code, ins->dreg, ins->sreg1); break; case OP_ICONV_TO_U1: CASE_PPC64 (OP_LCONV_TO_U1) ppc_clrlwi (code, ins->dreg, ins->sreg1, 24); break; case OP_ICONV_TO_U2: CASE_PPC64 (OP_LCONV_TO_U2) ppc_clrlwi (code, ins->dreg, ins->sreg1, 16); break; case OP_COMPARE: case OP_ICOMPARE: CASE_PPC64 (OP_LCOMPARE) L = (sizeof (target_mgreg_t) == 4 || ins->opcode == OP_ICOMPARE) ? 0 : 1; next = ins->next; if (next && compare_opcode_is_unsigned (next->opcode)) ppc_cmpl (code, 0, L, ins->sreg1, ins->sreg2); else ppc_cmp (code, 0, L, ins->sreg1, ins->sreg2); break; case OP_COMPARE_IMM: case OP_ICOMPARE_IMM: CASE_PPC64 (OP_LCOMPARE_IMM) L = (sizeof (target_mgreg_t) == 4 || ins->opcode == OP_ICOMPARE_IMM) ? 0 : 1; next = ins->next; if (next && compare_opcode_is_unsigned (next->opcode)) { if (ppc_is_uimm16 (ins->inst_imm)) { ppc_cmpli (code, 0, L, ins->sreg1, (ins->inst_imm & 0xffff)); } else { g_assert_not_reached (); } } else { if (ppc_is_imm16 (ins->inst_imm)) { ppc_cmpi (code, 0, L, ins->sreg1, (ins->inst_imm & 0xffff)); } else { g_assert_not_reached (); } } break; case OP_BREAK: /* * gdb does not like encountering a trap in the debugged code. So * instead of emitting a trap, we emit a call to a C function and place a * breakpoint there. */ //ppc_break (code); ppc_mr (code, ppc_r3, ins->sreg1); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break)); if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) { ppc_load_func (code, PPC_CALL_REG, 0); ppc_mtlr (code, PPC_CALL_REG); ppc_blrl (code); } else { ppc_bl (code, 0); } break; case OP_ADDCC: case OP_IADDCC: ppc_addco (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IADD: CASE_PPC64 (OP_LADD) ppc_add (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_ADC: case OP_IADC: ppc_adde (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_ADDCC_IMM: if (ppc_is_imm16 (ins->inst_imm)) { ppc_addic (code, ins->dreg, ins->sreg1, ins->inst_imm); } else { g_assert_not_reached (); } break; case OP_ADD_IMM: case OP_IADD_IMM: CASE_PPC64 (OP_LADD_IMM) if (ppc_is_imm16 (ins->inst_imm)) { ppc_addi (code, ins->dreg, ins->sreg1, ins->inst_imm); } else { g_assert_not_reached (); } break; case OP_IADD_OVF: /* check XER [0-3] (SO, OV, CA): we can't use mcrxr */ ppc_addo (code, ins->dreg, ins->sreg1, ins->sreg2); ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<14)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_IADD_OVF_UN: /* check XER [0-3] (SO, OV, CA): we can't use mcrxr */ ppc_addco (code, ins->dreg, ins->sreg1, ins->sreg2); ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<13)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_ISUB_OVF: CASE_PPC64 (OP_LSUB_OVF) /* check XER [0-3] (SO, OV, CA): we can't use mcrxr */ ppc_subfo (code, ins->dreg, ins->sreg2, ins->sreg1); ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<14)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_ISUB_OVF_UN: CASE_PPC64 (OP_LSUB_OVF_UN) /* check XER [0-3] (SO, OV, CA): we can't use mcrxr */ ppc_subfc (code, ins->dreg, ins->sreg2, ins->sreg1); ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code,
ppc_r0, ppc_r0, (1<<13)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException"); break; case OP_ADD_OVF_CARRY: /* check XER [0-3] (SO, OV, CA): we can't use mcrxr */ ppc_addeo (code, ins->dreg, ins->sreg1, ins->sreg2); ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<14)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_ADD_OVF_UN_CARRY: /* check XER [0-3] (SO, OV, CA): we can't use mcrxr */ ppc_addeo (code, ins->dreg, ins->sreg1, ins->sreg2); ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<13)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_SUB_OVF_CARRY: /* check XER [0-3] (SO, OV, CA): we can't use mcrxr */ ppc_subfeo (code, ins->dreg, ins->sreg2, ins->sreg1); ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<14)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_SUB_OVF_UN_CARRY: /* check XER [0-3] (SO, OV, CA): we can't use mcrxr */ ppc_subfeo (code, ins->dreg, ins->sreg2, ins->sreg1); ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<13)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException"); break; case OP_SUBCC: case OP_ISUBCC: ppc_subfco (code, ins->dreg, ins->sreg2, ins->sreg1); break; case OP_ISUB: CASE_PPC64 (OP_LSUB) ppc_subf (code, ins->dreg, ins->sreg2, ins->sreg1); break; case OP_SBB: case OP_ISBB: ppc_subfe (code, ins->dreg, ins->sreg2, ins->sreg1); break; case OP_SUB_IMM: case OP_ISUB_IMM: CASE_PPC64 (OP_LSUB_IMM) // we add the negated value if (ppc_is_imm16 (-ins->inst_imm)) ppc_addi (code, ins->dreg, ins->sreg1, -ins->inst_imm); else { g_assert_not_reached (); } break; case OP_PPC_SUBFIC: g_assert (ppc_is_imm16 (ins->inst_imm)); ppc_subfic (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_PPC_SUBFZE: ppc_subfze (code, ins->dreg, ins->sreg1); break; case OP_IAND: CASE_PPC64 (OP_LAND) /* FIXME: the ppc macros as inconsistent here: put dest as the first arg! 
*/ ppc_and (code, ins->sreg1, ins->dreg, ins->sreg2); break; case OP_AND_IMM: case OP_IAND_IMM: CASE_PPC64 (OP_LAND_IMM) if (!(ins->inst_imm & 0xffff0000)) { ppc_andid (code, ins->sreg1, ins->dreg, ins->inst_imm); } else if (!(ins->inst_imm & 0xffff)) { ppc_andisd (code, ins->sreg1, ins->dreg, ((guint32)ins->inst_imm >> 16)); } else { g_assert_not_reached (); } break; case OP_IDIV: CASE_PPC64 (OP_LDIV) { guint8 *divisor_is_m1; /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits] */ ppc_compare_reg_imm (code, 0, ins->sreg2, -1); divisor_is_m1 = code; ppc_bc (code, PPC_BR_FALSE | PPC_BR_LIKELY, PPC_BR_EQ, 0); ppc_lis (code, ppc_r0, 0x8000); #ifdef __mono_ppc64__ if (ins->opcode == OP_LDIV) ppc_sldi (code, ppc_r0, ppc_r0, 32); #endif ppc_compare (code, 0, ins->sreg1, ppc_r0); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException"); ppc_patch (divisor_is_m1, code); /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits] */ if (ins->opcode == OP_IDIV) ppc_divwod (code, ins->dreg, ins->sreg1, ins->sreg2); #ifdef __mono_ppc64__ else ppc_divdod (code, ins->dreg, ins->sreg1, ins->sreg2); #endif ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<14)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException"); break; } case OP_IDIV_UN: CASE_PPC64 (OP_LDIV_UN) if (ins->opcode == OP_IDIV_UN) ppc_divwuod (code, ins->dreg, ins->sreg1, ins->sreg2); #ifdef __mono_ppc64__ else ppc_divduod (code, ins->dreg, ins->sreg1, ins->sreg2); #endif ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<14)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException"); break; case OP_DIV_IMM: case OP_IREM: case OP_IREM_UN: case OP_REM_IMM: g_assert_not_reached (); case OP_IOR: CASE_PPC64 (OP_LOR) ppc_or (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_OR_IMM: case OP_IOR_IMM: CASE_PPC64 (OP_LOR_IMM) if (!(ins->inst_imm & 0xffff0000)) { ppc_ori (code, ins->sreg1, ins->dreg, ins->inst_imm); } else if (!(ins->inst_imm & 0xffff)) { ppc_oris (code, ins->dreg, ins->sreg1, ((guint32)(ins->inst_imm) >> 16)); } else { g_assert_not_reached (); } break; case OP_IXOR: CASE_PPC64 (OP_LXOR) ppc_xor (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IXOR_IMM: case OP_XOR_IMM: CASE_PPC64 (OP_LXOR_IMM) if (!(ins->inst_imm & 0xffff0000)) { ppc_xori (code, ins->sreg1, ins->dreg, ins->inst_imm); } else if (!(ins->inst_imm & 0xffff)) { ppc_xoris (code, ins->sreg1, ins->dreg, ((guint32)(ins->inst_imm) >> 16)); } else { g_assert_not_reached (); } break; case OP_ISHL: CASE_PPC64 (OP_LSHL) ppc_shift_left (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SHL_IMM: case OP_ISHL_IMM: CASE_PPC64 (OP_LSHL_IMM) ppc_shift_left_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm)); break; case OP_ISHR: ppc_sraw (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SHR_IMM: ppc_shift_right_arith_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm)); break; case OP_SHR_UN_IMM: if (MASK_SHIFT_IMM (ins->inst_imm)) ppc_shift_right_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm)); else ppc_mr (code, ins->dreg, ins->sreg1); break; case OP_ISHR_UN: ppc_srw (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_INOT: CASE_PPC64 (OP_LNOT) ppc_not (code, ins->dreg, ins->sreg1); break; case OP_INEG: CASE_PPC64 (OP_LNEG) ppc_neg (code, ins->dreg, ins->sreg1); break; case OP_IMUL: CASE_PPC64 (OP_LMUL) ppc_multiply (code, ins->dreg, ins->sreg1, ins->sreg2); break; case 
OP_IMUL_IMM: case OP_MUL_IMM: CASE_PPC64 (OP_LMUL_IMM) if (ppc_is_imm16 (ins->inst_imm)) { ppc_mulli (code, ins->dreg, ins->sreg1, ins->inst_imm); } else { g_assert_not_reached (); } break; case OP_IMUL_OVF: CASE_PPC64 (OP_LMUL_OVF) /* we cannot use mcrxr, since it's not implemented on some processors * XER format: SO, OV, CA, reserved [21 bits], count [8 bits] */ if (ins->opcode == OP_IMUL_OVF) ppc_mullwo (code, ins->dreg, ins->sreg1, ins->sreg2); #ifdef __mono_ppc64__ else ppc_mulldo (code, ins->dreg, ins->sreg1, ins->sreg2); #endif ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<14)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_IMUL_OVF_UN: CASE_PPC64 (OP_LMUL_OVF_UN) /* we first multiply to get the high word and compare to 0 * to set the flags, then the result is discarded and then * we multiply to get the lower * bits result */ if (ins->opcode == OP_IMUL_OVF_UN) ppc_mulhwu (code, ppc_r0, ins->sreg1, ins->sreg2); #ifdef __mono_ppc64__ else ppc_mulhdu (code, ppc_r0, ins->sreg1, ins->sreg2); #endif ppc_cmpi (code, 0, 0, ppc_r0, 0); EMIT_COND_SYSTEM_EXCEPTION (CEE_BNE_UN - CEE_BEQ, "OverflowException"); ppc_multiply (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_ICONST: ppc_load (code, ins->dreg, ins->inst_c0); break; case OP_I8CONST: { ppc_load (code, ins->dreg, ins->inst_l); break; } case OP_LOAD_GOTADDR: /* The PLT implementation depends on this */ g_assert (ins->dreg == ppc_r30); code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL); break; case OP_GOT_ENTRY: // FIXME: Fix max instruction length /* XXX: This is hairy; we're casting a pointer from a union to an enum... */ mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(intptr_t)ins->inst_right->inst_i1, ins->inst_right->inst_p0); /* arch_emit_got_access () patches this */ ppc_load32 (code, ppc_r0, 0); ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0); break; case OP_AOTCONST: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(intptr_t)ins->inst_i1, ins->inst_p0); ppc_load_sequence (code, ins->dreg, 0); break; CASE_PPC32 (OP_ICONV_TO_I4) CASE_PPC32 (OP_ICONV_TO_U4) case OP_MOVE: if (ins->dreg != ins->sreg1) ppc_mr (code, ins->dreg, ins->sreg1); break; case OP_SETLRET: { int saved = ins->sreg1; if (ins->sreg1 == ppc_r3) { ppc_mr (code, ppc_r0, ins->sreg1); saved = ppc_r0; } if (ins->sreg2 != ppc_r3) ppc_mr (code, ppc_r3, ins->sreg2); if (saved != ppc_r4) ppc_mr (code, ppc_r4, saved); break; } case OP_FMOVE: if (ins->dreg != ins->sreg1) ppc_fmr (code, ins->dreg, ins->sreg1); break; case OP_MOVE_F_TO_I4: ppc_stfs (code, ins->sreg1, -4, ppc_r1); ppc_ldptr (code, ins->dreg, -4, ppc_r1); break; case OP_MOVE_I4_TO_F: ppc_stw (code, ins->sreg1, -4, ppc_r1); ppc_lfs (code, ins->dreg, -4, ppc_r1); break; #ifdef __mono_ppc64__ case OP_MOVE_F_TO_I8: ppc_stfd (code, ins->sreg1, -8, ppc_r1); ppc_ldptr (code, ins->dreg, -8, ppc_r1); break; case OP_MOVE_I8_TO_F: ppc_stptr (code, ins->sreg1, -8, ppc_r1); ppc_lfd (code, ins->dreg, -8, ppc_r1); break; #endif case OP_FCONV_TO_R4: ppc_frsp (code, ins->dreg, ins->sreg1); break; case OP_TAILCALL_PARAMETER: // This opcode helps compute sizes, i.e. // of the subsequent OP_TAILCALL, but contributes no code. g_assert (ins->next); break; case OP_TAILCALL: { int i, pos; MonoCallInst *call = (MonoCallInst*)ins; /* * Keep in sync with mono_arch_emit_epilog */ g_assert (!cfg->method->save_lmf); /* * Note: we can use ppc_r12 here because it is dead anyway: * we're leaving the method.
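 * (Aside, assuming the usual PPC ABI conventions: r12 is a volatile scratch
 * register -- and on ELFv2 targets the designated function-entry register --
 * so nothing past this point may rely on it surviving the sequence below.)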
*/ if (1 || cfg->flags & MONO_CFG_HAS_CALLS) { long ret_offset = cfg->stack_usage + PPC_RET_ADDR_OFFSET; if (ppc_is_imm16 (ret_offset)) { ppc_ldptr (code, ppc_r0, ret_offset, cfg->frame_reg); } else { ppc_load (code, ppc_r12, ret_offset); ppc_ldptr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r12); } ppc_mtlr (code, ppc_r0); } if (ppc_is_imm16 (cfg->stack_usage)) { ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage); } else { /* cfg->stack_usage is an int, so we can use * an addis/addi sequence here even in 64-bit. */ ppc_addis (code, ppc_r12, cfg->frame_reg, ppc_ha(cfg->stack_usage)); ppc_addi (code, ppc_r12, ppc_r12, cfg->stack_usage); } if (!cfg->method->save_lmf) { pos = 0; for (i = 31; i >= 13; --i) { if (cfg->used_int_regs & (1 << i)) { pos += sizeof (target_mgreg_t); ppc_ldptr (code, i, -pos, ppc_r12); } } } else { /* FIXME restore from MonoLMF: though this can't happen yet */ } /* Copy arguments on the stack to our argument area */ if (call->stack_usage) { code = emit_memcpy (code, call->stack_usage, ppc_r12, PPC_STACK_PARAM_OFFSET, ppc_sp, PPC_STACK_PARAM_OFFSET); /* r12 was clobbered */ g_assert (cfg->frame_reg == ppc_sp); if (ppc_is_imm16 (cfg->stack_usage)) { ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage); } else { /* cfg->stack_usage is an int, so we can use * an addis/addi sequence here even in 64-bit. */ ppc_addis (code, ppc_r12, cfg->frame_reg, ppc_ha(cfg->stack_usage)); ppc_addi (code, ppc_r12, ppc_r12, cfg->stack_usage); } } ppc_mr (code, ppc_sp, ppc_r12); mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method); cfg->thunk_area += THUNK_SIZE; if (cfg->compile_aot) { /* arch_emit_got_access () patches this */ ppc_load32 (code, ppc_r0, 0); #ifdef PPC_USES_FUNCTION_DESCRIPTOR ppc_ldptr_indexed (code, ppc_r12, ppc_r30, ppc_r0); ppc_ldptr (code, ppc_r0, 0, ppc_r12); #else ppc_ldptr_indexed (code, ppc_r0, ppc_r30, ppc_r0); #endif ppc_mtctr (code, ppc_r0); ppc_bcctr (code, PPC_BR_ALWAYS, 0); } else { ppc_b (code, 0); } break; } case OP_CHECK_THIS: /* ensure ins->sreg1 is not NULL */ ppc_ldptr (code, ppc_r0, 0, ins->sreg1); break; case OP_ARGLIST: { long cookie_offset = cfg->sig_cookie + cfg->stack_usage; if (ppc_is_imm16 (cookie_offset)) { ppc_addi (code, ppc_r0, cfg->frame_reg, cookie_offset); } else { ppc_load (code, ppc_r0, cookie_offset); ppc_add (code, ppc_r0, cfg->frame_reg, ppc_r0); } ppc_stptr (code, ppc_r0, 0, ins->sreg1); break; } case OP_FCALL: case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: case OP_CALL: call = (MonoCallInst*)ins; mono_call_add_patch_info (cfg, call, offset); if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) { ppc_load_func (code, PPC_CALL_REG, 0); ppc_mtlr (code, PPC_CALL_REG); ppc_blrl (code); } else { ppc_bl (code, 0); } /* FIXME: this should be handled somewhere else in the new jit */ code = emit_move_return_value (cfg, ins, code); break; case OP_FCALL_REG: case OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: #ifdef PPC_USES_FUNCTION_DESCRIPTOR ppc_ldptr (code, ppc_r0, 0, ins->sreg1); /* FIXME: if we know that this is a method, we can omit this load */ ppc_ldptr (code, ppc_r2, 8, ins->sreg1); ppc_mtlr (code, ppc_r0); #else #if (_CALL_ELF == 2) if (ins->flags & MONO_INST_HAS_METHOD) { // Not a global entry point } else { // Need to set up r12 with function entry address for global entry point if (ppc_r12 != ins->sreg1) { ppc_mr(code,ppc_r12,ins->sreg1); } } #endif ppc_mtlr (code, ins->sreg1); #endif 
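/* Hedged sketch of the indirect-call convention used above (illustrative,
 * not a literal disassembly): under the ELFv2 ABI a callee may expose a
 * global entry point that rebuilds the TOC pointer in r2 from the entry
 * address expected in r12, plus a local entry point that assumes r2 is
 * already valid, roughly:
 *     mr    r12, <target>   ; only when calling the global entry
 *     mtlr  <target>
 *     blrl
 * The local-entry offset itself is encoded in the ELF symbol's st_other bits. */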
ppc_blrl (code); /* FIXME: this should be handled somewhere else in the new jit */ code = emit_move_return_value (cfg, ins, code); break; case OP_FCALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: if (cfg->compile_aot && ins->sreg1 == ppc_r12) { /* The trampolines clobber this */ ppc_mr (code, ppc_r29, ins->sreg1); ppc_ldptr (code, ppc_r0, ins->inst_offset, ppc_r29); } else { ppc_ldptr (code, ppc_r0, ins->inst_offset, ins->sreg1); } ppc_mtlr (code, ppc_r0); ppc_blrl (code); /* FIXME: this should be handled somewhere else in the new jit */ code = emit_move_return_value (cfg, ins, code); break; case OP_LOCALLOC: { guint8 * zero_loop_jump, * zero_loop_start; /* keep alignment */ int alloca_waste = PPC_STACK_PARAM_OFFSET + cfg->param_area + 31; int area_offset = alloca_waste; area_offset &= ~31; ppc_addi (code, ppc_r12, ins->sreg1, alloca_waste + 31); /* FIXME: should be calculated from MONO_ARCH_FRAME_ALIGNMENT */ ppc_clear_right_imm (code, ppc_r12, ppc_r12, 4); /* use ctr to store the number of words to 0 if needed */ if (ins->flags & MONO_INST_INIT) { /* we zero 4 bytes at a time: * we add 7 instead of 3 so that we set the counter to * at least 1, otherwise the bdnz instruction will make * it negative and iterate billions of times. */ ppc_addi (code, ppc_r0, ins->sreg1, 7); ppc_shift_right_arith_imm (code, ppc_r0, ppc_r0, 2); ppc_mtctr (code, ppc_r0); } ppc_ldptr (code, ppc_r0, 0, ppc_sp); ppc_neg (code, ppc_r12, ppc_r12); ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12); /* FIXME: make this loop work in 8 byte increments on PPC64 */ if (ins->flags & MONO_INST_INIT) { /* adjust the dest reg by -4 so we can use stwu */ /* we actually adjust -8 because we let the loop * run at least once */ ppc_addi (code, ins->dreg, ppc_sp, (area_offset - 8)); ppc_li (code, ppc_r12, 0); zero_loop_start = code; ppc_stwu (code, ppc_r12, 4, ins->dreg); zero_loop_jump = code; ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0); ppc_patch (zero_loop_jump, zero_loop_start); } ppc_addi (code, ins->dreg, ppc_sp, area_offset); break; } case OP_THROW: { //ppc_break (code); ppc_mr (code, ppc_r3, ins->sreg1); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception)); if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) { ppc_load_func (code, PPC_CALL_REG, 0); ppc_mtlr (code, PPC_CALL_REG); ppc_blrl (code); } else { ppc_bl (code, 0); } break; } case OP_RETHROW: { //ppc_break (code); ppc_mr (code, ppc_r3, ins->sreg1); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception)); if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) { ppc_load_func (code, PPC_CALL_REG, 0); ppc_mtlr (code, PPC_CALL_REG); ppc_blrl (code); } else { ppc_bl (code, 0); } break; } case OP_START_HANDLER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); g_assert (spvar->inst_basereg != ppc_sp); code = emit_reserve_param_area (cfg, code); ppc_mflr (code, ppc_r0); if (ppc_is_imm16 (spvar->inst_offset)) { ppc_stptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg); } else { ppc_load (code, ppc_r12, spvar->inst_offset); ppc_stptr_indexed (code, ppc_r0, ppc_r12, spvar->inst_basereg); } break; } case OP_ENDFILTER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); g_assert (spvar->inst_basereg != ppc_sp); code = emit_unreserve_param_area 
(cfg, code); if (ins->sreg1 != ppc_r3) ppc_mr (code, ppc_r3, ins->sreg1); if (ppc_is_imm16 (spvar->inst_offset)) { ppc_ldptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg); } else { ppc_load (code, ppc_r12, spvar->inst_offset); ppc_ldptr_indexed (code, ppc_r0, spvar->inst_basereg, ppc_r12); } ppc_mtlr (code, ppc_r0); ppc_blr (code); break; } case OP_ENDFINALLY: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); g_assert (spvar->inst_basereg != ppc_sp); code = emit_unreserve_param_area (cfg, code); ppc_ldptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg); ppc_mtlr (code, ppc_r0); ppc_blr (code); break; } case OP_CALL_HANDLER: mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb); ppc_bl (code, 0); for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev) mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb); break; case OP_LABEL: ins->inst_c0 = code - cfg->native_code; break; case OP_BR: /*if (ins->inst_target_bb->native_offset) { ppc_b (code, 0); //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset); } else*/ { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb); ppc_b (code, 0); } break; case OP_BR_REG: ppc_mtctr (code, ins->sreg1); ppc_bcctr (code, PPC_BR_ALWAYS, 0); break; case OP_ICNEQ: ppc_li (code, ins->dreg, 0); ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 2); ppc_li (code, ins->dreg, 1); break; case OP_CEQ: case OP_ICEQ: CASE_PPC64 (OP_LCEQ) ppc_li (code, ins->dreg, 0); ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 2); ppc_li (code, ins->dreg, 1); break; case OP_CLT: case OP_CLT_UN: case OP_ICLT: case OP_ICLT_UN: CASE_PPC64 (OP_LCLT) CASE_PPC64 (OP_LCLT_UN) ppc_li (code, ins->dreg, 1); ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2); ppc_li (code, ins->dreg, 0); break; case OP_ICGE: case OP_ICGE_UN: ppc_li (code, ins->dreg, 1); ppc_bc (code, PPC_BR_FALSE, PPC_BR_LT, 2); ppc_li (code, ins->dreg, 0); break; case OP_CGT: case OP_CGT_UN: case OP_ICGT: case OP_ICGT_UN: CASE_PPC64 (OP_LCGT) CASE_PPC64 (OP_LCGT_UN) ppc_li (code, ins->dreg, 1); ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2); ppc_li (code, ins->dreg, 0); break; case OP_ICLE: case OP_ICLE_UN: ppc_li (code, ins->dreg, 1); ppc_bc (code, PPC_BR_FALSE, PPC_BR_GT, 2); ppc_li (code, ins->dreg, 0); break; case OP_COND_EXC_EQ: case OP_COND_EXC_NE_UN: case OP_COND_EXC_LT: case OP_COND_EXC_LT_UN: case OP_COND_EXC_GT: case OP_COND_EXC_GT_UN: case OP_COND_EXC_GE: case OP_COND_EXC_GE_UN: case OP_COND_EXC_LE: case OP_COND_EXC_LE_UN: EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, (const char*)ins->inst_p1); break; case OP_COND_EXC_IEQ: case OP_COND_EXC_INE_UN: case OP_COND_EXC_ILT: case OP_COND_EXC_ILT_UN: case OP_COND_EXC_IGT: case OP_COND_EXC_IGT_UN: case OP_COND_EXC_IGE: case OP_COND_EXC_IGE_UN: case OP_COND_EXC_ILE: case OP_COND_EXC_ILE_UN: EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, (const char*)ins->inst_p1); break; case OP_IBEQ: case OP_IBNE_UN: case OP_IBLT: case OP_IBLT_UN: case OP_IBGT: case OP_IBGT_UN: case OP_IBGE: case OP_IBGE_UN: case OP_IBLE: case OP_IBLE_UN: EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ); break; /* floating point opcodes */ case OP_R8CONST: g_assert (cfg->compile_aot); /* FIXME: Optimize this */ ppc_bl (code, 1); ppc_mflr (code, ppc_r12); ppc_b (code, 3); *(double*)code = *(double*)ins->inst_p0; code += 8; ppc_lfd (code, ins->dreg, 8, ppc_r12); break; case OP_R4CONST: g_assert_not_reached (); break; case OP_STORER8_MEMBASE_REG: if (ppc_is_imm16 
(ins->inst_offset)) { ppc_stfd (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); } else { if (ppc_is_imm32 (ins->inst_offset)) { ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset)); ppc_stfd (code, ins->sreg1, ins->inst_offset, ppc_r11); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_stfdx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0); } } break; case OP_LOADR8_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_lfd (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset)) { ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset)); ppc_lfd (code, ins->dreg, ins->inst_offset, ppc_r11); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_lfdx (code, ins->dreg, ins->inst_destbasereg, ppc_r0); } } break; case OP_STORER4_MEMBASE_REG: ppc_frsp (code, ins->sreg1, ins->sreg1); if (ppc_is_imm16 (ins->inst_offset)) { ppc_stfs (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); } else { if (ppc_is_imm32 (ins->inst_offset)) { ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset)); ppc_stfs (code, ins->sreg1, ins->inst_offset, ppc_r11); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_stfsx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0); } } break; case OP_LOADR4_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_lfs (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset)) { ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset)); ppc_lfs (code, ins->dreg, ins->inst_offset, ppc_r11); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_lfsx (code, ins->dreg, ins->inst_destbasereg, ppc_r0); } } break; case OP_LOADR4_MEMINDEX: ppc_lfsx (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADR8_MEMINDEX: ppc_lfdx (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_STORER4_MEMINDEX: ppc_frsp (code, ins->sreg1, ins->sreg1); ppc_stfsx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case OP_STORER8_MEMINDEX: ppc_stfdx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case CEE_CONV_R_UN: case CEE_CONV_R4: /* FIXME: change precision */ case CEE_CONV_R8: g_assert_not_reached (); case OP_FCONV_TO_I1: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE); break; case OP_FCONV_TO_U1: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE); break; case OP_FCONV_TO_I2: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE); break; case OP_FCONV_TO_U2: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE); break; case OP_FCONV_TO_I4: case OP_FCONV_TO_I: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE); break; case OP_FCONV_TO_U4: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE); break; case OP_LCONV_TO_R_UN: g_assert_not_reached (); /* Implemented as helper calls */ break; case OP_LCONV_TO_OVF_I4_2: case OP_LCONV_TO_OVF_I: { #ifdef __mono_ppc64__ NOT_IMPLEMENTED; #else guint8 *negative_branch, *msword_positive_branch, *msword_negative_branch, *ovf_ex_target; // Check if it's negative ppc_cmpi (code, 0, 0, ins->sreg1, 0); negative_branch = code; ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 0); // It's positive: msword == 0 ppc_cmpi (code, 0, 0, ins->sreg2, 0); msword_positive_branch = code; ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0); ovf_ex_target = code; EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_ALWAYS, 0, "OverflowException"); // Negative ppc_patch
(negative_branch, code); ppc_cmpi (code, 0, 0, ins->sreg2, -1); msword_negative_branch = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); ppc_patch (msword_negative_branch, ovf_ex_target); ppc_patch (msword_positive_branch, code); if (ins->dreg != ins->sreg1) ppc_mr (code, ins->dreg, ins->sreg1); break; #endif } case OP_ROUND: ppc_frind (code, ins->dreg, ins->sreg1); break; case OP_PPC_TRUNC: ppc_frizd (code, ins->dreg, ins->sreg1); break; case OP_PPC_CEIL: ppc_fripd (code, ins->dreg, ins->sreg1); break; case OP_PPC_FLOOR: ppc_frimd (code, ins->dreg, ins->sreg1); break; case OP_ABS: ppc_fabsd (code, ins->dreg, ins->sreg1); break; case OP_SQRTF: ppc_fsqrtsd (code, ins->dreg, ins->sreg1); break; case OP_SQRT: ppc_fsqrtd (code, ins->dreg, ins->sreg1); break; case OP_FADD: ppc_fadd (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FSUB: ppc_fsub (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FMUL: ppc_fmul (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FDIV: ppc_fdiv (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FNEG: ppc_fneg (code, ins->dreg, ins->sreg1); break; case OP_FREM: /* emulated */ g_assert_not_reached (); break; /* These min/max require POWER5 */ case OP_IMIN: ppc_cmp (code, 0, 0, ins->sreg1, ins->sreg2); ppc_isellt (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IMIN_UN: ppc_cmpl (code, 0, 0, ins->sreg1, ins->sreg2); ppc_isellt (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IMAX: ppc_cmp (code, 0, 0, ins->sreg1, ins->sreg2); ppc_iselgt (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IMAX_UN: ppc_cmpl (code, 0, 0, ins->sreg1, ins->sreg2); ppc_iselgt (code, ins->dreg, ins->sreg1, ins->sreg2); break; CASE_PPC64 (OP_LMIN) ppc_cmp (code, 0, 1, ins->sreg1, ins->sreg2); ppc_isellt (code, ins->dreg, ins->sreg1, ins->sreg2); break; CASE_PPC64 (OP_LMIN_UN) ppc_cmpl (code, 0, 1, ins->sreg1, ins->sreg2); ppc_isellt (code, ins->dreg, ins->sreg1, ins->sreg2); break; CASE_PPC64 (OP_LMAX) ppc_cmp (code, 0, 1, ins->sreg1, ins->sreg2); ppc_iselgt (code, ins->dreg, ins->sreg1, ins->sreg2); break; CASE_PPC64 (OP_LMAX_UN) ppc_cmpl (code, 0, 1, ins->sreg1, ins->sreg2); ppc_iselgt (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FCOMPARE: ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2); break; case OP_FCEQ: case OP_FCNEQ: ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2); ppc_li (code, ins->dreg, 1); ppc_bc (code, ins->opcode == OP_FCEQ ? PPC_BR_TRUE : PPC_BR_FALSE, PPC_BR_EQ, 2); ppc_li (code, ins->dreg, 0); break; case OP_FCLT: case OP_FCGE: ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2); ppc_li (code, ins->dreg, 1); ppc_bc (code, ins->opcode == OP_FCLT ? PPC_BR_TRUE : PPC_BR_FALSE, PPC_BR_LT, 2); ppc_li (code, ins->dreg, 0); break; case OP_FCLT_UN: ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2); ppc_li (code, ins->dreg, 1); ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 3); ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2); ppc_li (code, ins->dreg, 0); break; case OP_FCGT: case OP_FCLE: ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2); ppc_li (code, ins->dreg, 1); ppc_bc (code, ins->opcode == OP_FCGT ? 
PPC_BR_TRUE : PPC_BR_FALSE, PPC_BR_GT, 2); ppc_li (code, ins->dreg, 0); break; case OP_FCGT_UN: ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2); ppc_li (code, ins->dreg, 1); ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 3); ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2); ppc_li (code, ins->dreg, 0); break; case OP_FBEQ: EMIT_COND_BRANCH (ins, CEE_BEQ - CEE_BEQ); break; case OP_FBNE_UN: EMIT_COND_BRANCH (ins, CEE_BNE_UN - CEE_BEQ); break; case OP_FBLT: ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2); EMIT_COND_BRANCH (ins, CEE_BLT - CEE_BEQ); break; case OP_FBLT_UN: EMIT_COND_BRANCH_FLAGS (ins, PPC_BR_TRUE, PPC_BR_SO); EMIT_COND_BRANCH (ins, CEE_BLT_UN - CEE_BEQ); break; case OP_FBGT: ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2); EMIT_COND_BRANCH (ins, CEE_BGT - CEE_BEQ); break; case OP_FBGT_UN: EMIT_COND_BRANCH_FLAGS (ins, PPC_BR_TRUE, PPC_BR_SO); EMIT_COND_BRANCH (ins, CEE_BGT_UN - CEE_BEQ); break; case OP_FBGE: ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2); EMIT_COND_BRANCH (ins, CEE_BGE - CEE_BEQ); break; case OP_FBGE_UN: EMIT_COND_BRANCH (ins, CEE_BGE_UN - CEE_BEQ); break; case OP_FBLE: ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2); EMIT_COND_BRANCH (ins, CEE_BLE - CEE_BEQ); break; case OP_FBLE_UN: EMIT_COND_BRANCH (ins, CEE_BLE_UN - CEE_BEQ); break; case OP_CKFINITE: g_assert_not_reached (); case OP_PPC_CHECK_FINITE: { ppc_rlwinm (code, ins->sreg1, ins->sreg1, 0, 1, 31); ppc_addis (code, ins->sreg1, ins->sreg1, -32752); ppc_rlwinmd (code, ins->sreg1, ins->sreg1, 1, 31, 31); EMIT_COND_SYSTEM_EXCEPTION (CEE_BEQ - CEE_BEQ, "ArithmeticException"); break; case OP_JUMP_TABLE: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_c1, ins->inst_p0); #ifdef __mono_ppc64__ ppc_load_sequence (code, ins->dreg, (guint64)0x0f0f0f0f0f0f0f0fLL); #else ppc_load_sequence (code, ins->dreg, (gulong)0x0f0f0f0fL); #endif break; } #ifdef __mono_ppc64__ case OP_ICONV_TO_I4: case OP_SEXT_I4: ppc_extsw (code, ins->dreg, ins->sreg1); break; case OP_ICONV_TO_U4: case OP_ZEXT_I4: ppc_clrldi (code, ins->dreg, ins->sreg1, 32); break; case OP_ICONV_TO_R4: case OP_ICONV_TO_R8: case OP_LCONV_TO_R4: case OP_LCONV_TO_R8: { int tmp; if (ins->opcode == OP_ICONV_TO_R4 || ins->opcode == OP_ICONV_TO_R8) { ppc_extsw (code, ppc_r0, ins->sreg1); tmp = ppc_r0; } else { tmp = ins->sreg1; } if (cpu_hw_caps & PPC_MOVE_FPR_GPR) { ppc_mffgpr (code, ins->dreg, tmp); } else { ppc_str (code, tmp, -8, ppc_r1); ppc_lfd (code, ins->dreg, -8, ppc_r1); } ppc_fcfid (code, ins->dreg, ins->dreg); if (ins->opcode == OP_ICONV_TO_R4 || ins->opcode == OP_LCONV_TO_R4) ppc_frsp (code, ins->dreg, ins->dreg); break; } case OP_LSHR: ppc_srad (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_LSHR_UN: ppc_srd (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_COND_EXC_C: /* check XER [0-3] (SO, OV, CA): we can't use mcrxr */ ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1 << 13)); /* CA */ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, (const char*)ins->inst_p1); break; case OP_COND_EXC_OV: ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1 << 14)); /* OV */ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, (const char*)ins->inst_p1); break; case OP_LBEQ: case OP_LBNE_UN: case OP_LBLT: case OP_LBLT_UN: case OP_LBGT: case OP_LBGT_UN: case OP_LBGE: case OP_LBGE_UN: case OP_LBLE: case OP_LBLE_UN: EMIT_COND_BRANCH (ins, ins->opcode - OP_LBEQ); break; case OP_FCONV_TO_I8: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE); break; case OP_FCONV_TO_U8: code = emit_float_to_int (cfg, code, 
ins->dreg, ins->sreg1, 8, FALSE); break; case OP_STOREI4_MEMBASE_REG: if (ppc_is_imm16 (ins->inst_offset)) { ppc_stw (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_stwx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0); } break; case OP_STOREI4_MEMINDEX: ppc_stwx (code, ins->sreg1, ins->sreg2, ins->inst_destbasereg); break; case OP_ISHR_IMM: ppc_srawi (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f)); break; case OP_ISHR_UN_IMM: if (ins->inst_imm & 0x1f) ppc_srwi (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f)); else ppc_mr (code, ins->dreg, ins->sreg1); break; #else case OP_ICONV_TO_R4: case OP_ICONV_TO_R8: { if (cpu_hw_caps & PPC_ISA_64) { ppc_srawi(code, ppc_r0, ins->sreg1, 31); ppc_stw (code, ppc_r0, -8, ppc_r1); ppc_stw (code, ins->sreg1, -4, ppc_r1); ppc_lfd (code, ins->dreg, -8, ppc_r1); ppc_fcfid (code, ins->dreg, ins->dreg); if (ins->opcode == OP_ICONV_TO_R4) ppc_frsp (code, ins->dreg, ins->dreg); } break; } #endif case OP_ATOMIC_ADD_I4: CASE_PPC64 (OP_ATOMIC_ADD_I8) { int location = ins->inst_basereg; int addend = ins->sreg2; guint8 *loop, *branch; g_assert (ins->inst_offset == 0); loop = code; ppc_sync (code); if (ins->opcode == OP_ATOMIC_ADD_I4) ppc_lwarx (code, ppc_r0, 0, location); #ifdef __mono_ppc64__ else ppc_ldarx (code, ppc_r0, 0, location); #endif ppc_add (code, ppc_r0, ppc_r0, addend); if (ins->opcode == OP_ATOMIC_ADD_I4) ppc_stwcxd (code, ppc_r0, 0, location); #ifdef __mono_ppc64__ else ppc_stdcxd (code, ppc_r0, 0, location); #endif branch = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); ppc_patch (branch, loop); ppc_sync (code); ppc_mr (code, ins->dreg, ppc_r0); break; } case OP_ATOMIC_CAS_I4: CASE_PPC64 (OP_ATOMIC_CAS_I8) { int location = ins->sreg1; int value = ins->sreg2; int comparand = ins->sreg3; guint8 *start, *not_equal, *lost_reservation; start = code; ppc_sync (code); if (ins->opcode == OP_ATOMIC_CAS_I4) ppc_lwarx (code, ppc_r0, 0, location); #ifdef __mono_ppc64__ else ppc_ldarx (code, ppc_r0, 0, location); #endif ppc_cmp (code, 0, ins->opcode == OP_ATOMIC_CAS_I4 ? 
0 : 1, ppc_r0, comparand); not_equal = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); if (ins->opcode == OP_ATOMIC_CAS_I4) ppc_stwcxd (code, value, 0, location); #ifdef __mono_ppc64__ else ppc_stdcxd (code, value, 0, location); #endif lost_reservation = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); ppc_patch (lost_reservation, start); ppc_patch (not_equal, code); ppc_sync (code); ppc_mr (code, ins->dreg, ppc_r0); break; } case OP_LIVERANGE_START: { if (cfg->verbose_level > 1) printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code; break; } case OP_LIVERANGE_END: { if (cfg->verbose_level > 1) printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code; break; } case OP_GC_SAFE_POINT: break; default: g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__); g_assert_not_reached (); } if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) { g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)", mono_inst_name (ins->opcode), max_len, (glong)(code - cfg->native_code - offset)); g_assert_not_reached (); } cpos += max_len; last_ins = ins; } set_code_cursor (cfg, code); } #endif /* !DISABLE_JIT */ void mono_arch_register_lowlevel_calls (void) { /* The signature doesn't matter */ mono_register_jit_icall (mono_ppc_throw_exception, mono_icall_sig_void, TRUE); } #ifdef __mono_ppc64__ #if G_BYTE_ORDER == G_LITTLE_ENDIAN #define patch_load_sequence(ip,val) do {\ guint16 *__load = (guint16*)(ip); \ g_assert (sizeof (val) == sizeof (gsize)); \ __load [0] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \ __load [2] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \ __load [6] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \ __load [8] = ((guint64)(gsize)(val)) & 0xffff; \ } while (0) #elif G_BYTE_ORDER == G_BIG_ENDIAN #define patch_load_sequence(ip,val) do {\ guint16 *__load = (guint16*)(ip); \ g_assert (sizeof (val) == sizeof (gsize)); \ __load [1] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \ __load [3] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \ __load [7] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \ __load [9] = ((guint64)(gsize)(val)) & 0xffff; \ } while (0) #else #error huh? 
No endianness defined by compiler #endif #else #define patch_load_sequence(ip,val) do {\ guint16 *__lis_ori = (guint16*)(ip); \ __lis_ori [1] = (((gulong)(val)) >> 16) & 0xffff; \ __lis_ori [3] = ((gulong)(val)) & 0xffff; \ } while (0) #endif #ifndef DISABLE_JIT void mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target) { unsigned char *ip = ji->ip.i + code; gboolean is_fd = FALSE; switch (ji->type) { case MONO_PATCH_INFO_IP: patch_load_sequence (ip, ip); break; case MONO_PATCH_INFO_SWITCH: { gpointer *table = (gpointer *)ji->data.table->table; int i; patch_load_sequence (ip, table); for (i = 0; i < ji->data.table->table_size; i++) { table [i] = (glong)ji->data.table->table [i] + code; } /* we put into the table the absolute address, no need for ppc_patch in this case */ break; } case MONO_PATCH_INFO_METHODCONST: case MONO_PATCH_INFO_CLASS: case MONO_PATCH_INFO_IMAGE: case MONO_PATCH_INFO_FIELD: case MONO_PATCH_INFO_VTABLE: case MONO_PATCH_INFO_IID: case MONO_PATCH_INFO_SFLDA: case MONO_PATCH_INFO_LDSTR: case MONO_PATCH_INFO_TYPE_FROM_HANDLE: case MONO_PATCH_INFO_LDTOKEN: /* from OP_AOTCONST : lis + ori */ patch_load_sequence (ip, target); break; case MONO_PATCH_INFO_R4: case MONO_PATCH_INFO_R8: g_assert_not_reached (); *((gconstpointer *)(ip + 2)) = ji->data.target; break; case MONO_PATCH_INFO_EXC_NAME: g_assert_not_reached (); *((gconstpointer *)(ip + 1)) = ji->data.name; break; case MONO_PATCH_INFO_NONE: case MONO_PATCH_INFO_BB_OVF: case MONO_PATCH_INFO_EXC_OVF: /* everything is dealt with at epilog output time */ break; #ifdef PPC_USES_FUNCTION_DESCRIPTOR case MONO_PATCH_INFO_JIT_ICALL_ID: case MONO_PATCH_INFO_ABS: case MONO_PATCH_INFO_RGCTX_FETCH: case MONO_PATCH_INFO_JIT_ICALL_ADDR: case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR: is_fd = TRUE; /* fall through */ #endif default: ppc_patch_full (cfg, ip, (const guchar*)target, is_fd); break; } } /* * Emit code to save the registers in used_int_regs or the registers in the MonoLMF * structure at positive offset pos from register base_reg. pos is guaranteed to fit into * the instruction offset immediate for all the registers.
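 * (Inferred from the body below: with save_lmf set, r13..r31 go into
 * MonoLMF.iregs and f14..f31 into MonoLMF.fregs; otherwise only the
 * registers whose bits are set in used_int_regs are stored, each paired
 * with an unwind-offset op so the unwinder can locate it.)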
*/ static guint8* save_registers (MonoCompile *cfg, guint8* code, int pos, int base_reg, gboolean save_lmf, guint32 used_int_regs, int cfa_offset) { int i; if (!save_lmf) { for (i = 13; i <= 31; i++) { if (used_int_regs & (1 << i)) { ppc_str (code, i, pos, base_reg); mono_emit_unwind_op_offset (cfg, code, i, pos - cfa_offset); pos += sizeof (target_mgreg_t); } } } else { /* pos is the start of the MonoLMF structure */ int offset = pos + G_STRUCT_OFFSET (MonoLMF, iregs); for (i = 13; i <= 31; i++) { ppc_str (code, i, offset, base_reg); mono_emit_unwind_op_offset (cfg, code, i, offset - cfa_offset); offset += sizeof (target_mgreg_t); } offset = pos + G_STRUCT_OFFSET (MonoLMF, fregs); for (i = 14; i < 32; i++) { ppc_stfd (code, i, offset, base_reg); offset += sizeof (gdouble); } } return code; } /* * Stack frame layout: * * ------------------- sp * MonoLMF structure or saved registers * ------------------- * spilled regs * ------------------- * locals * ------------------- * param area size is cfg->param_area * ------------------- * linkage area size is PPC_STACK_PARAM_OFFSET * ------------------- sp * red zone */ guint8 * mono_arch_emit_prolog (MonoCompile *cfg) { MonoMethod *method = cfg->method; MonoBasicBlock *bb; MonoMethodSignature *sig; MonoInst *inst; long alloc_size, pos, max_offset, cfa_offset; int i; guint8 *code; CallInfo *cinfo; int lmf_offset = 0; int tailcall_struct_index; sig = mono_method_signature_internal (method); cfg->code_size = 512 + sig->param_count * 32; code = cfg->native_code = g_malloc (cfg->code_size); cfa_offset = 0; /* We currently emit unwind info for aot, but don't use it */ mono_emit_unwind_op_def_cfa (cfg, code, ppc_r1, 0); if (1 || cfg->flags & MONO_CFG_HAS_CALLS) { ppc_mflr (code, ppc_r0); ppc_str (code, ppc_r0, PPC_RET_ADDR_OFFSET, ppc_sp); mono_emit_unwind_op_offset (cfg, code, ppc_lr, PPC_RET_ADDR_OFFSET); } alloc_size = cfg->stack_offset; pos = 0; if (!method->save_lmf) { for (i = 31; i >= 13; --i) { if (cfg->used_int_regs & (1 << i)) { pos += sizeof (target_mgreg_t); } } } else { pos += sizeof (MonoLMF); lmf_offset = pos; } alloc_size += pos; // align to MONO_ARCH_FRAME_ALIGNMENT bytes if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) { alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1; alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1); } cfg->stack_usage = alloc_size; g_assert ((alloc_size & (MONO_ARCH_FRAME_ALIGNMENT-1)) == 0); if (alloc_size) { if (ppc_is_imm16 (-alloc_size)) { ppc_str_update (code, ppc_sp, -alloc_size, ppc_sp); cfa_offset = alloc_size; mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size); code = save_registers (cfg, code, alloc_size - pos, ppc_sp, method->save_lmf, cfg->used_int_regs, cfa_offset); } else { if (pos) ppc_addi (code, ppc_r12, ppc_sp, -pos); ppc_load (code, ppc_r0, -alloc_size); ppc_str_update_indexed (code, ppc_sp, ppc_sp, ppc_r0); cfa_offset = alloc_size; mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size); code = save_registers (cfg, code, 0, ppc_r12, method->save_lmf, cfg->used_int_regs, cfa_offset); } } if (cfg->frame_reg != ppc_sp) { ppc_mr (code, cfg->frame_reg, ppc_sp); mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg); } /* store runtime generic context */ if (cfg->rgctx_var) { g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET && (cfg->rgctx_var->inst_basereg == ppc_r1 || cfg->rgctx_var->inst_basereg == ppc_r31)); ppc_stptr (code, MONO_ARCH_RGCTX_REG, cfg->rgctx_var->inst_offset, cfg->rgctx_var->inst_basereg); } /* compute max_offset in order to use short forward jumps * we always do it on ppc 
because the immediate displacement * for jumps is too small */ max_offset = 0; for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins; bb->max_offset = max_offset; MONO_BB_FOR_EACH_INS (bb, ins) max_offset += ins_get_size (ins->opcode); } /* load arguments allocated to register from the stack */ pos = 0; cinfo = get_call_info (sig); if (MONO_TYPE_ISSTRUCT (sig->ret)) { ArgInfo *ainfo = &cinfo->ret; inst = cfg->vret_addr; g_assert (inst); if (ppc_is_imm16 (inst->inst_offset)) { ppc_stptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_stptr_indexed (code, ainfo->reg, ppc_r12, inst->inst_basereg); } } tailcall_struct_index = 0; for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; inst = cfg->args [pos]; if (cfg->verbose_level > 2) g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype); if (inst->opcode == OP_REGVAR) { if (ainfo->regtype == RegTypeGeneral) ppc_mr (code, inst->dreg, ainfo->reg); else if (ainfo->regtype == RegTypeFP) ppc_fmr (code, inst->dreg, ainfo->reg); else if (ainfo->regtype == RegTypeBase) { ppc_ldr (code, ppc_r12, 0, ppc_sp); ppc_ldptr (code, inst->dreg, ainfo->offset, ppc_r12); } else g_assert_not_reached (); if (cfg->verbose_level > 2) g_print ("Argument %ld assigned to register %s\n", pos, mono_arch_regname (inst->dreg)); } else { /* the argument should be put on the stack: FIXME handle size != word */ if (ainfo->regtype == RegTypeGeneral) { switch (ainfo->size) { case 1: if (ppc_is_imm16 (inst->inst_offset)) { ppc_stb (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_stb (code, ainfo->reg, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_stbx (code, ainfo->reg, inst->inst_basereg, ppc_r12); } } break; case 2: if (ppc_is_imm16 (inst->inst_offset)) { ppc_sth (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_sth (code, ainfo->reg, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_sthx (code, ainfo->reg, inst->inst_basereg, ppc_r12); } } break; #ifdef __mono_ppc64__ case 4: if (ppc_is_imm16 (inst->inst_offset)) { ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_stw (code, ainfo->reg, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_stwx (code, ainfo->reg, inst->inst_basereg, ppc_r12); } } break; case 8: if (ppc_is_imm16 (inst->inst_offset)) { ppc_str (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_str_indexed (code, ainfo->reg, ppc_r12, inst->inst_basereg); } break; #else case 8: if (ppc_is_imm16 (inst->inst_offset + 4)) { ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); ppc_stw (code, ainfo->reg + 1, inst->inst_offset + 4, inst->inst_basereg); } else { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_addi (code, ppc_r12, ppc_r12, inst->inst_offset); ppc_stw (code, ainfo->reg, 0, ppc_r12); ppc_stw (code, ainfo->reg + 1, 4, ppc_r12); } break; #endif default: if (ppc_is_imm16 (inst->inst_offset)) { ppc_stptr (code, 
ainfo->reg, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_stptr (code, ainfo->reg, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_stptr_indexed (code, ainfo->reg, inst->inst_basereg, ppc_r12); } } break; } } else if (ainfo->regtype == RegTypeBase) { g_assert (ppc_is_imm16 (ainfo->offset)); /* load the previous stack pointer in r12 */ ppc_ldr (code, ppc_r12, 0, ppc_sp); ppc_ldptr (code, ppc_r0, ainfo->offset, ppc_r12); switch (ainfo->size) { case 1: if (ppc_is_imm16 (inst->inst_offset)) { ppc_stb (code, ppc_r0, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_stb (code, ppc_r0, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_stbx (code, ppc_r0, inst->inst_basereg, ppc_r12); } } break; case 2: if (ppc_is_imm16 (inst->inst_offset)) { ppc_sth (code, ppc_r0, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_sth (code, ppc_r0, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_sthx (code, ppc_r0, inst->inst_basereg, ppc_r12); } } break; #ifdef __mono_ppc64__ case 4: if (ppc_is_imm16 (inst->inst_offset)) { ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_stw (code, ppc_r0, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_stwx (code, ppc_r0, inst->inst_basereg, ppc_r12); } } break; case 8: if (ppc_is_imm16 (inst->inst_offset)) { ppc_str (code, ppc_r0, inst->inst_offset, inst->inst_basereg); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_str_indexed (code, ppc_r0, ppc_r12, inst->inst_basereg); } break; #else case 8: g_assert (ppc_is_imm16 (ainfo->offset + 4)); if (ppc_is_imm16 (inst->inst_offset + 4)) { ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg); ppc_lwz (code, ppc_r0, ainfo->offset + 4, ppc_r12); ppc_stw (code, ppc_r0, inst->inst_offset + 4, inst->inst_basereg); } else { /* use r11 to load the 2nd half of the long before we clobber r12. 
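 * (i.e. the low word is fetched through r11 while r12 still points at the
 * caller's frame; r12 is then rebuilt as the destination address and the
 * two halves are stored through it.)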
*/ ppc_lwz (code, ppc_r11, ainfo->offset + 4, ppc_r12); ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_addi (code, ppc_r12, ppc_r12, inst->inst_offset); ppc_stw (code, ppc_r0, 0, ppc_r12); ppc_stw (code, ppc_r11, 4, ppc_r12); } break; #endif default: if (ppc_is_imm16 (inst->inst_offset)) { ppc_stptr (code, ppc_r0, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_stptr (code, ppc_r0, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_stptr_indexed (code, ppc_r0, inst->inst_basereg, ppc_r12); } } break; } } else if (ainfo->regtype == RegTypeFP) { g_assert (ppc_is_imm16 (inst->inst_offset)); if (ainfo->size == 8) ppc_stfd (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); else if (ainfo->size == 4) ppc_stfs (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); else g_assert_not_reached (); } else if (ainfo->regtype == RegTypeFPStructByVal) { int doffset = inst->inst_offset; int soffset = 0; int cur_reg; int size = 0; g_assert (ppc_is_imm16 (inst->inst_offset)); g_assert (ppc_is_imm16 (inst->inst_offset + ainfo->vtregs * sizeof (target_mgreg_t))); /* FIXME: what if there is no class? */ if (sig->pinvoke && !sig->marshalling_disabled && mono_class_from_mono_type_internal (inst->inst_vtype)) size = mono_class_native_size (mono_class_from_mono_type_internal (inst->inst_vtype), NULL); for (cur_reg = 0; cur_reg < ainfo->vtregs; ++cur_reg) { if (ainfo->size == 4) { ppc_stfs (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); } else { ppc_stfd (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); } soffset += ainfo->size; doffset += ainfo->size; } } else if (ainfo->regtype == RegTypeStructByVal) { int doffset = inst->inst_offset; int soffset = 0; int cur_reg; int size = 0; g_assert (ppc_is_imm16 (inst->inst_offset)); g_assert (ppc_is_imm16 (inst->inst_offset + ainfo->vtregs * sizeof (target_mgreg_t))); /* FIXME: what if there is no class? */ if (sig->pinvoke && !sig->marshalling_disabled && mono_class_from_mono_type_internal (inst->inst_vtype)) size = mono_class_native_size (mono_class_from_mono_type_internal (inst->inst_vtype), NULL); for (cur_reg = 0; cur_reg < ainfo->vtregs; ++cur_reg) { #if __APPLE__ /* * Darwin handles 1 and 2 byte * structs specially by * loading h/b into the arg * register. Only done for * pinvokes. */ if (size == 2) ppc_sth (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); else if (size == 1) ppc_stb (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); else #endif { #ifdef __mono_ppc64__ if (ainfo->bytes) { g_assert (cur_reg == 0); #if G_BYTE_ORDER == G_BIG_ENDIAN ppc_sldi (code, ppc_r0, ainfo->reg, (sizeof (target_mgreg_t) - ainfo->bytes) * 8); ppc_stptr (code, ppc_r0, doffset, inst->inst_basereg); #else if (mono_class_native_size (inst->klass, NULL) == 1) { ppc_stb (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); } else if (mono_class_native_size (inst->klass, NULL) == 2) { ppc_sth (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); } else if (mono_class_native_size (inst->klass, NULL) == 4) { // WDS -- maybe <=4? ppc_stw (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); } else { ppc_stptr (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); // WDS -- Better way? 
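/* Little-endian note (a reading of the code above, not separate docs): the
 * struct's valid bytes occupy the low-order end of the argument register, so
 * a plain stb/sth/stw of the right width stores them directly, whereas the
 * big-endian path must first shift the value left into place. */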
} #endif } else #endif { ppc_stptr (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); } } soffset += sizeof (target_mgreg_t); doffset += sizeof (target_mgreg_t); } if (ainfo->vtsize) { /* FIXME: we need to do the shifting here, too */ if (ainfo->bytes) NOT_IMPLEMENTED; /* load the previous stack pointer in r12 (r0 gets overwritten by the memcpy) */ ppc_ldr (code, ppc_r12, 0, ppc_sp); if ((size & MONO_PPC_32_64_CASE (3, 7)) != 0) { code = emit_memcpy (code, size - soffset, inst->inst_basereg, doffset, ppc_r12, ainfo->offset + soffset); } else { code = emit_memcpy (code, ainfo->vtsize * sizeof (target_mgreg_t), inst->inst_basereg, doffset, ppc_r12, ainfo->offset + soffset); } } } else if (ainfo->regtype == RegTypeStructByAddr) { /* if it was originally a RegTypeBase */ if (ainfo->offset) { /* load the previous stack pointer in r12 */ ppc_ldr (code, ppc_r12, 0, ppc_sp); ppc_ldptr (code, ppc_r12, ainfo->offset, ppc_r12); } else { ppc_mr (code, ppc_r12, ainfo->reg); } g_assert (ppc_is_imm16 (inst->inst_offset)); code = emit_memcpy (code, ainfo->vtsize, inst->inst_basereg, inst->inst_offset, ppc_r12, 0); /*g_print ("copy in %s: %d bytes from %d to offset: %d\n", method->name, ainfo->vtsize, ainfo->reg, inst->inst_offset);*/ } else g_assert_not_reached (); } pos++; } if (method->save_lmf) { if (cfg->compile_aot) { /* Compute the got address which is needed by the PLT entry */ code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL); } mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern)); if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) { ppc_load_func (code, PPC_CALL_REG, 0); ppc_mtlr (code, PPC_CALL_REG); ppc_blrl (code); } else { ppc_bl (code, 0); } /* we build the MonoLMF structure on the stack - see mini-ppc.h */ /* lmf_offset is the offset from the previous stack pointer, * alloc_size is the total stack space allocated, so the offset * of MonoLMF from the current stack ptr is alloc_size - lmf_offset. * The pointer to the struct is put in ppc_r12 (new_lmf). 
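 * (In C terms the three stores below amount to, roughly:
 *     new_lmf->lmf_addr = lmf_addr;
 *     new_lmf->previous_lmf = *lmf_addr;
 *     *lmf_addr = new_lmf;
 * i.e. the new frame is pushed onto the thread's LMF list.)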
* The callee-saved registers are already in the MonoLMF structure */ ppc_addi (code, ppc_r12, ppc_sp, alloc_size - lmf_offset); /* ppc_r3 is the result from mono_get_lmf_addr () */ ppc_stptr (code, ppc_r3, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r12); /* new_lmf->previous_lmf = *lmf_addr */ ppc_ldptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3); ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r12); /* *(lmf_addr) = r12 */ ppc_stptr (code, ppc_r12, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3); /* save method info */ if (cfg->compile_aot) // FIXME: ppc_load (code, ppc_r0, 0); else ppc_load_ptr (code, ppc_r0, method); ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, method), ppc_r12); ppc_stptr (code, ppc_sp, G_STRUCT_OFFSET(MonoLMF, ebp), ppc_r12); /* save the current IP */ if (cfg->compile_aot) { ppc_bl (code, 1); ppc_mflr (code, ppc_r0); } else { mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL); #ifdef __mono_ppc64__ ppc_load_sequence (code, ppc_r0, (guint64)0x0101010101010101LL); #else ppc_load_sequence (code, ppc_r0, (gulong)0x01010101L); #endif } ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, eip), ppc_r12); } set_code_cursor (cfg, code); g_free (cinfo); return code; } void mono_arch_emit_epilog (MonoCompile *cfg) { MonoMethod *method = cfg->method; int pos, i; int max_epilog_size = 16 + 20*4; guint8 *code; if (cfg->method->save_lmf) max_epilog_size += 128; code = realloc_code (cfg, max_epilog_size); pos = 0; if (method->save_lmf) { int lmf_offset; pos += sizeof (MonoLMF); lmf_offset = pos; /* save the frame reg in r8 */ ppc_mr (code, ppc_r8, cfg->frame_reg); ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage - lmf_offset); /* r5 = previous_lmf */ ppc_ldptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r12); /* r6 = lmf_addr */ ppc_ldptr (code, ppc_r6, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r12); /* *(lmf_addr) = previous_lmf */ ppc_stptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r6); /* FIXME: speedup: there is no actual need to restore the registers if * we didn't actually change them (idea from Zoltan). 
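 * (As written, the sequence below reloads all of r13..r31 from MonoLMF.iregs
 * with a single ppc_ldr_multiple and leaves the float-register restore
 * commented out.)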
*/ /* restore iregs */ ppc_ldr_multiple (code, ppc_r13, G_STRUCT_OFFSET(MonoLMF, iregs), ppc_r12); /* restore fregs */ /*for (i = 14; i < 32; i++) { ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r12); }*/ g_assert (ppc_is_imm16 (cfg->stack_usage + PPC_RET_ADDR_OFFSET)); /* use the saved copy of the frame reg in r8 */ if (1 || cfg->flags & MONO_CFG_HAS_CALLS) { ppc_ldr (code, ppc_r0, cfg->stack_usage + PPC_RET_ADDR_OFFSET, ppc_r8); ppc_mtlr (code, ppc_r0); } ppc_addic (code, ppc_sp, ppc_r8, cfg->stack_usage); } else { if (1 || cfg->flags & MONO_CFG_HAS_CALLS) { long return_offset = cfg->stack_usage + PPC_RET_ADDR_OFFSET; if (ppc_is_imm16 (return_offset)) { ppc_ldr (code, ppc_r0, return_offset, cfg->frame_reg); } else { ppc_load (code, ppc_r12, return_offset); ppc_ldr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r12); } ppc_mtlr (code, ppc_r0); } if (ppc_is_imm16 (cfg->stack_usage)) { int offset = cfg->stack_usage; for (i = 13; i <= 31; i++) { if (cfg->used_int_regs & (1 << i)) offset -= sizeof (target_mgreg_t); } if (cfg->frame_reg != ppc_sp) ppc_mr (code, ppc_r12, cfg->frame_reg); /* note r31 (possibly the frame register) is restored last */ for (i = 13; i <= 31; i++) { if (cfg->used_int_regs & (1 << i)) { ppc_ldr (code, i, offset, cfg->frame_reg); offset += sizeof (target_mgreg_t); } } if (cfg->frame_reg != ppc_sp) ppc_addi (code, ppc_sp, ppc_r12, cfg->stack_usage); else ppc_addi (code, ppc_sp, ppc_sp, cfg->stack_usage); } else { ppc_load32 (code, ppc_r12, cfg->stack_usage); if (cfg->used_int_regs) { ppc_add (code, ppc_r12, cfg->frame_reg, ppc_r12); for (i = 31; i >= 13; --i) { if (cfg->used_int_regs & (1 << i)) { pos += sizeof (target_mgreg_t); ppc_ldr (code, i, -pos, ppc_r12); } } ppc_mr (code, ppc_sp, ppc_r12); } else { ppc_add (code, ppc_sp, cfg->frame_reg, ppc_r12); } } } ppc_blr (code); set_code_cursor (cfg, code); } #endif /* ifndef DISABLE_JIT */ /* remove once throw_exception_by_name is eliminated */ static int exception_id_by_name (const char *name) { if (strcmp (name, "IndexOutOfRangeException") == 0) return MONO_EXC_INDEX_OUT_OF_RANGE; if (strcmp (name, "OverflowException") == 0) return MONO_EXC_OVERFLOW; if (strcmp (name, "ArithmeticException") == 0) return MONO_EXC_ARITHMETIC; if (strcmp (name, "DivideByZeroException") == 0) return MONO_EXC_DIVIDE_BY_ZERO; if (strcmp (name, "InvalidCastException") == 0) return MONO_EXC_INVALID_CAST; if (strcmp (name, "NullReferenceException") == 0) return MONO_EXC_NULL_REF; if (strcmp (name, "ArrayTypeMismatchException") == 0) return MONO_EXC_ARRAY_TYPE_MISMATCH; if (strcmp (name, "ArgumentException") == 0) return MONO_EXC_ARGUMENT; g_error ("Unknown intrinsic exception %s\n", name); return 0; } #ifndef DISABLE_JIT void mono_arch_emit_exceptions (MonoCompile *cfg) { MonoJumpInfo *patch_info; int i; guint8 *code; guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM]; guint8 exc_throw_found [MONO_EXC_INTRINS_NUM]; int max_epilog_size = 50; for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) { exc_throw_pos [i] = NULL; exc_throw_found [i] = 0; } /* count the number of exception infos */ /* * make sure we have enough space for exceptions */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { if (patch_info->type == MONO_PATCH_INFO_EXC) { i = exception_id_by_name ((const char*)patch_info->data.target); if (!exc_throw_found [i]) { max_epilog_size += (2 * PPC_LOAD_SEQUENCE_LENGTH) + 5 * 4; exc_throw_found [i] = TRUE; } } else if (patch_info->type == MONO_PATCH_INFO_BB_OVF) max_epilog_size += 12; else 
if (patch_info->type == MONO_PATCH_INFO_EXC_OVF) { MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target; i = exception_id_by_name (ovfj->data.exception); if (!exc_throw_found [i]) { max_epilog_size += (2 * PPC_LOAD_SEQUENCE_LENGTH) + 5 * 4; exc_throw_found [i] = TRUE; } max_epilog_size += 8; } } code = realloc_code (cfg, max_epilog_size); /* add code to raise exceptions */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { switch (patch_info->type) { case MONO_PATCH_INFO_BB_OVF: { MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target; unsigned char *ip = patch_info->ip.i + cfg->native_code; /* patch the initial jump */ ppc_patch (ip, code); ppc_bc (code, ovfj->b0_cond, ovfj->b1_cond, 2); ppc_b (code, 0); ppc_patch (code - 4, ip + 4); /* jump back after the initial branch */ /* jump back to the true target */ ppc_b (code, 0); ip = ovfj->data.bb->native_offset + cfg->native_code; ppc_patch (code - 4, ip); patch_info->type = MONO_PATCH_INFO_NONE; break; } case MONO_PATCH_INFO_EXC_OVF: { MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target; MonoJumpInfo *newji; unsigned char *ip = patch_info->ip.i + cfg->native_code; unsigned char *bcl = code; /* patch the initial jump: we arrived here with a call */ ppc_patch (ip, code); ppc_bc (code, ovfj->b0_cond, ovfj->b1_cond, 0); ppc_b (code, 0); ppc_patch (code - 4, ip + 4); /* jump back after the initial branch */ /* patch the conditional jump to the right handler */ /* make it processed next */ newji = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo)); newji->type = MONO_PATCH_INFO_EXC; newji->ip.i = bcl - cfg->native_code; newji->data.target = ovfj->data.exception; newji->next = patch_info->next; patch_info->next = newji; patch_info->type = MONO_PATCH_INFO_NONE; break; } case MONO_PATCH_INFO_EXC: { MonoClass *exc_class; unsigned char *ip = patch_info->ip.i + cfg->native_code; i = exception_id_by_name ((const char*)patch_info->data.target); if (exc_throw_pos [i] && !(ip > exc_throw_pos [i] && ip - exc_throw_pos [i] > 50000)) { ppc_patch (ip, exc_throw_pos [i]); patch_info->type = MONO_PATCH_INFO_NONE; break; } else { exc_throw_pos [i] = code; } exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name); ppc_patch (ip, code); /*mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);*/ ppc_load (code, ppc_r3, m_class_get_type_token (exc_class)); /* we got here from a conditional call, so the calling ip is set in lr */ ppc_mflr (code, ppc_r4); patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID; patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception; patch_info->ip.i = code - cfg->native_code; if (FORCE_INDIR_CALL || cfg->method->dynamic) { ppc_load_func (code, PPC_CALL_REG, 0); ppc_mtctr (code, PPC_CALL_REG); ppc_bcctr (code, PPC_BR_ALWAYS, 0); } else { ppc_bl (code, 0); } break; } default: /* do nothing */ break; } } set_code_cursor (cfg, code); } #endif #if DEAD_CODE static int try_offset_access (void *value, guint32 idx) { register void* me __asm__ ("r2"); void ***p = (void***)((char*)me + 284); int idx1 = idx / 32; int idx2 = idx % 32; if (!p [idx1]) return 0; if (value != p[idx1][idx2]) return 0; return 1; } #endif void mono_arch_finish_init (void) { } #define CMP_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4) #define BR_SIZE 4 #define LOADSTORE_SIZE 4 #define JUMP_IMM_SIZE 12 #define JUMP_IMM32_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 8) #define ENABLE_WRONG_METHOD_CHECK 0 gpointer mono_arch_build_imt_trampoline
(MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp) { int i; int size = 0; guint8 *code, *start; MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->is_equals) { if (item->check_target_idx) { if (!item->compare_done) item->chunk_size += CMP_SIZE; if (item->has_target_code) item->chunk_size += BR_SIZE + JUMP_IMM32_SIZE; else item->chunk_size += LOADSTORE_SIZE + BR_SIZE + JUMP_IMM_SIZE; } else { if (fail_tramp) { item->chunk_size += CMP_SIZE + BR_SIZE + JUMP_IMM32_SIZE * 2; if (!item->has_target_code) item->chunk_size += LOADSTORE_SIZE; } else { item->chunk_size += LOADSTORE_SIZE + JUMP_IMM_SIZE; #if ENABLE_WRONG_METHOD_CHECK item->chunk_size += CMP_SIZE + BR_SIZE + 4; #endif } } } else { item->chunk_size += CMP_SIZE + BR_SIZE; imt_entries [item->check_target_idx]->compare_done = TRUE; } size += item->chunk_size; } /* the initial load of the vtable address */ size += PPC_LOAD_SEQUENCE_LENGTH + LOADSTORE_SIZE; if (fail_tramp) { code = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, size); } else { code = mono_mem_manager_code_reserve (mem_manager, size); } start = code; /* * We need to save and restore r12 because it might be * used by the caller as the vtable register, so * clobbering it will trip up the magic trampoline. * * FIXME: Get rid of this by making sure that r12 is * not used as the vtable register in interface calls. */ ppc_stptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp); ppc_load (code, ppc_r12, (gsize)(& (vtable->vtable [0]))); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; item->code_target = code; if (item->is_equals) { if (item->check_target_idx) { if (!item->compare_done) { ppc_load (code, ppc_r0, (gsize)item->key); ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0); } item->jmp_code = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); if (item->has_target_code) { ppc_load_ptr (code, ppc_r0, item->value.target_code); } else { ppc_ldptr (code, ppc_r0, (sizeof (target_mgreg_t) * item->value.vtable_slot), ppc_r12); ppc_ldptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp); } ppc_mtctr (code, ppc_r0); ppc_bcctr (code, PPC_BR_ALWAYS, 0); } else { if (fail_tramp) { ppc_load (code, ppc_r0, (gulong)item->key); ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0); item->jmp_code = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); if (item->has_target_code) { ppc_load_ptr (code, ppc_r0, item->value.target_code); } else { g_assert (vtable); ppc_load_ptr (code, ppc_r0, & (vtable->vtable [item->value.vtable_slot])); ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0); } ppc_mtctr (code, ppc_r0); ppc_bcctr (code, PPC_BR_ALWAYS, 0); ppc_patch (item->jmp_code, code); ppc_load_ptr (code, ppc_r0, fail_tramp); ppc_mtctr (code, ppc_r0); ppc_bcctr (code, PPC_BR_ALWAYS, 0); item->jmp_code = NULL; } else { /* enable the commented code to assert on wrong method */ #if ENABLE_WRONG_METHOD_CHECK ppc_load (code, ppc_r0, (guint32)item->key); ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0); item->jmp_code = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); #endif ppc_ldptr (code, ppc_r0, (sizeof (target_mgreg_t) * item->value.vtable_slot), ppc_r12); ppc_ldptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp); ppc_mtctr (code, ppc_r0); ppc_bcctr (code, PPC_BR_ALWAYS, 0); #if ENABLE_WRONG_METHOD_CHECK ppc_patch (item->jmp_code, code); ppc_break (code); item->jmp_code = NULL; #endif } } } else { ppc_load (code, ppc_r0, (gulong)item->key); 
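/* Interior node of the IMT binary search (as the surrounding loop suggests):
 * load this entry's key, compare it against the incoming IMT register, and
 * record jmp_code so the branch can later be patched to the subtree at
 * check_target_idx by the fixup loop below. */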
ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0); item->jmp_code = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_LT, 0); } } /* patch the branches to get to the target items */ for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->jmp_code) { if (item->check_target_idx) { ppc_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target); } } } if (!fail_tramp) UnlockedAdd (&mono_stats.imt_trampolines_size, code - start); g_assert (code - start <= size); mono_arch_flush_icache (start, size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL)); mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), mem_manager); return start; } MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code) { host_mgreg_t *r = (host_mgreg_t*)regs; return (MonoMethod*)(gsize) r [MONO_ARCH_IMT_REG]; } MonoVTable* mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code) { return (MonoVTable*)(gsize) regs [MONO_ARCH_RGCTX_REG]; } GSList* mono_arch_get_cie_program (void) { GSList *l = NULL; mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ppc_r1, 0); return l; } MonoInst* mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins = NULL; int opcode = 0; if (cmethod->klass == mono_class_try_get_math_class ()) { if (strcmp (cmethod->name, "Sqrt") == 0) { opcode = OP_SQRT; } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) { opcode = OP_ABS; } if (opcode && fsig->param_count == 1) { MONO_INST_NEW (cfg, ins, opcode); ins->type = STACK_R8; ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = args [0]->dreg; MONO_ADD_INS (cfg->cbb, ins); } /* Check for Min/Max for (u)int(32|64) */ opcode = 0; if (cpu_hw_caps & PPC_ISA_2_03) { if (strcmp (cmethod->name, "Min") == 0) { if (fsig->params [0]->type == MONO_TYPE_I4) opcode = OP_IMIN; if (fsig->params [0]->type == MONO_TYPE_U4) opcode = OP_IMIN_UN; #ifdef __mono_ppc64__ else if (fsig->params [0]->type == MONO_TYPE_I8) opcode = OP_LMIN; else if (fsig->params [0]->type == MONO_TYPE_U8) opcode = OP_LMIN_UN; #endif } else if (strcmp (cmethod->name, "Max") == 0) { if (fsig->params [0]->type == MONO_TYPE_I4) opcode = OP_IMAX; if (fsig->params [0]->type == MONO_TYPE_U4) opcode = OP_IMAX_UN; #ifdef __mono_ppc64__ else if (fsig->params [0]->type == MONO_TYPE_I8) opcode = OP_LMAX; else if (fsig->params [0]->type == MONO_TYPE_U8) opcode = OP_LMAX_UN; #endif } /* * TODO: Floating point version with fsel, but fsel has * some peculiarities (need a scratch reg unless * comparing with 0), NaN/Inf behaviour (then MathF too) */ } if (opcode && fsig->param_count == 2) { MONO_INST_NEW (cfg, ins, opcode); ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8; ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->sreg2 = args [1]->dreg; MONO_ADD_INS (cfg->cbb, ins); } /* Rounding instructions */ opcode = 0; if ((cpu_hw_caps & PPC_ISA_2X) && (fsig->param_count == 1) && (fsig->params [0]->type == MONO_TYPE_R8)) { /* * XXX: sysmath.c and the POWER ISA documentation for * frin[.] imply rounding is a little more complicated * than expected; the semantics are slightly different, * so just "frin." isn't a drop-in replacement. Floor, * Truncate, and Ceiling seem to work normally though. * (also, no float versions of these ops, but frsp * could be prepended?)
*/ //if (!strcmp (cmethod->name, "Round")) // opcode = OP_ROUND; if (!strcmp (cmethod->name, "Floor")) opcode = OP_PPC_FLOOR; else if (!strcmp (cmethod->name, "Ceiling")) opcode = OP_PPC_CEIL; else if (!strcmp (cmethod->name, "Truncate")) opcode = OP_PPC_TRUNC; if (opcode != 0) { MONO_INST_NEW (cfg, ins, opcode); ins->type = STACK_R8; ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = args [0]->dreg; MONO_ADD_INS (cfg->cbb, ins); } } } if (cmethod->klass == mono_class_try_get_mathf_class ()) { if (strcmp (cmethod->name, "Sqrt") == 0) { opcode = OP_SQRTF; } /* XXX: POWER has no single-precision normal FPU abs? */ if (opcode && fsig->param_count == 1) { MONO_INST_NEW (cfg, ins, opcode); ins->type = STACK_R4; ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = args [0]->dreg; MONO_ADD_INS (cfg->cbb, ins); } } return ins; } host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg) { if (reg == ppc_r1) return (host_mgreg_t)(gsize)MONO_CONTEXT_GET_SP (ctx); return ctx->regs [reg]; } host_mgreg_t* mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg) { if (reg == ppc_r1) return (host_mgreg_t*)&MONO_CONTEXT_GET_SP (ctx); return &ctx->regs [reg]; } guint32 mono_arch_get_patch_offset (guint8 *code) { return 0; } /* * mono_arch_emit_load_got_addr: * * Emit code to load the got address. * On PPC, the result is placed into r30. */ guint8* mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji) { ppc_bl (code, 1); ppc_mflr (code, ppc_r30); if (cfg) mono_add_patch_info (cfg, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL); else *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL); /* arch_emit_got_address () patches this */ #if defined(TARGET_POWERPC64) ppc_nop (code); ppc_nop (code); ppc_nop (code); ppc_nop (code); #else ppc_load32 (code, ppc_r0, 0); ppc_add (code, ppc_r30, ppc_r30, ppc_r0); #endif set_code_cursor (cfg, code); return code; } /* * mono_arch_emit_load_aotconst: * * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and * TARGET from the mscorlib GOT in full-aot code. * On PPC, the GOT address is assumed to be in r30, and the result is placed into * r12. */ guint8* mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, MonoJumpInfoType tramp_type, gconstpointer target) { /* Load the mscorlib got address */ ppc_ldptr (code, ppc_r12, sizeof (target_mgreg_t), ppc_r30); *ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target); /* arch_emit_got_access () patches this */ ppc_load32 (code, ppc_r0, 0); ppc_ldptr_indexed (code, ppc_r12, ppc_r12, ppc_r0); return code; } /* Soft Debug support */ #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED /* * BREAKPOINTS */ /* * mono_arch_set_breakpoint: * * See mini-amd64.c for docs. */ void mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip; guint8 *orig_code = code; ppc_load_sequence (code, ppc_r12, (gsize)bp_trigger_page); ppc_ldptr (code, ppc_r12, 0, ppc_r12); g_assert (code - orig_code == BREAKPOINT_SIZE); mono_arch_flush_icache (orig_code, code - orig_code); } /* * mono_arch_clear_breakpoint: * * See mini-amd64.c for docs. */ void mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip; int i; for (i = 0; i < BREAKPOINT_SIZE / 4; ++i) ppc_nop (code); mono_arch_flush_icache (ip, code - ip); } /* * mono_arch_is_breakpoint_event: * * See mini-amd64.c for docs.
*/ gboolean mono_arch_is_breakpoint_event (void *info, void *sigctx) { siginfo_t* sinfo = (siginfo_t*) info; /* Sometimes the address is off by 4 */ if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128) return TRUE; else return FALSE; } /* * mono_arch_skip_breakpoint: * * See mini-amd64.c for docs. */ void mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji) { /* skip the ldptr */ MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4); } /* * SINGLE STEPPING */ /* * mono_arch_start_single_stepping: * * See mini-amd64.c for docs. */ void mono_arch_start_single_stepping (void) { mono_mprotect (ss_trigger_page, mono_pagesize (), 0); } /* * mono_arch_stop_single_stepping: * * See mini-amd64.c for docs. */ void mono_arch_stop_single_stepping (void) { mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ); } /* * mono_arch_is_single_step_event: * * See mini-amd64.c for docs. */ gboolean mono_arch_is_single_step_event (void *info, void *sigctx) { siginfo_t* sinfo = (siginfo_t*) info; /* Sometimes the address is off by 4 */ if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128) return TRUE; else return FALSE; } /* * mono_arch_skip_single_step: * * See mini-amd64.c for docs. */ void mono_arch_skip_single_step (MonoContext *ctx) { /* skip the ldptr */ MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4); } /* * mono_arch_get_seq_point_info: * * See mini-amd64.c for docs. */ SeqPointInfo* mono_arch_get_seq_point_info (guint8 *code) { NOT_IMPLEMENTED; return NULL; } #endif gboolean mono_arch_opcode_supported (int opcode) { switch (opcode) { case OP_ATOMIC_ADD_I4: case OP_ATOMIC_CAS_I4: #ifdef TARGET_POWERPC64 case OP_ATOMIC_ADD_I8: case OP_ATOMIC_CAS_I8: #endif return TRUE; default: return FALSE; } } gpointer mono_arch_load_function (MonoJitICallId jit_icall_id) { gpointer target = NULL; switch (jit_icall_id) { #undef MONO_AOT_ICALL #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break; MONO_AOT_ICALL (mono_ppc_throw_exception) } return target; }
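/* For reference: the MONO_AOT_ICALL use above is mechanical macro
 * expansion into an ordinary switch case, i.e.
 *
 *     case MONO_JIT_ICALL_mono_ppc_throw_exception:
 *         target = (gpointer)mono_ppc_throw_exception;
 *         break;
 *
 * so mono_arch_load_function simply maps a JIT icall id to the address
 * of its C implementation. */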
/** * \file * PowerPC backend for the Mono code generator * * Authors: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * Andreas Faerber <[email protected]> * * (C) 2003 Ximian, Inc. * (C) 2007-2008 Andreas Faerber */ #include "mini.h" #include <string.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/utils/mono-proclib.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-hwcap.h> #include <mono/utils/unlocked.h> #include "mono/utils/mono-tls-inline.h" #include "mini-ppc.h" #ifdef TARGET_POWERPC64 #include "cpu-ppc64.h" #else #include "cpu-ppc.h" #endif #include "ir-emit.h" #include "aot-runtime.h" #include "mini-runtime.h" #ifdef __APPLE__ #include <sys/sysctl.h> #endif #ifdef __linux__ #include <unistd.h> #endif #ifdef _AIX #include <sys/systemcfg.h> #endif static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math") static GENERATE_TRY_GET_CLASS_WITH_CACHE (mathf, "System", "MathF") #define FORCE_INDIR_CALL 1 enum { TLS_MODE_DETECT, TLS_MODE_FAILED, TLS_MODE_LTHREADS, TLS_MODE_NPTL, TLS_MODE_DARWIN_G4, TLS_MODE_DARWIN_G5 }; /* cpu_hw_caps contains the flags defined below */ static int cpu_hw_caps = 0; static int cachelinesize = 0; static int cachelineinc = 0; enum { PPC_ICACHE_SNOOP = 1 << 0, PPC_MULTIPLE_LS_UNITS = 1 << 1, PPC_SMP_CAPABLE = 1 << 2, PPC_ISA_2X = 1 << 3, PPC_ISA_64 = 1 << 4, PPC_MOVE_FPR_GPR = 1 << 5, PPC_ISA_2_03 = 1 << 6, PPC_HW_CAP_END }; #define BREAKPOINT_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4) /* This mutex protects architecture specific caches */ #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex) #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex) static mono_mutex_t mini_arch_mutex; /* * The code generated for sequence points reads from this location, which is * made read-only when single stepping is enabled. 
*/ static gpointer ss_trigger_page; /* Enabled breakpoints read from this trigger page */ static gpointer bp_trigger_page; #define MONO_EMIT_NEW_LOAD_R8(cfg,dr,addr) do { \ MonoInst *inst; \ MONO_INST_NEW ((cfg), (inst), OP_R8CONST); \ inst->type = STACK_R8; \ inst->dreg = (dr); \ inst->inst_p0 = (void*)(addr); \ mono_bblock_add_inst (cfg->cbb, inst); \ } while (0) const char* mono_arch_regname (int reg) { static const char rnames[][4] = { "r0", "sp", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } const char* mono_arch_fregname (int reg) { static const char rnames[][4] = { "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } /* this function overwrites r0, r11, r12 */ static guint8* emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset) { /* for big copies, unroll into a loop driven by the count register */ if (size > sizeof (target_mgreg_t) * 5) { long shifted = size / TARGET_SIZEOF_VOID_P; guint8 *copy_loop_start, *copy_loop_jump; ppc_load (code, ppc_r0, shifted); ppc_mtctr (code, ppc_r0); //g_assert (sreg == ppc_r12); ppc_addi (code, ppc_r11, dreg, (doffset - sizeof (target_mgreg_t))); ppc_addi (code, ppc_r12, sreg, (soffset - sizeof (target_mgreg_t))); copy_loop_start = code; ppc_ldptr_update (code, ppc_r0, (unsigned int)sizeof (target_mgreg_t), ppc_r12); ppc_stptr_update (code, ppc_r0, (unsigned int)sizeof (target_mgreg_t), ppc_r11); copy_loop_jump = code; ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0); ppc_patch (copy_loop_jump, copy_loop_start); size -= shifted * sizeof (target_mgreg_t); doffset = soffset = 0; dreg = ppc_r11; } #ifdef __mono_ppc64__ /* if the hardware has multiple load/store units and the move is long enough to use more than one register, use load/load/store/store to execute 2 instructions per cycle.
*/ if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r11) && (sreg != ppc_r11)) { while (size >= 16) { ppc_ldptr (code, ppc_r0, soffset, sreg); ppc_ldptr (code, ppc_r11, soffset+8, sreg); ppc_stptr (code, ppc_r0, doffset, dreg); ppc_stptr (code, ppc_r11, doffset+8, dreg); size -= 16; soffset += 16; doffset += 16; } } while (size >= 8) { ppc_ldr (code, ppc_r0, soffset, sreg); ppc_str (code, ppc_r0, doffset, dreg); size -= 8; soffset += 8; doffset += 8; } #else if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r11) && (sreg != ppc_r11)) { while (size >= 8) { ppc_lwz (code, ppc_r0, soffset, sreg); ppc_lwz (code, ppc_r11, soffset+4, sreg); ppc_stw (code, ppc_r0, doffset, dreg); ppc_stw (code, ppc_r11, doffset+4, dreg); size -= 8; soffset += 8; doffset += 8; } } #endif while (size >= 4) { ppc_lwz (code, ppc_r0, soffset, sreg); ppc_stw (code, ppc_r0, doffset, dreg); size -= 4; soffset += 4; doffset += 4; } while (size >= 2) { ppc_lhz (code, ppc_r0, soffset, sreg); ppc_sth (code, ppc_r0, doffset, dreg); size -= 2; soffset += 2; doffset += 2; } while (size >= 1) { ppc_lbz (code, ppc_r0, soffset, sreg); ppc_stb (code, ppc_r0, doffset, dreg); size -= 1; soffset += 1; doffset += 1; } return code; } /* * mono_arch_get_argument_info: * @csig: a method signature * @param_count: the number of parameters to consider * @arg_info: an array to store the result infos * * Gathers information on parameters such as size, alignment and * padding. arg_info should be large enough to hold param_count + 1 entries. * * Returns the size of the activation frame. */ int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) { #ifdef __mono_ppc64__ NOT_IMPLEMENTED; return -1; #else int k, frame_size = 0; int size, align, pad; int offset = 8; if (MONO_TYPE_ISSTRUCT (csig->ret)) { frame_size += sizeof (target_mgreg_t); offset += 4; } arg_info [0].offset = offset; if (csig->hasthis) { frame_size += sizeof (target_mgreg_t); offset += 4; } arg_info [0].size = frame_size; for (k = 0; k < param_count; k++) { if (csig->pinvoke && !csig->marshalling_disabled) size = mono_type_native_stack_size (csig->params [k], (guint32*)&align); else size = mini_type_stack_size (csig->params [k], &align); /* ignore alignment for now */ align = 1; frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; frame_size += size; arg_info [k + 1].pad = 0; arg_info [k + 1].size = size; offset += pad; arg_info [k + 1].offset = offset; offset += size; } align = MONO_ARCH_FRAME_ALIGNMENT; frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; return frame_size; #endif } #ifdef __mono_ppc64__ static gboolean is_load_sequence (guint32 *seq) { return ppc_opcode (seq [0]) == 15 && /* lis */ ppc_opcode (seq [1]) == 24 && /* ori */ ppc_opcode (seq [2]) == 30 && /* sldi */ ppc_opcode (seq [3]) == 25 && /* oris */ ppc_opcode (seq [4]) == 24; /* ori */ } #define ppc_load_get_dest(l) (((l)>>21) & 0x1f) #define ppc_load_get_off(l) ((gint16)((l) & 0xffff)) #endif /* ld || lwz */ #define ppc_is_load_op(opcode) (ppc_opcode ((opcode)) == 58 || ppc_opcode ((opcode)) == 32) /* code must point to the blrl */ gboolean mono_ppc_is_direct_call_sequence (guint32 *code) { #ifdef __mono_ppc64__ g_assert(*code == 0x4e800021 || *code == 0x4e800020 || *code == 0x4e800420); /* the thunk-less direct call sequence: lis/ori/sldi/oris/ori/mtlr/blrl */ if (ppc_opcode (code [-1]) == 31) { /* mtlr */ if (ppc_is_load_op (code [-2]) && ppc_is_load_op
(code [-3])) { /* ld/ld */ if (!is_load_sequence (&code [-8])) return FALSE; /* one of the loads must be "ld r2,8(rX)" or "ld r2,4(rX) for ilp32 */ return (ppc_load_get_dest (code [-2]) == ppc_r2 && ppc_load_get_off (code [-2]) == sizeof (target_mgreg_t)) || (ppc_load_get_dest (code [-3]) == ppc_r2 && ppc_load_get_off (code [-3]) == sizeof (target_mgreg_t)); } if (ppc_opcode (code [-2]) == 24 && ppc_opcode (code [-3]) == 31) /* mr/nop */ return is_load_sequence (&code [-8]); else return is_load_sequence (&code [-6]); } return FALSE; #else g_assert(*code == 0x4e800021); /* the thunk-less direct call sequence: lis/ori/mtlr/blrl */ return ppc_opcode (code [-1]) == 31 && ppc_opcode (code [-2]) == 24 && ppc_opcode (code [-3]) == 15; #endif } #define MAX_ARCH_DELEGATE_PARAMS 7 static guint8* get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, guint32 param_count, gboolean aot) { guint8 *code, *start; if (has_target) { int size = MONO_PPC_32_64_CASE (32, 32) + PPC_FTNPTR_SIZE; start = code = mono_global_codeman_reserve (size); if (!aot) code = mono_ppc_create_pre_code_ftnptr (code); /* Replace the this argument with the target */ ppc_ldptr (code, ppc_r0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3); #ifdef PPC_USES_FUNCTION_DESCRIPTOR /* it's a function descriptor */ /* Can't use ldptr as it doesn't work with r0 */ ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0); #endif ppc_mtctr (code, ppc_r0); ppc_ldptr (code, ppc_r3, MONO_STRUCT_OFFSET (MonoDelegate, target), ppc_r3); ppc_bcctr (code, PPC_BR_ALWAYS, 0); g_assert ((code - start) <= size); mono_arch_flush_icache (start, size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); } else { int size, i; size = MONO_PPC_32_64_CASE (32, 32) + param_count * 4 + PPC_FTNPTR_SIZE; start = code = mono_global_codeman_reserve (size); if (!aot) code = mono_ppc_create_pre_code_ftnptr (code); ppc_ldptr (code, ppc_r0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3); #ifdef PPC_USES_FUNCTION_DESCRIPTOR /* it's a function descriptor */ ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0); #endif ppc_mtctr (code, ppc_r0); /* slide down the arguments */ for (i = 0; i < param_count; ++i) { ppc_mr (code, (ppc_r3 + i), (ppc_r3 + i + 1)); } ppc_bcctr (code, PPC_BR_ALWAYS, 0); g_assert ((code - start) <= size); mono_arch_flush_icache (start, size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); } if (has_target) { *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, NULL); } else { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count); *info = mono_tramp_info_create (name, start, code - start, NULL, NULL); g_free (name); } return start; } GSList* mono_arch_get_delegate_invoke_impls (void) { GSList *res = NULL; MonoTrampInfo *info; int i; get_delegate_invoke_impl (&info, TRUE, 0, TRUE); res = g_slist_prepend (res, info); for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) { get_delegate_invoke_impl (&info, FALSE, i, TRUE); res = g_slist_prepend (res, info); } return res; } gpointer mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target) { guint8 *code, *start; /* FIXME: Support more cases */ if (MONO_TYPE_ISSTRUCT (sig->ret)) return NULL; if (has_target) { static guint8* cached = NULL; if (cached) return cached; if (mono_ee_features.use_aot_trampolines) { start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target"); } else { 
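/* JIT path: emit the invoke thunk now and register its unwind info; the
 * result is published through the static cache below only after
 * mono_memory_barrier (), so racing readers never observe a
 * partially written trampoline. */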
MonoTrampInfo *info; start = get_delegate_invoke_impl (&info, TRUE, 0, FALSE); mono_tramp_info_register (info, NULL); } mono_memory_barrier (); cached = start; } else { static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL}; int i; if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS) return NULL; for (i = 0; i < sig->param_count; ++i) if (!mono_is_regsize_var (sig->params [i])) return NULL; code = cache [sig->param_count]; if (code) return code; if (mono_ee_features.use_aot_trampolines) { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count); start = (guint8*)mono_aot_get_trampoline (name); g_free (name); } else { MonoTrampInfo *info; start = get_delegate_invoke_impl (&info, FALSE, sig->param_count, FALSE); mono_tramp_info_register (info, NULL); } mono_memory_barrier (); cache [sig->param_count] = start; } return start; } gpointer mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg) { return NULL; } gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *r, guint8 *code) { return (gpointer)(gsize)r [ppc_r3]; } typedef struct { long int type; long int value; } AuxVec; #define MAX_AUX_ENTRIES 128 /* * PPC_FEATURE_POWER4, PPC_FEATURE_POWER5, PPC_FEATURE_POWER5_PLUS, PPC_FEATURE_CELL, * PPC_FEATURE_PA6T, PPC_FEATURE_ARCH_2_05 are considered supporting 2X ISA features */ #define ISA_2X (0x00080000 | 0x00040000 | 0x00020000 | 0x00010000 | 0x00000800 | 0x00001000) /* define PPC_FEATURE_64 HWCAP for 64-bit category. */ #define ISA_64 0x40000000 /* define PPC_FEATURE_POWER6_EXT HWCAP for power6x mffgpr/mftgpr instructions. */ #define ISA_MOVE_FPR_GPR 0x00000200 /* * Initialize the cpu to execute managed code. */ void mono_arch_cpu_init (void) { } /* * Initialize architecture specific code. */ void mono_arch_init (void) { #if defined(MONO_CROSS_COMPILE) #elif defined(__APPLE__) int mib [3]; size_t len = sizeof (cachelinesize); mib [0] = CTL_HW; mib [1] = HW_CACHELINE; if (sysctl (mib, 2, &cachelinesize, &len, NULL, 0) == -1) { perror ("sysctl"); cachelinesize = 128; } else { cachelineinc = cachelinesize; } #elif defined(__linux__) AuxVec vec [MAX_AUX_ENTRIES]; int i, vec_entries = 0; /* sadly this will work only with 2.6 kernels... */ FILE* f = fopen ("/proc/self/auxv", "rb"); if (f) { vec_entries = fread (&vec, sizeof (AuxVec), MAX_AUX_ENTRIES, f); fclose (f); } for (i = 0; i < vec_entries; i++) { int type = vec [i].type; if (type == 19) { /* AT_DCACHEBSIZE */ cachelinesize = vec [i].value; continue; } } #elif defined(G_COMPILER_CODEWARRIOR) cachelinesize = 32; cachelineinc = 32; #elif defined(_AIX) /* FIXME: use block instead? 
*/ cachelinesize = _system_configuration.icache_line; cachelineinc = _system_configuration.icache_line; #else //#error Need a way to get cache line size #endif if (mono_hwcap_ppc_has_icache_snoop) cpu_hw_caps |= PPC_ICACHE_SNOOP; if (mono_hwcap_ppc_is_isa_2x) cpu_hw_caps |= PPC_ISA_2X; if (mono_hwcap_ppc_is_isa_2_03) cpu_hw_caps |= PPC_ISA_2_03; if (mono_hwcap_ppc_is_isa_64) cpu_hw_caps |= PPC_ISA_64; if (mono_hwcap_ppc_has_move_fpr_gpr) cpu_hw_caps |= PPC_MOVE_FPR_GPR; if (mono_hwcap_ppc_has_multiple_ls_units) cpu_hw_caps |= PPC_MULTIPLE_LS_UNITS; if (!cachelinesize) cachelinesize = 32; if (!cachelineinc) cachelineinc = cachelinesize; if (mono_cpu_count () > 1) cpu_hw_caps |= PPC_SMP_CAPABLE; mono_os_mutex_init_recursive (&mini_arch_mutex); ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER); bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER); mono_mprotect (bp_trigger_page, mono_pagesize (), 0); // FIXME: Fix partial sharing for power and remove this mono_set_partial_sharing_supported (FALSE); } /* * Cleanup architecture specific code. */ void mono_arch_cleanup (void) { mono_os_mutex_destroy (&mini_arch_mutex); } gboolean mono_arch_have_fast_tls (void) { return FALSE; } /* * This function returns the optimizations supported on this cpu. */ guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask) { guint32 opts = 0; /* no ppc-specific optimizations yet */ *exclude_mask = 0; return opts; } #ifdef __mono_ppc64__ #define CASE_PPC32(c) #define CASE_PPC64(c) case c: #else #define CASE_PPC32(c) case c: #define CASE_PPC64(c) #endif static gboolean is_regsize_var (MonoType *t) { if (m_type_is_byref (t)) return TRUE; t = mini_get_underlying_type (t); switch (t->type) { case MONO_TYPE_I4: case MONO_TYPE_U4: CASE_PPC64 (MONO_TYPE_I8) CASE_PPC64 (MONO_TYPE_U8) case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: return TRUE; case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_CLASS: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: return TRUE; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (t)) return TRUE; return FALSE; case MONO_TYPE_VALUETYPE: return FALSE; } return FALSE; } #ifndef DISABLE_JIT GList * mono_arch_get_allocatable_int_vars (MonoCompile *cfg) { GList *vars = NULL; int i; for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); /* unused vars */ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos) continue; if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG)) continue; /* we can only allocate 32 bit values */ if (is_regsize_var (ins->inst_vtype)) { g_assert (MONO_VARINFO (cfg, i)->reg == -1); g_assert (i == vmv->idx); vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE); } } return vars; } #endif /* ifndef DISABLE_JIT */ GList * mono_arch_get_global_int_regs (MonoCompile *cfg) { GList *regs = NULL; int i, top = 32; if (cfg->frame_reg != ppc_sp) top = 31; /* ppc_r13 is used by the system on PPC EABI */ for (i = 14; i < top; ++i) { /* * Reserve r29 for holding the vtable address for virtual calls in AOT mode, * since the trampolines can clobber r12. 
if (!(cfg->compile_aot && i == 29)) regs = g_list_prepend (regs, GUINT_TO_POINTER (i)); } return regs; } /* * mono_arch_regalloc_cost: * * Return the cost, in number of memory references, of the action of * allocating the variable VMV into a register during global register * allocation. */ guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv) { /* FIXME: */ return 2; } void mono_arch_flush_icache (guint8 *code, gint size) { #ifdef MONO_CROSS_COMPILE /* do nothing */ #else register guint8 *p; guint8 *endp, *start; p = start = code; endp = p + size; start = (guint8*)((gsize)start & ~(cachelinesize - 1)); /* use dcbf for smp support, later optimize for UP, see pem._64bit.d20030611.pdf page 211 */ #if defined(G_COMPILER_CODEWARRIOR) if (cpu_hw_caps & PPC_SMP_CAPABLE) { for (p = start; p < endp; p += cachelineinc) { asm { dcbf 0, p }; } } else { for (p = start; p < endp; p += cachelineinc) { asm { dcbst 0, p }; } } asm { sync }; p = code; for (p = start; p < endp; p += cachelineinc) { asm { icbi 0, p sync } } asm { sync isync } #else /* For POWER5/6 with ICACHE_SNOOPing only one icbi in the range is required. * The sync is required to ensure that the store queue is completely empty. * While the icbi performs no cache operations, icbi/isync is required to * kill local prefetch. */ if (cpu_hw_caps & PPC_ICACHE_SNOOP) { asm ("sync"); asm ("icbi 0,%0;" : : "r"(code) : "memory"); asm ("isync"); return; } /* use dcbf for smp support, see pem._64bit.d20030611.pdf page 211 */ if (cpu_hw_caps & PPC_SMP_CAPABLE) { for (p = start; p < endp; p += cachelineinc) { asm ("dcbf 0,%0;" : : "r"(p) : "memory"); } } else { for (p = start; p < endp; p += cachelineinc) { asm ("dcbst 0,%0;" : : "r"(p) : "memory"); } } asm ("sync"); p = code; for (p = start; p < endp; p += cachelineinc) { /* for ISA2.0+ implementations we should not need any extra sync between the * icbi instructions. Both the 2.0 PEM and the PowerISA-2.05 say this. * So I am not sure which chip had this problem, but it's not an issue on * any of the ISA V2 chips. */ if (cpu_hw_caps & PPC_ISA_2X) asm ("icbi 0,%0;" : : "r"(p) : "memory"); else asm ("icbi 0,%0; sync;" : : "r"(p) : "memory"); } if (!(cpu_hw_caps & PPC_ISA_2X)) asm ("sync"); asm ("isync"); #endif #endif } void mono_arch_flush_register_windows (void) { } #ifdef __APPLE__ #define ALWAYS_ON_STACK(s) s #define FP_ALSO_IN_REG(s) s #else #ifdef __mono_ppc64__ #define ALWAYS_ON_STACK(s) s #define FP_ALSO_IN_REG(s) s #else #define ALWAYS_ON_STACK(s) #define FP_ALSO_IN_REG(s) #endif #define ALIGN_DOUBLES #endif enum { RegTypeGeneral, RegTypeBase, RegTypeFP, RegTypeStructByVal, RegTypeStructByAddr, RegTypeFPStructByVal, // For the v2 ABI, floats should be passed in FRs instead of GRs. Only valid for ABI v2!
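/* For the two *StructByVal kinds, the ArgInfo below records the split:
 * vtregs counts the registers used and vtsize the part that overflows
 * into the stack parameter area; RegTypeStructByAddr instead passes
 * the address of a temporary copy. */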
}; typedef struct { gint32 offset; guint32 vtsize; /* in param area */ guint8 reg; guint8 vtregs; /* number of registers used to pass a RegTypeStructByVal/RegTypeFPStructByVal */ guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */ guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal/RegTypeFPStructByVal */ guint8 bytes : 4; /* size in bytes - only valid for RegTypeStructByVal/RegTypeFPStructByVal if the struct fits in one word, otherwise it's 0*/ } ArgInfo; struct CallInfo { int nargs; guint32 stack_usage; guint32 struct_ret; ArgInfo ret; ArgInfo sig_cookie; gboolean vtype_retaddr; int vret_arg_index; ArgInfo args [1]; }; #define DEBUG(a) #if PPC_RETURN_SMALL_FLOAT_STRUCTS_IN_FR_REGS // // Test if a structure is completely composed of either float XOR double fields and has fewer than // PPC_MOST_FLOAT_STRUCT_MEMBERS_TO_RETURN_VIA_REGISTERS members. // If this is true the structure can be returned directly via float registers instead of by a hidden parameter // pointing to where the return value should be stored. // This is as per the ELF ABI v2. // static gboolean is_float_struct_returnable_via_regs (MonoType *type, int* member_cnt, int* member_size) { int local_member_cnt, local_member_size; if (!member_cnt) { member_cnt = &local_member_cnt; } if (!member_size) { member_size = &local_member_size; } gboolean is_all_floats = mini_type_is_hfa(type, member_cnt, member_size); return is_all_floats && (*member_cnt <= PPC_MOST_FLOAT_STRUCT_MEMBERS_TO_RETURN_VIA_REGISTERS); } #else #define is_float_struct_returnable_via_regs(a,b,c) (FALSE) #endif #if PPC_RETURN_SMALL_STRUCTS_IN_REGS // // Test if a structure is smaller in size than 2 doublewords (PPC_LARGEST_STRUCT_SIZE_TO_RETURN_VIA_REGISTERS) and is // completely composed of fields all of basic types. // If this is true the structure can be returned directly via registers r3/r4 instead of by a hidden parameter // pointing to where the return value should be stored. // This is as per the ELF ABI v2. // static gboolean is_struct_returnable_via_regs (MonoClass *klass, gboolean is_pinvoke) { gboolean has_a_field = FALSE; int size = 0; if (klass) { gpointer iter = NULL; MonoClassField *f; if (is_pinvoke) size = mono_type_native_stack_size (m_class_get_byval_arg (klass), 0); else size = mini_type_stack_size (m_class_get_byval_arg (klass), 0); if (size == 0) return TRUE; if (size > PPC_LARGEST_STRUCT_SIZE_TO_RETURN_VIA_REGISTERS) return FALSE; while ((f = mono_class_get_fields_internal (klass, &iter))) { if (!(f->type->attrs & FIELD_ATTRIBUTE_STATIC)) { // TBD: Is there a better way to check for the basic types?
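/* The range check below relies on the ECMA-335 element-type encoding
 * being contiguous: MONO_TYPE_BOOLEAN (0x02) through MONO_TYPE_R8 (0x0d)
 * covers bool, char, i1/u1, i2/u2, i4/u4, i8/u8, r4 and r8. */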
if (m_type_is_byref (f->type)) { return FALSE; } else if ((f->type->type >= MONO_TYPE_BOOLEAN) && (f->type->type <= MONO_TYPE_R8)) { has_a_field = TRUE; } else if (MONO_TYPE_ISSTRUCT (f->type)) { MonoClass *klass = mono_class_from_mono_type_internal (f->type); if (is_struct_returnable_via_regs(klass, is_pinvoke)) { has_a_field = TRUE; } else { return FALSE; } } else { return FALSE; } } } } return has_a_field; } #else #define is_struct_returnable_via_regs(a,b) (FALSE) #endif static void inline add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple) { #ifdef __mono_ppc64__ g_assert (simple); #endif if (simple) { if (*gr >= 3 + PPC_NUM_REG_ARGS) { ainfo->offset = PPC_STACK_PARAM_OFFSET + *stack_size; ainfo->reg = ppc_sp; /* in the caller */ ainfo->regtype = RegTypeBase; *stack_size += sizeof (target_mgreg_t); } else { ALWAYS_ON_STACK (*stack_size += sizeof (target_mgreg_t)); ainfo->reg = *gr; } } else { if (*gr >= 3 + PPC_NUM_REG_ARGS - 1) { #ifdef ALIGN_DOUBLES //*stack_size += (*stack_size % 8); #endif ainfo->offset = PPC_STACK_PARAM_OFFSET + *stack_size; ainfo->reg = ppc_sp; /* in the caller */ ainfo->regtype = RegTypeBase; *stack_size += 8; } else { #ifdef ALIGN_DOUBLES if (!((*gr) & 1)) (*gr) ++; #endif ALWAYS_ON_STACK (*stack_size += 8); ainfo->reg = *gr; } (*gr) ++; } (*gr) ++; } #if defined(__APPLE__) || (defined(__mono_ppc64__) && !PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS) static gboolean has_only_a_r48_field (MonoClass *klass) { gpointer iter; MonoClassField *f; gboolean have_field = FALSE; iter = NULL; while ((f = mono_class_get_fields_internal (klass, &iter))) { if (!(f->type->attrs & FIELD_ATTRIBUTE_STATIC)) { if (have_field) return FALSE; if (!m_type_is_byref (f->type) && (f->type->type == MONO_TYPE_R4 || f->type->type == MONO_TYPE_R8)) have_field = TRUE; else return FALSE; } } return have_field; } #endif static CallInfo* get_call_info (MonoMethodSignature *sig) { guint i, fr, gr, pstart; int n = sig->hasthis + sig->param_count; MonoType *simpletype; guint32 stack_size = 0; CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n); gboolean is_pinvoke = sig->pinvoke; fr = PPC_FIRST_FPARG_REG; gr = PPC_FIRST_ARG_REG; if (mini_type_is_vtype (sig->ret)) { cinfo->vtype_retaddr = TRUE; } pstart = 0; n = 0; /* * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after * the first argument, allowing 'this' to be always passed in the first arg reg. * Also do this if the first argument is a reference type, since virtual calls * are sometimes made using calli without sig->hasthis set, like in the delegate * invoke wrappers. 
*/ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) { if (sig->hasthis) { add_general (&gr, &stack_size, cinfo->args + 0, TRUE); n ++; } else { add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE); pstart = 1; n ++; } add_general (&gr, &stack_size, &cinfo->ret, TRUE); cinfo->struct_ret = cinfo->ret.reg; cinfo->vret_arg_index = 1; } else { /* this */ if (sig->hasthis) { add_general (&gr, &stack_size, cinfo->args + 0, TRUE); n ++; } if (cinfo->vtype_retaddr) { add_general (&gr, &stack_size, &cinfo->ret, TRUE); cinfo->struct_ret = cinfo->ret.reg; } } DEBUG(printf("params: %d\n", sig->param_count)); for (i = pstart; i < sig->param_count; ++i) { if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ gr = PPC_LAST_ARG_REG + 1; /* FIXME: don't we have to set fr, too? */ /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE); } DEBUG(printf("param %d: ", i)); if (m_type_is_byref (sig->params [i])) { DEBUG(printf("byref\n")); add_general (&gr, &stack_size, cinfo->args + n, TRUE); n++; continue; } simpletype = mini_get_underlying_type (sig->params [i]); switch (simpletype->type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: cinfo->args [n].size = 1; add_general (&gr, &stack_size, cinfo->args + n, TRUE); n++; break; case MONO_TYPE_CHAR: case MONO_TYPE_I2: case MONO_TYPE_U2: cinfo->args [n].size = 2; add_general (&gr, &stack_size, cinfo->args + n, TRUE); n++; break; case MONO_TYPE_I4: case MONO_TYPE_U4: cinfo->args [n].size = 4; add_general (&gr, &stack_size, cinfo->args + n, TRUE); n++; break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: cinfo->args [n].size = sizeof (target_mgreg_t); add_general (&gr, &stack_size, cinfo->args + n, TRUE); n++; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (simpletype)) { cinfo->args [n].size = sizeof (target_mgreg_t); add_general (&gr, &stack_size, cinfo->args + n, TRUE); n++; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: { gint size; MonoClass *klass = mono_class_from_mono_type_internal (sig->params [i]); if (simpletype->type == MONO_TYPE_TYPEDBYREF) size = MONO_ABI_SIZEOF (MonoTypedRef); else if (sig->pinvoke && !sig->marshalling_disabled) size = mono_class_native_size (klass, NULL); else size = mono_class_value_size (klass, NULL); #if defined(__APPLE__) || (defined(__mono_ppc64__) && !PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS) if ((size == 4 || size == 8) && has_only_a_r48_field (klass)) { cinfo->args [n].size = size; /* It was 7, now it is 8 in LinuxPPC */ if (fr <= PPC_LAST_FPARG_REG) { cinfo->args [n].regtype = RegTypeFP; cinfo->args [n].reg = fr; fr ++; FP_ALSO_IN_REG (gr ++); #if !defined(__mono_ppc64__) if (size == 8) FP_ALSO_IN_REG (gr ++); #endif ALWAYS_ON_STACK (stack_size += size); } else { cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size; cinfo->args [n].regtype = RegTypeBase; cinfo->args [n].reg = ppc_sp; /* in the caller*/ stack_size += 8; } n++; break; } #endif DEBUG(printf ("load %d bytes struct\n", mono_class_native_size (sig->params [i]->data.klass, NULL))); #if PPC_PASS_STRUCTS_BY_VALUE { int align_size = 
size; int nregs = 0; int rest = PPC_LAST_ARG_REG - gr + 1; int n_in_regs = 0; #if PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS int mbr_cnt = 0; int mbr_size = 0; gboolean is_all_floats = is_float_struct_returnable_via_regs (sig->params [i], &mbr_cnt, &mbr_size); if (is_all_floats) { rest = PPC_LAST_FPARG_REG - fr + 1; } // Pass small (<= 8 member) structures entirely made up of either float or double members // in FR registers. There have to be at least mbr_cnt registers left. if (is_all_floats && (rest >= mbr_cnt)) { nregs = mbr_cnt; n_in_regs = MIN (rest, nregs); cinfo->args [n].regtype = RegTypeFPStructByVal; cinfo->args [n].vtregs = n_in_regs; cinfo->args [n].size = mbr_size; cinfo->args [n].vtsize = nregs - n_in_regs; cinfo->args [n].reg = fr; fr += n_in_regs; if (mbr_size == 4) { // floats FP_ALSO_IN_REG (gr += (n_in_regs+1)/2); } else { // doubles FP_ALSO_IN_REG (gr += (n_in_regs)); } } else #endif { align_size += (sizeof (target_mgreg_t) - 1); align_size &= ~(sizeof (target_mgreg_t) - 1); nregs = (align_size + sizeof (target_mgreg_t) -1 ) / sizeof (target_mgreg_t); n_in_regs = MIN (rest, nregs); if (n_in_regs < 0) n_in_regs = 0; #ifdef __APPLE__ /* FIXME: check this */ if (size >= 3 && size % 4 != 0) n_in_regs = 0; #endif cinfo->args [n].regtype = RegTypeStructByVal; cinfo->args [n].vtregs = n_in_regs; cinfo->args [n].size = n_in_regs; cinfo->args [n].vtsize = nregs - n_in_regs; cinfo->args [n].reg = gr; gr += n_in_regs; } #ifdef __mono_ppc64__ if (nregs == 1 && is_pinvoke) cinfo->args [n].bytes = size; else #endif cinfo->args [n].bytes = 0; cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size; /*g_print ("offset for arg %d at %d\n", n, PPC_STACK_PARAM_OFFSET + stack_size);*/ stack_size += nregs * sizeof (target_mgreg_t); } #else add_general (&gr, &stack_size, cinfo->args + n, TRUE); cinfo->args [n].regtype = RegTypeStructByAddr; cinfo->args [n].vtsize = size; #endif n++; break; } case MONO_TYPE_U8: case MONO_TYPE_I8: cinfo->args [n].size = 8; add_general (&gr, &stack_size, cinfo->args + n, SIZEOF_REGISTER == 8); n++; break; case MONO_TYPE_R4: cinfo->args [n].size = 4; /* It was 7, now it is 8 in LinuxPPC */ if (fr <= PPC_LAST_FPARG_REG // For non-native vararg calls the parms must go in storage && !(!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) ) { cinfo->args [n].regtype = RegTypeFP; cinfo->args [n].reg = fr; fr ++; FP_ALSO_IN_REG (gr ++); ALWAYS_ON_STACK (stack_size += SIZEOF_REGISTER); } else { cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size + MONO_PPC_32_64_CASE (0, 4); cinfo->args [n].regtype = RegTypeBase; cinfo->args [n].reg = ppc_sp; /* in the caller*/ stack_size += SIZEOF_REGISTER; } n++; break; case MONO_TYPE_R8: cinfo->args [n].size = 8; /* It was 7, now it is 8 in LinuxPPC */ if (fr <= PPC_LAST_FPARG_REG // For non-native vararg calls the parms must go in storage && !(!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) ) { cinfo->args [n].regtype = RegTypeFP; cinfo->args [n].reg = fr; fr ++; FP_ALSO_IN_REG (gr += sizeof (double) / SIZEOF_REGISTER); ALWAYS_ON_STACK (stack_size += 8); } else { cinfo->args [n].offset = PPC_STACK_PARAM_OFFSET + stack_size; cinfo->args [n].regtype = RegTypeBase; cinfo->args [n].reg = ppc_sp; /* in the caller*/ stack_size += 8; } n++; break; default: g_error ("Can't trampoline 0x%x", sig->params [i]->type); } } cinfo->nargs = n; if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Prevent implicit arguments and sig_cookie from being passed in registers */ gr 
= PPC_LAST_ARG_REG + 1; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE); } { simpletype = mini_get_underlying_type (sig->ret); switch (simpletype->type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: case MONO_TYPE_STRING: cinfo->ret.reg = ppc_r3; break; case MONO_TYPE_U8: case MONO_TYPE_I8: cinfo->ret.reg = ppc_r3; break; case MONO_TYPE_R4: case MONO_TYPE_R8: cinfo->ret.reg = ppc_f1; cinfo->ret.regtype = RegTypeFP; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (simpletype)) { cinfo->ret.reg = ppc_r3; break; } break; case MONO_TYPE_VALUETYPE: break; case MONO_TYPE_TYPEDBYREF: case MONO_TYPE_VOID: break; default: g_error ("Can't handle as return value 0x%x", sig->ret->type); } } /* align stack size to 16 */ DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size)); stack_size = (stack_size + 15) & ~15; cinfo->stack_usage = stack_size; return cinfo; } #ifndef DISABLE_JIT gboolean mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_) { CallInfo *caller_info = get_call_info (caller_sig); CallInfo *callee_info = get_call_info (callee_sig); gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage) && IS_SUPPORTED_TAILCALL (memcmp (&callee_info->ret, &caller_info->ret, sizeof (caller_info->ret)) == 0); // FIXME ABIs vary as to if this local is in the parameter area or not, // so this check might not be needed. for (int i = 0; res && i < callee_info->nargs; ++i) { res = IS_SUPPORTED_TAILCALL (callee_info->args [i].regtype != RegTypeStructByAddr); /* An address on the callee's stack is passed as the argument */ } g_free (caller_info); g_free (callee_info); return res; } #endif /* * Set var information according to the calling convention. ppc version. * The locals var stuff should most likely be split in another method. */ void mono_arch_allocate_vars (MonoCompile *m) { MonoMethodSignature *sig; MonoMethodHeader *header; MonoInst *inst; int i, offset, size, align, curinst; int frame_reg = ppc_sp; gint32 *offsets; guint32 locals_stack_size, locals_stack_align; m->flags |= MONO_CFG_HAS_SPILLUP; /* this is bug #60332: remove when #59509 is fixed, so no weird vararg * call convs needs to be handled this way. */ if (m->flags & MONO_CFG_HAS_VARARGS) m->param_area = MAX (m->param_area, sizeof (target_mgreg_t)*8); /* gtk-sharp and other broken code will dllimport vararg functions even with * non-varargs signatures. Since there is little hope people will get this right * we assume they won't. */ if (m->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) m->param_area = MAX (m->param_area, sizeof (target_mgreg_t)*8); header = m->header; /* * We use the frame register also for any method that has * exception clauses. This way, when the handlers are called, * the code will reference local variables using the frame reg instead of * the stack pointer: if we had to restore the stack pointer, we'd * corrupt the method frames that are already on the stack (since * filters get called before stack unwinding happens) when the filter * code would call any method (this also applies to finally etc.). 
*/ if ((m->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses) frame_reg = ppc_r31; m->frame_reg = frame_reg; if (frame_reg != ppc_sp) { m->used_int_regs |= 1 << frame_reg; } sig = mono_method_signature_internal (m->method); offset = 0; curinst = 0; if (MONO_TYPE_ISSTRUCT (sig->ret)) { m->ret->opcode = OP_REGVAR; m->ret->inst_c0 = m->ret->dreg = ppc_r3; } else { /* FIXME: handle long values? */ switch (mini_get_underlying_type (sig->ret)->type) { case MONO_TYPE_VOID: break; case MONO_TYPE_R4: case MONO_TYPE_R8: m->ret->opcode = OP_REGVAR; m->ret->inst_c0 = m->ret->dreg = ppc_f1; break; default: m->ret->opcode = OP_REGVAR; m->ret->inst_c0 = m->ret->dreg = ppc_r3; break; } } /* local vars are at a positive offset from the stack pointer */ /* * also note that if the function uses alloca, we use ppc_r31 * to point at the local variables. */ offset = PPC_MINIMAL_STACK_SIZE; /* linkage area */ /* align the offset to 16 bytes: not sure this is needed here */ //offset += 16 - 1; //offset &= ~(16 - 1); /* add parameter area size for called functions */ offset += m->param_area; offset += 16 - 1; offset &= ~(16 - 1); /* the MonoLMF structure is stored just below the stack pointer */ if (MONO_TYPE_ISSTRUCT (sig->ret)) { offset += sizeof(gpointer) - 1; offset &= ~(sizeof(gpointer) - 1); m->vret_addr->opcode = OP_REGOFFSET; m->vret_addr->inst_basereg = frame_reg; m->vret_addr->inst_offset = offset; if (G_UNLIKELY (m->verbose_level > 1)) { printf ("vret_addr ="); mono_print_ins (m->vret_addr); } offset += sizeof(gpointer); } offsets = mono_allocate_stack_slots (m, FALSE, &locals_stack_size, &locals_stack_align); if (locals_stack_align) { offset += (locals_stack_align - 1); offset &= ~(locals_stack_align - 1); } for (i = m->locals_start; i < m->num_varinfo; i++) { if (offsets [i] != -1) { MonoInst *inst = m->varinfo [i]; inst->opcode = OP_REGOFFSET; inst->inst_basereg = frame_reg; inst->inst_offset = offset + offsets [i]; /* g_print ("allocating local %d (%s) to %d\n", i, mono_type_get_name (inst->inst_vtype), inst->inst_offset); */ } } offset += locals_stack_size; curinst = 0; if (sig->hasthis) { inst = m->args [curinst]; if (inst->opcode != OP_REGVAR) { inst->opcode = OP_REGOFFSET; inst->inst_basereg = frame_reg; offset += sizeof (target_mgreg_t) - 1; offset &= ~(sizeof (target_mgreg_t) - 1); inst->inst_offset = offset; offset += sizeof (target_mgreg_t); } curinst++; } for (i = 0; i < sig->param_count; ++i) { inst = m->args [curinst]; if (inst->opcode != OP_REGVAR) { inst->opcode = OP_REGOFFSET; inst->inst_basereg = frame_reg; if (sig->pinvoke && !sig->marshalling_disabled) { size = mono_type_native_stack_size (sig->params [i], (guint32*)&align); inst->backend.is_pinvoke = 1; } else { size = mono_type_size (sig->params [i], &align); } if (MONO_TYPE_ISSTRUCT (sig->params [i]) && size < sizeof (target_mgreg_t)) size = align = sizeof (target_mgreg_t); /* * Use at least 4/8 byte alignment, since these might be passed in registers, and * they are saved using std in the prolog. */ align = sizeof (target_mgreg_t); offset += align - 1; offset &= ~(align - 1); inst->inst_offset = offset; offset += size; } curinst++; } /* some storage for fp conversions */ offset += 8 - 1; offset &= ~(8 - 1); m->arch.fp_conv_var_offset = offset; offset += 8; /* align the offset to 16 bytes */ offset += 16 - 1; offset &= ~(16 - 1); /* change sign? 
*/ m->stack_offset = offset; if (sig->call_convention == MONO_CALL_VARARG) { CallInfo *cinfo = get_call_info (m->method->signature); m->sig_cookie = cinfo->sig_cookie.offset; g_free(cinfo); } } void mono_arch_create_vars (MonoCompile *cfg) { MonoMethodSignature *sig = mono_method_signature_internal (cfg->method); if (MONO_TYPE_ISSTRUCT (sig->ret)) { cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG); } } /* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode, * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info */ static void emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo) { int sig_reg = mono_alloc_ireg (cfg); /* FIXME: Add support for signature tokens to AOT */ cfg->disable_aot = TRUE; MONO_EMIT_NEW_ICONST (cfg, sig_reg, (gulong)call->signature); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, cinfo->sig_cookie.offset, sig_reg); } void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call) { MonoInst *in, *ins; MonoMethodSignature *sig; int i, n; CallInfo *cinfo; sig = call->signature; n = sig->param_count + sig->hasthis; cinfo = get_call_info (sig); for (i = 0; i < n; ++i) { ArgInfo *ainfo = cinfo->args + i; MonoType *t; if (i >= sig->hasthis) t = sig->params [i - sig->hasthis]; else t = mono_get_int_type (); t = mini_get_underlying_type (t); if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) emit_sig_cookie (cfg, call, cinfo); in = call->args [i]; if (ainfo->regtype == RegTypeGeneral) { #ifndef __mono_ppc64__ if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) { MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = MONO_LVREG_LS (in->dreg); MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE); MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = MONO_LVREG_MS (in->dreg); MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE); } else #endif { MONO_INST_NEW (cfg, ins, OP_MOVE); ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE); } } else if (ainfo->regtype == RegTypeStructByAddr) { MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->opcode = OP_OUTARG_VT; ins->sreg1 = in->dreg; ins->klass = in->klass; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); } else if (ainfo->regtype == RegTypeStructByVal) { /* this is further handled in mono_arch_emit_outarg_vt () */ MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->opcode = OP_OUTARG_VT; ins->sreg1 = in->dreg; ins->klass = in->klass; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); } else if (ainfo->regtype == RegTypeFPStructByVal) { /* this is further handled in mono_arch_emit_outarg_vt () */ MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->opcode = OP_OUTARG_VT; ins->sreg1 = in->dreg; ins->klass = in->klass; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); cfg->flags |= MONO_CFG_HAS_FPOUT; } else if (ainfo->regtype == RegTypeBase) { if 
(!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg); } else if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) { if (t->type == MONO_TYPE_R8) MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg); else MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg); } else { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg); } } else if (ainfo->regtype == RegTypeFP) { if (t->type == MONO_TYPE_VALUETYPE) { /* this is further handled in mono_arch_emit_outarg_vt () */ MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->opcode = OP_OUTARG_VT; ins->sreg1 = in->dreg; ins->klass = in->klass; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); cfg->flags |= MONO_CFG_HAS_FPOUT; } else { int dreg = mono_alloc_freg (cfg); if (ainfo->size == 4) { MONO_EMIT_NEW_UNALU (cfg, OP_FCONV_TO_R4, dreg, in->dreg); } else { MONO_INST_NEW (cfg, ins, OP_FMOVE); ins->dreg = dreg; ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); } mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE); cfg->flags |= MONO_CFG_HAS_FPOUT; } } else { g_assert_not_reached (); } } /* Emit the signature cookie in the case that there is no additional argument */ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) emit_sig_cookie (cfg, call, cinfo); if (cinfo->struct_ret) { MonoInst *vtarg; MONO_INST_NEW (cfg, vtarg, OP_MOVE); vtarg->sreg1 = call->vret_var->dreg; vtarg->dreg = mono_alloc_preg (cfg); MONO_ADD_INS (cfg->cbb, vtarg); mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->struct_ret, FALSE); } call->stack_usage = cinfo->stack_usage; cfg->param_area = MAX (PPC_MINIMAL_PARAM_AREA_SIZE, MAX (cfg->param_area, cinfo->stack_usage)); cfg->flags |= MONO_CFG_HAS_CALLS; g_free (cinfo); } #ifndef DISABLE_JIT void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src) { MonoCallInst *call = (MonoCallInst*)ins->inst_p0; ArgInfo *ainfo = (ArgInfo*)ins->inst_p1; int ovf_size = ainfo->vtsize; int doffset = ainfo->offset; int i, soffset, dreg; if (ainfo->regtype == RegTypeStructByVal) { #ifdef __APPLE__ guint32 size = 0; #endif soffset = 0; #ifdef __APPLE__ /* * Darwin pinvokes needs some special handling for 1 * and 2 byte arguments */ g_assert (ins->klass); if (call->signature->pinvoke && !call->signature->marshalling_disabled) size = mono_class_native_size (ins->klass, NULL); if (size == 2 || size == 1) { int tmpr = mono_alloc_ireg (cfg); if (size == 1) MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmpr, src->dreg, soffset); else MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmpr, src->dreg, soffset); dreg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, dreg, tmpr); mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, FALSE); } else #endif for (i = 0; i < ainfo->vtregs; ++i) { dreg = mono_alloc_ireg (cfg); #if G_BYTE_ORDER == G_BIG_ENDIAN int antipadding = 0; if (ainfo->bytes) { g_assert (i == 0); antipadding = sizeof (target_mgreg_t) - ainfo->bytes; } MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset); if (antipadding) MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, dreg, dreg, antipadding * 8); #else MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 
soffset); #endif mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE); soffset += sizeof (target_mgreg_t); } if (ovf_size != 0) mini_emit_memcpy (cfg, ppc_r1, doffset + soffset, src->dreg, soffset, ovf_size * sizeof (target_mgreg_t), TARGET_SIZEOF_VOID_P); } else if (ainfo->regtype == RegTypeFPStructByVal) { soffset = 0; for (i = 0; i < ainfo->vtregs; ++i) { int tmpr = mono_alloc_freg (cfg); if (ainfo->size == 4) MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, tmpr, src->dreg, soffset); else // ==8 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmpr, src->dreg, soffset); dreg = mono_alloc_freg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, dreg, tmpr); mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg+i, TRUE); soffset += ainfo->size; } if (ovf_size != 0) mini_emit_memcpy (cfg, ppc_r1, doffset + soffset, src->dreg, soffset, ovf_size * sizeof (target_mgreg_t), TARGET_SIZEOF_VOID_P); } else if (ainfo->regtype == RegTypeFP) { int tmpr = mono_alloc_freg (cfg); if (ainfo->size == 4) MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, tmpr, src->dreg, 0); else MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmpr, src->dreg, 0); dreg = mono_alloc_freg (cfg); MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, dreg, tmpr); mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE); } else { MonoInst *vtcopy = mono_compile_create_var (cfg, m_class_get_byval_arg (src->klass), OP_LOCAL); MonoInst *load; guint32 size; /* FIXME: alignment? */ if (call->signature->pinvoke && !call->signature->marshalling_disabled) { size = mono_type_native_stack_size (m_class_get_byval_arg (src->klass), NULL); vtcopy->backend.is_pinvoke = 1; } else { size = mini_type_stack_size (m_class_get_byval_arg (src->klass), NULL); } if (size > 0) g_assert (ovf_size > 0); EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype); mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, TARGET_SIZEOF_VOID_P); if (ainfo->offset) MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, ainfo->offset, load->dreg); else mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg, FALSE); } } void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val) { MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret); if (!m_type_is_byref (ret)) { #ifndef __mono_ppc64__ if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) { MonoInst *ins; MONO_INST_NEW (cfg, ins, OP_SETLRET); ins->sreg1 = MONO_LVREG_LS (val->dreg); ins->sreg2 = MONO_LVREG_MS (val->dreg); MONO_ADD_INS (cfg->cbb, ins); return; } #endif if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) { MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg); return; } } MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg); } gboolean mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm) { return TRUE; } #endif /* DISABLE_JIT */ /* * Conditional branches have a small offset, so if it is likely overflowed, * we do a branch to the end of the method (uncond branches have much larger * offsets) where we perform the conditional and jump back unconditionally. * It's slightly slower, since we add two uncond branches, but it's very simple * with the current patch implementation and such large methods are likely not * going to be perf critical anyway.
 */
typedef struct {
	union {
		MonoBasicBlock *bb;
		const char *exception;
	} data;
	guint32 ip_offset;
	guint16 b0_cond;
	guint16 b1_cond;
} MonoOvfJump;

#define EMIT_COND_BRANCH_FLAGS(ins,b0,b1) \
if (0 && ins->inst_true_bb->native_offset) { \
	ppc_bc (code, (b0), (b1), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffff); \
} else { \
	int br_disp = ins->inst_true_bb->max_offset - offset; \
	if (!ppc_is_imm16 (br_disp + 8 * 1024) || !ppc_is_imm16 (br_disp - 8 * 1024)) { \
		MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
		ovfj->data.bb = ins->inst_true_bb; \
		ovfj->ip_offset = 0; \
		ovfj->b0_cond = (b0); \
		ovfj->b1_cond = (b1); \
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB_OVF, ovfj); \
		ppc_b (code, 0); \
	} else { \
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
		ppc_bc (code, (b0), (b1), 0); \
	} \
}

#define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_b0_table [(cond)], branch_b1_table [(cond)])

/* emit an exception if condition is fail
 *
 * We assign the extra code used to throw the implicit exceptions
 * to cfg->bb_exit as far as the big branch handling is concerned
 */
#define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(b0,b1,exc_name) \
do { \
	int br_disp = cfg->bb_exit->max_offset - offset; \
	if (!ppc_is_imm16 (br_disp + 1024) || !ppc_is_imm16 (br_disp - 1024)) { \
		MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
		ovfj->data.exception = (exc_name); \
		ovfj->ip_offset = code - cfg->native_code; \
		ovfj->b0_cond = (b0); \
		ovfj->b1_cond = (b1); \
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj); \
		ppc_bl (code, 0); \
		cfg->bb_exit->max_offset += 24; \
	} else { \
		mono_add_patch_info (cfg, code - cfg->native_code, \
				    MONO_PATCH_INFO_EXC, exc_name); \
		ppc_bcl (code, (b0), (b1), 0); \
	} \
} while (0);

#define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_b0_table [(cond)], branch_b1_table [(cond)], (exc_name))

void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}

static int
normalize_opcode (int opcode)
{
	switch (opcode) {
#ifndef MONO_ARCH_ILP32
	case MONO_PPC_32_64_CASE (OP_LOADI4_MEMBASE, OP_LOADI8_MEMBASE):
		return OP_LOAD_MEMBASE;
	case MONO_PPC_32_64_CASE (OP_LOADI4_MEMINDEX, OP_LOADI8_MEMINDEX):
		return OP_LOAD_MEMINDEX;
	case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_REG, OP_STOREI8_MEMBASE_REG):
		return OP_STORE_MEMBASE_REG;
	case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_IMM, OP_STOREI8_MEMBASE_IMM):
		return OP_STORE_MEMBASE_IMM;
	case MONO_PPC_32_64_CASE (OP_STOREI4_MEMINDEX, OP_STOREI8_MEMINDEX):
		return OP_STORE_MEMINDEX;
#endif
	case MONO_PPC_32_64_CASE (OP_ISHR_IMM, OP_LSHR_IMM):
		return OP_SHR_IMM;
	case MONO_PPC_32_64_CASE (OP_ISHR_UN_IMM, OP_LSHR_UN_IMM):
		return OP_SHR_UN_IMM;
	default:
		return opcode;
	}
}

void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n, *last_ins = NULL;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (normalize_opcode (ins->opcode)) {
		case OP_MUL_IMM:
			/* remove unnecessary multiplication with 1 */
			if (ins->inst_imm == 1) {
				if (ins->dreg != ins->sreg1) {
					ins->opcode = OP_MOVE;
				} else {
					MONO_DELETE_INS (bb, ins);
					continue;
				}
			} else if (ins->inst_imm > 0) {
				int power2 = mono_is_power_of_two (ins->inst_imm);
				if (power2 > 0) {
					ins->opcode = OP_SHL_IMM;
					ins->inst_imm = power2;
				}
			}
			break;
		case OP_LOAD_MEMBASE:
			/*
			 * OP_STORE_MEMBASE_REG reg, offset(basereg)
			 * OP_LOAD_MEMBASE offset(basereg), reg
			 */
			if
(last_ins && normalize_opcode (last_ins->opcode) == OP_STORE_MEMBASE_REG && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { if (ins->dreg == last_ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } else { //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++); ins->opcode = OP_MOVE; ins->sreg1 = last_ins->sreg1; } /* * Note: reg1 must be different from the basereg in the second load * OP_LOAD_MEMBASE offset(basereg), reg1 * OP_LOAD_MEMBASE offset(basereg), reg2 * --> * OP_LOAD_MEMBASE offset(basereg), reg1 * OP_MOVE reg1, reg2 */ } else if (last_ins && normalize_opcode (last_ins->opcode) == OP_LOAD_MEMBASE && ins->inst_basereg != last_ins->dreg && ins->inst_basereg == last_ins->inst_basereg && ins->inst_offset == last_ins->inst_offset) { if (ins->dreg == last_ins->dreg) { MONO_DELETE_INS (bb, ins); continue; } else { ins->opcode = OP_MOVE; ins->sreg1 = last_ins->dreg; } //g_assert_not_reached (); #if 0 /* * OP_STORE_MEMBASE_IMM imm, offset(basereg) * OP_LOAD_MEMBASE offset(basereg), reg * --> * OP_STORE_MEMBASE_IMM imm, offset(basereg) * OP_ICONST reg, imm */ } else if (last_ins && normalize_opcode (last_ins->opcode) == OP_STORE_MEMBASE_IMM && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++); ins->opcode = OP_ICONST; ins->inst_c0 = last_ins->inst_imm; g_assert_not_reached (); // check this rule #endif } break; case OP_LOADU1_MEMBASE: case OP_LOADI1_MEMBASE: if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1; ins->sreg1 = last_ins->sreg1; } break; case OP_LOADU2_MEMBASE: case OP_LOADI2_MEMBASE: if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2; ins->sreg1 = last_ins->sreg1; } break; #ifdef __mono_ppc64__ case OP_LOADU4_MEMBASE: case OP_LOADI4_MEMBASE: if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { ins->opcode = (ins->opcode == OP_LOADI4_MEMBASE) ? 
OP_ICONV_TO_I4 : OP_ICONV_TO_U4; ins->sreg1 = last_ins->sreg1; } break; #endif case OP_MOVE: ins->opcode = OP_MOVE; /* * OP_MOVE reg, reg */ if (ins->dreg == ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } /* * OP_MOVE sreg, dreg * OP_MOVE dreg, sreg */ if (last_ins && last_ins->opcode == OP_MOVE && ins->sreg1 == last_ins->dreg && ins->dreg == last_ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } break; } last_ins = ins; ins = ins->next; } bb->last_ins = last_ins; } void mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins) { switch (ins->opcode) { case OP_ICONV_TO_R_UN: { // This value is OK as-is for both big and little endian because of how it is stored static const guint64 adjust_val = 0x4330000000000000ULL; int msw_reg = mono_alloc_ireg (cfg); int adj_reg = mono_alloc_freg (cfg); int tmp_reg = mono_alloc_freg (cfg); int basereg = ppc_sp; int offset = -8; MONO_EMIT_NEW_ICONST (cfg, msw_reg, 0x43300000); if (!ppc_is_imm16 (offset + 4)) { basereg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset); } #if G_BYTE_ORDER == G_BIG_ENDIAN MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, msw_reg); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, ins->sreg1); #else // For little endian the words are reversed MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, msw_reg); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, ins->sreg1); #endif MONO_EMIT_NEW_LOAD_R8 (cfg, adj_reg, &adjust_val); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmp_reg, basereg, offset); MONO_EMIT_NEW_BIALU (cfg, OP_FSUB, ins->dreg, tmp_reg, adj_reg); ins->opcode = OP_NOP; break; } #ifndef __mono_ppc64__ case OP_ICONV_TO_R4: case OP_ICONV_TO_R8: { /* If we have a PPC_FEATURE_64 machine we can avoid this and use the fcfid instruction. Otherwise on an old 32-bit chip and we have to do this the hard way. 
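	   The "hard way" below uses the classic bias trick: store 0x43300000 in
	   the high word and (x ^ 0x80000000) in the low word, so the stack slot
	   holds the double 2^52 + 2^31 + x; subtracting the constant
	   2^52 + 2^31 (0x4330000080000000) then yields exactly (double)x.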
*/ if (!(cpu_hw_caps & PPC_ISA_64)) { /* FIXME: change precision for CEE_CONV_R4 */ static const guint64 adjust_val = 0x4330000080000000ULL; int msw_reg = mono_alloc_ireg (cfg); int xored = mono_alloc_ireg (cfg); int adj_reg = mono_alloc_freg (cfg); int tmp_reg = mono_alloc_freg (cfg); int basereg = ppc_sp; int offset = -8; if (!ppc_is_imm16 (offset + 4)) { basereg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset); } MONO_EMIT_NEW_ICONST (cfg, msw_reg, 0x43300000); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset, msw_reg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_XOR_IMM, xored, ins->sreg1, 0x80000000); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, basereg, offset + 4, xored); MONO_EMIT_NEW_LOAD_R8 (cfg, adj_reg, (gpointer)&adjust_val); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmp_reg, basereg, offset); MONO_EMIT_NEW_BIALU (cfg, OP_FSUB, ins->dreg, tmp_reg, adj_reg); if (ins->opcode == OP_ICONV_TO_R4) MONO_EMIT_NEW_UNALU (cfg, OP_FCONV_TO_R4, ins->dreg, ins->dreg); ins->opcode = OP_NOP; } break; } #endif case OP_CKFINITE: { int msw_reg = mono_alloc_ireg (cfg); int basereg = ppc_sp; int offset = -8; if (!ppc_is_imm16 (offset + 4)) { basereg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IADD_IMM, basereg, cfg->frame_reg, offset); } MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, basereg, offset, ins->sreg1); #if G_BYTE_ORDER == G_BIG_ENDIAN MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, msw_reg, basereg, offset); #else MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, msw_reg, basereg, offset+4); #endif MONO_EMIT_NEW_UNALU (cfg, OP_PPC_CHECK_FINITE, -1, msw_reg); MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1); ins->opcode = OP_NOP; break; } #ifdef __mono_ppc64__ case OP_IADD_OVF: case OP_IADD_OVF_UN: case OP_ISUB_OVF: { int shifted1_reg = mono_alloc_ireg (cfg); int shifted2_reg = mono_alloc_ireg (cfg); int result_shifted_reg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, shifted1_reg, ins->sreg1, 32); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, shifted2_reg, ins->sreg2, 32); MONO_EMIT_NEW_BIALU (cfg, ins->opcode, result_shifted_reg, shifted1_reg, shifted2_reg); if (ins->opcode == OP_IADD_OVF_UN) MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, ins->dreg, result_shifted_reg, 32); else MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, ins->dreg, result_shifted_reg, 32); ins->opcode = OP_NOP; break; } #endif default: break; } } void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins) { switch (ins->opcode) { case OP_LADD_OVF: /* ADC sets the condition code */ MONO_EMIT_NEW_BIALU (cfg, OP_ADDCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2)); MONO_EMIT_NEW_BIALU (cfg, OP_ADD_OVF_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS (ins->sreg2)); NULLIFY_INS (ins); break; case OP_LADD_OVF_UN: /* ADC sets the condition code */ MONO_EMIT_NEW_BIALU (cfg, OP_ADDCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2)); MONO_EMIT_NEW_BIALU (cfg, OP_ADD_OVF_UN_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS (ins->sreg2)); NULLIFY_INS (ins); break; case OP_LSUB_OVF: /* SBB sets the condition code */ MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2)); MONO_EMIT_NEW_BIALU (cfg, OP_SUB_OVF_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS 
(ins->sreg2)); NULLIFY_INS (ins); break; case OP_LSUB_OVF_UN: /* SBB sets the condition code */ MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), MONO_LVREG_LS (ins->sreg2)); MONO_EMIT_NEW_BIALU (cfg, OP_SUB_OVF_UN_CARRY, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), MONO_LVREG_MS (ins->sreg2)); NULLIFY_INS (ins); break; case OP_LNEG: /* From gcc generated code */ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PPC_SUBFIC, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), 0); MONO_EMIT_NEW_UNALU (cfg, OP_PPC_SUBFZE, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1)); NULLIFY_INS (ins); break; default: break; } } /* * the branch_b0_table should maintain the order of these * opcodes. case CEE_BEQ: case CEE_BGE: case CEE_BGT: case CEE_BLE: case CEE_BLT: case CEE_BNE_UN: case CEE_BGE_UN: case CEE_BGT_UN: case CEE_BLE_UN: case CEE_BLT_UN: */ static const guchar branch_b0_table [] = { PPC_BR_TRUE, PPC_BR_FALSE, PPC_BR_TRUE, PPC_BR_FALSE, PPC_BR_TRUE, PPC_BR_FALSE, PPC_BR_FALSE, PPC_BR_TRUE, PPC_BR_FALSE, PPC_BR_TRUE }; static const guchar branch_b1_table [] = { PPC_BR_EQ, PPC_BR_LT, PPC_BR_GT, PPC_BR_GT, PPC_BR_LT, PPC_BR_EQ, PPC_BR_LT, PPC_BR_GT, PPC_BR_GT, PPC_BR_LT }; #define NEW_INS(cfg,dest,op) do { \ MONO_INST_NEW((cfg), (dest), (op)); \ mono_bblock_insert_after_ins (bb, last_ins, (dest)); \ } while (0) static int map_to_reg_reg_op (int op) { switch (op) { case OP_ADD_IMM: return OP_IADD; case OP_SUB_IMM: return OP_ISUB; case OP_AND_IMM: return OP_IAND; case OP_COMPARE_IMM: return OP_COMPARE; case OP_ICOMPARE_IMM: return OP_ICOMPARE; case OP_LCOMPARE_IMM: return OP_LCOMPARE; case OP_ADDCC_IMM: return OP_IADDCC; case OP_ADC_IMM: return OP_IADC; case OP_SUBCC_IMM: return OP_ISUBCC; case OP_SBB_IMM: return OP_ISBB; case OP_OR_IMM: return OP_IOR; case OP_XOR_IMM: return OP_IXOR; case OP_MUL_IMM: return OP_IMUL; case OP_LMUL_IMM: return OP_LMUL; case OP_LOAD_MEMBASE: return OP_LOAD_MEMINDEX; case OP_LOADI4_MEMBASE: return OP_LOADI4_MEMINDEX; case OP_LOADU4_MEMBASE: return OP_LOADU4_MEMINDEX; case OP_LOADI8_MEMBASE: return OP_LOADI8_MEMINDEX; case OP_LOADU1_MEMBASE: return OP_LOADU1_MEMINDEX; case OP_LOADI2_MEMBASE: return OP_LOADI2_MEMINDEX; case OP_LOADU2_MEMBASE: return OP_LOADU2_MEMINDEX; case OP_LOADI1_MEMBASE: return OP_LOADI1_MEMINDEX; case OP_LOADR4_MEMBASE: return OP_LOADR4_MEMINDEX; case OP_LOADR8_MEMBASE: return OP_LOADR8_MEMINDEX; case OP_STOREI1_MEMBASE_REG: return OP_STOREI1_MEMINDEX; case OP_STOREI2_MEMBASE_REG: return OP_STOREI2_MEMINDEX; case OP_STOREI4_MEMBASE_REG: return OP_STOREI4_MEMINDEX; case OP_STOREI8_MEMBASE_REG: return OP_STOREI8_MEMINDEX; case OP_STORE_MEMBASE_REG: return OP_STORE_MEMINDEX; case OP_STORER4_MEMBASE_REG: return OP_STORER4_MEMINDEX; case OP_STORER8_MEMBASE_REG: return OP_STORER8_MEMINDEX; case OP_STORE_MEMBASE_IMM: return OP_STORE_MEMBASE_REG; case OP_STOREI1_MEMBASE_IMM: return OP_STOREI1_MEMBASE_REG; case OP_STOREI2_MEMBASE_IMM: return OP_STOREI2_MEMBASE_REG; case OP_STOREI4_MEMBASE_IMM: return OP_STOREI4_MEMBASE_REG; case OP_STOREI8_MEMBASE_IMM: return OP_STOREI8_MEMBASE_REG; } if (mono_op_imm_to_op (op) == -1) g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (op)); return mono_op_imm_to_op (op); } //#define map_to_reg_reg_op(op) (cfg->new_ir? 
mono_op_imm_to_op (op): map_to_reg_reg_op (op)) #define compare_opcode_is_unsigned(opcode) \ (((opcode) >= CEE_BNE_UN && (opcode) <= CEE_BLT_UN) || \ ((opcode) >= OP_IBNE_UN && (opcode) <= OP_IBLT_UN) || \ ((opcode) >= OP_LBNE_UN && (opcode) <= OP_LBLT_UN) || \ ((opcode) >= OP_COND_EXC_NE_UN && (opcode) <= OP_COND_EXC_LT_UN) || \ ((opcode) >= OP_COND_EXC_INE_UN && (opcode) <= OP_COND_EXC_ILT_UN) || \ ((opcode) == OP_CLT_UN || (opcode) == OP_CGT_UN || \ (opcode) == OP_ICLT_UN || (opcode) == OP_ICGT_UN || \ (opcode) == OP_LCLT_UN || (opcode) == OP_LCGT_UN)) /* * Remove from the instruction list the instructions that can't be * represented with very simple instructions with no register * requirements. */ void mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *next, *temp, *last_ins = NULL; int imm; MONO_BB_FOR_EACH_INS (bb, ins) { loop_start: switch (ins->opcode) { case OP_IDIV_UN_IMM: case OP_IDIV_IMM: case OP_IREM_IMM: case OP_IREM_UN_IMM: CASE_PPC64 (OP_LREM_IMM) { NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; if (ins->opcode == OP_IDIV_IMM) ins->opcode = OP_IDIV; else if (ins->opcode == OP_IREM_IMM) ins->opcode = OP_IREM; else if (ins->opcode == OP_IDIV_UN_IMM) ins->opcode = OP_IDIV_UN; else if (ins->opcode == OP_IREM_UN_IMM) ins->opcode = OP_IREM_UN; else if (ins->opcode == OP_LREM_IMM) ins->opcode = OP_LREM; last_ins = temp; /* handle rem separately */ goto loop_start; } case OP_IREM: case OP_IREM_UN: CASE_PPC64 (OP_LREM) CASE_PPC64 (OP_LREM_UN) { MonoInst *mul; /* we change a rem dest, src1, src2 to * div temp1, src1, src2 * mul temp2, temp1, src2 * sub dest, src1, temp2 */ if (ins->opcode == OP_IREM || ins->opcode == OP_IREM_UN) { NEW_INS (cfg, mul, OP_IMUL); NEW_INS (cfg, temp, ins->opcode == OP_IREM? OP_IDIV: OP_IDIV_UN); ins->opcode = OP_ISUB; } else { NEW_INS (cfg, mul, OP_LMUL); NEW_INS (cfg, temp, ins->opcode == OP_LREM? 
OP_LDIV: OP_LDIV_UN); ins->opcode = OP_LSUB; } temp->sreg1 = ins->sreg1; temp->sreg2 = ins->sreg2; temp->dreg = mono_alloc_ireg (cfg); mul->sreg1 = temp->dreg; mul->sreg2 = ins->sreg2; mul->dreg = mono_alloc_ireg (cfg); ins->sreg2 = mul->dreg; break; } case OP_IADD_IMM: CASE_PPC64 (OP_LADD_IMM) case OP_ADD_IMM: case OP_ADDCC_IMM: if (!ppc_is_imm16 (ins->inst_imm)) { NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; case OP_ISUB_IMM: CASE_PPC64 (OP_LSUB_IMM) case OP_SUB_IMM: if (!ppc_is_imm16 (-ins->inst_imm)) { NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; case OP_IAND_IMM: case OP_IOR_IMM: case OP_IXOR_IMM: case OP_LAND_IMM: case OP_LOR_IMM: case OP_LXOR_IMM: case OP_AND_IMM: case OP_OR_IMM: case OP_XOR_IMM: { gboolean is_imm = ((ins->inst_imm & 0xffff0000) && (ins->inst_imm & 0xffff)); #ifdef __mono_ppc64__ if (ins->inst_imm & 0xffffffff00000000ULL) is_imm = TRUE; #endif if (is_imm) { NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; } case OP_ISBB_IMM: case OP_IADC_IMM: case OP_SBB_IMM: case OP_SUBCC_IMM: case OP_ADC_IMM: NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_COMPARE_IMM: case OP_ICOMPARE_IMM: CASE_PPC64 (OP_LCOMPARE_IMM) next = ins->next; /* Branch opts can eliminate the branch */ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) { ins->opcode = OP_NOP; break; } g_assert(next); if (compare_opcode_is_unsigned (next->opcode)) { if (!ppc_is_uimm16 (ins->inst_imm)) { NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } } else { if (!ppc_is_imm16 (ins->inst_imm)) { NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } } break; case OP_IMUL_IMM: case OP_MUL_IMM: CASE_PPC64 (OP_LMUL_IMM) if (ins->inst_imm == 1) { ins->opcode = OP_MOVE; break; } if (ins->inst_imm == 0) { ins->opcode = OP_ICONST; ins->inst_c0 = 0; break; } imm = (ins->inst_imm > 0) ? 
mono_is_power_of_two (ins->inst_imm) : -1; if (imm > 0) { ins->opcode = OP_SHL_IMM; ins->inst_imm = imm; break; } if (!ppc_is_imm16 (ins->inst_imm)) { NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); } break; case OP_LOCALLOC_IMM: NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg1 = temp->dreg; ins->opcode = OP_LOCALLOC; break; case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: CASE_PPC64 (OP_LOADI8_MEMBASE) case OP_LOADU4_MEMBASE: case OP_LOADI2_MEMBASE: case OP_LOADU2_MEMBASE: case OP_LOADI1_MEMBASE: case OP_LOADU1_MEMBASE: case OP_LOADR4_MEMBASE: case OP_LOADR8_MEMBASE: case OP_STORE_MEMBASE_REG: CASE_PPC64 (OP_STOREI8_MEMBASE_REG) case OP_STOREI4_MEMBASE_REG: case OP_STOREI2_MEMBASE_REG: case OP_STOREI1_MEMBASE_REG: case OP_STORER4_MEMBASE_REG: case OP_STORER8_MEMBASE_REG: /* we can do two things: load the immed in a register * and use an indexed load, or see if the immed can be * represented as an ad_imm + a load with a smaller offset * that fits. We just do the first for now, optimize later. */ if (ppc_is_imm16 (ins->inst_offset)) break; NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_offset; temp->dreg = mono_alloc_ireg (cfg); ins->sreg2 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); break; case OP_STORE_MEMBASE_IMM: case OP_STOREI1_MEMBASE_IMM: case OP_STOREI2_MEMBASE_IMM: case OP_STOREI4_MEMBASE_IMM: CASE_PPC64 (OP_STOREI8_MEMBASE_IMM) NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = ins->inst_imm; temp->dreg = mono_alloc_ireg (cfg); ins->sreg1 = temp->dreg; ins->opcode = map_to_reg_reg_op (ins->opcode); last_ins = temp; goto loop_start; /* make it handle the possibly big ins->inst_offset */ case OP_R8CONST: case OP_R4CONST: if (cfg->compile_aot) { /* Keep these in the aot case */ break; } NEW_INS (cfg, temp, OP_ICONST); temp->inst_c0 = (gulong)ins->inst_p0; temp->dreg = mono_alloc_ireg (cfg); ins->inst_basereg = temp->dreg; ins->inst_offset = 0; ins->opcode = ins->opcode == OP_R4CONST? OP_LOADR4_MEMBASE: OP_LOADR8_MEMBASE; last_ins = temp; /* make it handle the possibly big ins->inst_offset * later optimize to use lis + load_membase */ goto loop_start; } last_ins = ins; } bb->last_ins = last_ins; bb->max_vreg = cfg->next_vreg; } static guchar* emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed) { long offset = cfg->arch.fp_conv_var_offset; long sub_offset; /* sreg is a float, dreg is an integer reg. 
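   fctiwz/fctidz leave the converted integer in a float register; since no
   direct FPR-to-GPR move is used here, the value is stored to the stack slot
   at cfg->arch.fp_conv_var_offset and reloaded into the integer register.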
ppc_f0 is used a scratch */ #ifdef __mono_ppc64__ if (size == 8) { ppc_fctidz (code, ppc_f0, sreg); sub_offset = 0; } else #endif { ppc_fctiwz (code, ppc_f0, sreg); sub_offset = 4; } if (ppc_is_imm16 (offset + sub_offset)) { ppc_stfd (code, ppc_f0, offset, cfg->frame_reg); if (size == 8) ppc_ldr (code, dreg, offset + sub_offset, cfg->frame_reg); else ppc_lwz (code, dreg, offset + sub_offset, cfg->frame_reg); } else { ppc_load (code, dreg, offset); ppc_add (code, dreg, dreg, cfg->frame_reg); ppc_stfd (code, ppc_f0, 0, dreg); if (size == 8) ppc_ldr (code, dreg, sub_offset, dreg); else ppc_lwz (code, dreg, sub_offset, dreg); } if (!is_signed) { if (size == 1) ppc_andid (code, dreg, dreg, 0xff); else if (size == 2) ppc_andid (code, dreg, dreg, 0xffff); #ifdef __mono_ppc64__ else if (size == 4) ppc_clrldi (code, dreg, dreg, 32); #endif } else { if (size == 1) ppc_extsb (code, dreg, dreg); else if (size == 2) ppc_extsh (code, dreg, dreg); #ifdef __mono_ppc64__ else if (size == 4) ppc_extsw (code, dreg, dreg); #endif } return code; } static void emit_thunk (guint8 *code, gconstpointer target) { guint8 *p = code; /* 2 bytes on 32bit, 5 bytes on 64bit */ ppc_load_sequence (code, ppc_r0, target); ppc_mtctr (code, ppc_r0); ppc_bcctr (code, PPC_BR_ALWAYS, 0); mono_arch_flush_icache (p, code - p); } static void handle_thunk (MonoCompile *cfg, guchar *code, const guchar *target) { MonoJitInfo *ji = NULL; MonoThunkJitInfo *info; guint8 *thunks, *p; int thunks_size; guint8 *orig_target; guint8 *target_thunk; if (cfg) { /* * This can be called multiple times during JITting, * save the current position in cfg->arch to avoid * doing a O(n^2) search. */ if (!cfg->arch.thunks) { cfg->arch.thunks = cfg->thunks; cfg->arch.thunks_size = cfg->thunk_area; } thunks = cfg->arch.thunks; thunks_size = cfg->arch.thunks_size; if (!thunks_size) { g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE)); g_assert_not_reached (); } g_assert (*(guint32*)thunks == 0); emit_thunk (thunks, target); ppc_patch (code, thunks); cfg->arch.thunks += THUNK_SIZE; cfg->arch.thunks_size -= THUNK_SIZE; } else { ji = mini_jit_info_table_find (code); g_assert (ji); info = mono_jit_info_get_thunk_info (ji); g_assert (info); thunks = (guint8 *) ji->code_start + info->thunks_offset; thunks_size = info->thunks_size; orig_target = mono_arch_get_call_target (code + 4); mono_mini_arch_lock (); target_thunk = NULL; if (orig_target >= thunks && orig_target < thunks + thunks_size) { /* The call already points to a thunk, because of trampolines etc. */ target_thunk = orig_target; } else { for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) { if (((guint32 *) p) [0] == 0) { /* Free entry */ target_thunk = p; break; } else { /* ppc64 requires 5 instructions, 32bit two instructions */ #ifdef __mono_ppc64__ const int const_load_size = 5; #else const int const_load_size = 2; #endif guint32 load [const_load_size]; guchar *templ = (guchar *) load; ppc_load_sequence (templ, ppc_r0, target); if (!memcmp (p, load, const_load_size)) { /* Thunk already points to target */ target_thunk = p; break; } } } } // g_print ("THUNK: %p %p %p\n", code, target, target_thunk); if (!target_thunk) { mono_mini_arch_unlock (); g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? 
mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE)); g_assert_not_reached (); } emit_thunk (target_thunk, target); ppc_patch (code, target_thunk); mono_mini_arch_unlock (); } } static void patch_ins (guint8 *code, guint32 ins) { *(guint32*)code = ins; mono_arch_flush_icache (code, 4); } static void ppc_patch_full (MonoCompile *cfg, guchar *code, const guchar *target, gboolean is_fd) { guint32 ins = *(guint32*)code; guint32 prim = ins >> 26; guint32 ovf; //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target); if (prim == 18) { // prefer relative branches, they are more position independent (e.g. for AOT compilation). gint diff = target - code; g_assert (!is_fd); if (diff >= 0){ if (diff <= 33554431){ ins = (18 << 26) | (diff) | (ins & 1); patch_ins (code, ins); return; } } else { /* diff between 0 and -33554432 */ if (diff >= -33554432){ ins = (18 << 26) | (diff & ~0xfc000000) | (ins & 1); patch_ins (code, ins); return; } } if ((glong)target >= 0){ if ((glong)target <= 33554431){ ins = (18 << 26) | ((gulong) target) | (ins & 1) | 2; patch_ins (code, ins); return; } } else { if ((glong)target >= -33554432){ ins = (18 << 26) | (((gulong)target) & ~0xfc000000) | (ins & 1) | 2; patch_ins (code, ins); return; } } handle_thunk (cfg, code, target); return; g_assert_not_reached (); } if (prim == 16) { g_assert (!is_fd); // absolute address if (ins & 2) { guint32 li = (gulong)target; ins = (ins & 0xffff0000) | (ins & 3); ovf = li & 0xffff0000; if (ovf != 0 && ovf != 0xffff0000) g_assert_not_reached (); li &= 0xffff; ins |= li; // FIXME: assert the top bits of li are 0 } else { gint diff = target - code; ins = (ins & 0xffff0000) | (ins & 3); ovf = diff & 0xffff0000; if (ovf != 0 && ovf != 0xffff0000) g_assert_not_reached (); diff &= 0xffff; ins |= diff; } patch_ins (code, ins); return; } if (prim == 15 || ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) { #ifdef __mono_ppc64__ guint32 *seq = (guint32*)code; guint32 *branch_ins; /* the trampoline code will try to patch the blrl, blr, bcctr */ if (ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) { branch_ins = seq; if (ppc_is_load_op (seq [-3]) || ppc_opcode (seq [-3]) == 31) /* ld || lwz || mr */ code -= 32; else code -= 24; } else { if (ppc_is_load_op (seq [5]) #ifdef PPC_USES_FUNCTION_DESCRIPTOR /* With function descs we need to do more careful matches. 
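   (Under the ppc64 ELFv1 ABI a function pointer refers to a function
   descriptor (entry address, TOC pointer, environment) rather than directly
   to code, so the call sequences contain extra loads.)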
*/ || ppc_opcode (seq [5]) == 31 /* ld || lwz || mr */ #endif ) branch_ins = seq + 8; else branch_ins = seq + 6; } seq = (guint32*)code; /* this is the lis/ori/sldi/oris/ori/(ld/ld|mr/nop)/mtlr/blrl sequence */ g_assert (mono_ppc_is_direct_call_sequence (branch_ins)); if (ppc_is_load_op (seq [5])) { g_assert (ppc_is_load_op (seq [6])); if (!is_fd) { guint8 *buf = (guint8*)&seq [5]; ppc_mr (buf, PPC_CALL_REG, ppc_r12); ppc_nop (buf); } } else { if (is_fd) target = (const guchar*)mono_get_addr_from_ftnptr ((gpointer)target); } /* FIXME: make this thread safe */ #ifdef PPC_USES_FUNCTION_DESCRIPTOR /* FIXME: we're assuming we're using r12 here */ ppc_load_ptr_sequence (code, ppc_r12, target); #else ppc_load_ptr_sequence (code, PPC_CALL_REG, target); #endif mono_arch_flush_icache ((guint8*)seq, 28); #else guint32 *seq; /* the trampoline code will try to patch the blrl, blr, bcctr */ if (ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) { code -= 12; } /* this is the lis/ori/mtlr/blrl sequence */ seq = (guint32*)code; g_assert ((seq [0] >> 26) == 15); g_assert ((seq [1] >> 26) == 24); g_assert ((seq [2] >> 26) == 31); g_assert (seq [3] == 0x4e800021 || seq [3] == 0x4e800020 || seq [3] == 0x4e800420); /* FIXME: make this thread safe */ ppc_lis (code, PPC_CALL_REG, (guint32)(target) >> 16); ppc_ori (code, PPC_CALL_REG, PPC_CALL_REG, (guint32)(target) & 0xffff); mono_arch_flush_icache (code - 8, 8); #endif } else { g_assert_not_reached (); } // g_print ("patched with 0x%08x\n", ins); } void ppc_patch (guchar *code, const guchar *target) { ppc_patch_full (NULL, code, target, FALSE); } void mono_ppc_patch (guchar *code, const guchar *target) { ppc_patch (code, target); } static guint8* emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code) { switch (ins->opcode) { case OP_FCALL: case OP_FCALL_REG: case OP_FCALL_MEMBASE: if (ins->dreg != ppc_f1) ppc_fmr (code, ins->dreg, ppc_f1); break; } return code; } static guint8* emit_reserve_param_area (MonoCompile *cfg, guint8 *code) { long size = cfg->param_area; size += MONO_ARCH_FRAME_ALIGNMENT - 1; size &= -MONO_ARCH_FRAME_ALIGNMENT; if (!size) return code; ppc_ldptr (code, ppc_r0, 0, ppc_sp); if (ppc_is_imm16 (-size)) { ppc_stptr_update (code, ppc_r0, -size, ppc_sp); } else { ppc_load (code, ppc_r12, -size); ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12); } return code; } static guint8* emit_unreserve_param_area (MonoCompile *cfg, guint8 *code) { long size = cfg->param_area; size += MONO_ARCH_FRAME_ALIGNMENT - 1; size &= -MONO_ARCH_FRAME_ALIGNMENT; if (!size) return code; ppc_ldptr (code, ppc_r0, 0, ppc_sp); if (ppc_is_imm16 (size)) { ppc_stptr_update (code, ppc_r0, size, ppc_sp); } else { ppc_load (code, ppc_r12, size); ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12); } return code; } #define MASK_SHIFT_IMM(i) ((i) & MONO_PPC_32_64_CASE (0x1f, 0x3f)) #ifndef DISABLE_JIT void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *next; MonoCallInst *call; guint8 *code = cfg->native_code + cfg->code_len; MonoInst *last_ins = NULL; int max_len, cpos; int L; /* we don't align basic blocks of loops on ppc */ if (cfg->verbose_level > 2) g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset); cpos = bb->max_offset; MONO_BB_FOR_EACH_INS (bb, ins) { const guint offset = code - cfg->native_code; set_code_cursor (cfg, code); max_len = ins_get_size (ins->opcode); code = realloc_code (cfg, max_len); // if (ins->cil_code) // g_print ("cil code\n"); 
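		/* record debug line-number info for this ins at the current native offset */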
mono_debug_record_line_number (cfg, ins, offset); switch (normalize_opcode (ins->opcode)) { case OP_RELAXED_NOP: case OP_NOP: case OP_DUMMY_USE: case OP_DUMMY_ICONST: case OP_DUMMY_I8CONST: case OP_DUMMY_R8CONST: case OP_DUMMY_R4CONST: case OP_NOT_REACHED: case OP_NOT_NULL: break; case OP_IL_SEQ_POINT: mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); break; case OP_SEQ_POINT: { int i; if (cfg->compile_aot) NOT_IMPLEMENTED; /* * Read from the single stepping trigger page. This will cause a * SIGSEGV when single stepping is enabled. * We do this _before_ the breakpoint, so single stepping after * a breakpoint is hit will step to the next IL offset. */ if (ins->flags & MONO_INST_SINGLE_STEP_LOC) { ppc_load (code, ppc_r12, (gsize)ss_trigger_page); ppc_ldptr (code, ppc_r12, 0, ppc_r12); } mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); /* * A placeholder for a possible breakpoint inserted by * mono_arch_set_breakpoint (). */ for (i = 0; i < BREAKPOINT_SIZE / 4; ++i) ppc_nop (code); break; } case OP_BIGMUL: ppc_mullw (code, ppc_r0, ins->sreg1, ins->sreg2); ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2); ppc_mr (code, ppc_r4, ppc_r0); break; case OP_BIGMUL_UN: ppc_mullw (code, ppc_r0, ins->sreg1, ins->sreg2); ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2); ppc_mr (code, ppc_r4, ppc_r0); break; case OP_MEMORY_BARRIER: ppc_sync (code); break; case OP_STOREI1_MEMBASE_REG: if (ppc_is_imm16 (ins->inst_offset)) { ppc_stb (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); } else { if (ppc_is_imm32 (ins->inst_offset)) { ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset)); ppc_stb (code, ins->sreg1, ins->inst_offset, ppc_r11); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_stbx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0); } } break; case OP_STOREI2_MEMBASE_REG: if (ppc_is_imm16 (ins->inst_offset)) { ppc_sth (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); } else { if (ppc_is_imm32 (ins->inst_offset)) { ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset)); ppc_sth (code, ins->sreg1, ins->inst_offset, ppc_r11); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_sthx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0); } } break; case OP_STORE_MEMBASE_REG: if (ppc_is_imm16 (ins->inst_offset)) { ppc_stptr (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); } else { if (ppc_is_imm32 (ins->inst_offset)) { ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset)); ppc_stptr (code, ins->sreg1, ins->inst_offset, ppc_r11); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_stptr_indexed (code, ins->sreg1, ins->inst_destbasereg, ppc_r0); } } break; #ifdef MONO_ARCH_ILP32 case OP_STOREI8_MEMBASE_REG: if (ppc_is_imm16 (ins->inst_offset)) { ppc_str (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_str_indexed (code, ins->sreg1, ins->inst_destbasereg, ppc_r0); } break; #endif case OP_STOREI1_MEMINDEX: ppc_stbx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case OP_STOREI2_MEMINDEX: ppc_sthx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case OP_STORE_MEMINDEX: ppc_stptr_indexed (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case OP_LOADU4_MEM: g_assert_not_reached (); break; case OP_LOAD_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_ldptr (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) { 
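				/* 32-bit offset: addis adds the ppc_ha()-adjusted high half,
				   and the load then applies the sign-extended low 16 bits */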
ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset)); ppc_ldptr (code, ins->dreg, ins->inst_offset, ins->dreg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0); } } break; case OP_LOADI4_MEMBASE: #ifdef __mono_ppc64__ if (ppc_is_imm16 (ins->inst_offset)) { ppc_lwa (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) { ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset)); ppc_lwa (code, ins->dreg, ins->inst_offset, ins->dreg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_lwax (code, ins->dreg, ins->inst_basereg, ppc_r0); } } break; #endif case OP_LOADU4_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_lwz (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) { ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset)); ppc_lwz (code, ins->dreg, ins->inst_offset, ins->dreg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_lwzx (code, ins->dreg, ins->inst_basereg, ppc_r0); } } break; case OP_LOADI1_MEMBASE: case OP_LOADU1_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_lbz (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) { ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset)); ppc_lbz (code, ins->dreg, ins->inst_offset, ins->dreg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_lbzx (code, ins->dreg, ins->inst_basereg, ppc_r0); } } if (ins->opcode == OP_LOADI1_MEMBASE) ppc_extsb (code, ins->dreg, ins->dreg); break; case OP_LOADU2_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_lhz (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) { ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset)); ppc_lhz (code, ins->dreg, ins->inst_offset, ins->dreg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_lhzx (code, ins->dreg, ins->inst_basereg, ppc_r0); } } break; case OP_LOADI2_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_lha (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset) && (ins->dreg > 0)) { ppc_addis (code, ins->dreg, ins->inst_basereg, ppc_ha(ins->inst_offset)); ppc_lha (code, ins->dreg, ins->inst_offset, ins->dreg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_lhax (code, ins->dreg, ins->inst_basereg, ppc_r0); } } break; #ifdef MONO_ARCH_ILP32 case OP_LOADI8_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_ldr (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_ldr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0); } break; #endif case OP_LOAD_MEMINDEX: ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADI4_MEMINDEX: #ifdef __mono_ppc64__ ppc_lwax (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; #endif case OP_LOADU4_MEMINDEX: ppc_lwzx (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADU2_MEMINDEX: ppc_lhzx (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADI2_MEMINDEX: ppc_lhax (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADU1_MEMINDEX: ppc_lbzx (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADI1_MEMINDEX: ppc_lbzx (code, ins->dreg, ins->inst_basereg, ins->sreg2); 
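		/* no sign-extending byte load on PPC: zero-extend with lbzx, then sign-extend */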
ppc_extsb (code, ins->dreg, ins->dreg); break; case OP_ICONV_TO_I1: CASE_PPC64 (OP_LCONV_TO_I1) ppc_extsb (code, ins->dreg, ins->sreg1); break; case OP_ICONV_TO_I2: CASE_PPC64 (OP_LCONV_TO_I2) ppc_extsh (code, ins->dreg, ins->sreg1); break; case OP_ICONV_TO_U1: CASE_PPC64 (OP_LCONV_TO_U1) ppc_clrlwi (code, ins->dreg, ins->sreg1, 24); break; case OP_ICONV_TO_U2: CASE_PPC64 (OP_LCONV_TO_U2) ppc_clrlwi (code, ins->dreg, ins->sreg1, 16); break; case OP_COMPARE: case OP_ICOMPARE: CASE_PPC64 (OP_LCOMPARE) L = (sizeof (target_mgreg_t) == 4 || ins->opcode == OP_ICOMPARE) ? 0 : 1; next = ins->next; if (next && compare_opcode_is_unsigned (next->opcode)) ppc_cmpl (code, 0, L, ins->sreg1, ins->sreg2); else ppc_cmp (code, 0, L, ins->sreg1, ins->sreg2); break; case OP_COMPARE_IMM: case OP_ICOMPARE_IMM: CASE_PPC64 (OP_LCOMPARE_IMM) L = (sizeof (target_mgreg_t) == 4 || ins->opcode == OP_ICOMPARE_IMM) ? 0 : 1; next = ins->next; if (next && compare_opcode_is_unsigned (next->opcode)) { if (ppc_is_uimm16 (ins->inst_imm)) { ppc_cmpli (code, 0, L, ins->sreg1, (ins->inst_imm & 0xffff)); } else { g_assert_not_reached (); } } else { if (ppc_is_imm16 (ins->inst_imm)) { ppc_cmpi (code, 0, L, ins->sreg1, (ins->inst_imm & 0xffff)); } else { g_assert_not_reached (); } } break; case OP_BREAK: /* * gdb does not like encountering a trap in the debugged code. So * instead of emitting a trap, we emit a call a C function and place a * breakpoint there. */ //ppc_break (code); ppc_mr (code, ppc_r3, ins->sreg1); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break)); if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) { ppc_load_func (code, PPC_CALL_REG, 0); ppc_mtlr (code, PPC_CALL_REG); ppc_blrl (code); } else { ppc_bl (code, 0); } break; case OP_ADDCC: case OP_IADDCC: ppc_addco (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IADD: CASE_PPC64 (OP_LADD) ppc_add (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_ADC: case OP_IADC: ppc_adde (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_ADDCC_IMM: if (ppc_is_imm16 (ins->inst_imm)) { ppc_addic (code, ins->dreg, ins->sreg1, ins->inst_imm); } else { g_assert_not_reached (); } break; case OP_ADD_IMM: case OP_IADD_IMM: CASE_PPC64 (OP_LADD_IMM) if (ppc_is_imm16 (ins->inst_imm)) { ppc_addi (code, ins->dreg, ins->sreg1, ins->inst_imm); } else { g_assert_not_reached (); } break; case OP_IADD_OVF: /* check XER [0-3] (SO, OV, CA): we can't use mcrxr */ ppc_addo (code, ins->dreg, ins->sreg1, ins->sreg2); ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<14)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_IADD_OVF_UN: /* check XER [0-3] (SO, OV, CA): we can't use mcrxr */ ppc_addco (code, ins->dreg, ins->sreg1, ins->sreg2); ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<13)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_ISUB_OVF: CASE_PPC64 (OP_LSUB_OVF) /* check XER [0-3] (SO, OV, CA): we can't use mcrxr */ ppc_subfo (code, ins->dreg, ins->sreg2, ins->sreg1); ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<14)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_ISUB_OVF_UN: CASE_PPC64 (OP_LSUB_OVF_UN) /* check XER [0-3] (SO, OV, CA): we can't use mcrxr */ ppc_subfc (code, ins->dreg, ins->sreg2, ins->sreg1); ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, 
			 ppc_r0, ppc_r0, (1<<13));
		EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
		break;
	case OP_ADD_OVF_CARRY:
		/* check XER [0-3] (SO, OV, CA): we can't use mcrxr */
		ppc_addeo (code, ins->dreg, ins->sreg1, ins->sreg2);
		ppc_mfspr (code, ppc_r0, ppc_xer);
		ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
		EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
		break;
	case OP_ADD_OVF_UN_CARRY:
		/* check XER [0-3] (SO, OV, CA): we can't use mcrxr */
		ppc_addeo (code, ins->dreg, ins->sreg1, ins->sreg2);
		ppc_mfspr (code, ppc_r0, ppc_xer);
		ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
		EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
		break;
	case OP_SUB_OVF_CARRY:
		/* check XER [0-3] (SO, OV, CA): we can't use mcrxr */
		ppc_subfeo (code, ins->dreg, ins->sreg2, ins->sreg1);
		ppc_mfspr (code, ppc_r0, ppc_xer);
		ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
		EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
		break;
	case OP_SUB_OVF_UN_CARRY:
		/* check XER [0-3] (SO, OV, CA): we can't use mcrxr */
		ppc_subfeo (code, ins->dreg, ins->sreg2, ins->sreg1);
		ppc_mfspr (code, ppc_r0, ppc_xer);
		ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
		EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
		break;
	case OP_SUBCC:
	case OP_ISUBCC:
		ppc_subfco (code, ins->dreg, ins->sreg2, ins->sreg1);
		break;
	case OP_ISUB:
	CASE_PPC64 (OP_LSUB)
		ppc_subf (code, ins->dreg, ins->sreg2, ins->sreg1);
		break;
	case OP_SBB:
	case OP_ISBB:
		ppc_subfe (code, ins->dreg, ins->sreg2, ins->sreg1);
		break;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
	CASE_PPC64 (OP_LSUB_IMM)
		// we add the negated value
		if (ppc_is_imm16 (-ins->inst_imm))
			ppc_addi (code, ins->dreg, ins->sreg1, -ins->inst_imm);
		else {
			g_assert_not_reached ();
		}
		break;
	case OP_PPC_SUBFIC:
		g_assert (ppc_is_imm16 (ins->inst_imm));
		ppc_subfic (code, ins->dreg, ins->sreg1, ins->inst_imm);
		break;
	case OP_PPC_SUBFZE:
		ppc_subfze (code, ins->dreg, ins->sreg1);
		break;
	case OP_IAND:
	CASE_PPC64 (OP_LAND)
		/* FIXME: the ppc macros are inconsistent here: put dest as the first arg!
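		   (as used here, ppc_and and friends take the source register first
		   and the destination second, mirroring the rS,rA field order of the
		   instruction encoding)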
*/ ppc_and (code, ins->sreg1, ins->dreg, ins->sreg2); break; case OP_AND_IMM: case OP_IAND_IMM: CASE_PPC64 (OP_LAND_IMM) if (!(ins->inst_imm & 0xffff0000)) { ppc_andid (code, ins->sreg1, ins->dreg, ins->inst_imm); } else if (!(ins->inst_imm & 0xffff)) { ppc_andisd (code, ins->sreg1, ins->dreg, ((guint32)ins->inst_imm >> 16)); } else { g_assert_not_reached (); } break; case OP_IDIV: CASE_PPC64 (OP_LDIV) { guint8 *divisor_is_m1; /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits] */ ppc_compare_reg_imm (code, 0, ins->sreg2, -1); divisor_is_m1 = code; ppc_bc (code, PPC_BR_FALSE | PPC_BR_LIKELY, PPC_BR_EQ, 0); ppc_lis (code, ppc_r0, 0x8000); #ifdef __mono_ppc64__ if (ins->opcode == OP_LDIV) ppc_sldi (code, ppc_r0, ppc_r0, 32); #endif ppc_compare (code, 0, ins->sreg1, ppc_r0); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException"); ppc_patch (divisor_is_m1, code); /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits] */ if (ins->opcode == OP_IDIV) ppc_divwod (code, ins->dreg, ins->sreg1, ins->sreg2); #ifdef __mono_ppc64__ else ppc_divdod (code, ins->dreg, ins->sreg1, ins->sreg2); #endif ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<14)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException"); break; } case OP_IDIV_UN: CASE_PPC64 (OP_LDIV_UN) if (ins->opcode == OP_IDIV_UN) ppc_divwuod (code, ins->dreg, ins->sreg1, ins->sreg2); #ifdef __mono_ppc64__ else ppc_divduod (code, ins->dreg, ins->sreg1, ins->sreg2); #endif ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<14)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException"); break; case OP_DIV_IMM: case OP_IREM: case OP_IREM_UN: case OP_REM_IMM: g_assert_not_reached (); case OP_IOR: CASE_PPC64 (OP_LOR) ppc_or (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_OR_IMM: case OP_IOR_IMM: CASE_PPC64 (OP_LOR_IMM) if (!(ins->inst_imm & 0xffff0000)) { ppc_ori (code, ins->sreg1, ins->dreg, ins->inst_imm); } else if (!(ins->inst_imm & 0xffff)) { ppc_oris (code, ins->dreg, ins->sreg1, ((guint32)(ins->inst_imm) >> 16)); } else { g_assert_not_reached (); } break; case OP_IXOR: CASE_PPC64 (OP_LXOR) ppc_xor (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IXOR_IMM: case OP_XOR_IMM: CASE_PPC64 (OP_LXOR_IMM) if (!(ins->inst_imm & 0xffff0000)) { ppc_xori (code, ins->sreg1, ins->dreg, ins->inst_imm); } else if (!(ins->inst_imm & 0xffff)) { ppc_xoris (code, ins->sreg1, ins->dreg, ((guint32)(ins->inst_imm) >> 16)); } else { g_assert_not_reached (); } break; case OP_ISHL: CASE_PPC64 (OP_LSHL) ppc_shift_left (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SHL_IMM: case OP_ISHL_IMM: CASE_PPC64 (OP_LSHL_IMM) ppc_shift_left_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm)); break; case OP_ISHR: ppc_sraw (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_SHR_IMM: ppc_shift_right_arith_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm)); break; case OP_SHR_UN_IMM: if (MASK_SHIFT_IMM (ins->inst_imm)) ppc_shift_right_imm (code, ins->dreg, ins->sreg1, MASK_SHIFT_IMM (ins->inst_imm)); else ppc_mr (code, ins->dreg, ins->sreg1); break; case OP_ISHR_UN: ppc_srw (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_INOT: CASE_PPC64 (OP_LNOT) ppc_not (code, ins->dreg, ins->sreg1); break; case OP_INEG: CASE_PPC64 (OP_LNEG) ppc_neg (code, ins->dreg, ins->sreg1); break; case OP_IMUL: CASE_PPC64 (OP_LMUL) ppc_multiply (code, ins->dreg, ins->sreg1, ins->sreg2); break; case 
OP_IMUL_IMM: case OP_MUL_IMM: CASE_PPC64 (OP_LMUL_IMM) if (ppc_is_imm16 (ins->inst_imm)) { ppc_mulli (code, ins->dreg, ins->sreg1, ins->inst_imm); } else { g_assert_not_reached (); } break; case OP_IMUL_OVF: CASE_PPC64 (OP_LMUL_OVF) /* we annot use mcrxr, since it's not implemented on some processors * XER format: SO, OV, CA, reserved [21 bits], count [8 bits] */ if (ins->opcode == OP_IMUL_OVF) ppc_mullwo (code, ins->dreg, ins->sreg1, ins->sreg2); #ifdef __mono_ppc64__ else ppc_mulldo (code, ins->dreg, ins->sreg1, ins->sreg2); #endif ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1<<14)); EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException"); break; case OP_IMUL_OVF_UN: CASE_PPC64 (OP_LMUL_OVF_UN) /* we first multiply to get the high word and compare to 0 * to set the flags, then the result is discarded and then * we multiply to get the lower * bits result */ if (ins->opcode == OP_IMUL_OVF_UN) ppc_mulhwu (code, ppc_r0, ins->sreg1, ins->sreg2); #ifdef __mono_ppc64__ else ppc_mulhdu (code, ppc_r0, ins->sreg1, ins->sreg2); #endif ppc_cmpi (code, 0, 0, ppc_r0, 0); EMIT_COND_SYSTEM_EXCEPTION (CEE_BNE_UN - CEE_BEQ, "OverflowException"); ppc_multiply (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_ICONST: ppc_load (code, ins->dreg, ins->inst_c0); break; case OP_I8CONST: { ppc_load (code, ins->dreg, ins->inst_l); break; } case OP_LOAD_GOTADDR: /* The PLT implementation depends on this */ g_assert (ins->dreg == ppc_r30); code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL); break; case OP_GOT_ENTRY: // FIXME: Fix max instruction length /* XXX: This is hairy; we're casting a pointer from a union to an enum... */ mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(intptr_t)ins->inst_right->inst_i1, ins->inst_right->inst_p0); /* arch_emit_got_access () patches this */ ppc_load32 (code, ppc_r0, 0); ppc_ldptr_indexed (code, ins->dreg, ins->inst_basereg, ppc_r0); break; case OP_AOTCONST: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(intptr_t)ins->inst_i1, ins->inst_p0); ppc_load_sequence (code, ins->dreg, 0); break; CASE_PPC32 (OP_ICONV_TO_I4) CASE_PPC32 (OP_ICONV_TO_U4) case OP_MOVE: if (ins->dreg != ins->sreg1) ppc_mr (code, ins->dreg, ins->sreg1); break; case OP_SETLRET: { int saved = ins->sreg1; if (ins->sreg1 == ppc_r3) { ppc_mr (code, ppc_r0, ins->sreg1); saved = ppc_r0; } if (ins->sreg2 != ppc_r3) ppc_mr (code, ppc_r3, ins->sreg2); if (saved != ppc_r4) ppc_mr (code, ppc_r4, saved); break; } case OP_FMOVE: if (ins->dreg != ins->sreg1) ppc_fmr (code, ins->dreg, ins->sreg1); break; case OP_MOVE_F_TO_I4: ppc_stfs (code, ins->sreg1, -4, ppc_r1); ppc_ldptr (code, ins->dreg, -4, ppc_r1); break; case OP_MOVE_I4_TO_F: ppc_stw (code, ins->sreg1, -4, ppc_r1); ppc_lfs (code, ins->dreg, -4, ppc_r1); break; #ifdef __mono_ppc64__ case OP_MOVE_F_TO_I8: ppc_stfd (code, ins->sreg1, -8, ppc_r1); ppc_ldptr (code, ins->dreg, -8, ppc_r1); break; case OP_MOVE_I8_TO_F: ppc_stptr (code, ins->sreg1, -8, ppc_r1); ppc_lfd (code, ins->dreg, -8, ppc_r1); break; #endif case OP_FCONV_TO_R4: ppc_frsp (code, ins->dreg, ins->sreg1); break; case OP_TAILCALL_PARAMETER: // This opcode helps compute sizes, i.e. // of the subsequent OP_TAILCALL, but contributes no code. g_assert (ins->next); break; case OP_TAILCALL: { int i, pos; MonoCallInst *call = (MonoCallInst*)ins; /* * Keep in sync with mono_arch_emit_epilog */ g_assert (!cfg->method->save_lmf); /* * Note: we can use ppc_r12 here because it is dead anyway: * we're leaving the method. 
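	 * (r12 is volatile in the PPC ELF ABIs and is not among the callee-saved
	 * registers restored below, so clobbering it is harmless.)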
*/ if (1 || cfg->flags & MONO_CFG_HAS_CALLS) { long ret_offset = cfg->stack_usage + PPC_RET_ADDR_OFFSET; if (ppc_is_imm16 (ret_offset)) { ppc_ldptr (code, ppc_r0, ret_offset, cfg->frame_reg); } else { ppc_load (code, ppc_r12, ret_offset); ppc_ldptr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r12); } ppc_mtlr (code, ppc_r0); } if (ppc_is_imm16 (cfg->stack_usage)) { ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage); } else { /* cfg->stack_usage is an int, so we can use * an addis/addi sequence here even in 64-bit. */ ppc_addis (code, ppc_r12, cfg->frame_reg, ppc_ha(cfg->stack_usage)); ppc_addi (code, ppc_r12, ppc_r12, cfg->stack_usage); } if (!cfg->method->save_lmf) { pos = 0; for (i = 31; i >= 13; --i) { if (cfg->used_int_regs & (1 << i)) { pos += sizeof (target_mgreg_t); ppc_ldptr (code, i, -pos, ppc_r12); } } } else { /* FIXME restore from MonoLMF: though this can't happen yet */ } /* Copy arguments on the stack to our argument area */ if (call->stack_usage) { code = emit_memcpy (code, call->stack_usage, ppc_r12, PPC_STACK_PARAM_OFFSET, ppc_sp, PPC_STACK_PARAM_OFFSET); /* r12 was clobbered */ g_assert (cfg->frame_reg == ppc_sp); if (ppc_is_imm16 (cfg->stack_usage)) { ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage); } else { /* cfg->stack_usage is an int, so we can use * an addis/addi sequence here even in 64-bit. */ ppc_addis (code, ppc_r12, cfg->frame_reg, ppc_ha(cfg->stack_usage)); ppc_addi (code, ppc_r12, ppc_r12, cfg->stack_usage); } } ppc_mr (code, ppc_sp, ppc_r12); mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method); cfg->thunk_area += THUNK_SIZE; if (cfg->compile_aot) { /* arch_emit_got_access () patches this */ ppc_load32 (code, ppc_r0, 0); #ifdef PPC_USES_FUNCTION_DESCRIPTOR ppc_ldptr_indexed (code, ppc_r12, ppc_r30, ppc_r0); ppc_ldptr (code, ppc_r0, 0, ppc_r12); #else ppc_ldptr_indexed (code, ppc_r0, ppc_r30, ppc_r0); #endif ppc_mtctr (code, ppc_r0); ppc_bcctr (code, PPC_BR_ALWAYS, 0); } else { ppc_b (code, 0); } break; } case OP_CHECK_THIS: /* ensure ins->sreg1 is not NULL */ ppc_ldptr (code, ppc_r0, 0, ins->sreg1); break; case OP_ARGLIST: { long cookie_offset = cfg->sig_cookie + cfg->stack_usage; if (ppc_is_imm16 (cookie_offset)) { ppc_addi (code, ppc_r0, cfg->frame_reg, cookie_offset); } else { ppc_load (code, ppc_r0, cookie_offset); ppc_add (code, ppc_r0, cfg->frame_reg, ppc_r0); } ppc_stptr (code, ppc_r0, 0, ins->sreg1); break; } case OP_FCALL: case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: case OP_CALL: call = (MonoCallInst*)ins; mono_call_add_patch_info (cfg, call, offset); if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) { ppc_load_func (code, PPC_CALL_REG, 0); ppc_mtlr (code, PPC_CALL_REG); ppc_blrl (code); } else { ppc_bl (code, 0); } /* FIXME: this should be handled somewhere else in the new jit */ code = emit_move_return_value (cfg, ins, code); break; case OP_FCALL_REG: case OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: #ifdef PPC_USES_FUNCTION_DESCRIPTOR ppc_ldptr (code, ppc_r0, 0, ins->sreg1); /* FIXME: if we know that this is a method, we can omit this load */ ppc_ldptr (code, ppc_r2, 8, ins->sreg1); ppc_mtlr (code, ppc_r0); #else #if (_CALL_ELF == 2) if (ins->flags & MONO_INST_HAS_METHOD) { // Not a global entry point } else { // Need to set up r12 with function entry address for global entry point if (ppc_r12 != ins->sreg1) { ppc_mr(code,ppc_r12,ins->sreg1); } } #endif ppc_mtlr (code, ins->sreg1); #endif 
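		/* indirect call: branch to the address in LR; blrl writes the return address back into LR */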
ppc_blrl (code); /* FIXME: this should be handled somewhere else in the new jit */ code = emit_move_return_value (cfg, ins, code); break; case OP_FCALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: if (cfg->compile_aot && ins->sreg1 == ppc_r12) { /* The trampolines clobber this */ ppc_mr (code, ppc_r29, ins->sreg1); ppc_ldptr (code, ppc_r0, ins->inst_offset, ppc_r29); } else { ppc_ldptr (code, ppc_r0, ins->inst_offset, ins->sreg1); } ppc_mtlr (code, ppc_r0); ppc_blrl (code); /* FIXME: this should be handled somewhere else in the new jit */ code = emit_move_return_value (cfg, ins, code); break; case OP_LOCALLOC: { guint8 * zero_loop_jump, * zero_loop_start; /* keep alignment */ int alloca_waste = PPC_STACK_PARAM_OFFSET + cfg->param_area + 31; int area_offset = alloca_waste; area_offset &= ~31; ppc_addi (code, ppc_r12, ins->sreg1, alloca_waste + 31); /* FIXME: should be calculated from MONO_ARCH_FRAME_ALIGNMENT */ ppc_clear_right_imm (code, ppc_r12, ppc_r12, 4); /* use ctr to store the number of words to 0 if needed */ if (ins->flags & MONO_INST_INIT) { /* we zero 4 bytes at a time: * we add 7 instead of 3 so that we set the counter to * at least 1, otherwise the bdnz instruction will make * it negative and iterate billions of times. */ ppc_addi (code, ppc_r0, ins->sreg1, 7); ppc_shift_right_arith_imm (code, ppc_r0, ppc_r0, 2); ppc_mtctr (code, ppc_r0); } ppc_ldptr (code, ppc_r0, 0, ppc_sp); ppc_neg (code, ppc_r12, ppc_r12); ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12); /* FIXME: make this loop work in 8 byte increments on PPC64 */ if (ins->flags & MONO_INST_INIT) { /* adjust the dest reg by -4 so we can use stwu */ /* we actually adjust -8 because we let the loop * run at least once */ ppc_addi (code, ins->dreg, ppc_sp, (area_offset - 8)); ppc_li (code, ppc_r12, 0); zero_loop_start = code; ppc_stwu (code, ppc_r12, 4, ins->dreg); zero_loop_jump = code; ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0); ppc_patch (zero_loop_jump, zero_loop_start); } ppc_addi (code, ins->dreg, ppc_sp, area_offset); break; } case OP_THROW: { //ppc_break (code); ppc_mr (code, ppc_r3, ins->sreg1); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception)); if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) { ppc_load_func (code, PPC_CALL_REG, 0); ppc_mtlr (code, PPC_CALL_REG); ppc_blrl (code); } else { ppc_bl (code, 0); } break; } case OP_RETHROW: { //ppc_break (code); ppc_mr (code, ppc_r3, ins->sreg1); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception)); if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) { ppc_load_func (code, PPC_CALL_REG, 0); ppc_mtlr (code, PPC_CALL_REG); ppc_blrl (code); } else { ppc_bl (code, 0); } break; } case OP_START_HANDLER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); g_assert (spvar->inst_basereg != ppc_sp); code = emit_reserve_param_area (cfg, code); ppc_mflr (code, ppc_r0); if (ppc_is_imm16 (spvar->inst_offset)) { ppc_stptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg); } else { ppc_load (code, ppc_r12, spvar->inst_offset); ppc_stptr_indexed (code, ppc_r0, ppc_r12, spvar->inst_basereg); } break; } case OP_ENDFILTER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); g_assert (spvar->inst_basereg != ppc_sp); code = emit_unreserve_param_area 
(cfg, code); if (ins->sreg1 != ppc_r3) ppc_mr (code, ppc_r3, ins->sreg1); if (ppc_is_imm16 (spvar->inst_offset)) { ppc_ldptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg); } else { ppc_load (code, ppc_r12, spvar->inst_offset); ppc_ldptr_indexed (code, ppc_r0, spvar->inst_basereg, ppc_r12); } ppc_mtlr (code, ppc_r0); ppc_blr (code); break; } case OP_ENDFINALLY: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); g_assert (spvar->inst_basereg != ppc_sp); code = emit_unreserve_param_area (cfg, code); ppc_ldptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg); ppc_mtlr (code, ppc_r0); ppc_blr (code); break; } case OP_CALL_HANDLER: mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb); ppc_bl (code, 0); for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev) mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb); break; case OP_LABEL: ins->inst_c0 = code - cfg->native_code; break; case OP_BR: /*if (ins->inst_target_bb->native_offset) { ppc_b (code, 0); //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset); } else*/ { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb); ppc_b (code, 0); } break; case OP_BR_REG: ppc_mtctr (code, ins->sreg1); ppc_bcctr (code, PPC_BR_ALWAYS, 0); break; case OP_ICNEQ: ppc_li (code, ins->dreg, 0); ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 2); ppc_li (code, ins->dreg, 1); break; case OP_CEQ: case OP_ICEQ: CASE_PPC64 (OP_LCEQ) ppc_li (code, ins->dreg, 0); ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 2); ppc_li (code, ins->dreg, 1); break; case OP_CLT: case OP_CLT_UN: case OP_ICLT: case OP_ICLT_UN: CASE_PPC64 (OP_LCLT) CASE_PPC64 (OP_LCLT_UN) ppc_li (code, ins->dreg, 1); ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2); ppc_li (code, ins->dreg, 0); break; case OP_ICGE: case OP_ICGE_UN: ppc_li (code, ins->dreg, 1); ppc_bc (code, PPC_BR_FALSE, PPC_BR_LT, 2); ppc_li (code, ins->dreg, 0); break; case OP_CGT: case OP_CGT_UN: case OP_ICGT: case OP_ICGT_UN: CASE_PPC64 (OP_LCGT) CASE_PPC64 (OP_LCGT_UN) ppc_li (code, ins->dreg, 1); ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2); ppc_li (code, ins->dreg, 0); break; case OP_ICLE: case OP_ICLE_UN: ppc_li (code, ins->dreg, 1); ppc_bc (code, PPC_BR_FALSE, PPC_BR_GT, 2); ppc_li (code, ins->dreg, 0); break; case OP_COND_EXC_EQ: case OP_COND_EXC_NE_UN: case OP_COND_EXC_LT: case OP_COND_EXC_LT_UN: case OP_COND_EXC_GT: case OP_COND_EXC_GT_UN: case OP_COND_EXC_GE: case OP_COND_EXC_GE_UN: case OP_COND_EXC_LE: case OP_COND_EXC_LE_UN: EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, (const char*)ins->inst_p1); break; case OP_COND_EXC_IEQ: case OP_COND_EXC_INE_UN: case OP_COND_EXC_ILT: case OP_COND_EXC_ILT_UN: case OP_COND_EXC_IGT: case OP_COND_EXC_IGT_UN: case OP_COND_EXC_IGE: case OP_COND_EXC_IGE_UN: case OP_COND_EXC_ILE: case OP_COND_EXC_ILE_UN: EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, (const char*)ins->inst_p1); break; case OP_IBEQ: case OP_IBNE_UN: case OP_IBLT: case OP_IBLT_UN: case OP_IBGT: case OP_IBGT_UN: case OP_IBGE: case OP_IBGE_UN: case OP_IBLE: case OP_IBLE_UN: EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ); break; /* floating point opcodes */ case OP_R8CONST: g_assert (cfg->compile_aot); /* FIXME: Optimize this */ ppc_bl (code, 1); ppc_mflr (code, ppc_r12); ppc_b (code, 3); *(double*)code = *(double*)ins->inst_p0; code += 8; ppc_lfd (code, ins->dreg, 8, ppc_r12); break; case OP_R4CONST: g_assert_not_reached (); break; case OP_STORER8_MEMBASE_REG: if (ppc_is_imm16 
(ins->inst_offset)) { ppc_stfd (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); } else { if (ppc_is_imm32 (ins->inst_offset)) { ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset)); ppc_stfd (code, ins->sreg1, ins->inst_offset, ppc_r11); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_stfdx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0); } } break; case OP_LOADR8_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_lfd (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset)) { ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset)); ppc_lfd (code, ins->dreg, ins->inst_offset, ppc_r11); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_lfdx (code, ins->dreg, ins->inst_destbasereg, ppc_r0); } } break; case OP_STORER4_MEMBASE_REG: ppc_frsp (code, ins->sreg1, ins->sreg1); if (ppc_is_imm16 (ins->inst_offset)) { ppc_stfs (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); } else { if (ppc_is_imm32 (ins->inst_offset)) { ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset)); ppc_stfs (code, ins->sreg1, ins->inst_offset, ppc_r11); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_stfsx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0); } } break; case OP_LOADR4_MEMBASE: if (ppc_is_imm16 (ins->inst_offset)) { ppc_lfs (code, ins->dreg, ins->inst_offset, ins->inst_basereg); } else { if (ppc_is_imm32 (ins->inst_offset)) { ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset)); ppc_lfs (code, ins->dreg, ins->inst_offset, ppc_r11); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_lfsx (code, ins->dreg, ins->inst_destbasereg, ppc_r0); } } break; case OP_LOADR4_MEMINDEX: ppc_lfsx (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_LOADR8_MEMINDEX: ppc_lfdx (code, ins->dreg, ins->inst_basereg, ins->sreg2); break; case OP_STORER4_MEMINDEX: ppc_frsp (code, ins->sreg1, ins->sreg1); ppc_stfsx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case OP_STORER8_MEMINDEX: ppc_stfdx (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2); break; case CEE_CONV_R_UN: case CEE_CONV_R4: /* FIXME: change precision */ case CEE_CONV_R8: g_assert_not_reached (); case OP_FCONV_TO_I1: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE); break; case OP_FCONV_TO_U1: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE); break; case OP_FCONV_TO_I2: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE); break; case OP_FCONV_TO_U2: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE); break; case OP_FCONV_TO_I4: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE); break; case OP_FCONV_TO_U4: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE); break; case OP_LCONV_TO_R_UN: g_assert_not_reached (); /* Implemented as helper calls */ break; case OP_LCONV_TO_OVF_I4_2: case OP_LCONV_TO_OVF_I: { #ifdef __mono_ppc64__ NOT_IMPLEMENTED; #else guint8 *negative_branch, *msword_positive_branch, *msword_negative_branch, *ovf_ex_target; // Check if its negative ppc_cmpi (code, 0, 0, ins->sreg1, 0); negative_branch = code; ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 0); // Its positive msword == 0 ppc_cmpi (code, 0, 0, ins->sreg2, 0); msword_positive_branch = code; ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0); ovf_ex_target = code; EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_ALWAYS, 0, "OverflowException"); // Negative ppc_patch (negative_branch, code); ppc_cmpi 
(code, 0, 0, ins->sreg2, -1); msword_negative_branch = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); ppc_patch (msword_negative_branch, ovf_ex_target); ppc_patch (msword_positive_branch, code); if (ins->dreg != ins->sreg1) ppc_mr (code, ins->dreg, ins->sreg1); break; #endif } case OP_ROUND: ppc_frind (code, ins->dreg, ins->sreg1); break; case OP_PPC_TRUNC: ppc_frizd (code, ins->dreg, ins->sreg1); break; case OP_PPC_CEIL: ppc_fripd (code, ins->dreg, ins->sreg1); break; case OP_PPC_FLOOR: ppc_frimd (code, ins->dreg, ins->sreg1); break; case OP_ABS: ppc_fabsd (code, ins->dreg, ins->sreg1); break; case OP_SQRTF: ppc_fsqrtsd (code, ins->dreg, ins->sreg1); break; case OP_SQRT: ppc_fsqrtd (code, ins->dreg, ins->sreg1); break; case OP_FADD: ppc_fadd (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FSUB: ppc_fsub (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FMUL: ppc_fmul (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FDIV: ppc_fdiv (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FNEG: ppc_fneg (code, ins->dreg, ins->sreg1); break; case OP_FREM: /* emulated */ g_assert_not_reached (); break; /* These min/max require POWER5 */ case OP_IMIN: ppc_cmp (code, 0, 0, ins->sreg1, ins->sreg2); ppc_isellt (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IMIN_UN: ppc_cmpl (code, 0, 0, ins->sreg1, ins->sreg2); ppc_isellt (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IMAX: ppc_cmp (code, 0, 0, ins->sreg1, ins->sreg2); ppc_iselgt (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_IMAX_UN: ppc_cmpl (code, 0, 0, ins->sreg1, ins->sreg2); ppc_iselgt (code, ins->dreg, ins->sreg1, ins->sreg2); break; CASE_PPC64 (OP_LMIN) ppc_cmp (code, 0, 1, ins->sreg1, ins->sreg2); ppc_isellt (code, ins->dreg, ins->sreg1, ins->sreg2); break; CASE_PPC64 (OP_LMIN_UN) ppc_cmpl (code, 0, 1, ins->sreg1, ins->sreg2); ppc_isellt (code, ins->dreg, ins->sreg1, ins->sreg2); break; CASE_PPC64 (OP_LMAX) ppc_cmp (code, 0, 1, ins->sreg1, ins->sreg2); ppc_iselgt (code, ins->dreg, ins->sreg1, ins->sreg2); break; CASE_PPC64 (OP_LMAX_UN) ppc_cmpl (code, 0, 1, ins->sreg1, ins->sreg2); ppc_iselgt (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_FCOMPARE: ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2); break; case OP_FCEQ: case OP_FCNEQ: ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2); ppc_li (code, ins->dreg, 1); ppc_bc (code, ins->opcode == OP_FCEQ ? PPC_BR_TRUE : PPC_BR_FALSE, PPC_BR_EQ, 2); ppc_li (code, ins->dreg, 0); break; case OP_FCLT: case OP_FCGE: ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2); ppc_li (code, ins->dreg, 1); ppc_bc (code, ins->opcode == OP_FCLT ? PPC_BR_TRUE : PPC_BR_FALSE, PPC_BR_LT, 2); ppc_li (code, ins->dreg, 0); break; case OP_FCLT_UN: ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2); ppc_li (code, ins->dreg, 1); ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 3); ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2); ppc_li (code, ins->dreg, 0); break; case OP_FCGT: case OP_FCLE: ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2); ppc_li (code, ins->dreg, 1); ppc_bc (code, ins->opcode == OP_FCGT ? 
PPC_BR_TRUE : PPC_BR_FALSE, PPC_BR_GT, 2); ppc_li (code, ins->dreg, 0); break; case OP_FCGT_UN: ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2); ppc_li (code, ins->dreg, 1); ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 3); ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2); ppc_li (code, ins->dreg, 0); break; case OP_FBEQ: EMIT_COND_BRANCH (ins, CEE_BEQ - CEE_BEQ); break; case OP_FBNE_UN: EMIT_COND_BRANCH (ins, CEE_BNE_UN - CEE_BEQ); break; case OP_FBLT: ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2); EMIT_COND_BRANCH (ins, CEE_BLT - CEE_BEQ); break; case OP_FBLT_UN: EMIT_COND_BRANCH_FLAGS (ins, PPC_BR_TRUE, PPC_BR_SO); EMIT_COND_BRANCH (ins, CEE_BLT_UN - CEE_BEQ); break; case OP_FBGT: ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2); EMIT_COND_BRANCH (ins, CEE_BGT - CEE_BEQ); break; case OP_FBGT_UN: EMIT_COND_BRANCH_FLAGS (ins, PPC_BR_TRUE, PPC_BR_SO); EMIT_COND_BRANCH (ins, CEE_BGT_UN - CEE_BEQ); break; case OP_FBGE: ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2); EMIT_COND_BRANCH (ins, CEE_BGE - CEE_BEQ); break; case OP_FBGE_UN: EMIT_COND_BRANCH (ins, CEE_BGE_UN - CEE_BEQ); break; case OP_FBLE: ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2); EMIT_COND_BRANCH (ins, CEE_BLE - CEE_BEQ); break; case OP_FBLE_UN: EMIT_COND_BRANCH (ins, CEE_BLE_UN - CEE_BEQ); break; case OP_CKFINITE: g_assert_not_reached (); case OP_PPC_CHECK_FINITE: { ppc_rlwinm (code, ins->sreg1, ins->sreg1, 0, 1, 31); ppc_addis (code, ins->sreg1, ins->sreg1, -32752); ppc_rlwinmd (code, ins->sreg1, ins->sreg1, 1, 31, 31); EMIT_COND_SYSTEM_EXCEPTION (CEE_BEQ - CEE_BEQ, "ArithmeticException"); break; case OP_JUMP_TABLE: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_c1, ins->inst_p0); #ifdef __mono_ppc64__ ppc_load_sequence (code, ins->dreg, (guint64)0x0f0f0f0f0f0f0f0fLL); #else ppc_load_sequence (code, ins->dreg, (gulong)0x0f0f0f0fL); #endif break; } #ifdef __mono_ppc64__ case OP_ICONV_TO_I4: case OP_SEXT_I4: ppc_extsw (code, ins->dreg, ins->sreg1); break; case OP_ICONV_TO_U4: case OP_ZEXT_I4: ppc_clrldi (code, ins->dreg, ins->sreg1, 32); break; case OP_ICONV_TO_R4: case OP_ICONV_TO_R8: case OP_LCONV_TO_R4: case OP_LCONV_TO_R8: { int tmp; if (ins->opcode == OP_ICONV_TO_R4 || ins->opcode == OP_ICONV_TO_R8) { ppc_extsw (code, ppc_r0, ins->sreg1); tmp = ppc_r0; } else { tmp = ins->sreg1; } if (cpu_hw_caps & PPC_MOVE_FPR_GPR) { ppc_mffgpr (code, ins->dreg, tmp); } else { ppc_str (code, tmp, -8, ppc_r1); ppc_lfd (code, ins->dreg, -8, ppc_r1); } ppc_fcfid (code, ins->dreg, ins->dreg); if (ins->opcode == OP_ICONV_TO_R4 || ins->opcode == OP_LCONV_TO_R4) ppc_frsp (code, ins->dreg, ins->dreg); break; } case OP_LSHR: ppc_srad (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_LSHR_UN: ppc_srd (code, ins->dreg, ins->sreg1, ins->sreg2); break; case OP_COND_EXC_C: /* check XER [0-3] (SO, OV, CA): we can't use mcrxr */ ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1 << 13)); /* CA */ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, (const char*)ins->inst_p1); break; case OP_COND_EXC_OV: ppc_mfspr (code, ppc_r0, ppc_xer); ppc_andisd (code, ppc_r0, ppc_r0, (1 << 14)); /* OV */ EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, (const char*)ins->inst_p1); break; case OP_LBEQ: case OP_LBNE_UN: case OP_LBLT: case OP_LBLT_UN: case OP_LBGT: case OP_LBGT_UN: case OP_LBGE: case OP_LBGE_UN: case OP_LBLE: case OP_LBLE_UN: EMIT_COND_BRANCH (ins, ins->opcode - OP_LBEQ); break; case OP_FCONV_TO_I8: code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE); break; case OP_FCONV_TO_U8: code = emit_float_to_int (cfg, code, 
ins->dreg, ins->sreg1, 8, FALSE); break; case OP_STOREI4_MEMBASE_REG: if (ppc_is_imm16 (ins->inst_offset)) { ppc_stw (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); } else { ppc_load (code, ppc_r0, ins->inst_offset); ppc_stwx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0); } break; case OP_STOREI4_MEMINDEX: ppc_stwx (code, ins->sreg1, ins->sreg2, ins->inst_destbasereg); break; case OP_ISHR_IMM: ppc_srawi (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f)); break; case OP_ISHR_UN_IMM: if (ins->inst_imm & 0x1f) ppc_srwi (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f)); else ppc_mr (code, ins->dreg, ins->sreg1); break; #else case OP_ICONV_TO_R4: case OP_ICONV_TO_R8: { if (cpu_hw_caps & PPC_ISA_64) { ppc_srawi(code, ppc_r0, ins->sreg1, 31); ppc_stw (code, ppc_r0, -8, ppc_r1); ppc_stw (code, ins->sreg1, -4, ppc_r1); ppc_lfd (code, ins->dreg, -8, ppc_r1); ppc_fcfid (code, ins->dreg, ins->dreg); if (ins->opcode == OP_ICONV_TO_R4) ppc_frsp (code, ins->dreg, ins->dreg); } break; } #endif case OP_ATOMIC_ADD_I4: CASE_PPC64 (OP_ATOMIC_ADD_I8) { int location = ins->inst_basereg; int addend = ins->sreg2; guint8 *loop, *branch; g_assert (ins->inst_offset == 0); loop = code; ppc_sync (code); if (ins->opcode == OP_ATOMIC_ADD_I4) ppc_lwarx (code, ppc_r0, 0, location); #ifdef __mono_ppc64__ else ppc_ldarx (code, ppc_r0, 0, location); #endif ppc_add (code, ppc_r0, ppc_r0, addend); if (ins->opcode == OP_ATOMIC_ADD_I4) ppc_stwcxd (code, ppc_r0, 0, location); #ifdef __mono_ppc64__ else ppc_stdcxd (code, ppc_r0, 0, location); #endif branch = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); ppc_patch (branch, loop); ppc_sync (code); ppc_mr (code, ins->dreg, ppc_r0); break; } case OP_ATOMIC_CAS_I4: CASE_PPC64 (OP_ATOMIC_CAS_I8) { int location = ins->sreg1; int value = ins->sreg2; int comparand = ins->sreg3; guint8 *start, *not_equal, *lost_reservation; start = code; ppc_sync (code); if (ins->opcode == OP_ATOMIC_CAS_I4) ppc_lwarx (code, ppc_r0, 0, location); #ifdef __mono_ppc64__ else ppc_ldarx (code, ppc_r0, 0, location); #endif ppc_cmp (code, 0, ins->opcode == OP_ATOMIC_CAS_I4 ? 
0 : 1, ppc_r0, comparand); not_equal = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); if (ins->opcode == OP_ATOMIC_CAS_I4) ppc_stwcxd (code, value, 0, location); #ifdef __mono_ppc64__ else ppc_stdcxd (code, value, 0, location); #endif lost_reservation = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); ppc_patch (lost_reservation, start); ppc_patch (not_equal, code); ppc_sync (code); ppc_mr (code, ins->dreg, ppc_r0); break; } case OP_LIVERANGE_START: { if (cfg->verbose_level > 1) printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code; break; } case OP_LIVERANGE_END: { if (cfg->verbose_level > 1) printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code; break; } case OP_GC_SAFE_POINT: break; default: g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__); g_assert_not_reached (); } if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) { g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)", mono_inst_name (ins->opcode), max_len, (glong)(code - cfg->native_code - offset)); g_assert_not_reached (); } cpos += max_len; last_ins = ins; } set_code_cursor (cfg, code); } #endif /* !DISABLE_JIT */ void mono_arch_register_lowlevel_calls (void) { /* The signature doesn't matter */ mono_register_jit_icall (mono_ppc_throw_exception, mono_icall_sig_void, TRUE); } #ifdef __mono_ppc64__ #if G_BYTE_ORDER == G_LITTLE_ENDIAN #define patch_load_sequence(ip,val) do {\ guint16 *__load = (guint16*)(ip); \ g_assert (sizeof (val) == sizeof (gsize)); \ __load [0] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \ __load [2] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \ __load [6] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \ __load [8] = ((guint64)(gsize)(val)) & 0xffff; \ } while (0) #elif G_BYTE_ORDER == G_BIG_ENDIAN #define patch_load_sequence(ip,val) do {\ guint16 *__load = (guint16*)(ip); \ g_assert (sizeof (val) == sizeof (gsize)); \ __load [1] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \ __load [3] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \ __load [7] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \ __load [9] = ((guint64)(gsize)(val)) & 0xffff; \ } while (0) #else #error huh? 
No endianess defined by compiler #endif #else #define patch_load_sequence(ip,val) do {\ guint16 *__lis_ori = (guint16*)(ip); \ __lis_ori [1] = (((gulong)(val)) >> 16) & 0xffff; \ __lis_ori [3] = ((gulong)(val)) & 0xffff; \ } while (0) #endif #ifndef DISABLE_JIT void mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target) { unsigned char *ip = ji->ip.i + code; gboolean is_fd = FALSE; switch (ji->type) { case MONO_PATCH_INFO_IP: patch_load_sequence (ip, ip); break; case MONO_PATCH_INFO_SWITCH: { gpointer *table = (gpointer *)ji->data.table->table; int i; patch_load_sequence (ip, table); for (i = 0; i < ji->data.table->table_size; i++) { table [i] = (glong)ji->data.table->table [i] + code; } /* we put into the table the absolute address, no need for ppc_patch in this case */ break; } case MONO_PATCH_INFO_METHODCONST: case MONO_PATCH_INFO_CLASS: case MONO_PATCH_INFO_IMAGE: case MONO_PATCH_INFO_FIELD: case MONO_PATCH_INFO_VTABLE: case MONO_PATCH_INFO_IID: case MONO_PATCH_INFO_SFLDA: case MONO_PATCH_INFO_LDSTR: case MONO_PATCH_INFO_TYPE_FROM_HANDLE: case MONO_PATCH_INFO_LDTOKEN: /* from OP_AOTCONST : lis + ori */ patch_load_sequence (ip, target); break; case MONO_PATCH_INFO_R4: case MONO_PATCH_INFO_R8: g_assert_not_reached (); *((gconstpointer *)(ip + 2)) = ji->data.target; break; case MONO_PATCH_INFO_EXC_NAME: g_assert_not_reached (); *((gconstpointer *)(ip + 1)) = ji->data.name; break; case MONO_PATCH_INFO_NONE: case MONO_PATCH_INFO_BB_OVF: case MONO_PATCH_INFO_EXC_OVF: /* everything is dealt with at epilog output time */ break; #ifdef PPC_USES_FUNCTION_DESCRIPTOR case MONO_PATCH_INFO_JIT_ICALL_ID: case MONO_PATCH_INFO_ABS: case MONO_PATCH_INFO_RGCTX_FETCH: case MONO_PATCH_INFO_JIT_ICALL_ADDR: case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR: is_fd = TRUE; /* fall through */ #endif default: ppc_patch_full (cfg, ip, (const guchar*)target, is_fd); break; } } /* * Emit code to save the registers in used_int_regs or the registers in the MonoLMF * structure at positive offset pos from register base_reg. pos is guaranteed to fit into * the instruction offset immediate for all the registers. 
*/ static guint8* save_registers (MonoCompile *cfg, guint8* code, int pos, int base_reg, gboolean save_lmf, guint32 used_int_regs, int cfa_offset) { int i; if (!save_lmf) { for (i = 13; i <= 31; i++) { if (used_int_regs & (1 << i)) { ppc_str (code, i, pos, base_reg); mono_emit_unwind_op_offset (cfg, code, i, pos - cfa_offset); pos += sizeof (target_mgreg_t); } } } else { /* pos is the start of the MonoLMF structure */ int offset = pos + G_STRUCT_OFFSET (MonoLMF, iregs); for (i = 13; i <= 31; i++) { ppc_str (code, i, offset, base_reg); mono_emit_unwind_op_offset (cfg, code, i, offset - cfa_offset); offset += sizeof (target_mgreg_t); } offset = pos + G_STRUCT_OFFSET (MonoLMF, fregs); for (i = 14; i < 32; i++) { ppc_stfd (code, i, offset, base_reg); offset += sizeof (gdouble); } } return code; } /* * Stack frame layout: * * ------------------- sp * MonoLMF structure or saved registers * ------------------- * spilled regs * ------------------- * locals * ------------------- * param area size is cfg->param_area * ------------------- * linkage area size is PPC_STACK_PARAM_OFFSET * ------------------- sp * red zone */ guint8 * mono_arch_emit_prolog (MonoCompile *cfg) { MonoMethod *method = cfg->method; MonoBasicBlock *bb; MonoMethodSignature *sig; MonoInst *inst; long alloc_size, pos, max_offset, cfa_offset; int i; guint8 *code; CallInfo *cinfo; int lmf_offset = 0; int tailcall_struct_index; sig = mono_method_signature_internal (method); cfg->code_size = 512 + sig->param_count * 32; code = cfg->native_code = g_malloc (cfg->code_size); cfa_offset = 0; /* We currently emit unwind info for aot, but don't use it */ mono_emit_unwind_op_def_cfa (cfg, code, ppc_r1, 0); if (1 || cfg->flags & MONO_CFG_HAS_CALLS) { ppc_mflr (code, ppc_r0); ppc_str (code, ppc_r0, PPC_RET_ADDR_OFFSET, ppc_sp); mono_emit_unwind_op_offset (cfg, code, ppc_lr, PPC_RET_ADDR_OFFSET); } alloc_size = cfg->stack_offset; pos = 0; if (!method->save_lmf) { for (i = 31; i >= 13; --i) { if (cfg->used_int_regs & (1 << i)) { pos += sizeof (target_mgreg_t); } } } else { pos += sizeof (MonoLMF); lmf_offset = pos; } alloc_size += pos; // align to MONO_ARCH_FRAME_ALIGNMENT bytes if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) { alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1; alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1); } cfg->stack_usage = alloc_size; g_assert ((alloc_size & (MONO_ARCH_FRAME_ALIGNMENT-1)) == 0); if (alloc_size) { if (ppc_is_imm16 (-alloc_size)) { ppc_str_update (code, ppc_sp, -alloc_size, ppc_sp); cfa_offset = alloc_size; mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size); code = save_registers (cfg, code, alloc_size - pos, ppc_sp, method->save_lmf, cfg->used_int_regs, cfa_offset); } else { if (pos) ppc_addi (code, ppc_r12, ppc_sp, -pos); ppc_load (code, ppc_r0, -alloc_size); ppc_str_update_indexed (code, ppc_sp, ppc_sp, ppc_r0); cfa_offset = alloc_size; mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size); code = save_registers (cfg, code, 0, ppc_r12, method->save_lmf, cfg->used_int_regs, cfa_offset); } } if (cfg->frame_reg != ppc_sp) { ppc_mr (code, cfg->frame_reg, ppc_sp); mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg); } /* store runtime generic context */ if (cfg->rgctx_var) { g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET && (cfg->rgctx_var->inst_basereg == ppc_r1 || cfg->rgctx_var->inst_basereg == ppc_r31)); ppc_stptr (code, MONO_ARCH_RGCTX_REG, cfg->rgctx_var->inst_offset, cfg->rgctx_var->inst_basereg); } /* compute max_offset in order to use short forward jumps * we always do it on ppc 
because the immediate displacement * for jumps is too small */ max_offset = 0; for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins; bb->max_offset = max_offset; MONO_BB_FOR_EACH_INS (bb, ins) max_offset += ins_get_size (ins->opcode); } /* load arguments allocated to register from the stack */ pos = 0; cinfo = get_call_info (sig); if (MONO_TYPE_ISSTRUCT (sig->ret)) { ArgInfo *ainfo = &cinfo->ret; inst = cfg->vret_addr; g_assert (inst); if (ppc_is_imm16 (inst->inst_offset)) { ppc_stptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_stptr_indexed (code, ainfo->reg, ppc_r12, inst->inst_basereg); } } tailcall_struct_index = 0; for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; inst = cfg->args [pos]; if (cfg->verbose_level > 2) g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype); if (inst->opcode == OP_REGVAR) { if (ainfo->regtype == RegTypeGeneral) ppc_mr (code, inst->dreg, ainfo->reg); else if (ainfo->regtype == RegTypeFP) ppc_fmr (code, inst->dreg, ainfo->reg); else if (ainfo->regtype == RegTypeBase) { ppc_ldr (code, ppc_r12, 0, ppc_sp); ppc_ldptr (code, inst->dreg, ainfo->offset, ppc_r12); } else g_assert_not_reached (); if (cfg->verbose_level > 2) g_print ("Argument %ld assigned to register %s\n", pos, mono_arch_regname (inst->dreg)); } else { /* the argument should be put on the stack: FIXME handle size != word */ if (ainfo->regtype == RegTypeGeneral) { switch (ainfo->size) { case 1: if (ppc_is_imm16 (inst->inst_offset)) { ppc_stb (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_stb (code, ainfo->reg, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_stbx (code, ainfo->reg, inst->inst_basereg, ppc_r12); } } break; case 2: if (ppc_is_imm16 (inst->inst_offset)) { ppc_sth (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_sth (code, ainfo->reg, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_sthx (code, ainfo->reg, inst->inst_basereg, ppc_r12); } } break; #ifdef __mono_ppc64__ case 4: if (ppc_is_imm16 (inst->inst_offset)) { ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_stw (code, ainfo->reg, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_stwx (code, ainfo->reg, inst->inst_basereg, ppc_r12); } } break; case 8: if (ppc_is_imm16 (inst->inst_offset)) { ppc_str (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_str_indexed (code, ainfo->reg, ppc_r12, inst->inst_basereg); } break; #else case 8: if (ppc_is_imm16 (inst->inst_offset + 4)) { ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); ppc_stw (code, ainfo->reg + 1, inst->inst_offset + 4, inst->inst_basereg); } else { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_addi (code, ppc_r12, ppc_r12, inst->inst_offset); ppc_stw (code, ainfo->reg, 0, ppc_r12); ppc_stw (code, ainfo->reg + 1, 4, ppc_r12); } break; #endif default: if (ppc_is_imm16 (inst->inst_offset)) { ppc_stptr (code, 
ainfo->reg, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_stptr (code, ainfo->reg, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_stptr_indexed (code, ainfo->reg, inst->inst_basereg, ppc_r12); } } break; } } else if (ainfo->regtype == RegTypeBase) { g_assert (ppc_is_imm16 (ainfo->offset)); /* load the previous stack pointer in r12 */ ppc_ldr (code, ppc_r12, 0, ppc_sp); ppc_ldptr (code, ppc_r0, ainfo->offset, ppc_r12); switch (ainfo->size) { case 1: if (ppc_is_imm16 (inst->inst_offset)) { ppc_stb (code, ppc_r0, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_stb (code, ppc_r0, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_stbx (code, ppc_r0, inst->inst_basereg, ppc_r12); } } break; case 2: if (ppc_is_imm16 (inst->inst_offset)) { ppc_sth (code, ppc_r0, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_sth (code, ppc_r0, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_sthx (code, ppc_r0, inst->inst_basereg, ppc_r12); } } break; #ifdef __mono_ppc64__ case 4: if (ppc_is_imm16 (inst->inst_offset)) { ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_stw (code, ppc_r0, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_stwx (code, ppc_r0, inst->inst_basereg, ppc_r12); } } break; case 8: if (ppc_is_imm16 (inst->inst_offset)) { ppc_str (code, ppc_r0, inst->inst_offset, inst->inst_basereg); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_str_indexed (code, ppc_r0, ppc_r12, inst->inst_basereg); } break; #else case 8: g_assert (ppc_is_imm16 (ainfo->offset + 4)); if (ppc_is_imm16 (inst->inst_offset + 4)) { ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg); ppc_lwz (code, ppc_r0, ainfo->offset + 4, ppc_r12); ppc_stw (code, ppc_r0, inst->inst_offset + 4, inst->inst_basereg); } else { /* use r11 to load the 2nd half of the long before we clobber r12. 
*/ ppc_lwz (code, ppc_r11, ainfo->offset + 4, ppc_r12); ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_addi (code, ppc_r12, ppc_r12, inst->inst_offset); ppc_stw (code, ppc_r0, 0, ppc_r12); ppc_stw (code, ppc_r11, 4, ppc_r12); } break; #endif default: if (ppc_is_imm16 (inst->inst_offset)) { ppc_stptr (code, ppc_r0, inst->inst_offset, inst->inst_basereg); } else { if (ppc_is_imm32 (inst->inst_offset)) { ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset)); ppc_stptr (code, ppc_r0, inst->inst_offset, ppc_r12); } else { ppc_load (code, ppc_r12, inst->inst_offset); ppc_stptr_indexed (code, ppc_r0, inst->inst_basereg, ppc_r12); } } break; } } else if (ainfo->regtype == RegTypeFP) { g_assert (ppc_is_imm16 (inst->inst_offset)); if (ainfo->size == 8) ppc_stfd (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); else if (ainfo->size == 4) ppc_stfs (code, ainfo->reg, inst->inst_offset, inst->inst_basereg); else g_assert_not_reached (); } else if (ainfo->regtype == RegTypeFPStructByVal) { int doffset = inst->inst_offset; int soffset = 0; int cur_reg; int size = 0; g_assert (ppc_is_imm16 (inst->inst_offset)); g_assert (ppc_is_imm16 (inst->inst_offset + ainfo->vtregs * sizeof (target_mgreg_t))); /* FIXME: what if there is no class? */ if (sig->pinvoke && !sig->marshalling_disabled && mono_class_from_mono_type_internal (inst->inst_vtype)) size = mono_class_native_size (mono_class_from_mono_type_internal (inst->inst_vtype), NULL); for (cur_reg = 0; cur_reg < ainfo->vtregs; ++cur_reg) { if (ainfo->size == 4) { ppc_stfs (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); } else { ppc_stfd (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); } soffset += ainfo->size; doffset += ainfo->size; } } else if (ainfo->regtype == RegTypeStructByVal) { int doffset = inst->inst_offset; int soffset = 0; int cur_reg; int size = 0; g_assert (ppc_is_imm16 (inst->inst_offset)); g_assert (ppc_is_imm16 (inst->inst_offset + ainfo->vtregs * sizeof (target_mgreg_t))); /* FIXME: what if there is no class? */ if (sig->pinvoke && !sig->marshalling_disabled && mono_class_from_mono_type_internal (inst->inst_vtype)) size = mono_class_native_size (mono_class_from_mono_type_internal (inst->inst_vtype), NULL); for (cur_reg = 0; cur_reg < ainfo->vtregs; ++cur_reg) { #if __APPLE__ /* * Darwin handles 1 and 2 byte * structs specially by * loading h/b into the arg * register. Only done for * pinvokes. */ if (size == 2) ppc_sth (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); else if (size == 1) ppc_stb (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); else #endif { #ifdef __mono_ppc64__ if (ainfo->bytes) { g_assert (cur_reg == 0); #if G_BYTE_ORDER == G_BIG_ENDIAN ppc_sldi (code, ppc_r0, ainfo->reg, (sizeof (target_mgreg_t) - ainfo->bytes) * 8); ppc_stptr (code, ppc_r0, doffset, inst->inst_basereg); #else if (mono_class_native_size (inst->klass, NULL) == 1) { ppc_stb (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); } else if (mono_class_native_size (inst->klass, NULL) == 2) { ppc_sth (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); } else if (mono_class_native_size (inst->klass, NULL) == 4) { // WDS -- maybe <=4? ppc_stw (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); } else { ppc_stptr (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); // WDS -- Better way? 
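/*
 * NOTE: illustrative aside, not part of the original file. On little-endian
 * ppc64 the significant bytes of a small struct passed by value sit at the
 * low end of the register, so the branch above stores with a width matching
 * the struct's native size; the big-endian branch instead shifts the bytes
 * to the high end (ppc_sldi) and performs a full register-width store.
 */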
} #endif } else #endif { ppc_stptr (code, ainfo->reg + cur_reg, doffset, inst->inst_basereg); } } soffset += sizeof (target_mgreg_t); doffset += sizeof (target_mgreg_t); } if (ainfo->vtsize) { /* FIXME: we need to do the shifting here, too */ if (ainfo->bytes) NOT_IMPLEMENTED; /* load the previous stack pointer in r12 (r0 gets overwritten by the memcpy) */ ppc_ldr (code, ppc_r12, 0, ppc_sp); if ((size & MONO_PPC_32_64_CASE (3, 7)) != 0) { code = emit_memcpy (code, size - soffset, inst->inst_basereg, doffset, ppc_r12, ainfo->offset + soffset); } else { code = emit_memcpy (code, ainfo->vtsize * sizeof (target_mgreg_t), inst->inst_basereg, doffset, ppc_r12, ainfo->offset + soffset); } } } else if (ainfo->regtype == RegTypeStructByAddr) { /* if it was originally a RegTypeBase */ if (ainfo->offset) { /* load the previous stack pointer in r12 */ ppc_ldr (code, ppc_r12, 0, ppc_sp); ppc_ldptr (code, ppc_r12, ainfo->offset, ppc_r12); } else { ppc_mr (code, ppc_r12, ainfo->reg); } g_assert (ppc_is_imm16 (inst->inst_offset)); code = emit_memcpy (code, ainfo->vtsize, inst->inst_basereg, inst->inst_offset, ppc_r12, 0); /*g_print ("copy in %s: %d bytes from %d to offset: %d\n", method->name, ainfo->vtsize, ainfo->reg, inst->inst_offset);*/ } else g_assert_not_reached (); } pos++; } if (method->save_lmf) { if (cfg->compile_aot) { /* Compute the got address which is needed by the PLT entry */ code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL); } mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern)); if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) { ppc_load_func (code, PPC_CALL_REG, 0); ppc_mtlr (code, PPC_CALL_REG); ppc_blrl (code); } else { ppc_bl (code, 0); } /* we build the MonoLMF structure on the stack - see mini-ppc.h */ /* lmf_offset is the offset from the previous stack pointer, * alloc_size is the total stack space allocated, so the offset * of MonoLMF from the current stack ptr is alloc_size - lmf_offset. * The pointer to the struct is put in ppc_r12 (new_lmf). 
* The callee-saved registers are already in the MonoLMF structure */ ppc_addi (code, ppc_r12, ppc_sp, alloc_size - lmf_offset); /* ppc_r3 is the result from mono_get_lmf_addr () */ ppc_stptr (code, ppc_r3, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r12); /* new_lmf->previous_lmf = *lmf_addr */ ppc_ldptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3); ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r12); /* *(lmf_addr) = r12 */ ppc_stptr (code, ppc_r12, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3); /* save method info */ if (cfg->compile_aot) // FIXME: ppc_load (code, ppc_r0, 0); else ppc_load_ptr (code, ppc_r0, method); ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, method), ppc_r12); ppc_stptr (code, ppc_sp, G_STRUCT_OFFSET(MonoLMF, ebp), ppc_r12); /* save the current IP */ if (cfg->compile_aot) { ppc_bl (code, 1); ppc_mflr (code, ppc_r0); } else { mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL); #ifdef __mono_ppc64__ ppc_load_sequence (code, ppc_r0, (guint64)0x0101010101010101LL); #else ppc_load_sequence (code, ppc_r0, (gulong)0x01010101L); #endif } ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, eip), ppc_r12); } set_code_cursor (cfg, code); g_free (cinfo); return code; } void mono_arch_emit_epilog (MonoCompile *cfg) { MonoMethod *method = cfg->method; int pos, i; int max_epilog_size = 16 + 20*4; guint8 *code; if (cfg->method->save_lmf) max_epilog_size += 128; code = realloc_code (cfg, max_epilog_size); pos = 0; if (method->save_lmf) { int lmf_offset; pos += sizeof (MonoLMF); lmf_offset = pos; /* save the frame reg in r8 */ ppc_mr (code, ppc_r8, cfg->frame_reg); ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage - lmf_offset); /* r5 = previous_lmf */ ppc_ldptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r12); /* r6 = lmf_addr */ ppc_ldptr (code, ppc_r6, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r12); /* *(lmf_addr) = previous_lmf */ ppc_stptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r6); /* FIXME: speedup: there is no actual need to restore the registers if * we didn't actually change them (idea from Zoltan). 
*/ /* restore iregs */ ppc_ldr_multiple (code, ppc_r13, G_STRUCT_OFFSET(MonoLMF, iregs), ppc_r12); /* restore fregs */ /*for (i = 14; i < 32; i++) { ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r12); }*/ g_assert (ppc_is_imm16 (cfg->stack_usage + PPC_RET_ADDR_OFFSET)); /* use the saved copy of the frame reg in r8 */ if (1 || cfg->flags & MONO_CFG_HAS_CALLS) { ppc_ldr (code, ppc_r0, cfg->stack_usage + PPC_RET_ADDR_OFFSET, ppc_r8); ppc_mtlr (code, ppc_r0); } ppc_addic (code, ppc_sp, ppc_r8, cfg->stack_usage); } else { if (1 || cfg->flags & MONO_CFG_HAS_CALLS) { long return_offset = cfg->stack_usage + PPC_RET_ADDR_OFFSET; if (ppc_is_imm16 (return_offset)) { ppc_ldr (code, ppc_r0, return_offset, cfg->frame_reg); } else { ppc_load (code, ppc_r12, return_offset); ppc_ldr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r12); } ppc_mtlr (code, ppc_r0); } if (ppc_is_imm16 (cfg->stack_usage)) { int offset = cfg->stack_usage; for (i = 13; i <= 31; i++) { if (cfg->used_int_regs & (1 << i)) offset -= sizeof (target_mgreg_t); } if (cfg->frame_reg != ppc_sp) ppc_mr (code, ppc_r12, cfg->frame_reg); /* note r31 (possibly the frame register) is restored last */ for (i = 13; i <= 31; i++) { if (cfg->used_int_regs & (1 << i)) { ppc_ldr (code, i, offset, cfg->frame_reg); offset += sizeof (target_mgreg_t); } } if (cfg->frame_reg != ppc_sp) ppc_addi (code, ppc_sp, ppc_r12, cfg->stack_usage); else ppc_addi (code, ppc_sp, ppc_sp, cfg->stack_usage); } else { ppc_load32 (code, ppc_r12, cfg->stack_usage); if (cfg->used_int_regs) { ppc_add (code, ppc_r12, cfg->frame_reg, ppc_r12); for (i = 31; i >= 13; --i) { if (cfg->used_int_regs & (1 << i)) { pos += sizeof (target_mgreg_t); ppc_ldr (code, i, -pos, ppc_r12); } } ppc_mr (code, ppc_sp, ppc_r12); } else { ppc_add (code, ppc_sp, cfg->frame_reg, ppc_r12); } } } ppc_blr (code); set_code_cursor (cfg, code); } #endif /* ifndef DISABLE_JIT */ /* remove once throw_exception_by_name is eliminated */ static int exception_id_by_name (const char *name) { if (strcmp (name, "IndexOutOfRangeException") == 0) return MONO_EXC_INDEX_OUT_OF_RANGE; if (strcmp (name, "OverflowException") == 0) return MONO_EXC_OVERFLOW; if (strcmp (name, "ArithmeticException") == 0) return MONO_EXC_ARITHMETIC; if (strcmp (name, "DivideByZeroException") == 0) return MONO_EXC_DIVIDE_BY_ZERO; if (strcmp (name, "InvalidCastException") == 0) return MONO_EXC_INVALID_CAST; if (strcmp (name, "NullReferenceException") == 0) return MONO_EXC_NULL_REF; if (strcmp (name, "ArrayTypeMismatchException") == 0) return MONO_EXC_ARRAY_TYPE_MISMATCH; if (strcmp (name, "ArgumentException") == 0) return MONO_EXC_ARGUMENT; g_error ("Unknown intrinsic exception %s\n", name); return 0; } #ifndef DISABLE_JIT void mono_arch_emit_exceptions (MonoCompile *cfg) { MonoJumpInfo *patch_info; int i; guint8 *code; guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM]; guint8 exc_throw_found [MONO_EXC_INTRINS_NUM]; int max_epilog_size = 50; for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) { exc_throw_pos [i] = NULL; exc_throw_found [i] = 0; } /* count the number of exception infos */ /* * make sure we have enough space for exceptions */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { if (patch_info->type == MONO_PATCH_INFO_EXC) { i = exception_id_by_name ((const char*)patch_info->data.target); if (!exc_throw_found [i]) { max_epilog_size += (2 * PPC_LOAD_SEQUENCE_LENGTH) + 5 * 4; exc_throw_found [i] = TRUE; } } else if (patch_info->type == MONO_PATCH_INFO_BB_OVF) max_epilog_size += 12; else 
if (patch_info->type == MONO_PATCH_INFO_EXC_OVF) { MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target; i = exception_id_by_name (ovfj->data.exception); if (!exc_throw_found [i]) { max_epilog_size += (2 * PPC_LOAD_SEQUENCE_LENGTH) + 5 * 4; exc_throw_found [i] = TRUE; } max_epilog_size += 8; } } code = realloc_code (cfg, max_epilog_size); /* add code to raise exceptions */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { switch (patch_info->type) { case MONO_PATCH_INFO_BB_OVF: { MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target; unsigned char *ip = patch_info->ip.i + cfg->native_code; /* patch the initial jump */ ppc_patch (ip, code); ppc_bc (code, ovfj->b0_cond, ovfj->b1_cond, 2); ppc_b (code, 0); ppc_patch (code - 4, ip + 4); /* jump back after the initiali branch */ /* jump back to the true target */ ppc_b (code, 0); ip = ovfj->data.bb->native_offset + cfg->native_code; ppc_patch (code - 4, ip); patch_info->type = MONO_PATCH_INFO_NONE; break; } case MONO_PATCH_INFO_EXC_OVF: { MonoOvfJump *ovfj = (MonoOvfJump*)patch_info->data.target; MonoJumpInfo *newji; unsigned char *ip = patch_info->ip.i + cfg->native_code; unsigned char *bcl = code; /* patch the initial jump: we arrived here with a call */ ppc_patch (ip, code); ppc_bc (code, ovfj->b0_cond, ovfj->b1_cond, 0); ppc_b (code, 0); ppc_patch (code - 4, ip + 4); /* jump back after the initiali branch */ /* patch the conditional jump to the right handler */ /* make it processed next */ newji = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo)); newji->type = MONO_PATCH_INFO_EXC; newji->ip.i = bcl - cfg->native_code; newji->data.target = ovfj->data.exception; newji->next = patch_info->next; patch_info->next = newji; patch_info->type = MONO_PATCH_INFO_NONE; break; } case MONO_PATCH_INFO_EXC: { MonoClass *exc_class; unsigned char *ip = patch_info->ip.i + cfg->native_code; i = exception_id_by_name ((const char*)patch_info->data.target); if (exc_throw_pos [i] && !(ip > exc_throw_pos [i] && ip - exc_throw_pos [i] > 50000)) { ppc_patch (ip, exc_throw_pos [i]); patch_info->type = MONO_PATCH_INFO_NONE; break; } else { exc_throw_pos [i] = code; } exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name); ppc_patch (ip, code); /*mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);*/ ppc_load (code, ppc_r3, m_class_get_type_token (exc_class)); /* we got here from a conditional call, so the calling ip is set in lr */ ppc_mflr (code, ppc_r4); patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID; patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception; patch_info->ip.i = code - cfg->native_code; if (FORCE_INDIR_CALL || cfg->method->dynamic) { ppc_load_func (code, PPC_CALL_REG, 0); ppc_mtctr (code, PPC_CALL_REG); ppc_bcctr (code, PPC_BR_ALWAYS, 0); } else { ppc_bl (code, 0); } break; } default: /* do nothing */ break; } } set_code_cursor (cfg, code); } #endif #if DEAD_CODE static int try_offset_access (void *value, guint32 idx) { register void* me __asm__ ("r2"); void ***p = (void***)((char*)me + 284); int idx1 = idx / 32; int idx2 = idx % 32; if (!p [idx1]) return 0; if (value != p[idx1][idx2]) return 0; return 1; } #endif void mono_arch_finish_init (void) { } #define CMP_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4) #define BR_SIZE 4 #define LOADSTORE_SIZE 4 #define JUMP_IMM_SIZE 12 #define JUMP_IMM32_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 8) #define ENABLE_WRONG_METHOD_CHECK 0 gpointer mono_arch_build_imt_trampoline 
(MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp) { int i; int size = 0; guint8 *code, *start; MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->is_equals) { if (item->check_target_idx) { if (!item->compare_done) item->chunk_size += CMP_SIZE; if (item->has_target_code) item->chunk_size += BR_SIZE + JUMP_IMM32_SIZE; else item->chunk_size += LOADSTORE_SIZE + BR_SIZE + JUMP_IMM_SIZE; } else { if (fail_tramp) { item->chunk_size += CMP_SIZE + BR_SIZE + JUMP_IMM32_SIZE * 2; if (!item->has_target_code) item->chunk_size += LOADSTORE_SIZE; } else { item->chunk_size += LOADSTORE_SIZE + JUMP_IMM_SIZE; #if ENABLE_WRONG_METHOD_CHECK item->chunk_size += CMP_SIZE + BR_SIZE + 4; #endif } } } else { item->chunk_size += CMP_SIZE + BR_SIZE; imt_entries [item->check_target_idx]->compare_done = TRUE; } size += item->chunk_size; } /* the initial load of the vtable address */ size += PPC_LOAD_SEQUENCE_LENGTH + LOADSTORE_SIZE; if (fail_tramp) { code = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, size); } else { code = mono_mem_manager_code_reserve (mem_manager, size); } start = code; /* * We need to save and restore r12 because it might be * used by the caller as the vtable register, so * clobbering it will trip up the magic trampoline. * * FIXME: Get rid of this by making sure that r12 is * not used as the vtable register in interface calls. */ ppc_stptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp); ppc_load (code, ppc_r12, (gsize)(& (vtable->vtable [0]))); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; item->code_target = code; if (item->is_equals) { if (item->check_target_idx) { if (!item->compare_done) { ppc_load (code, ppc_r0, (gsize)item->key); ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0); } item->jmp_code = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); if (item->has_target_code) { ppc_load_ptr (code, ppc_r0, item->value.target_code); } else { ppc_ldptr (code, ppc_r0, (sizeof (target_mgreg_t) * item->value.vtable_slot), ppc_r12); ppc_ldptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp); } ppc_mtctr (code, ppc_r0); ppc_bcctr (code, PPC_BR_ALWAYS, 0); } else { if (fail_tramp) { ppc_load (code, ppc_r0, (gulong)item->key); ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0); item->jmp_code = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); if (item->has_target_code) { ppc_load_ptr (code, ppc_r0, item->value.target_code); } else { g_assert (vtable); ppc_load_ptr (code, ppc_r0, & (vtable->vtable [item->value.vtable_slot])); ppc_ldptr_indexed (code, ppc_r0, 0, ppc_r0); } ppc_mtctr (code, ppc_r0); ppc_bcctr (code, PPC_BR_ALWAYS, 0); ppc_patch (item->jmp_code, code); ppc_load_ptr (code, ppc_r0, fail_tramp); ppc_mtctr (code, ppc_r0); ppc_bcctr (code, PPC_BR_ALWAYS, 0); item->jmp_code = NULL; } else { /* enable the commented code to assert on wrong method */ #if ENABLE_WRONG_METHOD_CHECK ppc_load (code, ppc_r0, (guint32)item->key); ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0); item->jmp_code = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0); #endif ppc_ldptr (code, ppc_r0, (sizeof (target_mgreg_t) * item->value.vtable_slot), ppc_r12); ppc_ldptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp); ppc_mtctr (code, ppc_r0); ppc_bcctr (code, PPC_BR_ALWAYS, 0); #if ENABLE_WRONG_METHOD_CHECK ppc_patch (item->jmp_code, code); ppc_break (code); item->jmp_code = NULL; #endif } } } else { ppc_load (code, ppc_r0, (gulong)item->key); 
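/*
 * NOTE: illustrative aside, not part of the original file. This is an
 * interior node of the IMT binary search: the key just loaded into r0 is
 * compared against the interface-method key in MONO_ARCH_IMT_REG, and the
 * conditional branch is patched afterwards (see the fixup loop below) to
 * jump to the subtree recorded in check_target_idx.
 */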
ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0); item->jmp_code = code; ppc_bc (code, PPC_BR_FALSE, PPC_BR_LT, 0); } } /* patch the branches to get to the target items */ for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->jmp_code) { if (item->check_target_idx) { ppc_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target); } } } if (!fail_tramp) UnlockedAdd (&mono_stats.imt_trampolines_size, code - start); g_assert (code - start <= size); mono_arch_flush_icache (start, size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL)); mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), mem_manager); return start; } MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code) { host_mgreg_t *r = (host_mgreg_t*)regs; return (MonoMethod*)(gsize) r [MONO_ARCH_IMT_REG]; } MonoVTable* mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code) { return (MonoVTable*)(gsize) regs [MONO_ARCH_RGCTX_REG]; } GSList* mono_arch_get_cie_program (void) { GSList *l = NULL; mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ppc_r1, 0); return l; } MonoInst* mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins = NULL; int opcode = 0; if (cmethod->klass == mono_class_try_get_math_class ()) { if (strcmp (cmethod->name, "Sqrt") == 0) { opcode = OP_SQRT; } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) { opcode = OP_ABS; } if (opcode && fsig->param_count == 1) { MONO_INST_NEW (cfg, ins, opcode); ins->type = STACK_R8; ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = args [0]->dreg; MONO_ADD_INS (cfg->cbb, ins); } /* Check for Min/Max for (u)int(32|64) */ opcode = 0; if (cpu_hw_caps & PPC_ISA_2_03) { if (strcmp (cmethod->name, "Min") == 0) { if (fsig->params [0]->type == MONO_TYPE_I4) opcode = OP_IMIN; if (fsig->params [0]->type == MONO_TYPE_U4) opcode = OP_IMIN_UN; #ifdef __mono_ppc64__ else if (fsig->params [0]->type == MONO_TYPE_I8) opcode = OP_LMIN; else if (fsig->params [0]->type == MONO_TYPE_U8) opcode = OP_LMIN_UN; #endif } else if (strcmp (cmethod->name, "Max") == 0) { if (fsig->params [0]->type == MONO_TYPE_I4) opcode = OP_IMAX; if (fsig->params [0]->type == MONO_TYPE_U4) opcode = OP_IMAX_UN; #ifdef __mono_ppc64__ else if (fsig->params [0]->type == MONO_TYPE_I8) opcode = OP_LMAX; else if (fsig->params [0]->type == MONO_TYPE_U8) opcode = OP_LMAX_UN; #endif } /* * TODO: Floating point version with fsel, but fsel has * some peculiarities (need a scratch reg unless * comparing with 0, NaN/Inf behaviour (then MathF too) */ } if (opcode && fsig->param_count == 2) { MONO_INST_NEW (cfg, ins, opcode); ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8; ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->sreg2 = args [1]->dreg; MONO_ADD_INS (cfg->cbb, ins); } /* Rounding instructions */ opcode = 0; if ((cpu_hw_caps & PPC_ISA_2X) && (fsig->param_count == 1) && (fsig->params [0]->type == MONO_TYPE_R8)) { /* * XXX: sysmath.c and the POWER ISA documentation for * frin[.] imply rounding is a little more complicated * than expected; the semantics are slightly different, * so just "frin." isn't a drop-in replacement. Floor, * Truncate, and Ceiling seem to work normally though. * (also, no float versions of these ops, but frsp * could be preprended?) 
*/ //if (!strcmp (cmethod->name, "Round")) // opcode = OP_ROUND; if (!strcmp (cmethod->name, "Floor")) opcode = OP_PPC_FLOOR; else if (!strcmp (cmethod->name, "Ceiling")) opcode = OP_PPC_CEIL; else if (!strcmp (cmethod->name, "Truncate")) opcode = OP_PPC_TRUNC; if (opcode != 0) { MONO_INST_NEW (cfg, ins, opcode); ins->type = STACK_R8; ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = args [0]->dreg; MONO_ADD_INS (cfg->cbb, ins); } } } if (cmethod->klass == mono_class_try_get_mathf_class ()) { if (strcmp (cmethod->name, "Sqrt") == 0) { opcode = OP_SQRTF; } /* XXX: POWER has no single-precision normal FPU abs? */ if (opcode && fsig->param_count == 1) { MONO_INST_NEW (cfg, ins, opcode); ins->type = STACK_R4; ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = args [0]->dreg; MONO_ADD_INS (cfg->cbb, ins); } } return ins; } host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg) { if (reg == ppc_r1) return (host_mgreg_t)(gsize)MONO_CONTEXT_GET_SP (ctx); return ctx->regs [reg]; } host_mgreg_t* mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg) { if (reg == ppc_r1) return (host_mgreg_t)(gsize)&MONO_CONTEXT_GET_SP (ctx); return &ctx->regs [reg]; } guint32 mono_arch_get_patch_offset (guint8 *code) { return 0; } /* * mono_aot_emit_load_got_addr: * * Emit code to load the got address. * On PPC, the result is placed into r30. */ guint8* mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji) { ppc_bl (code, 1); ppc_mflr (code, ppc_r30); if (cfg) mono_add_patch_info (cfg, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL); else *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL); /* arch_emit_got_address () patches this */ #if defined(TARGET_POWERPC64) ppc_nop (code); ppc_nop (code); ppc_nop (code); ppc_nop (code); #else ppc_load32 (code, ppc_r0, 0); ppc_add (code, ppc_r30, ppc_r30, ppc_r0); #endif set_code_cursor (cfg, code); return code; } /* * mono_ppc_emit_load_aotconst: * * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and * TARGET from the mscorlib GOT in full-aot code. * On PPC, the GOT address is assumed to be in r30, and the result is placed into * r12. */ guint8* mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, MonoJumpInfoType tramp_type, gconstpointer target) { /* Load the mscorlib got address */ ppc_ldptr (code, ppc_r12, sizeof (target_mgreg_t), ppc_r30); *ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target); /* arch_emit_got_access () patches this */ ppc_load32 (code, ppc_r0, 0); ppc_ldptr_indexed (code, ppc_r12, ppc_r12, ppc_r0); return code; } /* Soft Debug support */ #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED /* * BREAKPOINTS */ /* * mono_arch_set_breakpoint: * * See mini-amd64.c for docs. */ void mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip; guint8 *orig_code = code; ppc_load_sequence (code, ppc_r12, (gsize)bp_trigger_page); ppc_ldptr (code, ppc_r12, 0, ppc_r12); g_assert (code - orig_code == BREAKPOINT_SIZE); mono_arch_flush_icache (orig_code, code - orig_code); } /* * mono_arch_clear_breakpoint: * * See mini-amd64.c for docs. */ void mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip; int i; for (i = 0; i < BREAKPOINT_SIZE / 4; ++i) ppc_nop (code); mono_arch_flush_icache (ip, code - ip); } /* * mono_arch_is_breakpoint_event: * * See mini-amd64.c for docs. 
*/ gboolean mono_arch_is_breakpoint_event (void *info, void *sigctx) { siginfo_t* sinfo = (siginfo_t*) info; /* Sometimes the address is off by 4 */ if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128) return TRUE; else return FALSE; } /* * mono_arch_skip_breakpoint: * * See mini-amd64.c for docs. */ void mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji) { /* skip the ldptr */ MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4); } /* * SINGLE STEPPING */ /* * mono_arch_start_single_stepping: * * See mini-amd64.c for docs. */ void mono_arch_start_single_stepping (void) { mono_mprotect (ss_trigger_page, mono_pagesize (), 0); } /* * mono_arch_stop_single_stepping: * * See mini-amd64.c for docs. */ void mono_arch_stop_single_stepping (void) { mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ); } /* * mono_arch_is_single_step_event: * * See mini-amd64.c for docs. */ gboolean mono_arch_is_single_step_event (void *info, void *sigctx) { siginfo_t* sinfo = (siginfo_t*) info; /* Sometimes the address is off by 4 */ if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128) return TRUE; else return FALSE; } /* * mono_arch_skip_single_step: * * See mini-amd64.c for docs. */ void mono_arch_skip_single_step (MonoContext *ctx) { /* skip the ldptr */ MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4); } /* * mono_arch_create_seq_point_info: * * See mini-amd64.c for docs. */ SeqPointInfo* mono_arch_get_seq_point_info (guint8 *code) { NOT_IMPLEMENTED; return NULL; } #endif gboolean mono_arch_opcode_supported (int opcode) { switch (opcode) { case OP_ATOMIC_ADD_I4: case OP_ATOMIC_CAS_I4: #ifdef TARGET_POWERPC64 case OP_ATOMIC_ADD_I8: case OP_ATOMIC_CAS_I8: #endif return TRUE; default: return FALSE; } } gpointer mono_arch_load_function (MonoJitICallId jit_icall_id) { gpointer target = NULL; switch (jit_icall_id) { #undef MONO_AOT_ICALL #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break; MONO_AOT_ICALL (mono_ppc_throw_exception) } return target; }
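/*
 * NOTE: illustrative sketch, not part of the original file. The
 * OP_ATOMIC_CAS_I4/I8 sequence emitted earlier in this file (sync;
 * lwarx/ldarx; compare; stwcx./stdcx.; branch back on a lost reservation;
 * sync) is PowerPC's load-reserve/store-conditional spelling of
 * compare-and-swap. A minimal C equivalent, assuming a GCC-compatible
 * compiler, is sketched below; the function name is hypothetical and the
 * block is excluded from the build.
 */
#if 0
static gint32
sketch_atomic_cas_i32 (volatile gint32 *location, gint32 value, gint32 comparand)
{
	/* __sync_val_compare_and_swap returns the previous value at *location
	 * and lowers to the same reservation loop on ppc */
	return __sync_val_compare_and_swap (location, comparand, value);
}
#endif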
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/mini-runtime.c
/** * \file * Runtime code for the JIT * * Authors: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * * Copyright 2002-2003 Ximian, Inc. * Copyright 2003-2010 Novell, Inc. * Copyright 2011-2015 Xamarin, Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include <config.h> #ifdef HAVE_ALLOCA_H #include <alloca.h> #endif #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <math.h> #ifdef HAVE_SYS_TIME_H #include <sys/time.h> #endif #include <signal.h> #include <mono/utils/memcheck.h> #include <mono/metadata/assembly.h> #include <mono/metadata/assembly-internals.h> #include <mono/metadata/loader.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/class.h> #include <mono/metadata/object.h> #include <mono/metadata/tokentype.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/threads.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/domain-internals.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/mono-config.h> #include <mono/metadata/environment.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/gc-internals.h> #include <mono/metadata/threads-types.h> #include <mono/metadata/mempool-internals.h> #include <mono/metadata/runtime.h> #include <mono/metadata/reflection-internals.h> #include <mono/metadata/monitor.h> #include <mono/metadata/icall-internals.h> #include <mono/metadata/loader-internals.h> #define MONO_MATH_DECLARE_ALL 1 #include <mono/utils/mono-math.h> #include <mono/utils/mono-compiler.h> #include <mono/utils/mono-counters.h> #include <mono/utils/mono-error-internals.h> #include <mono/utils/mono-logger-internals.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-path.h> #include <mono/utils/mono-tls.h> #include <mono/utils/mono-hwcap.h> #include <mono/utils/dtrace.h> #include <mono/utils/mono-signal-handler.h> #include <mono/utils/mono-threads.h> #include <mono/utils/mono-threads-coop.h> #include <mono/utils/checked-build.h> #include <mono/utils/mono-compiler.h> #include <mono/utils/mono-proclib.h> #include <mono/utils/mono-time.h> #include <mono/metadata/w32handle.h> #include <mono/metadata/components.h> #include <mono/mini/debugger-agent-external.h> #include "mini.h" #include "seq-points.h" #include <string.h> #include <ctype.h> #include "trace.h" #include "aot-compiler.h" #include "aot-runtime.h" #include "llvmonly-runtime.h" #include "jit-icalls.h" #include "mini-gc.h" #include "mini-llvm.h" #include "llvm-runtime.h" #include "lldb.h" #include "mini-runtime.h" #include "interp/interp.h" #ifdef MONO_ARCH_LLVM_SUPPORTED #ifdef ENABLE_LLVM #include "mini-llvm-cpp.h" #include "llvm-jit.h" #endif #endif #include "mono/metadata/icall-signatures.h" #include "mono/utils/mono-tls-inline.h" static guint32 default_opt = 0; static gboolean default_opt_set = FALSE; MonoMethodDesc *mono_stats_method_desc; gboolean mono_compile_aot = FALSE; /* If this is set, no code is generated dynamically, everything is taken from AOT files */ gboolean mono_aot_only = FALSE; /* Same as mono_aot_only, but only LLVM compiled code is used, no trampolines */ gboolean mono_llvm_only = FALSE; /* By default, don't require AOT but attempt to probe */ MonoAotMode mono_aot_mode = MONO_AOT_MODE_NORMAL; MonoEEFeatures mono_ee_features; const char *mono_build_date; gboolean mono_do_signal_chaining; gboolean mono_do_crash_chaining; int mini_verbose = 0; /* * This flag controls whenever 
the runtime uses LLVM for JIT compilation, and whenever * it can load AOT code compiled by LLVM. */ gboolean mono_use_llvm = FALSE; gboolean mono_use_fast_math = FALSE; // Lists of allowlisted and blocklisted CPU features MonoCPUFeatures mono_cpu_features_enabled = (MonoCPUFeatures)0; #ifdef DISABLE_SIMD MonoCPUFeatures mono_cpu_features_disabled = MONO_CPU_X86_FULL_SSEAVX_COMBINED; #else MonoCPUFeatures mono_cpu_features_disabled = (MonoCPUFeatures)0; #endif gboolean mono_use_interpreter = FALSE; const char *mono_interp_opts_string = NULL; #define mono_jit_lock() mono_os_mutex_lock (&jit_mutex) #define mono_jit_unlock() mono_os_mutex_unlock (&jit_mutex) static mono_mutex_t jit_mutex; static MonoCodeManager *global_codeman; MonoDebugOptions mini_debug_options; #ifdef VALGRIND_JIT_REGISTER_MAP int valgrind_register; #endif GList* mono_aot_paths; static GPtrArray *profile_options; static GSList *tramp_infos; GSList *mono_interp_only_classes; static void register_icalls (void); static void runtime_cleanup (MonoDomain *domain, gpointer user_data); static void mini_invalidate_transformed_interp_methods (MonoAssemblyLoadContext *alc, uint32_t generation); static void mini_interp_jit_info_foreach(InterpJitInfoFunc func, gpointer user_data); static gboolean mini_interp_sufficient_stack (gsize size); gboolean mono_running_on_valgrind (void) { #ifndef HOST_WIN32 if (RUNNING_ON_VALGRIND){ #ifdef VALGRIND_JIT_REGISTER_MAP valgrind_register = TRUE; #endif return TRUE; } else #endif return FALSE; } void mono_set_use_llvm (mono_bool use_llvm) { mono_use_llvm = (gboolean)use_llvm; } typedef struct { void *ip; MonoMethod *method; } FindTrampUserData; static void find_tramp (gpointer key, gpointer value, gpointer user_data) { FindTrampUserData *ud = (FindTrampUserData*)user_data; if (value == ud->ip) ud->method = (MonoMethod*)key; } static char* mono_get_method_from_ip_u (void *ip); /* debug function */ char* mono_get_method_from_ip (void *ip) { char *result; MONO_ENTER_GC_UNSAFE; result = mono_get_method_from_ip_u (ip); MONO_EXIT_GC_UNSAFE; return result; } /* debug function */ static char* mono_get_method_from_ip_u (void *ip) { MonoJitInfo *ji; MonoMethod *method; char *method_name; char *res; MonoDomain *domain = mono_domain_get (); MonoDebugSourceLocation *location; FindTrampUserData user_data; if (!domain) domain = mono_get_root_domain (); ji = mono_jit_info_table_find_internal (ip, TRUE, TRUE); if (!ji) { user_data.ip = ip; user_data.method = NULL; MonoJitMemoryManager *jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); g_hash_table_foreach (jit_mm->jit_trampoline_hash, find_tramp, &user_data); jit_mm_unlock (jit_mm); if (user_data.method) { char *mname = mono_method_full_name (user_data.method, TRUE); res = g_strdup_printf ("<%p - JIT trampoline for %s>", ip, mname); g_free (mname); return res; } else return NULL; } else if (ji->is_trampoline) { res = g_strdup_printf ("<%p - %s trampoline>", ip, ji->d.tramp_info->name); return res; } method = jinfo_get_method (ji); method_name = mono_method_get_name_full (method, TRUE, FALSE, MONO_TYPE_NAME_FORMAT_IL); location = mono_debug_lookup_source_location (method, (guint32)((guint8*)ip - (guint8*)ji->code_start), domain); char *file_loc = NULL; if (location) file_loc = g_strdup_printf ("[%s :: %du]", location->source_file, location->row); const char *in_interp = ji->is_interp ? " interp" : ""; res = g_strdup_printf (" %s [{%p} + 0x%x%s] %s (%p %p) [%p - %s]", method_name, method, (int)((char*)ip - (char*)ji->code_start), in_interp, file_loc ? 
file_loc : "", ji->code_start, (char*)ji->code_start + ji->code_size, domain, domain->friendly_name); mono_debug_free_source_location (location); g_free (method_name); g_free (file_loc); return res; } /** * mono_pmip: * \param ip an instruction pointer address * * This method is used from a debugger to get the name of the * method at address \p ip. This routine is typically invoked from * a debugger like this: * * (gdb) print mono_pmip ($pc) * * \returns the name of the method at address \p ip. */ G_GNUC_UNUSED char * mono_pmip (void *ip) { return mono_get_method_from_ip (ip); } G_GNUC_UNUSED char * mono_pmip_u (void *ip) { return mono_get_method_from_ip_u (ip); } /** * mono_print_method_from_ip: * \param ip an instruction pointer address * * This method is used from a debugger to get the name of the * method at address \p ip. * * This prints the name of the method at address \p ip in the standard * output. Unlike \c mono_pmip which returns a string, this routine * prints the value on the standard output. */ MONO_ATTR_USED void mono_print_method_from_ip (void *ip) { MonoJitInfo *ji; char *method; MonoDebugSourceLocation *source; MonoDomain *domain = mono_domain_get (); MonoDomain *target_domain = mono_domain_get (); FindTrampUserData user_data; MonoGenericSharingContext*gsctx; const char *shared_type; if (!domain) domain = mono_get_root_domain (); ji = mini_jit_info_table_find_ext (ip, TRUE); if (ji && ji->is_trampoline) { MonoTrampInfo *tinfo = ji->d.tramp_info; printf ("IP %p is at offset 0x%x of trampoline '%s'.\n", ip, (int)((guint8*)ip - tinfo->code), tinfo->name); return; } if (!ji) { user_data.ip = ip; user_data.method = NULL; MonoJitMemoryManager *jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); g_hash_table_foreach (jit_mm->jit_trampoline_hash, find_tramp, &user_data); jit_mm_unlock (jit_mm); if (user_data.method) { char *mname = mono_method_full_name (user_data.method, TRUE); printf ("IP %p is a JIT trampoline for %s\n", ip, mname); g_free (mname); return; } g_print ("No method at %p\n", ip); fflush (stdout); return; } method = mono_method_full_name (jinfo_get_method (ji), TRUE); source = mono_debug_lookup_source_location (jinfo_get_method (ji), (guint32)((guint8*)ip - (guint8*)ji->code_start), target_domain); gsctx = mono_jit_info_get_generic_sharing_context (ji); shared_type = ""; if (gsctx) { if (gsctx->is_gsharedvt) shared_type = "gsharedvt "; else shared_type = "gshared "; } g_print ("IP %p at offset 0x%x of %smethod %s (%p %p)[domain %p - %s]\n", ip, (int)((char*)ip - (char*)ji->code_start), shared_type, method, ji->code_start, (char*)ji->code_start + ji->code_size, target_domain, target_domain->friendly_name); if (source) g_print ("%s:%d\n", source->source_file, source->row); fflush (stdout); mono_debug_free_source_location (source); g_free (method); } /* * mono_method_same_domain: * * Determine whenever two compiled methods are in the same domain, thus * the address of the callee can be embedded in the caller. */ gboolean mono_method_same_domain (MonoJitInfo *caller, MonoJitInfo *callee) { if (!caller || caller->is_trampoline || !callee || callee->is_trampoline) return FALSE; return TRUE; } /* * mono_global_codeman_reserve: * * Allocate code memory from the global code manager. 
*/ void *(mono_global_codeman_reserve) (int size) { void *ptr; if (mono_aot_only) g_error ("Attempting to allocate from the global code manager while running in aot-only mode.\n"); if (!global_codeman) { /* This can happen during startup */ if (!mono_compile_aot) global_codeman = mono_code_manager_new (); else global_codeman = mono_code_manager_new_aot (); return mono_code_manager_reserve (global_codeman, size); } else { mono_jit_lock (); ptr = mono_code_manager_reserve (global_codeman, size); mono_jit_unlock (); return ptr; } } /* The callback shouldn't take any locks */ void mono_global_codeman_foreach (MonoCodeManagerFunc func, void *user_data) { mono_jit_lock (); mono_code_manager_foreach (global_codeman, func, user_data); mono_jit_unlock (); } /** * mono_create_unwind_op: * * Create an unwind op with the given parameters. */ MonoUnwindOp* mono_create_unwind_op (int when, int tag, int reg, int val) { MonoUnwindOp *op = g_new0 (MonoUnwindOp, 1); op->op = tag; op->reg = reg; op->val = val; op->when = when; return op; } MonoJumpInfoToken * mono_jump_info_token_new2 (MonoMemPool *mp, MonoImage *image, guint32 token, MonoGenericContext *context) { MonoJumpInfoToken *res = (MonoJumpInfoToken *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoToken)); res->image = image; res->token = token; res->has_context = context != NULL; if (context) memcpy (&res->context, context, sizeof (MonoGenericContext)); return res; } MonoJumpInfoToken * mono_jump_info_token_new (MonoMemPool *mp, MonoImage *image, guint32 token) { return mono_jump_info_token_new2 (mp, image, token, NULL); } /* * mono_tramp_info_create: * * Create a MonoTrampInfo structure from the arguments. This function assumes ownership * of JI, and UNWIND_OPS. */ MonoTrampInfo* mono_tramp_info_create (const char *name, guint8 *code, guint32 code_size, MonoJumpInfo *ji, GSList *unwind_ops) { MonoTrampInfo *info = g_new0 (MonoTrampInfo, 1); info->name = g_strdup (name); info->code = code; info->code_size = code_size; info->ji = ji; info->unwind_ops = unwind_ops; return info; } void mono_tramp_info_free (MonoTrampInfo *info) { g_free (info->name); // FIXME: ji mono_free_unwind_info (info->unwind_ops); if (info->owns_uw_info) g_free (info->uw_info); g_free (info); } static void register_trampoline_jit_info (MonoMemoryManager *mem_manager, MonoTrampInfo *info) { MonoJitInfo *ji; ji = (MonoJitInfo *)mono_mem_manager_alloc0 (mem_manager, mono_jit_info_size ((MonoJitInfoFlags)0, 0, 0)); mono_jit_info_init (ji, NULL, (guint8*)MINI_FTNPTR_TO_ADDR (info->code), info->code_size, (MonoJitInfoFlags)0, 0, 0); ji->d.tramp_info = info; ji->is_trampoline = TRUE; ji->unwind_info = mono_cache_unwind_info (info->uw_info, info->uw_info_len); mono_jit_info_table_add (ji); } /* * mono_tramp_info_register: * * Remember INFO for use by xdebug, mono_print_method_from_ip (), jit maps, etc. * INFO can be NULL. * Frees INFO. */ static void mono_tramp_info_register_internal (MonoTrampInfo *info, MonoMemoryManager *mem_manager, gboolean aot) { MonoTrampInfo *copy; MonoDomain *domain = mono_get_root_domain (); if (!info) return; if (mem_manager) copy = mono_mem_manager_alloc0 (mem_manager, sizeof (MonoTrampInfo)); else copy = g_new0 (MonoTrampInfo, 1); copy->code = info->code; copy->code_size = info->code_size; copy->name = mem_manager ? 
mono_mem_manager_strdup (mem_manager, info->name) : g_strdup (info->name); copy->method = info->method; if (info->unwind_ops) { copy->uw_info = mono_unwind_ops_encode (info->unwind_ops, &copy->uw_info_len); copy->owns_uw_info = TRUE; if (mem_manager) { guint8 *temp = copy->uw_info; copy->uw_info = mono_mem_manager_alloc (mem_manager, copy->uw_info_len); memcpy (copy->uw_info, temp, copy->uw_info_len); g_free (temp); } } else { /* Trampolines from aot have the unwind ops already encoded */ copy->uw_info = info->uw_info; copy->uw_info_len = info->uw_info_len; } mono_lldb_save_trampoline_info (info); #ifdef MONO_ARCH_HAVE_UNWIND_TABLE if (!aot) mono_arch_unwindinfo_install_tramp_unwind_info (info->unwind_ops, info->code, info->code_size); #endif if (!domain) { /* If no domain has been created yet, postpone the registration. */ mono_jit_lock (); tramp_infos = g_slist_prepend (tramp_infos, copy); mono_jit_unlock (); } else if (copy->uw_info || info->method) { /* Only register trampolines that have unwind info */ register_trampoline_jit_info (mem_manager ? mem_manager : get_default_mem_manager (), copy); } if (mono_jit_map_is_enabled ()) mono_emit_jit_tramp (info->code, info->code_size, info->name); mono_tramp_info_free (info); } void mono_tramp_info_register (MonoTrampInfo *info, MonoMemoryManager *mem_manager) { mono_tramp_info_register_internal (info, mem_manager, FALSE); } void mono_aot_tramp_info_register (MonoTrampInfo *info, MonoMemoryManager *mem_manager) { mono_tramp_info_register_internal (info, mem_manager, TRUE); } /* Register trampolines created before the root domain was created in the jit info tables */ static void register_trampolines (MonoDomain *domain) { GSList *l; for (l = tramp_infos; l; l = l->next) { MonoTrampInfo *info = (MonoTrampInfo *)l->data; register_trampoline_jit_info (get_default_mem_manager (), info); } } G_GNUC_UNUSED static void break_count (void) { } /* * Runtime debugging tool, use if (debug_count ()) <x> else <y> to do <x> the first COUNT times, then do <y> afterwards. * Set a breakpoint in break_count () to break the last time <x> is done. 
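* The threshold comes from the COUNT environment variable, read lazily below; when it is unset, <x> is always done.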
*/ G_GNUC_UNUSED gboolean mono_debug_count (void) { static int count = 0, int_val = 0; static gboolean inited, has_value = FALSE; count ++; if (!inited) { char *value = g_getenv ("COUNT"); if (value) { int_val = atoi (value); g_free (value); has_value = TRUE; } inited = TRUE; } if (!has_value) return TRUE; if (count == int_val) break_count (); if (count > int_val) return FALSE; return TRUE; } MonoMethod* mono_icall_get_wrapper_method (MonoJitICallInfo* callinfo) { /* This icall is used to check for exceptions, so don't check in the wrapper */ gboolean check_exc = (callinfo != &mono_get_jit_icall_info ()->mono_thread_interruption_checkpoint); return mono_marshal_get_icall_wrapper (callinfo, check_exc); } gconstpointer mono_icall_get_wrapper_full (MonoJitICallInfo* callinfo, gboolean do_compile) { ERROR_DECL (error); MonoMethod *wrapper; gconstpointer addr, trampoline; if (callinfo->wrapper) return callinfo->wrapper; wrapper = mono_icall_get_wrapper_method (callinfo); if (do_compile) { addr = mono_compile_method_checked (wrapper, error); mono_error_assert_ok (error); mono_memory_barrier (); callinfo->wrapper = addr; return addr; } else { if (callinfo->trampoline) return callinfo->trampoline; trampoline = mono_create_jit_trampoline (wrapper, error); mono_error_assert_ok (error); trampoline = mono_create_ftnptr ((gpointer)trampoline); mono_loader_lock (); if (!callinfo->trampoline) { callinfo->trampoline = trampoline; } mono_loader_unlock (); return callinfo->trampoline; } } gconstpointer mono_icall_get_wrapper (MonoJitICallInfo* callinfo) { return mono_icall_get_wrapper_full (callinfo, FALSE); } static MonoJitDynamicMethodInfo* mono_dynamic_code_hash_lookup (MonoMethod *method) { MonoJitDynamicMethodInfo *res; MonoJitMemoryManager *jit_mm; jit_mm = jit_mm_for_method (method); jit_mm_lock (jit_mm); if (jit_mm->dynamic_code_hash) res = (MonoJitDynamicMethodInfo *)g_hash_table_lookup (jit_mm->dynamic_code_hash, method); else res = NULL; jit_mm_unlock (jit_mm); return res; } #ifdef __cplusplus template <typename T> static void register_opcode_emulation (int opcode, MonoJitICallInfo *jit_icall_info, const char *name, MonoMethodSignature *sig, T func, const char *symbol, gboolean no_wrapper) #else static void register_opcode_emulation (int opcode, MonoJitICallInfo *jit_icall_info, const char *name, MonoMethodSignature *sig, gpointer func, const char *symbol, gboolean no_wrapper) #endif { #ifndef DISABLE_JIT mini_register_opcode_emulation (opcode, jit_icall_info, name, sig, func, symbol, no_wrapper); #else // FIXME ifdef in mini_register_opcode_emulation and just call it. g_assert (!sig->hasthis); g_assert (sig->param_count < 3); mono_register_jit_icall_info (jit_icall_info, func, name, sig, no_wrapper, symbol); #endif } #define register_opcode_emulation(opcode, name, sig, func, no_wrapper) \ (register_opcode_emulation ((opcode), &mono_get_jit_icall_info ()->name, #name, (sig), func, #func, (no_wrapper))) /* * For JIT icalls implemented in C. * NAME should be the same as the name of the C function whose address is FUNC. * If @avoid_wrapper is TRUE, no wrapper is generated. This is for perf critical icalls which * can't throw exceptions. * * func is an identifier, that names a function, and is also in jit-icall-reg.h, * and therefore a field in mono_jit_icall_info and can be token pasted into an enum value. * * The name of func must be linkable for AOT, for example g_free does not work (monoeg_g_free instead), * nor does the C++ overload fmod (mono_fmod instead). 
These functions therefore * must be extern "C". */ #define register_icall(func, sig, avoid_wrapper) \ (mono_register_jit_icall_info (&mono_get_jit_icall_info ()->func, func, #func, (sig), (avoid_wrapper), #func)) #define register_icall_no_wrapper(func, sig) register_icall (func, sig, TRUE) #define register_icall_with_wrapper(func, sig) register_icall (func, sig, FALSE) /* * Register an icall where FUNC is dynamically generated or otherwise not * possible to link to it using NAME during AOT. * * func is an expression, such a local variable or a function call to get a function pointer. * name is an identifier * * Providing func and name separately is what distinguishes "dyn" from regular. * * This also passes last parameter c_symbol=NULL since there is not a directly linkable symbol. */ #define register_dyn_icall(func, name, sig, save) \ (mono_register_jit_icall_info (&mono_get_jit_icall_info ()->name, (func), #name, (sig), (save), NULL)) MonoLMF * mono_get_lmf (void) { MonoJitTlsData *jit_tls; if ((jit_tls = mono_tls_get_jit_tls ())) return jit_tls->lmf; /* * We do not assert here because this function can be called from * mini-gc.c on a thread that has not executed any managed code, yet * (the thread object allocation can trigger a collection). */ return NULL; } void mono_set_lmf (MonoLMF *lmf) { (*mono_get_lmf_addr ()) = lmf; } static void mono_set_jit_tls (MonoJitTlsData *jit_tls) { MonoThreadInfo *info; mono_tls_set_jit_tls (jit_tls); /* Save it into MonoThreadInfo so it can be accessed by mono_thread_state_init_from_handle () */ info = mono_thread_info_current (); if (info) mono_thread_info_tls_set (info, TLS_KEY_JIT_TLS, jit_tls); } static void mono_set_lmf_addr (MonoLMF **lmf_addr) { MonoThreadInfo *info; mono_tls_set_lmf_addr (lmf_addr); /* Save it into MonoThreadInfo so it can be accessed by mono_thread_state_init_from_handle () */ info = mono_thread_info_current (); if (info) mono_thread_info_tls_set (info, TLS_KEY_LMF_ADDR, lmf_addr); } /* * mono_push_lmf: * * Push an MonoLMFExt frame on the LMF stack. */ void mono_push_lmf (MonoLMFExt *ext) { MonoLMF **lmf_addr; lmf_addr = mono_get_lmf_addr (); ext->lmf.previous_lmf = *lmf_addr; /* Mark that this is a MonoLMFExt */ ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2); mono_set_lmf ((MonoLMF*)ext); } /* * mono_pop_lmf: * * Pop the last frame from the LMF stack. */ void mono_pop_lmf (MonoLMF *lmf) { mono_set_lmf ((MonoLMF *)(((gssize)lmf->previous_lmf) & ~3)); } /* * mono_jit_thread_attach: * * Called by Xamarin.Mac and other products. Attach thread to runtime if * needed and switch to @domain. * * This function is external only and @deprecated don't use it. Use mono_threads_attach_coop (). * * If the thread is newly-attached, put into GC Safe mode. * * @return the original domain which needs to be restored, or NULL. */ MonoDomain* mono_jit_thread_attach (MonoDomain *domain) { gboolean attached; if (!domain) { /* Happens when called from AOTed code which is only used in the root domain. */ domain = mono_get_root_domain (); } g_assert (domain); attached = mono_tls_get_jit_tls () != NULL; if (!attached) { // #678164 gboolean background = TRUE; mono_thread_attach_external_native_thread (domain, background); /* mono_jit_thread_attach is external-only and not called by * the runtime on any of our own threads. So if we get here, * the thread is running native code - leave it in GC Safe mode * and leave it to the n2m invoke wrappers or MONO_API entry * points to switch to GC Unsafe. 
*/ MONO_STACKDATA (stackdata); mono_threads_enter_gc_safe_region_unbalanced_internal (&stackdata); } return NULL; } /* * mono_jit_set_domain: * * Set domain to @domain if @domain is not null */ void mono_jit_set_domain (MonoDomain *domain) { g_assert (!mono_threads_is_blocking_transition_enabled ()); if (domain) mono_domain_set_fast (domain); } /** * mono_thread_abort: * \param obj exception object * Abort the thread, print exception information and stack trace */ static void mono_thread_abort (MonoObject *obj) { /* MonoJitTlsData *jit_tls = mono_tls_get_jit_tls (); */ /* handle_remove should be eventually called for this thread, too g_free (jit_tls);*/ if ((obj->vtable->klass == mono_defaults.threadabortexception_class) || ((obj->vtable->klass) == mono_class_try_get_appdomain_unloaded_exception_class () && mono_thread_info_current ()->runtime_thread)) { mono_thread_exit (); } else { mono_invoke_unhandled_exception_hook (obj); } } static MonoJitTlsData* setup_jit_tls_data (gpointer stack_start, MonoAbortFunction abort_func) { MonoJitTlsData *jit_tls; MonoLMF *lmf; jit_tls = mono_tls_get_jit_tls (); if (jit_tls) return jit_tls; jit_tls = g_new0 (MonoJitTlsData, 1); jit_tls->abort_func = abort_func; jit_tls->end_of_stack = stack_start; mono_set_jit_tls (jit_tls); lmf = g_new0 (MonoLMF, 1); MONO_ARCH_INIT_TOP_LMF_ENTRY (lmf); jit_tls->first_lmf = lmf; mono_set_lmf_addr (&jit_tls->lmf); jit_tls->lmf = lmf; #ifdef MONO_ARCH_HAVE_TLS_INIT mono_arch_tls_init (); #endif mono_setup_altstack (jit_tls); return jit_tls; } static void free_jit_tls_data (MonoJitTlsData *jit_tls) { //This happens during AOT cuz the thread is never attached if (!jit_tls) return; mono_free_altstack (jit_tls); if (jit_tls->interp_context) mini_get_interp_callbacks ()->free_context (jit_tls->interp_context); g_free (jit_tls->first_lmf); g_free (jit_tls); } static void mono_thread_start_cb (intptr_t tid, gpointer stack_start, gpointer func) { MonoThreadInfo *thread; MonoJitTlsData *jit_tls = setup_jit_tls_data (stack_start, mono_thread_abort); thread = mono_thread_info_current_unchecked (); if (thread) thread->jit_data = jit_tls; mono_arch_cpu_init (); } void (*mono_thread_attach_aborted_cb ) (MonoObject *obj) = NULL; static void mono_thread_abort_dummy (MonoObject *obj) { if (mono_thread_attach_aborted_cb) mono_thread_attach_aborted_cb (obj); else mono_thread_abort (obj); } static void mono_thread_attach_cb (intptr_t tid, gpointer stack_start) { MonoThreadInfo *thread; MonoJitTlsData *jit_tls = setup_jit_tls_data (stack_start, mono_thread_abort_dummy); thread = mono_thread_info_current_unchecked (); if (thread) thread->jit_data = jit_tls; mono_arch_cpu_init (); } static void mini_thread_cleanup (MonoNativeThreadId tid) { MonoJitTlsData *jit_tls = NULL; MonoThreadInfo *info; info = mono_thread_info_current_unchecked (); /* We can't clean up tls information if we are on another thread, it will clean up the wrong stuff * It would be nice to issue a warning when this happens outside of the shutdown sequence. but it's * not a trivial thing. * * The current offender is mono_thread_manage which cleanup threads from the outside. 
*/ if (info && mono_thread_info_get_tid (info) == tid) { jit_tls = info->jit_data; info->jit_data = NULL; mono_set_jit_tls (NULL); /* If we attach a thread but never call into managed land, we might never get an lmf.*/ if (mono_get_lmf ()) { mono_set_lmf (NULL); mono_set_lmf_addr (NULL); } } else { info = mono_thread_info_lookup (tid); if (info) { jit_tls = info->jit_data; info->jit_data = NULL; } mono_hazard_pointer_clear (mono_hazard_pointer_get (), 1); } if (jit_tls) free_jit_tls_data (jit_tls); } MonoJumpInfo * mono_patch_info_list_prepend (MonoJumpInfo *list, int ip, MonoJumpInfoType type, gconstpointer target) { MonoJumpInfo *ji = g_new0 (MonoJumpInfo, 1); ji->ip.i = ip; ji->type = type; ji->data.target = target; ji->next = list; return ji; } #if !defined(DISABLE_LOGGING) && !defined(DISABLE_JIT) static const char* const patch_info_str[] = { #define PATCH_INFO(a,b) "" #a, #include "patch-info.h" #undef PATCH_INFO }; const char* mono_ji_type_to_string (MonoJumpInfoType type) { return patch_info_str [type]; } void mono_print_ji (const MonoJumpInfo *ji) { const char *type = patch_info_str [ji->type]; switch (ji->type) { case MONO_PATCH_INFO_RGCTX_FETCH: case MONO_PATCH_INFO_RGCTX_SLOT_INDEX: { MonoJumpInfoRgctxEntry *entry = ji->data.rgctx_entry; printf ("[%s ", type); mono_print_ji (entry->data); printf (" -> %s]", mono_rgctx_info_type_to_str (entry->info_type)); break; } case MONO_PATCH_INFO_METHOD: case MONO_PATCH_INFO_METHODCONST: case MONO_PATCH_INFO_METHOD_FTNDESC: case MONO_PATCH_INFO_LLVMONLY_INTERP_ENTRY: { char *s = mono_method_get_full_name (ji->data.method); printf ("[%s %s]", type, s); g_free (s); break; } case MONO_PATCH_INFO_JIT_ICALL_ID: printf ("[JIT_ICALL %s]", mono_find_jit_icall_info (ji->data.jit_icall_id)->name); break; case MONO_PATCH_INFO_CLASS: case MONO_PATCH_INFO_VTABLE: { char *name = mono_class_full_name (ji->data.klass); printf ("[%s %s]", type, name); g_free (name); break; } default: printf ("[%s]", type); break; } } #else const char* mono_ji_type_to_string (MonoJumpInfoType type) { return ""; } void mono_print_ji (const MonoJumpInfo *ji) { } #endif /** * mono_patch_info_dup_mp: * * Make a copy of PATCH_INFO, allocating memory from the mempool MP. 
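* Union payloads that point into caller-owned memory (tokens, switch tables, rgctx entries, delegate trampoline pairs, gsharedvt data) are deep-copied below; all other payloads are shared via the initial memcpy.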
*/ MonoJumpInfo* mono_patch_info_dup_mp (MonoMemPool *mp, MonoJumpInfo *patch_info) { MonoJumpInfo *res = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo)); memcpy (res, patch_info, sizeof (MonoJumpInfo)); switch (patch_info->type) { case MONO_PATCH_INFO_RVA: case MONO_PATCH_INFO_LDSTR: case MONO_PATCH_INFO_TYPE_FROM_HANDLE: case MONO_PATCH_INFO_LDTOKEN: case MONO_PATCH_INFO_DECLSEC: res->data.token = (MonoJumpInfoToken *)mono_mempool_alloc (mp, sizeof (MonoJumpInfoToken)); memcpy (res->data.token, patch_info->data.token, sizeof (MonoJumpInfoToken)); break; case MONO_PATCH_INFO_SWITCH: res->data.table = (MonoJumpInfoBBTable *)mono_mempool_alloc (mp, sizeof (MonoJumpInfoBBTable)); memcpy (res->data.table, patch_info->data.table, sizeof (MonoJumpInfoBBTable)); res->data.table->table = (MonoBasicBlock **)mono_mempool_alloc (mp, sizeof (MonoBasicBlock*) * patch_info->data.table->table_size); memcpy (res->data.table->table, patch_info->data.table->table, sizeof (MonoBasicBlock*) * patch_info->data.table->table_size); break; case MONO_PATCH_INFO_RGCTX_FETCH: case MONO_PATCH_INFO_RGCTX_SLOT_INDEX: res->data.rgctx_entry = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc (mp, sizeof (MonoJumpInfoRgctxEntry)); memcpy (res->data.rgctx_entry, patch_info->data.rgctx_entry, sizeof (MonoJumpInfoRgctxEntry)); res->data.rgctx_entry->data = mono_patch_info_dup_mp (mp, res->data.rgctx_entry->data); break; case MONO_PATCH_INFO_DELEGATE_TRAMPOLINE: res->data.del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (mp, sizeof (MonoDelegateClassMethodPair)); memcpy (res->data.del_tramp, patch_info->data.del_tramp, sizeof (MonoDelegateClassMethodPair)); break; case MONO_PATCH_INFO_GSHAREDVT_CALL: res->data.gsharedvt = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc (mp, sizeof (MonoJumpInfoGSharedVtCall)); memcpy (res->data.gsharedvt, patch_info->data.gsharedvt, sizeof (MonoJumpInfoGSharedVtCall)); break; case MONO_PATCH_INFO_GSHAREDVT_METHOD: { MonoGSharedVtMethodInfo *info; MonoGSharedVtMethodInfo *oinfo; int i; oinfo = patch_info->data.gsharedvt_method; info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc (mp, sizeof (MonoGSharedVtMethodInfo)); res->data.gsharedvt_method = info; memcpy (info, oinfo, sizeof (MonoGSharedVtMethodInfo)); info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc (mp, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries); for (i = 0; i < oinfo->num_entries; ++i) { MonoRuntimeGenericContextInfoTemplate *otemplate = &oinfo->entries [i]; MonoRuntimeGenericContextInfoTemplate *template_ = &info->entries [i]; memcpy (template_, otemplate, sizeof (MonoRuntimeGenericContextInfoTemplate)); } //info->locals_types = mono_mempool_alloc0 (mp, info->nlocals * sizeof (MonoType*)); //memcpy (info->locals_types, oinfo->locals_types, info->nlocals * sizeof (MonoType*)); break; } case MONO_PATCH_INFO_VIRT_METHOD: { MonoJumpInfoVirtMethod *info; MonoJumpInfoVirtMethod *oinfo; oinfo = patch_info->data.virt_method; info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoVirtMethod)); res->data.virt_method = info; memcpy (info, oinfo, sizeof (MonoJumpInfoVirtMethod)); break; } default: break; } return res; } guint mono_patch_info_hash (gconstpointer data) { const MonoJumpInfo *ji = (MonoJumpInfo*)data; const MonoJumpInfoType type = ji->type; guint hash = type << 8; switch (type) { case MONO_PATCH_INFO_RVA: case MONO_PATCH_INFO_LDSTR: case MONO_PATCH_INFO_LDTOKEN: case MONO_PATCH_INFO_DECLSEC: return hash | ji->data.token->token; case 
MONO_PATCH_INFO_TYPE_FROM_HANDLE: return hash | ji->data.token->token | (ji->data.token->has_context ? (gsize)ji->data.token->context.class_inst : 0); case MONO_PATCH_INFO_OBJC_SELECTOR_REF: // Hash on the selector name case MONO_PATCH_INFO_LDSTR_LIT: return g_str_hash (ji->data.name); case MONO_PATCH_INFO_VTABLE: case MONO_PATCH_INFO_CLASS: case MONO_PATCH_INFO_IID: case MONO_PATCH_INFO_ADJUSTED_IID: case MONO_PATCH_INFO_METHODCONST: case MONO_PATCH_INFO_METHOD: case MONO_PATCH_INFO_METHOD_JUMP: case MONO_PATCH_INFO_METHOD_FTNDESC: case MONO_PATCH_INFO_LLVMONLY_INTERP_ENTRY: case MONO_PATCH_INFO_IMAGE: case MONO_PATCH_INFO_ICALL_ADDR: case MONO_PATCH_INFO_ICALL_ADDR_CALL: case MONO_PATCH_INFO_FIELD: case MONO_PATCH_INFO_SFLDA: case MONO_PATCH_INFO_SEQ_POINT_INFO: case MONO_PATCH_INFO_METHOD_RGCTX: case MONO_PATCH_INFO_SIGNATURE: case MONO_PATCH_INFO_METHOD_CODE_SLOT: case MONO_PATCH_INFO_AOT_JIT_INFO: case MONO_PATCH_INFO_METHOD_PINVOKE_ADDR_CACHE: return hash | (gssize)ji->data.target; case MONO_PATCH_INFO_GSHAREDVT_CALL: return hash | (gssize)ji->data.gsharedvt->method; case MONO_PATCH_INFO_RGCTX_FETCH: case MONO_PATCH_INFO_RGCTX_SLOT_INDEX: { MonoJumpInfoRgctxEntry *e = ji->data.rgctx_entry; hash |= e->in_mrgctx | e->info_type | mono_patch_info_hash (e->data); if (e->in_mrgctx) return hash | (gssize)e->d.method; else return hash | (gssize)e->d.klass; } case MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG: case MONO_PATCH_INFO_MSCORLIB_GOT_ADDR: case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR: case MONO_PATCH_INFO_GC_NURSERY_START: case MONO_PATCH_INFO_GC_NURSERY_BITS: case MONO_PATCH_INFO_GOT_OFFSET: case MONO_PATCH_INFO_GC_SAFE_POINT_FLAG: case MONO_PATCH_INFO_AOT_MODULE: case MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT: case MONO_PATCH_INFO_PROFILER_CLAUSE_COUNT: case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINES: case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINES_GOT_SLOTS_BASE: return hash; case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR: return hash | ji->data.uindex; case MONO_PATCH_INFO_JIT_ICALL_ID: case MONO_PATCH_INFO_JIT_ICALL_ADDR: case MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL: case MONO_PATCH_INFO_CASTCLASS_CACHE: return hash | ji->data.index; case MONO_PATCH_INFO_SWITCH: return hash | ji->data.table->table_size; case MONO_PATCH_INFO_GSHAREDVT_METHOD: return hash | (gssize)ji->data.gsharedvt_method->method; case MONO_PATCH_INFO_DELEGATE_TRAMPOLINE: return hash | (gsize)ji->data.del_tramp->klass | (gsize)ji->data.del_tramp->method | (gsize)ji->data.del_tramp->is_virtual; case MONO_PATCH_INFO_VIRT_METHOD: { MonoJumpInfoVirtMethod *info = ji->data.virt_method; return hash | (gssize)info->klass | (gssize)info->method; } case MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER: return hash | mono_signature_hash (ji->data.sig); case MONO_PATCH_INFO_R8_GOT: return hash | (guint32)*(double*)ji->data.target; case MONO_PATCH_INFO_R4_GOT: return hash | (guint32)*(float*)ji->data.target; default: printf ("info type: %d\n", ji->type); mono_print_ji (ji); printf ("\n"); g_assert_not_reached (); case MONO_PATCH_INFO_NONE: return 0; } } /* * mono_patch_info_equal: * * This might fail to recognize equivalent patches, i.e. floats, so its only * usable in those cases where this is not a problem, i.e. sharing GOT slots * in AOT. 
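* For example, MONO_PATCH_INFO_R8 is not handled in the switch below, so two patches with equal float values but different storage fall through to the default data.target pointer comparison and compare unequal.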
*/ gint mono_patch_info_equal (gconstpointer ka, gconstpointer kb) { const MonoJumpInfo *ji1 = (MonoJumpInfo*)ka; const MonoJumpInfo *ji2 = (MonoJumpInfo*)kb; MonoJumpInfoType const ji1_type = ji1->type; MonoJumpInfoType const ji2_type = ji2->type; if (ji1_type != ji2_type) return 0; switch (ji1_type) { case MONO_PATCH_INFO_RVA: case MONO_PATCH_INFO_LDSTR: case MONO_PATCH_INFO_TYPE_FROM_HANDLE: case MONO_PATCH_INFO_LDTOKEN: case MONO_PATCH_INFO_DECLSEC: return ji1->data.token->image == ji2->data.token->image && ji1->data.token->token == ji2->data.token->token && ji1->data.token->has_context == ji2->data.token->has_context && ji1->data.token->context.class_inst == ji2->data.token->context.class_inst && ji1->data.token->context.method_inst == ji2->data.token->context.method_inst; case MONO_PATCH_INFO_OBJC_SELECTOR_REF: case MONO_PATCH_INFO_LDSTR_LIT: return g_str_equal (ji1->data.name, ji2->data.name); case MONO_PATCH_INFO_RGCTX_FETCH: case MONO_PATCH_INFO_RGCTX_SLOT_INDEX: { MonoJumpInfoRgctxEntry *e1 = ji1->data.rgctx_entry; MonoJumpInfoRgctxEntry *e2 = ji2->data.rgctx_entry; return e1->d.method == e2->d.method && e1->d.klass == e2->d.klass && e1->in_mrgctx == e2->in_mrgctx && e1->info_type == e2->info_type && mono_patch_info_equal (e1->data, e2->data); } case MONO_PATCH_INFO_GSHAREDVT_CALL: { MonoJumpInfoGSharedVtCall *c1 = ji1->data.gsharedvt; MonoJumpInfoGSharedVtCall *c2 = ji2->data.gsharedvt; return c1->sig == c2->sig && c1->method == c2->method; } case MONO_PATCH_INFO_GSHAREDVT_METHOD: return ji1->data.gsharedvt_method->method == ji2->data.gsharedvt_method->method; case MONO_PATCH_INFO_DELEGATE_TRAMPOLINE: return ji1->data.del_tramp->klass == ji2->data.del_tramp->klass && ji1->data.del_tramp->method == ji2->data.del_tramp->method && ji1->data.del_tramp->is_virtual == ji2->data.del_tramp->is_virtual; case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR: return ji1->data.uindex == ji2->data.uindex; case MONO_PATCH_INFO_CASTCLASS_CACHE: return ji1->data.index == ji2->data.index; case MONO_PATCH_INFO_JIT_ICALL_ID: case MONO_PATCH_INFO_JIT_ICALL_ADDR: case MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL: return ji1->data.jit_icall_id == ji2->data.jit_icall_id; case MONO_PATCH_INFO_VIRT_METHOD: return ji1->data.virt_method->klass == ji2->data.virt_method->klass && ji1->data.virt_method->method == ji2->data.virt_method->method; case MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER: return mono_metadata_signature_equal (ji1->data.sig, ji2->data.sig); case MONO_PATCH_INFO_GC_SAFE_POINT_FLAG: case MONO_PATCH_INFO_NONE: return 1; default: break; } return ji1->data.target == ji2->data.target; } gpointer mono_resolve_patch_target_ext (MonoMemoryManager *mem_manager, MonoMethod *method, guint8 *code, MonoJumpInfo *patch_info, gboolean run_cctors, MonoError *error) { unsigned char *ip = patch_info->ip.i + code; gconstpointer target = NULL; error_init (error); switch (patch_info->type) { case MONO_PATCH_INFO_BB: /* * FIXME: This could be hit for methods without a prolog. Should use -1 * but too much code depends on a 0 initial value. 
*/ //g_assert (patch_info->data.bb->native_offset); target = patch_info->data.bb->native_offset + code; break; case MONO_PATCH_INFO_ABS: target = patch_info->data.target; break; case MONO_PATCH_INFO_LABEL: target = patch_info->data.inst->inst_c0 + code; break; case MONO_PATCH_INFO_IP: target = ip; break; case MONO_PATCH_INFO_JIT_ICALL_ID: { MonoJitICallInfo * const mi = mono_find_jit_icall_info (patch_info->data.jit_icall_id); target = mono_icall_get_wrapper (mi); break; } case MONO_PATCH_INFO_JIT_ICALL_ADDR: case MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL: { MonoJitICallInfo * const mi = mono_find_jit_icall_info (patch_info->data.jit_icall_id); target = mi->func; break; } case MONO_PATCH_INFO_METHOD_JUMP: target = mono_create_jump_trampoline (patch_info->data.method, FALSE, error); if (!is_ok (error)) return NULL; break; case MONO_PATCH_INFO_METHOD: if (patch_info->data.method == method) { target = code; } else { /* get the trampoline to the method from the domain */ target = mono_create_jit_trampoline (patch_info->data.method, error); if (!is_ok (error)) return NULL; } break; case MONO_PATCH_INFO_METHOD_FTNDESC: { /* * Return an ftndesc for either AOTed code, or for an interp entry. */ target = mini_llvmonly_load_method_ftndesc (patch_info->data.method, FALSE, FALSE, error); return_val_if_nok (error, NULL); break; } case MONO_PATCH_INFO_LLVMONLY_INTERP_ENTRY: { target = mini_get_interp_callbacks ()->create_method_pointer_llvmonly (patch_info->data.method, FALSE, error); mono_error_assert_ok (error); break; } case MONO_PATCH_INFO_METHOD_CODE_SLOT: { gpointer code_slot; MonoJitMemoryManager *jit_mm = jit_mm_for_method (patch_info->data.method); jit_mm_lock (jit_mm); if (!jit_mm->method_code_hash) jit_mm->method_code_hash = g_hash_table_new (NULL, NULL); code_slot = g_hash_table_lookup (jit_mm->method_code_hash, patch_info->data.method); if (!code_slot) { code_slot = mono_mem_manager_alloc0 (jit_mm->mem_manager, sizeof (gpointer)); g_hash_table_insert (jit_mm->method_code_hash, patch_info->data.method, code_slot); } jit_mm_unlock (jit_mm); target = code_slot; break; } case MONO_PATCH_INFO_METHOD_PINVOKE_ADDR_CACHE: { target = mono_mem_manager_alloc0 (mem_manager, sizeof (gpointer)); break; } case MONO_PATCH_INFO_GC_SAFE_POINT_FLAG: target = (gpointer)&mono_polling_required; break; case MONO_PATCH_INFO_SWITCH: { #ifndef MONO_ARCH_NO_CODEMAN gpointer *jump_table; int i; if (method && method->dynamic) { jump_table = (void **)mono_code_manager_reserve (mono_dynamic_code_hash_lookup (method)->code_mp, sizeof (gpointer) * patch_info->data.table->table_size); } else { MonoMemoryManager *method_mem_manager = method ? 
m_method_get_mem_manager (method) : mem_manager; if (mono_aot_only) { jump_table = (void **)mono_mem_manager_alloc (method_mem_manager, sizeof (gpointer) * patch_info->data.table->table_size); } else { jump_table = (void **)mono_mem_manager_code_reserve (method_mem_manager, sizeof (gpointer) * patch_info->data.table->table_size); } } mono_codeman_enable_write (); for (i = 0; i < patch_info->data.table->table_size; i++) { jump_table [i] = code + GPOINTER_TO_INT (patch_info->data.table->table [i]); } mono_codeman_disable_write (); target = jump_table; #else g_assert_not_reached (); target = NULL; #endif break; } case MONO_PATCH_INFO_METHODCONST: case MONO_PATCH_INFO_CLASS: case MONO_PATCH_INFO_IMAGE: case MONO_PATCH_INFO_FIELD: case MONO_PATCH_INFO_SIGNATURE: case MONO_PATCH_INFO_AOT_MODULE: target = patch_info->data.target; break; case MONO_PATCH_INFO_IID: mono_class_init_internal (patch_info->data.klass); target = GUINT_TO_POINTER (m_class_get_interface_id (patch_info->data.klass)); break; case MONO_PATCH_INFO_ADJUSTED_IID: mono_class_init_internal (patch_info->data.klass); target = GUINT_TO_POINTER ((guint32)(-((m_class_get_interface_id (patch_info->data.klass) + 1) * TARGET_SIZEOF_VOID_P))); break; case MONO_PATCH_INFO_VTABLE: target = mono_class_vtable_checked (patch_info->data.klass, error); mono_error_assert_ok (error); break; case MONO_PATCH_INFO_DELEGATE_TRAMPOLINE: { MonoDelegateClassMethodPair *del_tramp = patch_info->data.del_tramp; if (del_tramp->is_virtual) target = mono_create_delegate_virtual_trampoline (del_tramp->klass, del_tramp->method); else target = mono_create_delegate_trampoline_info (del_tramp->klass, del_tramp->method); break; } case MONO_PATCH_INFO_SFLDA: { MonoVTable *vtable = mono_class_vtable_checked (m_field_get_parent (patch_info->data.field), error); mono_error_assert_ok (error); if (mono_class_field_is_special_static (patch_info->data.field)) { gpointer addr = mono_special_static_field_get_offset (patch_info->data.field, error); mono_error_assert_ok (error); g_assert (addr); return addr; } if (!vtable->initialized && !mono_class_is_before_field_init (vtable->klass) && (!method || mono_class_needs_cctor_run (vtable->klass, method))) /* Done by the generated code */ ; else { if (run_cctors) { if (!mono_runtime_class_init_full (vtable, error)) { return NULL; } } } target = mono_static_field_get_addr (vtable, patch_info->data.field); break; } case MONO_PATCH_INFO_RVA: { guint32 field_index = mono_metadata_token_index (patch_info->data.token->token); guint32 rva; mono_metadata_field_info (patch_info->data.token->image, field_index - 1, NULL, &rva, NULL); target = mono_image_rva_map (patch_info->data.token->image, rva); break; } case MONO_PATCH_INFO_R4: case MONO_PATCH_INFO_R4_GOT: case MONO_PATCH_INFO_R8: case MONO_PATCH_INFO_R8_GOT: target = patch_info->data.target; break; case MONO_PATCH_INFO_EXC_NAME: target = patch_info->data.name; break; case MONO_PATCH_INFO_LDSTR: target = mono_ldstr_checked (patch_info->data.token->image, mono_metadata_token_index (patch_info->data.token->token), error); break; case MONO_PATCH_INFO_TYPE_FROM_HANDLE: { gpointer handle; MonoClass *handle_class; handle = mono_ldtoken_checked (patch_info->data.token->image, patch_info->data.token->token, &handle_class, patch_info->data.token->has_context ? 
&patch_info->data.token->context : NULL, error); if (!is_ok (error)) return NULL; mono_class_init_internal (handle_class); mono_class_init_internal (mono_class_from_mono_type_internal ((MonoType *)handle)); target = mono_type_get_object_checked ((MonoType *)handle, error); if (!is_ok (error)) return NULL; break; } case MONO_PATCH_INFO_LDTOKEN: { gpointer handle; MonoClass *handle_class; handle = mono_ldtoken_checked (patch_info->data.token->image, patch_info->data.token->token, &handle_class, patch_info->data.token->has_context ? &patch_info->data.token->context : NULL, error); mono_error_assert_msg_ok (error, "Could not patch ldtoken"); mono_class_init_internal (handle_class); target = handle; break; } case MONO_PATCH_INFO_DECLSEC: target = (mono_metadata_blob_heap (patch_info->data.token->image, patch_info->data.token->token) + 2); break; case MONO_PATCH_INFO_ICALL_ADDR: case MONO_PATCH_INFO_ICALL_ADDR_CALL: /* run_cctors == 0 -> AOT */ if (patch_info->data.method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) { if (run_cctors) { target = mono_lookup_pinvoke_call_internal (patch_info->data.method, error); if (!target) { if (mono_aot_only) return NULL; g_error ("Unable to resolve pinvoke method '%s' Re-run with MONO_LOG_LEVEL=debug for more information.\n", mono_method_full_name (patch_info->data.method, TRUE)); } } else { target = NULL; } } else { target = mono_lookup_internal_call (patch_info->data.method); if (mono_is_missing_icall_addr (target) && run_cctors) g_error ("Unregistered icall '%s'\n", mono_method_full_name (patch_info->data.method, TRUE)); } break; case MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG: target = &mono_thread_interruption_request_flag; break; case MONO_PATCH_INFO_METHOD_RGCTX: target = mini_method_get_rgctx (patch_info->data.method); break; case MONO_PATCH_INFO_RGCTX_SLOT_INDEX: { int slot = mini_get_rgctx_entry_slot (patch_info->data.rgctx_entry); target = GINT_TO_POINTER (MONO_RGCTX_SLOT_INDEX (slot)); break; } case MONO_PATCH_INFO_BB_OVF: case MONO_PATCH_INFO_EXC_OVF: case MONO_PATCH_INFO_GOT_OFFSET: case MONO_PATCH_INFO_NONE: break; case MONO_PATCH_INFO_RGCTX_FETCH: { int slot = mini_get_rgctx_entry_slot (patch_info->data.rgctx_entry); target = mono_create_rgctx_lazy_fetch_trampoline (slot); break; } #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED case MONO_PATCH_INFO_SEQ_POINT_INFO: if (!run_cctors) /* AOT, not needed */ target = NULL; else target = mono_arch_get_seq_point_info (code); break; #endif case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR: { int card_table_shift_bits; gpointer card_table_mask; target = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask); break; } case MONO_PATCH_INFO_GC_NURSERY_START: { int shift_bits; size_t size; target = mono_gc_get_nursery (&shift_bits, &size); break; } case MONO_PATCH_INFO_GC_NURSERY_BITS: { int shift_bits; size_t size; mono_gc_get_nursery (&shift_bits, &size); target = (gpointer)(gssize)shift_bits; break; } case MONO_PATCH_INFO_CASTCLASS_CACHE: { target = mono_mem_manager_alloc0 (mem_manager, sizeof (gpointer)); break; } case MONO_PATCH_INFO_OBJC_SELECTOR_REF: { target = NULL; break; } case MONO_PATCH_INFO_LDSTR_LIT: { int len; char *s; len = strlen ((const char *)patch_info->data.target); s = (char *)mono_mem_manager_alloc0 (mem_manager, len + 1); memcpy (s, patch_info->data.target, len); target = s; break; } case MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER: target = mini_get_gsharedvt_wrapper (TRUE, NULL, patch_info->data.sig, NULL, -1, FALSE); break; case MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT: { target = (gpointer) 
&mono_profiler_state.gc_allocation_count; break; } case MONO_PATCH_INFO_PROFILER_CLAUSE_COUNT: { target = (gpointer) &mono_profiler_state.exception_clause_count; break; } case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINES: case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINES_GOT_SLOTS_BASE: { /* Resolved in aot-runtime.c */ g_assert_not_reached (); target = NULL; break; } default: g_assert_not_reached (); } return (gpointer)target; } gpointer mono_resolve_patch_target (MonoMethod *method, guint8 *code, MonoJumpInfo *patch_info, gboolean run_cctors, MonoError *error) { return mono_resolve_patch_target_ext (get_default_mem_manager (), method, code, patch_info, run_cctors, error); } /* * mini_register_jump_site: * * Register IP as a jump/tailcall site which calls METHOD. * This is needed because common_call_trampoline () cannot patch * the call site because the caller ip is not available for jumps. */ void mini_register_jump_site (MonoMethod *method, gpointer ip) { MonoJumpList *jlist; MonoJitMemoryManager *jit_mm; MonoMethod *shared_method = mini_method_to_shared (method); method = shared_method ? shared_method : method; jit_mm = jit_mm_for_method (method); jit_mm_lock (jit_mm); jlist = (MonoJumpList *)g_hash_table_lookup (jit_mm->jump_target_hash, method); if (!jlist) { jlist = (MonoJumpList *)mono_mem_manager_alloc0 (jit_mm->mem_manager, sizeof (MonoJumpList)); g_hash_table_insert (jit_mm->jump_target_hash, method, jlist); } jlist->list = g_slist_prepend (jlist->list, ip); jit_mm_unlock (jit_mm); } /* * mini_patch_jump_sites: * * Patch jump/tailcall sites calling METHOD so the jump to ADDR. */ void mini_patch_jump_sites (MonoMethod *method, gpointer addr) { MonoJitMemoryManager *jit_mm; MonoJumpInfo patch_info; MonoJumpList *jlist; GSList *tmp; /* The caller/callee might use different instantiations */ MonoMethod *shared_method = mini_method_to_shared (method); method = shared_method ? shared_method : method; jit_mm = jit_mm_for_method (method); jit_mm_lock (jit_mm); jlist = (MonoJumpList *)g_hash_table_lookup (jit_mm->jump_target_hash, method); if (jlist) g_hash_table_remove (jit_mm->jump_target_hash, method); jit_mm_unlock (jit_mm); if (jlist) { patch_info.next = NULL; patch_info.ip.i = 0; patch_info.type = MONO_PATCH_INFO_METHOD_JUMP; patch_info.data.method = method; mono_codeman_enable_write (); for (tmp = jlist->list; tmp; tmp = tmp->next) mono_arch_patch_code_new (NULL, (guint8 *)tmp->data, &patch_info, addr); mono_codeman_disable_write (); } } /* * mini_patch_llvm_jit_callees: * * Patch function address slots used by llvm JITed code. 
*/ void mini_patch_llvm_jit_callees (MonoMethod *method, gpointer addr) { MonoJitMemoryManager *jit_mm; // FIXME: jit_mm = get_default_jit_mm (); if (!jit_mm->llvm_jit_callees) return; jit_mm_lock (jit_mm); GSList *callees = (GSList*)g_hash_table_lookup (jit_mm->llvm_jit_callees, method); GSList *l; for (l = callees; l; l = l->next) { gpointer *slot = (gpointer*)l->data; *slot = addr; } jit_mm_unlock (jit_mm); } void mini_init_gsctx (MonoMemPool *mp, MonoGenericContext *context, MonoGenericSharingContext *gsctx) { MonoGenericInst *inst; int i; memset (gsctx, 0, sizeof (MonoGenericSharingContext)); if (context && context->class_inst) { inst = context->class_inst; for (i = 0; i < inst->type_argc; ++i) { MonoType *type = inst->type_argv [i]; if (mini_is_gsharedvt_gparam (type)) gsctx->is_gsharedvt = TRUE; } } if (context && context->method_inst) { inst = context->method_inst; for (i = 0; i < inst->type_argc; ++i) { MonoType *type = inst->type_argv [i]; if (mini_is_gsharedvt_gparam (type)) gsctx->is_gsharedvt = TRUE; } } } /* * LOCKING: Acquires the jit code hash lock. */ MonoJitInfo* mini_lookup_method (MonoMethod *method, MonoMethod *shared) { MonoJitInfo *ji; MonoJitMemoryManager *jit_mm = jit_mm_for_method (method); static gboolean inited = FALSE; static int lookups = 0; static int failed_lookups = 0; jit_code_hash_lock (jit_mm); ji = (MonoJitInfo *)mono_internal_hash_table_lookup (&jit_mm->jit_code_hash, method); jit_code_hash_unlock (jit_mm); if (!ji && shared) { jit_mm = jit_mm_for_method (shared); jit_code_hash_lock (jit_mm); /* Try generic sharing */ ji = (MonoJitInfo *)mono_internal_hash_table_lookup (&jit_mm->jit_code_hash, shared); if (ji && !ji->has_generic_jit_info) ji = NULL; if (!inited) { mono_counters_register ("Shared generic lookups", MONO_COUNTER_INT|MONO_COUNTER_GENERICS, &lookups); mono_counters_register ("Failed shared generic lookups", MONO_COUNTER_INT|MONO_COUNTER_GENERICS, &failed_lookups); inited = TRUE; } ++lookups; if (!ji) ++failed_lookups; jit_code_hash_unlock (jit_mm); } return ji; } static MonoJitInfo* lookup_method (MonoMethod *method) { ERROR_DECL (error); MonoJitInfo *ji; MonoMethod *shared; ji = mini_lookup_method (method, NULL); if (!ji) { if (!mono_method_is_generic_sharable (method, FALSE)) return NULL; shared = mini_get_shared_method_full (method, SHARE_MODE_NONE, error); mono_error_assert_ok (error); ji = mini_lookup_method (method, shared); } return ji; } MonoClass* mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context) { ERROR_DECL (error); MonoClass *klass; if (method->wrapper_type != MONO_WRAPPER_NONE) { klass = (MonoClass *)mono_method_get_wrapper_data (method, token); if (context) { klass = mono_class_inflate_generic_class_checked (klass, context, error); mono_error_cleanup (error); /* FIXME don't swallow the error */ } } else { klass = mono_class_get_and_inflate_typespec_checked (m_class_get_image (method->klass), token, context, error); mono_error_cleanup (error); /* FIXME don't swallow the error */ } if (klass) mono_class_init_internal (klass); return klass; } #if ENABLE_JIT_MAP static FILE* perf_map_file; void mono_enable_jit_map (void) { if (!perf_map_file) { char name [64]; g_snprintf (name, sizeof (name), "/tmp/perf-%d.map", getpid ()); unlink (name); perf_map_file = fopen (name, "w"); } } void mono_emit_jit_tramp (void *start, int size, const char *desc) { if (perf_map_file) fprintf (perf_map_file, "%" PRIx64 " %x %s\n", (guint64)(gsize)start, size, desc); } void mono_emit_jit_map (MonoJitInfo *jinfo) { if 
(perf_map_file) { char *name = mono_method_full_name (jinfo_get_method (jinfo), TRUE); mono_emit_jit_tramp (jinfo->code_start, jinfo->code_size, name); g_free (name); } } gboolean mono_jit_map_is_enabled (void) { return perf_map_file != NULL; } #endif #ifdef ENABLE_JIT_DUMP #include <sys/mman.h> #include <sys/syscall.h> #include <elf.h> static FILE *perf_dump_file; static mono_mutex_t perf_dump_mutex; static void *perf_dump_mmap_addr = MAP_FAILED; static guint32 perf_dump_pid; static clockid_t clock_id = CLOCK_MONOTONIC; enum { JIT_DUMP_MAGIC = 0x4A695444, JIT_DUMP_VERSION = 2, #if HOST_X86 ELF_MACHINE = EM_386, #elif HOST_AMD64 ELF_MACHINE = EM_X86_64, #elif HOST_ARM ELF_MACHINE = EM_ARM, #elif HOST_ARM64 ELF_MACHINE = EM_AARCH64, #elif HOST_POWERPC64 ELF_MACHINE = EM_PPC64, #elif HOST_S390X ELF_MACHINE = EM_S390, #elif HOST_RISCV ELF_MACHINE = EM_RISCV, #elif HOST_MIPS ELF_MACHINE = EM_MIPS, #endif JIT_CODE_LOAD = 0 }; typedef struct { guint32 magic; guint32 version; guint32 total_size; guint32 elf_mach; guint32 pad1; guint32 pid; guint64 timestamp; guint64 flags; } FileHeader; typedef struct { guint32 id; guint32 total_size; guint64 timestamp; } RecordHeader; typedef struct { RecordHeader header; guint32 pid; guint32 tid; guint64 vma; guint64 code_addr; guint64 code_size; guint64 code_index; // Null terminated function name // Native code } JitCodeLoadRecord; static void add_file_header_info (FileHeader *header); static void add_basic_JitCodeLoadRecord_info (JitCodeLoadRecord *record); void mono_enable_jit_dump (void) { if (perf_dump_pid == 0) perf_dump_pid = getpid(); if (!perf_dump_file) { char name [64]; FileHeader header; memset (&header, 0, sizeof (header)); mono_os_mutex_init (&perf_dump_mutex); mono_os_mutex_lock (&perf_dump_mutex); g_snprintf (name, sizeof (name), "/tmp/jit-%d.dump", perf_dump_pid); unlink (name); perf_dump_file = fopen (name, "w"); add_file_header_info (&header); if (perf_dump_file) { fwrite (&header, sizeof (header), 1, perf_dump_file); //This informs perf of the presence of the jitdump file and support for the feature. perf_dump_mmap_addr = mmap (NULL, sizeof (header), PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno (perf_dump_file), 0); } mono_os_mutex_unlock (&perf_dump_mutex); } } static void add_file_header_info (FileHeader *header) { header->magic = JIT_DUMP_MAGIC; header->version = JIT_DUMP_VERSION; header->total_size = sizeof (header); header->elf_mach = ELF_MACHINE; header->pad1 = 0; header->pid = perf_dump_pid; header->timestamp = mono_clock_get_time_ns (clock_id); header->flags = 0; } void mono_emit_jit_dump (MonoJitInfo *jinfo, gpointer code) { static uint64_t code_index; if (perf_dump_file) { JitCodeLoadRecord record; size_t nameLen = strlen (jinfo->d.method->name); memset (&record, 0, sizeof (record)); add_basic_JitCodeLoadRecord_info (&record); record.header.total_size = sizeof (record) + nameLen + 1 + jinfo->code_size; record.vma = (guint64)jinfo->code_start; record.code_addr = (guint64)jinfo->code_start; record.code_size = (guint64)jinfo->code_size; mono_os_mutex_lock (&perf_dump_mutex); record.code_index = ++code_index; // TODO: write debugInfo and unwindInfo immediately before the JitCodeLoadRecord (while lock is held). 
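/* The entry is written in three parts - the fixed-size JitCodeLoadRecord, the NUL-terminated method name, then the raw native code - whose combined size must match header.total_size computed above; the timestamp is refreshed under perf_dump_mutex so records stay ordered in the file. */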
		record.header.timestamp = mono_clock_get_time_ns (clock_id);

		fwrite (&record, sizeof (record), 1, perf_dump_file);
		fwrite (jinfo->d.method->name, nameLen + 1, 1, perf_dump_file);
		fwrite (code, jinfo->code_size, 1, perf_dump_file);

		mono_os_mutex_unlock (&perf_dump_mutex);
	}
}

static void
add_basic_JitCodeLoadRecord_info (JitCodeLoadRecord *record)
{
	record->header.id = JIT_CODE_LOAD;
	record->header.timestamp = mono_clock_get_time_ns (clock_id);
	record->pid = perf_dump_pid;
	record->tid = syscall (SYS_gettid);
}

void
mono_jit_dump_cleanup (void)
{
	if (perf_dump_mmap_addr != MAP_FAILED)
		munmap (perf_dump_mmap_addr, sizeof(FileHeader));
	if (perf_dump_file)
		fclose (perf_dump_file);
}

#else

void
mono_enable_jit_dump (void)
{
}

void
mono_emit_jit_dump (MonoJitInfo *jinfo, gpointer code)
{
}

void
mono_jit_dump_cleanup (void)
{
}

#endif

static void
no_gsharedvt_in_wrapper (void)
{
	g_assert_not_reached ();
}

/*
Overall algorithm:

When a JIT request is made, we check if there's an outstanding one for that method and, if it exists, we put the
thread to sleep. If the current thread is already JITing another method, don't wait, as it might cause a deadlock;
dependency management in this case is too complex to justify implementing it.

If there are no outstanding requests, the current thread is not JITing anything, and there are already
mono_cpu_count threads JITing, go to sleep.

TODO:
	Get rid of cctor invocations from within the JIT, it increases JIT duration and complicates things A LOT.
	Can we get rid of ref_count and use `done && threads_waiting == 0` as the equivalent of `ref_count == 0`?
	Reduce the amount of dynamic allocation - possible once the JIT is no longer reentrant.
	Maybe pool JitCompilationEntry, especially those with an inited cond var.
*/
typedef struct {
	MonoMethod *method;
	int compilation_count; /* Number of threads compiling this method - This happens due to the JIT being reentrant */
	int ref_count; /* Number of threads using this JitCompilationEntry, roughly 1 + threads_waiting */
	int threads_waiting; /* Number of threads waiting on this job */
	gboolean has_cond; /* True if @cond was initialized */
	gboolean done; /* True if the method finished JIT'ing */
	MonoCoopCond cond; /* Cond that sleeping threads wait on */
} JitCompilationEntry;

typedef struct {
	GPtrArray *in_flight_methods; //JitCompilationEntry*
	MonoCoopMutex lock;
} JitCompilationData;

/*
Timeout, in milliseconds, that we wait for other threads to finish JITing. This value can't be too small or we
won't see enough methods being reused, and it can't be too big or it will cause massive stalls due to
unforeseeable circumstances.
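
Illustration (timings hypothetical): with a 1000 ms timeout, a waiter that times out or wakes spuriously gives up
waiting and JITs the method itself (see wait_or_register_method_to_compile () below), so the timeout bounds the
worst-case stall; it does not guarantee that the concurrently-compiled method gets reused.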
*/
#define MAX_JIT_TIMEOUT_MS 1000

static JitCompilationData compilation_data;
static int jit_methods_waited, jit_methods_multiple, jit_methods_overload, jit_spurious_wakeups_or_timeouts;

static void
mini_jit_init_job_control (void)
{
	mono_coop_mutex_init (&compilation_data.lock);
	compilation_data.in_flight_methods = g_ptr_array_new ();
}

static void
lock_compilation_data (void)
{
	mono_coop_mutex_lock (&compilation_data.lock);
}

static void
unlock_compilation_data (void)
{
	mono_coop_mutex_unlock (&compilation_data.lock);
}

static JitCompilationEntry*
find_method (MonoMethod *method)
{
	int i;
	for (i = 0; i < compilation_data.in_flight_methods->len; ++i){
		JitCompilationEntry *e = (JitCompilationEntry*)compilation_data.in_flight_methods->pdata [i];
		if (e->method == method)
			return e;
	}
	return NULL;
}

static void
add_current_thread (MonoJitTlsData *jit_tls)
{
	++jit_tls->active_jit_methods;
}

static void
unref_jit_entry (JitCompilationEntry *entry)
{
	--entry->ref_count;
	if (entry->ref_count)
		return;
	if (entry->has_cond)
		mono_coop_cond_destroy (&entry->cond);
	g_free (entry);
}

/*
 * Returns true if this method waited successfully for another thread to JIT it
 */
static gboolean
wait_or_register_method_to_compile (MonoMethod *method)
{
	MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
	JitCompilationEntry *entry;

	static gboolean inited;
	if (!inited) {
		mono_counters_register ("JIT compile waited others", MONO_COUNTER_INT|MONO_COUNTER_JIT, &jit_methods_waited);
		mono_counters_register ("JIT compile 1+ jobs", MONO_COUNTER_INT|MONO_COUNTER_JIT, &jit_methods_multiple);
		mono_counters_register ("JIT compile overload wait", MONO_COUNTER_INT|MONO_COUNTER_JIT, &jit_methods_overload);
		mono_counters_register ("JIT compile spurious wakeups or timeouts", MONO_COUNTER_INT|MONO_COUNTER_JIT, &jit_spurious_wakeups_or_timeouts);
		inited = TRUE;
	}

	lock_compilation_data ();

	if (!(entry = find_method (method))) {
		entry = g_new0 (JitCompilationEntry, 1);
		entry->method = method;
		entry->compilation_count = entry->ref_count = 1;
		g_ptr_array_add (compilation_data.in_flight_methods, entry);
		g_assert (find_method (method) == entry);
		add_current_thread (jit_tls);

		unlock_compilation_data ();
		return FALSE;
	} else if (jit_tls->active_jit_methods > 0 || mono_threads_is_current_thread_in_protected_block ()) {
		//We can't suspend the current thread if it's already JITing a method.
		//Dependency management is too complicated and we want to get rid of this anyway.

		//We can't suspend the current thread if it's running a protected block (such as a cctor)
		//We can't rely only on JIT nesting as cctors can be run from outside the JIT.

		//Finally, we hit a timeout or a spurious wakeup.
We're better off just giving up and keep recompiling ++entry->compilation_count; ++jit_methods_multiple; ++jit_tls->active_jit_methods; unlock_compilation_data (); return FALSE; } else { ++jit_methods_waited; ++entry->ref_count; if (!entry->has_cond) { mono_coop_cond_init (&entry->cond); entry->has_cond = TRUE; } while (TRUE) { ++entry->threads_waiting; g_assert (entry->has_cond); mono_coop_cond_timedwait (&entry->cond, &compilation_data.lock, MAX_JIT_TIMEOUT_MS); --entry->threads_waiting; if (entry->done) { unref_jit_entry (entry); unlock_compilation_data (); return TRUE; } else { //We hit the timeout or a spurious wakeup, fallback to JITing g_assert (entry->ref_count > 1); unref_jit_entry (entry); ++jit_spurious_wakeups_or_timeouts; ++entry->compilation_count; ++jit_methods_multiple; ++jit_tls->active_jit_methods; unlock_compilation_data (); return FALSE; } } } } static void unregister_method_for_compile (MonoMethod *method) { MonoJitTlsData *jit_tls = mono_tls_get_jit_tls (); lock_compilation_data (); g_assert (jit_tls->active_jit_methods > 0); --jit_tls->active_jit_methods; JitCompilationEntry *entry = find_method (method); g_assert (entry); // It would be weird to fail entry->done = TRUE; if (entry->threads_waiting) { g_assert (entry->has_cond); mono_coop_cond_broadcast (&entry->cond); } if (--entry->compilation_count == 0) { g_ptr_array_remove (compilation_data.in_flight_methods, entry); unref_jit_entry (entry); } unlock_compilation_data (); } static MonoJitInfo* create_jit_info_for_trampoline (MonoMethod *wrapper, MonoTrampInfo *info) { MonoJitInfo *jinfo; guint8 *uw_info; guint32 info_len; if (info->uw_info) { uw_info = info->uw_info; info_len = info->uw_info_len; } else { uw_info = mono_unwind_ops_encode (info->unwind_ops, &info_len); } jinfo = (MonoJitInfo *)mono_mem_manager_alloc0 (get_default_mem_manager (), MONO_SIZEOF_JIT_INFO); jinfo->d.method = wrapper; jinfo->code_start = MINI_FTNPTR_TO_ADDR (info->code); jinfo->code_size = info->code_size; jinfo->unwind_info = mono_cache_unwind_info (uw_info, info_len); if (!info->uw_info) g_free (uw_info); return jinfo; } static gpointer compile_special (MonoMethod *method, MonoError *error) { MonoJitInfo *jinfo; gpointer code; if (mono_llvm_only) { if (method->wrapper_type == MONO_WRAPPER_OTHER) { WrapperInfo *info = mono_marshal_get_wrapper_info (method); if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG) { /* * These wrappers are only created for signatures which are in the program, but * sometimes we load methods too eagerly and have to create them even if they * will never be called. */ return (gpointer)no_gsharedvt_in_wrapper; } } } if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) { MonoMethodPInvoke* piinfo = (MonoMethodPInvoke *) method; if (!piinfo->addr) { if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) { guint32 flags = MONO_ICALL_FLAGS_NONE; gpointer icall_addr; icall_addr = (gpointer)mono_lookup_internal_call_full_with_flags (method, TRUE, (guint32 *)&flags); if (flags & MONO_ICALL_FLAGS_NO_WRAPPER) { piinfo->icflags = MONO_ICALL_FLAGS_NO_WRAPPER; mono_memory_write_barrier (); } piinfo->addr = icall_addr; } else if (method->iflags & METHOD_IMPL_ATTRIBUTE_NATIVE) { #ifdef HOST_WIN32 g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono in modules loaded from byte arrays. 
The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), m_class_get_image (method->klass)->name); #else g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono on this platform. The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), m_class_get_image (method->klass)->name); #endif } else { ERROR_DECL (ignored_error); mono_lookup_pinvoke_call_internal (method, ignored_error); mono_error_cleanup (ignored_error); } } mono_memory_read_barrier (); gpointer compiled_method = NULL; if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && (piinfo->icflags & MONO_ICALL_FLAGS_NO_WRAPPER)) { compiled_method = piinfo->addr; } else { MonoMethod *nm = mono_marshal_get_native_wrapper (method, TRUE, mono_aot_only); compiled_method = mono_jit_compile_method_jit_only (nm, error); return_val_if_nok (error, NULL); } code = mono_get_addr_from_ftnptr (compiled_method); jinfo = mini_jit_info_table_find (code); if (jinfo) MONO_PROFILER_RAISE (jit_done, (method, jinfo)); return code; } else if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) { const char *name = method->name; char *full_name; MonoMethod *nm; if (m_class_get_parent (method->klass) == mono_defaults.multicastdelegate_class) { if (*name == '.' && (strcmp (name, ".ctor") == 0)) { MonoJitICallInfo *mi = &mono_get_jit_icall_info ()->ves_icall_mono_delegate_ctor; /* * We need to make sure this wrapper * is compiled because it might end up * in an (M)RGCTX if generic sharing * is enabled, and would be called * indirectly. If it were a * trampoline we'd try to patch that * indirect call, which is not * possible. */ return mono_get_addr_from_ftnptr ((gpointer)mono_icall_get_wrapper_full (mi, TRUE)); } else if (*name == 'I' && (strcmp (name, "Invoke") == 0)) { if (mono_llvm_only) { nm = mono_marshal_get_delegate_invoke (method, NULL); gpointer compiled_ptr = mono_jit_compile_method_jit_only (nm, error); return_val_if_nok (error, NULL); return mono_get_addr_from_ftnptr (compiled_ptr); } /* HACK: missing gsharedvt_out wrappers to do transition to del tramp in interp-only mode */ if (mono_use_interpreter) return NULL; return mono_create_delegate_trampoline (method->klass); } else if (*name == 'B' && (strcmp (name, "BeginInvoke") == 0)) { nm = mono_marshal_get_delegate_begin_invoke (method); gpointer compiled_ptr = mono_jit_compile_method_jit_only (nm, error); return_val_if_nok (error, NULL); return mono_get_addr_from_ftnptr (compiled_ptr); } else if (*name == 'E' && (strcmp (name, "EndInvoke") == 0)) { nm = mono_marshal_get_delegate_end_invoke (method); gpointer compiled_ptr = mono_jit_compile_method_jit_only (nm, error); return_val_if_nok (error, NULL); return mono_get_addr_from_ftnptr (compiled_ptr); } } full_name = mono_method_full_name (method, TRUE); mono_error_set_invalid_program (error, "Unrecognizable runtime implemented method '%s'", full_name); g_free (full_name); return NULL; } if (method->wrapper_type == MONO_WRAPPER_OTHER) { WrapperInfo *info = mono_marshal_get_wrapper_info (method); if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT) { static MonoTrampInfo *in_tinfo, *out_tinfo; MonoTrampInfo *tinfo; MonoJitInfo *jinfo; gboolean is_in = info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN; if (is_in && in_tinfo) return in_tinfo->code; else if (!is_in && out_tinfo) return out_tinfo->code; /* * This is a special wrapper whose body is implemented in assembly, like a trampoline. 
We use a wrapper so EH * works. * FIXME: The caller signature doesn't match the callee, which might cause problems on some platforms */ if (mono_ee_features.use_aot_trampolines) mono_aot_get_trampoline_full (is_in ? "gsharedvt_trampoline" : "gsharedvt_out_trampoline", &tinfo); else mono_arch_get_gsharedvt_trampoline (&tinfo, FALSE); jinfo = create_jit_info_for_trampoline (method, tinfo); mono_jit_info_table_add (jinfo); if (is_in) in_tinfo = tinfo; else out_tinfo = tinfo; return tinfo->code; } } return NULL; } static gpointer mono_jit_compile_method_with_opt (MonoMethod *method, guint32 opt, gboolean jit_only, MonoError *error) { MonoJitInfo *info; gpointer code = NULL, p; MonoJitICallInfo *callinfo = NULL; WrapperInfo *winfo = NULL; gboolean use_interp = FALSE; error_init (error); if (mono_ee_features.force_use_interpreter && !jit_only) use_interp = TRUE; if (!use_interp && mono_interp_only_classes) { for (GSList *l = mono_interp_only_classes; l; l = l->next) { if (!strcmp (m_class_get_name (method->klass), (char*)l->data)) use_interp = TRUE; } } if (use_interp) { code = mini_get_interp_callbacks ()->create_method_pointer (method, TRUE, error); if (code) return code; return_val_if_nok (error, NULL); } if (mono_llvm_only) /* Should be handled by the caller */ g_assert (!(method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED)); /* * ICALL wrappers are handled specially, since there is only one copy of them * shared by all appdomains. */ if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) winfo = mono_marshal_get_wrapper_info (method); if (winfo && winfo->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER) callinfo = mono_find_jit_icall_info (winfo->d.icall.jit_icall_id); if (method->wrapper_type == MONO_WRAPPER_OTHER) { WrapperInfo *info = mono_marshal_get_wrapper_info (method); g_assert (info); if (info->subtype == WRAPPER_SUBTYPE_SYNCHRONIZED_INNER) { MonoGenericContext *ctx = NULL; if (method->is_inflated) ctx = mono_method_get_context (method); method = info->d.synchronized_inner.method; if (ctx) { method = mono_class_inflate_generic_method_checked (method, ctx, error); g_assert (is_ok (error)); /* FIXME don't swallow the error */ } } } lookup_start: info = lookup_method (method); if (info) { MonoVTable *vtable; mono_atomic_inc_i32 (&mono_jit_stats.methods_lookups); vtable = mono_class_vtable_checked (method->klass, error); if (!is_ok (error)) return NULL; g_assert (vtable); if (!mono_runtime_class_init_full (vtable, error)) return NULL; code = MINI_ADDR_TO_FTNPTR (info->code_start); return mono_create_ftnptr (code); } #ifdef MONO_USE_AOT_COMPILER if (opt & MONO_OPT_AOT) { mono_class_init_internal (method->klass); code = mono_aot_get_method (method, error); if (code) { MonoVTable *vtable; if (mono_gc_is_critical_method (method)) { /* * The suspend code needs to be able to lookup these methods by ip in async context, * so preload their jit info. */ MonoJitInfo *ji = mini_jit_info_table_find (code); g_assert (ji); } /* * In llvm-only mode, method might be a shared method, so we can't initialize its class. * This is not a problem, since it will be initialized when the method is first * called by init_method (). 
*/ if (!mono_llvm_only && !mono_class_is_open_constructed_type (m_class_get_byval_arg (method->klass))) { vtable = mono_class_vtable_checked (method->klass, error); mono_error_assert_ok (error); if (!mono_runtime_class_init_full (vtable, error)) return NULL; } } if (!is_ok (error)) return NULL; } #endif if (!code) { code = compile_special (method, error); if (!is_ok (error)) return NULL; } if (!jit_only && !code && mono_aot_only && mono_use_interpreter && method->wrapper_type != MONO_WRAPPER_OTHER) { if (mono_llvm_only) { /* Signal to the caller that AOTed code is not found */ return NULL; } code = mini_get_interp_callbacks ()->create_method_pointer (method, TRUE, error); if (!is_ok (error)) return NULL; } if (!code) { if (mono_class_is_open_constructed_type (m_class_get_byval_arg (method->klass))) { char *full_name = mono_type_get_full_name (method->klass); mono_error_set_invalid_operation (error, "Could not execute the method because the containing type '%s', is not fully instantiated.", full_name); g_free (full_name); return NULL; } if (mono_aot_only) { char *fullname = mono_method_get_full_name (method); mono_error_set_execution_engine (error, "Attempting to JIT compile method '%s' while running in aot-only mode. See https://docs.microsoft.com/xamarin/ios/internals/limitations for more information.\n", fullname); g_free (fullname); return NULL; } if (wait_or_register_method_to_compile (method)) goto lookup_start; code = mono_jit_compile_method_inner (method, opt, error); unregister_method_for_compile (method); } if (!is_ok (error)) return NULL; if (!code && mono_llvm_only) { printf ("AOT method not found in llvmonly mode: %s\n", mono_method_full_name (method, 1)); g_assert_not_reached (); } if (!code) return NULL; //FIXME mini_jit_info_table_find doesn't work yet under wasm due to code_start/code_end issues. #ifndef HOST_WASM if ((method->wrapper_type == MONO_WRAPPER_WRITE_BARRIER || method->wrapper_type == MONO_WRAPPER_ALLOC)) { /* * SGEN requires the JIT info for these methods to be registered, see is_ip_in_managed_allocator (). */ MonoJitInfo *ji = mini_jit_info_table_find (code); g_assert (ji); } #endif p = mono_create_ftnptr (code); if (callinfo) { // FIXME Locking here is somewhat historical due to mono_register_jit_icall_wrapper taking loader lock. // atomic_compare_exchange should suffice. mono_loader_lock (); mono_jit_lock (); if (!callinfo->wrapper) { callinfo->wrapper = p; } mono_jit_unlock (); mono_loader_unlock (); } // FIXME p or callinfo->wrapper or does not matter? return p; } typedef struct { MonoMethod *method; guint32 opt; gboolean jit_only; MonoError *error; gpointer code; } JitCompileMethodWithOptCallbackData; static void jit_compile_method_with_opt_cb (gpointer arg) { JitCompileMethodWithOptCallbackData *params = (JitCompileMethodWithOptCallbackData *)arg; params->code = mono_jit_compile_method_with_opt (params->method, params->opt, params->jit_only, params->error); } static gpointer jit_compile_method_with_opt (JitCompileMethodWithOptCallbackData *params) { MonoLMFExt ext; memset (&ext, 0, sizeof (MonoLMFExt)); ext.kind = MONO_LMFEXT_JIT_ENTRY; mono_push_lmf (&ext); gboolean thrown = FALSE; #if defined(ENABLE_LLVM_RUNTIME) || defined(ENABLE_LLVM) mono_llvm_cpp_catch_exception (jit_compile_method_with_opt_cb, params, &thrown); #else jit_compile_method_with_opt_cb (params); #endif mono_pop_lmf (&ext.lmf); return !thrown ? 
params->code : NULL; } gpointer mono_jit_compile_method (MonoMethod *method, MonoError *error) { JitCompileMethodWithOptCallbackData params; params.method = method; params.opt = mono_get_optimizations_for_method (method, default_opt); params.jit_only = FALSE; params.error = error; params.code = NULL; return jit_compile_method_with_opt (&params); } /* * mono_jit_compile_method_jit_only: * * Compile METHOD using the JIT/AOT, even in interpreted mode. */ gpointer mono_jit_compile_method_jit_only (MonoMethod *method, MonoError *error) { JitCompileMethodWithOptCallbackData params; params.method = method; params.opt = mono_get_optimizations_for_method (method, default_opt); params.jit_only = TRUE; params.error = error; params.code = NULL; return jit_compile_method_with_opt (&params); } /* * get_ftnptr_for_method: * * Return a function pointer for METHOD which is indirectly callable from managed code. * On llvmonly, this returns a MonoFtnDesc, otherwise it returns a normal function pointer. */ static gpointer get_ftnptr_for_method (MonoMethod *method, MonoError *error) { if (!mono_llvm_only) { return mono_jit_compile_method (method, error); } else { return mini_llvmonly_load_method_ftndesc (method, FALSE, FALSE, error); } } #ifdef MONO_ARCH_HAVE_INVALIDATE_METHOD static void invalidated_delegate_trampoline (char *desc) { g_error ("Unmanaged code called delegate of type %s which was already garbage collected.\n" "See http://www.mono-project.com/Diagnostic:Delegate for an explanation and ways to fix this.", desc); } #endif /* * mono_jit_free_method: * * Free all memory allocated by the JIT for METHOD. */ static void mono_jit_free_method (MonoMethod *method) { MonoJitDynamicMethodInfo *ji; gboolean destroy = TRUE, removed; GHashTableIter iter; MonoJumpList *jlist; MonoJitMemoryManager *jit_mm; g_assert (method->dynamic); if (mono_use_interpreter) mini_get_interp_callbacks ()->free_method (method); ji = mono_dynamic_code_hash_lookup (method); if (!ji) return; mono_debug_remove_method (method, NULL); mono_lldb_remove_method (method, ji); //seq_points are always on get_default_jit_mm jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); g_hash_table_remove (jit_mm->seq_points, method); jit_mm_unlock (jit_mm); jit_mm = jit_mm_for_method (method); jit_code_hash_lock (jit_mm); removed = mono_internal_hash_table_remove (&jit_mm->jit_code_hash, method); g_assert (removed); jit_code_hash_unlock (jit_mm); ji->ji->seq_points = NULL; jit_mm_lock (jit_mm); mono_conc_hashtable_remove (jit_mm->runtime_invoke_hash, method); g_hash_table_remove (jit_mm->dynamic_code_hash, method); g_hash_table_remove (jit_mm->jump_trampoline_hash, method); g_hash_table_remove (jit_mm->seq_points, method); g_hash_table_iter_init (&iter, jit_mm->jump_target_hash); while (g_hash_table_iter_next (&iter, NULL, (void**)&jlist)) { GSList *tmp, *remove; remove = NULL; for (tmp = jlist->list; tmp; tmp = tmp->next) { guint8 *ip = (guint8 *)tmp->data; if (ip >= (guint8*)ji->ji->code_start && ip < (guint8*)ji->ji->code_start + ji->ji->code_size) remove = g_slist_prepend (remove, tmp); } for (tmp = remove; tmp; tmp = tmp->next) { jlist->list = g_slist_delete_link ((GSList *)jlist->list, (GSList *)tmp->data); } g_slist_free (remove); } jit_mm_unlock (jit_mm); #ifdef MONO_ARCH_HAVE_INVALIDATE_METHOD if (mini_debug_options.keep_delegates && method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) { /* * Instead of freeing the code, change it to call an error routine * so people can fix their code. 
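	 * (invalidated_delegate_trampoline () above aborts via g_error with a
	 * message naming the delegate type and a link explaining the problem.)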
*/ char *type = mono_type_full_name (m_class_get_byval_arg (method->klass)); char *type_and_method = g_strdup_printf ("%s.%s", type, method->name); g_free (type); mono_arch_invalidate_method (ji->ji, (gpointer)invalidated_delegate_trampoline, (gpointer)type_and_method); destroy = FALSE; } #endif /* * This needs to be done before freeing code_mp, since the code address is the * key in the table, so if we free the code_mp first, another thread can grab the * same code address and replace our entry in the table. */ mono_jit_info_table_remove (ji->ji); if (destroy) mono_code_manager_destroy (ji->code_mp); g_free (ji); } gpointer mono_jit_search_all_backends_for_jit_info (MonoMethod *method, MonoJitInfo **out_ji) { gpointer code; MonoJitInfo *ji; code = mono_jit_find_compiled_method_with_jit_info (method, &ji); if (!code) { ERROR_DECL (oerror); /* Might be AOTed code */ mono_class_init_internal (method->klass); code = mono_aot_get_method (method, oerror); if (code) { mono_error_assert_ok (oerror); ji = mini_jit_info_table_find (code); } else { if (!is_ok (oerror)) mono_error_cleanup (oerror); /* Might be interpreted */ ji = mini_get_interp_callbacks ()->find_jit_info (method); } } *out_ji = ji; return code; } gpointer mono_jit_find_compiled_method_with_jit_info (MonoMethod *method, MonoJitInfo **ji) { MonoJitInfo *info; info = lookup_method (method); if (info) { mono_atomic_inc_i32 (&mono_jit_stats.methods_lookups); if (ji) *ji = info; return MINI_ADDR_TO_FTNPTR (info->code_start); } if (ji) *ji = NULL; return NULL; } static guint32 bisect_opt = 0; static GHashTable *bisect_methods_hash = NULL; void mono_set_bisect_methods (guint32 opt, const char *method_list_filename) { FILE *file; char method_name [2048]; bisect_opt = opt; bisect_methods_hash = g_hash_table_new (g_str_hash, g_str_equal); g_assert (bisect_methods_hash); file = fopen (method_list_filename, "r"); g_assert (file); while (fgets (method_name, sizeof (method_name), file)) { size_t len = strlen (method_name); g_assert (len > 0); g_assert (method_name [len - 1] == '\n'); method_name [len - 1] = 0; g_hash_table_insert (bisect_methods_hash, g_strdup (method_name), GINT_TO_POINTER (1)); } g_assert (feof (file)); } gboolean mono_do_single_method_regression = FALSE; guint32 mono_single_method_regression_opt = 0; MonoMethod *mono_current_single_method; GSList *mono_single_method_list; GHashTable *mono_single_method_hash; guint32 mono_get_optimizations_for_method (MonoMethod *method, guint32 opt) { g_assert (method); if (bisect_methods_hash) { char *name = mono_method_full_name (method, TRUE); void *res = g_hash_table_lookup (bisect_methods_hash, name); g_free (name); if (res) return opt | bisect_opt; } if (!mono_do_single_method_regression) return opt; if (!mono_current_single_method) { if (!mono_single_method_hash) mono_single_method_hash = g_hash_table_new (g_direct_hash, g_direct_equal); if (!g_hash_table_lookup (mono_single_method_hash, method)) { g_hash_table_insert (mono_single_method_hash, method, method); mono_single_method_list = g_slist_prepend (mono_single_method_list, method); } return opt; } if (method == mono_current_single_method) return mono_single_method_regression_opt; return opt; } gpointer mono_jit_find_compiled_method (MonoMethod *method) { return mono_jit_find_compiled_method_with_jit_info (method, NULL); } typedef struct { MonoMethod *method; gpointer compiled_method; gpointer runtime_invoke; MonoVTable *vtable; MonoDynCallInfo *dyn_call_info; MonoClass *ret_box_class; MonoMethodSignature *sig; gboolean 
gsharedvt_invoke; gboolean use_interp; gpointer *wrapper_arg; } RuntimeInvokeInfo; #define MONO_SIZEOF_DYN_CALL_RET_BUF TARGET_SIZEOF_VOID_P static RuntimeInvokeInfo* create_runtime_invoke_info (MonoMethod *method, gpointer compiled_method, gboolean callee_gsharedvt, gboolean use_interp, MonoError *error) { MonoMethod *invoke; RuntimeInvokeInfo *info = NULL; RuntimeInvokeInfo *ret = NULL; info = g_new0 (RuntimeInvokeInfo, 1); info->compiled_method = compiled_method; info->use_interp = use_interp; info->sig = mono_method_signature_internal (method); invoke = mono_marshal_get_runtime_invoke (method, FALSE); (void)invoke; info->vtable = mono_class_vtable_checked (method->klass, error); if (!is_ok (error)) goto exit; g_assert (info->vtable); MonoMethodSignature *sig; sig = info->sig; MonoType *ret_type; /* * We want to avoid AOTing 1000s of runtime-invoke wrappers when running * in full-aot mode, so we use a slower, but more generic wrapper if * possible, built on top of the OP_DYN_CALL opcode provided by the JIT. */ #ifdef MONO_ARCH_DYN_CALL_SUPPORTED if (!mono_llvm_only && (mono_aot_only || mini_debug_options.dyn_runtime_invoke)) { gboolean supported = TRUE; int i; if (method->string_ctor) sig = mono_marshal_get_string_ctor_signature (method); for (i = 0; i < sig->param_count; ++i) { MonoType *t = sig->params [i]; if (m_type_is_byref (t) && t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) supported = FALSE; } if (!info->compiled_method) supported = FALSE; if (supported) { info->dyn_call_info = mono_arch_dyn_call_prepare (sig); if (mini_debug_options.dyn_runtime_invoke) g_assert (info->dyn_call_info); } } #endif ret_type = sig->ret; switch (ret_type->type) { case MONO_TYPE_VOID: break; case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_I8: case MONO_TYPE_U8: case MONO_TYPE_BOOLEAN: case MONO_TYPE_CHAR: case MONO_TYPE_R4: case MONO_TYPE_R8: info->ret_box_class = mono_class_from_mono_type_internal (ret_type); break; case MONO_TYPE_PTR: info->ret_box_class = mono_defaults.int_class; break; case MONO_TYPE_STRING: case MONO_TYPE_CLASS: case MONO_TYPE_ARRAY: case MONO_TYPE_SZARRAY: case MONO_TYPE_OBJECT: break; case MONO_TYPE_GENERICINST: if (!MONO_TYPE_IS_REFERENCE (ret_type)) info->ret_box_class = mono_class_from_mono_type_internal (ret_type); break; case MONO_TYPE_VALUETYPE: info->ret_box_class = mono_class_from_mono_type_internal (ret_type); break; default: g_assert_not_reached (); break; } if (info->use_interp) { ret = info; info = NULL; goto exit; } if (!info->dyn_call_info) { /* * Can't use the normal llvmonly code for string ctors since the gsharedvt out wrapper passes * an extra arg, which the string ctor methods don't have, which causes signature mismatches * on wasm. Instead, call string ctors normally using a direct runtime invoke wrapper * which is AOTed for each ctor. 
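	 * The mono_llvm_only && !method->string_ctor check below implements this:
	 * string ctors skip the gsharedvt invoke machinery entirely.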
*/
		if (mono_llvm_only && !method->string_ctor) {
#ifndef MONO_ARCH_GSHAREDVT_SUPPORTED
			g_assert_not_reached ();
#endif
			info->gsharedvt_invoke = TRUE;
			if (!callee_gsharedvt) {
				/* Invoke a gsharedvt out wrapper instead */
				MonoMethod *wrapper = mini_get_gsharedvt_out_sig_wrapper (sig);
				MonoMethodSignature *wrapper_sig = mini_get_gsharedvt_out_sig_wrapper_signature (sig->hasthis, sig->ret->type != MONO_TYPE_VOID, sig->param_count);

				info->wrapper_arg = g_malloc0 (2 * sizeof (gpointer));
				info->wrapper_arg [0] = mini_llvmonly_add_method_wrappers (method, info->compiled_method, FALSE, FALSE, &(info->wrapper_arg [1]));

				/* Pass has_rgctx == TRUE since the wrapper has an extra arg */
				invoke = mono_marshal_get_runtime_invoke_for_sig (wrapper_sig);
				g_free (wrapper_sig);

				info->compiled_method = mono_jit_compile_method (wrapper, error);
				if (!is_ok (error))
					goto exit;
			} else {
				/* Gsharedvt methods can be invoked the same way */
				/* The out wrapper has the same signature as the compiled gsharedvt method */
				MonoMethodSignature *wrapper_sig = mini_get_gsharedvt_out_sig_wrapper_signature (sig->hasthis, sig->ret->type != MONO_TYPE_VOID, sig->param_count);

				info->wrapper_arg = (gpointer*)(mono_method_needs_static_rgctx_invoke (method, TRUE) ? mini_method_get_rgctx (method) : NULL);

				invoke = mono_marshal_get_runtime_invoke_for_sig (wrapper_sig);
				g_free (wrapper_sig);
			}
		}
		info->runtime_invoke = mono_jit_compile_method (invoke, error);
		if (!is_ok (error))
			goto exit;
	}

	ret = info;
	info = NULL;
exit:
	g_free (info);
	return ret;
}

static GENERATE_GET_CLASS_WITH_CACHE (nullbyrefreturn_ex, "Mono", "NullByRefReturnException");

static MonoObject*
mono_llvmonly_runtime_invoke (MonoMethod *method, RuntimeInvokeInfo *info, void *obj, void **params, MonoObject **exc, MonoError *error)
{
	MonoMethodSignature *sig = info->sig;
	MonoObject *(*runtime_invoke) (MonoObject *this_obj, void **params, MonoObject **exc, void* compiled_method);
	int32_t retval_size = MONO_SIZEOF_DYN_CALL_RET_BUF;
	gpointer retval = NULL;
	int i, pindex;

	error_init (error);

	g_assert (info->gsharedvt_invoke);

	/*
	 * Instead of invoking the method directly, we invoke a gsharedvt out wrapper.
	 * The advantage of this is that the gsharedvt out wrappers have a reduced set of
	 * signatures, so we only have to generate runtime invoke wrappers for these
	 * signatures.
	 * This code also handles invocation of gsharedvt methods directly; no
	 * out wrappers are used in that case.
	 */
	// allocate param_refs = param_count and args = param_count + hasthis + 2.
	int const param_count = sig->param_count;
	gpointer* const param_refs = g_newa (gpointer, param_count * 2 + sig->hasthis + 2);
	gpointer* const args = param_refs + param_count;
	pindex = 0;
	/*
	 * The runtime invoke wrappers expect pointers to primitive types, so we have to
	 * use indirections.
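	 *
	 * Illustrative example (hypothetical signature): for an instance method
	 *   int Foo (object o, int x)
	 * the args array built below ends up as
	 *   { &obj, &retval, &params [0], &params [1], &info->wrapper_arg }
	 * where the reference-typed params [0] has first been redirected through
	 * param_refs [0], so args holds a pointer to a pointer for it.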
*/ if (sig->hasthis) args [pindex ++] = &obj; if (sig->ret->type != MONO_TYPE_VOID) { if (info->ret_box_class && !m_type_is_byref (sig->ret) && (sig->ret->type == MONO_TYPE_VALUETYPE || (sig->ret->type == MONO_TYPE_GENERICINST && !MONO_TYPE_IS_REFERENCE (sig->ret)))) { // if the return type is a struct, allocate enough stack space to hold it MonoClass *ret_klass = mono_class_from_mono_type_internal (sig->ret); g_assert (!mono_class_has_failure (ret_klass)); int32_t inst_size = mono_class_instance_size (ret_klass); if (inst_size > MONO_SIZEOF_DYN_CALL_RET_BUF) { retval_size = inst_size; } } } retval = g_alloca (retval_size); if (sig->ret->type != MONO_TYPE_VOID) { args [pindex ++] = &retval; } for (i = 0; i < sig->param_count; ++i) { MonoType *t = sig->params [i]; if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) { MonoClass *klass = mono_class_from_mono_type_internal (t); guint8 *nullable_buf; int size; size = mono_class_value_size (klass, NULL); nullable_buf = g_alloca (size); g_assert (nullable_buf); /* The argument pointed to by params [i] is either a boxed vtype or null */ mono_nullable_init (nullable_buf, (MonoObject*)params [i], klass); params [i] = nullable_buf; } if (!m_type_is_byref (t) && (MONO_TYPE_IS_REFERENCE (t) || t->type == MONO_TYPE_PTR)) { param_refs [i] = params [i]; params [i] = &(param_refs [i]); } args [pindex ++] = &params [i]; } /* The gsharedvt out wrapper has an extra argument which contains the method to call */ args [pindex ++] = &info->wrapper_arg; runtime_invoke = (MonoObject *(*)(MonoObject *, void **, MonoObject **, void *))info->runtime_invoke; runtime_invoke (NULL, args, exc, info->compiled_method); if (exc && *exc) return NULL; if (m_type_is_byref (sig->ret)) { if (*(gpointer*)retval == NULL) { MonoClass *klass = mono_class_get_nullbyrefreturn_ex_class (); MonoObject *ex = mono_object_new_checked (klass, error); mono_error_assert_ok (error); mono_error_set_exception_instance (error, (MonoException*)ex); return NULL; } } if (sig->ret->type != MONO_TYPE_VOID) { if (info->ret_box_class) { if (m_type_is_byref (sig->ret)) { return mono_value_box_checked (info->ret_box_class, *(gpointer*)retval, error); } else { MonoObject *ret = mono_value_box_checked (info->ret_box_class, retval, error); return ret; } } else { if (m_type_is_byref (sig->ret)) return **(MonoObject***)retval; else return *(MonoObject**)retval; } } else { return NULL; } } /** * mono_jit_runtime_invoke: * \param method: the method to invoke * \param obj: this pointer * \param params: array of parameter values. * \param exc: Set to the exception raised in the managed method. * \param error: error or caught exception object * If \p exc is NULL, \p error is thrown instead. * If coop is enabled, \p exc argument is ignored - * all exceptions are caught and propagated through \p error */ static MonoObject* mono_jit_runtime_invoke (MonoMethod *method, void *obj, void **params, MonoObject **exc, MonoError *error) { MonoMethod *callee; MonoObject *(*runtime_invoke) (MonoObject *this_obj, void **params, MonoObject **exc, void* compiled_method); RuntimeInvokeInfo *info, *info2; MonoJitInfo *ji = NULL; gboolean callee_gsharedvt = FALSE; MonoJitMemoryManager *jit_mm; if (mono_ee_features.force_use_interpreter) { // FIXME: On wasm, if the callee throws an exception, this will return NULL, and the // exception will be stored inside the interpreter, it won't show up in exc/error. 
return mini_get_interp_callbacks ()->runtime_invoke (method, obj, params, exc, error); } error_init (error); if (exc) *exc = NULL; if (obj == NULL && !(method->flags & METHOD_ATTRIBUTE_STATIC) && !method->string_ctor && (method->wrapper_type == 0)) { g_warning ("Ignoring invocation of an instance method on a NULL instance.\n"); return NULL; } jit_mm = jit_mm_for_method (method); info = (RuntimeInvokeInfo *)mono_conc_hashtable_lookup (jit_mm->runtime_invoke_hash, method); if (!info) { gpointer compiled_method; callee = method; if (m_class_get_rank (method->klass) && (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && (method->iflags & METHOD_IMPL_ATTRIBUTE_NATIVE)) { /* * Array Get/Set/Address methods. The JIT implements them using inline code * inside the runtime invoke wrappers, so no need to compile them. */ if (mono_aot_only) { /* * Call a wrapper, since the runtime invoke wrapper was not generated. */ MonoMethod *wrapper; wrapper = mono_marshal_get_array_accessor_wrapper (method); mono_marshal_get_runtime_invoke (wrapper, FALSE); callee = wrapper; } else { callee = NULL; } } gboolean use_interp = FALSE; if (mono_aot_mode == MONO_AOT_MODE_LLVMONLY_INTERP) /* The runtime invoke wrappers contain clauses so they are not AOTed */ use_interp = TRUE; if (callee) { compiled_method = mono_jit_compile_method_jit_only (callee, error); if (!compiled_method) { g_assert (!is_ok (error)); if (mono_use_interpreter) use_interp = TRUE; else return NULL; } else { if (mono_llvm_only) { ji = mini_jit_info_table_find (mono_get_addr_from_ftnptr (compiled_method)); callee_gsharedvt = mini_jit_info_is_gsharedvt (ji); if (callee_gsharedvt) callee_gsharedvt = mini_is_gsharedvt_variable_signature (mono_method_signature_internal (jinfo_get_method (ji))); } if (!callee_gsharedvt) compiled_method = mini_add_method_trampoline (callee, compiled_method, mono_method_needs_static_rgctx_invoke (callee, TRUE), FALSE); } } else { compiled_method = NULL; } info = create_runtime_invoke_info (method, compiled_method, callee_gsharedvt, use_interp, error); if (!is_ok (error)) return NULL; jit_mm_lock (jit_mm); info2 = (RuntimeInvokeInfo *)mono_conc_hashtable_insert (jit_mm->runtime_invoke_hash, method, info); jit_mm_unlock (jit_mm); if (info2) { g_free (info); info = info2; } } /* * We need this here because mono_marshal_get_runtime_invoke can place * the helper method in System.Object and not the target class. 
*/ if (!mono_runtime_class_init_full (info->vtable, error)) { if (exc) *exc = (MonoObject*) mono_error_convert_to_exception (error); return NULL; } /* If coop is enabled, and the caller didn't ask for the exception to be caught separately, we always catch the exception and propagate it through the MonoError */ gboolean catchExcInMonoError = (exc == NULL) && mono_threads_are_safepoints_enabled (); MonoObject *invoke_exc = NULL; if (catchExcInMonoError) exc = &invoke_exc; /* The wrappers expect this to be initialized to NULL */ if (exc) *exc = NULL; #ifdef MONO_ARCH_DYN_CALL_SUPPORTED static RuntimeInvokeDynamicFunction dyn_runtime_invoke = NULL; if (info->dyn_call_info) { if (!dyn_runtime_invoke) { MonoMethod *dynamic_invoke = mono_marshal_get_runtime_invoke_dynamic (); RuntimeInvokeDynamicFunction invoke_func = (RuntimeInvokeDynamicFunction)mono_jit_compile_method_jit_only (dynamic_invoke, error); mono_memory_barrier (); dyn_runtime_invoke = invoke_func; if (!dyn_runtime_invoke && mono_use_interpreter) { info->use_interp = TRUE; info->dyn_call_info = NULL; } else if (!is_ok (error)) { return NULL; } } } if (info->dyn_call_info) { MonoMethodSignature *sig = mono_method_signature_internal (method); gpointer *args; int i, pindex, buf_size; guint8 *buf; int32_t retval_size = MONO_SIZEOF_DYN_CALL_RET_BUF; guint8 *retval = NULL; /* if the return type is a struct and it's too big, allocate more space for it */ if (info->ret_box_class && !m_type_is_byref (sig->ret) && (sig->ret->type == MONO_TYPE_VALUETYPE || (sig->ret->type == MONO_TYPE_GENERICINST && !MONO_TYPE_IS_REFERENCE (sig->ret)))) { MonoClass *ret_klass = mono_class_from_mono_type_internal (sig->ret); g_assert (!mono_class_has_failure (ret_klass)); int32_t inst_size = mono_class_instance_size (ret_klass); if (inst_size > MONO_SIZEOF_DYN_CALL_RET_BUF) { retval_size = inst_size; } } retval = g_alloca (retval_size); /* Convert the arguments to the format expected by start_dyn_call () */ args = (void **)g_alloca ((sig->param_count + sig->hasthis) * sizeof (gpointer)); pindex = 0; if (sig->hasthis) args [pindex ++] = &obj; for (i = 0; i < sig->param_count; ++i) { MonoType *t = sig->params [i]; if (m_type_is_byref (t)) { args [pindex ++] = &params [i]; } else if (MONO_TYPE_IS_REFERENCE (t) || t->type == MONO_TYPE_PTR) { args [pindex ++] = &params [i]; } else { args [pindex ++] = params [i]; } } //printf ("M: %s\n", mono_method_full_name (method, TRUE)); buf_size = mono_arch_dyn_call_get_buf_size (info->dyn_call_info); buf = g_alloca (buf_size); memset (buf, 0, buf_size); g_assert (buf); mono_arch_start_dyn_call (info->dyn_call_info, (gpointer**)args, retval, buf); dyn_runtime_invoke (buf, exc, info->compiled_method); mono_arch_finish_dyn_call (info->dyn_call_info, buf); if (catchExcInMonoError && *exc != NULL) { mono_error_set_exception_instance (error, (MonoException*) *exc); return NULL; } if (m_type_is_byref (sig->ret)) { if (*(gpointer*)retval == NULL) { MonoClass *klass = mono_class_get_nullbyrefreturn_ex_class (); MonoObject *ex = mono_object_new_checked (klass, error); mono_error_assert_ok (error); mono_error_set_exception_instance (error, (MonoException*)ex); return NULL; } } if (info->ret_box_class) { if (m_type_is_byref (sig->ret)) { return mono_value_box_checked (info->ret_box_class, *(gpointer*)retval, error); } else { MonoObject *boxed_ret = mono_value_box_checked (info->ret_box_class, retval, error); return boxed_ret; } } else { if (m_type_is_byref (sig->ret)) return **(MonoObject***)retval; else return *(MonoObject**)retval; } } 
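
	/*
	 * If we reach the code after the #endif below, the OP_DYN_CALL fast path
	 * was not taken; the remaining fallbacks, in order, are the interpreter,
	 * the llvmonly gsharedvt invoke, and the JITted runtime-invoke wrapper.
	 */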
#endif MonoObject *result; if (info->use_interp) { result = mini_get_interp_callbacks ()->runtime_invoke (method, obj, params, exc, error); return_val_if_nok (error, NULL); } else if (mono_llvm_only && !method->string_ctor) { result = mono_llvmonly_runtime_invoke (method, info, obj, params, exc, error); if (!is_ok (error)) return NULL; } else { runtime_invoke = (MonoObject *(*)(MonoObject *, void **, MonoObject **, void *))info->runtime_invoke; result = runtime_invoke ((MonoObject *)obj, params, exc, info->compiled_method); } if (catchExcInMonoError && *exc != NULL) { ((MonoException *)(*exc))->caught_in_unmanaged = TRUE; mono_error_set_exception_instance (error, (MonoException*) *exc); } return result; } MONO_SIG_HANDLER_FUNC (, mono_sigfpe_signal_handler) { MonoException *exc = NULL; MonoJitInfo *ji; MonoContext mctx; MONO_SIG_HANDLER_INFO_TYPE *info = MONO_SIG_HANDLER_GET_INFO (); MONO_SIG_HANDLER_GET_CONTEXT; ji = mono_jit_info_table_find_internal (mono_arch_ip_from_context (ctx), TRUE, TRUE); MONO_ENTER_GC_UNSAFE_UNBALANCED; #if defined(MONO_ARCH_HAVE_IS_INT_OVERFLOW) if (mono_arch_is_int_overflow (ctx, info)) /* * The spec says this throws ArithmeticException, but MS throws the derived * OverflowException. */ exc = mono_get_exception_overflow (); else exc = mono_get_exception_divide_by_zero (); #else exc = mono_get_exception_divide_by_zero (); #endif if (!ji) { if (!mono_do_crash_chaining && mono_chain_signal (MONO_SIG_HANDLER_PARAMS)) goto exit; mono_sigctx_to_monoctx (ctx, &mctx); mono_handle_native_crash (mono_get_signame (SIGFPE), &mctx, info); if (mono_do_crash_chaining) { mono_chain_signal (MONO_SIG_HANDLER_PARAMS); goto exit; } } mono_arch_handle_exception (ctx, exc); exit: MONO_EXIT_GC_UNSAFE_UNBALANCED; } MONO_SIG_HANDLER_FUNC (, mono_crashing_signal_handler) { MonoContext mctx; MONO_SIG_HANDLER_INFO_TYPE *info = MONO_SIG_HANDLER_GET_INFO (); MONO_SIG_HANDLER_GET_CONTEXT; if (mono_runtime_get_no_exec ()) exit (1); mono_sigctx_to_monoctx (ctx, &mctx); #if defined(HAVE_SIG_INFO) && !defined(HOST_WIN32) // info is a siginfo_t mono_handle_native_crash (mono_get_signame (info->si_signo), &mctx, info); #else mono_handle_native_crash (mono_get_signame (SIGTERM), &mctx, info); #endif if (mono_do_crash_chaining) { mono_chain_signal (MONO_SIG_HANDLER_PARAMS); return; } } #if defined(MONO_ARCH_USE_SIGACTION) || defined(HOST_WIN32) #define HAVE_SIG_INFO #define MONO_SIG_HANDLER_DEBUG 1 // "with_fault_addr" but could be extended in future, so "debug" #ifdef MONO_SIG_HANDLER_DEBUG // Same as MONO_SIG_HANDLER_FUNC but debug_fault_addr is added to params, and no_optimize. // The Krait workaround is not needed here, due to this not actually being the signal handler, // so MONO_SIGNAL_HANDLER_FUNC is combined into it. #define MONO_SIG_HANDLER_FUNC_DEBUG(access, ftn) access MONO_NO_OPTIMIZATION void ftn \ (int _dummy, MONO_SIG_HANDLER_INFO_TYPE *_info, void *context, void * volatile debug_fault_addr G_GNUC_UNUSED) #define MONO_SIG_HANDLER_PARAMS_DEBUG MONO_SIG_HANDLER_PARAMS, debug_fault_addr #endif #endif gboolean mono_is_addr_implicit_null_check (void *addr) { /* implicit null checks are only expected to work on the first page. larger * offsets are expected to have an explicit null check */ return addr <= GUINT_TO_POINTER (mono_target_pagesize ()); } // This function is separate from mono_sigsegv_signal_handler // so debug_fault_addr can be seen in debugger stacks. 
#ifdef MONO_SIG_HANDLER_DEBUG
MONO_NEVER_INLINE
MONO_SIG_HANDLER_FUNC_DEBUG (static, mono_sigsegv_signal_handler_debug)
#else
MONO_SIG_HANDLER_FUNC (, mono_sigsegv_signal_handler)
#endif
{
	MonoJitInfo *ji = NULL;
	MonoDomain *domain = mono_domain_get ();
	gpointer fault_addr = NULL;
	MonoContext mctx;

#if defined(HAVE_SIG_INFO) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
	MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
#endif
#ifdef HAVE_SIG_INFO
	MONO_SIG_HANDLER_INFO_TYPE *info = MONO_SIG_HANDLER_GET_INFO ();
#else
	void *info = NULL;
#endif
	MONO_SIG_HANDLER_GET_CONTEXT;

	mono_sigctx_to_monoctx (ctx, &mctx);

#if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED) && defined(HAVE_SIG_INFO)
	if (mono_arch_is_single_step_event (info, ctx)) {
		mono_component_debugger ()->single_step_event (ctx);
		return;
	} else if (mono_arch_is_breakpoint_event (info, ctx)) {
		mono_component_debugger ()->breakpoint_hit (ctx);
		return;
	}
#endif

#if defined(HAVE_SIG_INFO)
#if !defined(HOST_WIN32)
	fault_addr = info->si_addr;
	if (mono_aot_is_pagefault (info->si_addr)) {
		mono_aot_handle_pagefault (info->si_addr);
		return;
	}
	int signo = info->si_signo;
#else
	int signo = SIGSEGV;
#endif

	/* The thread might not be registered with the runtime */
	if (!mono_domain_get () || !jit_tls) {
		if (!mono_do_crash_chaining && mono_chain_signal (MONO_SIG_HANDLER_PARAMS))
			return;
		mono_handle_native_crash (mono_get_signame (signo), &mctx, info);
		if (mono_do_crash_chaining) {
			mono_chain_signal (MONO_SIG_HANDLER_PARAMS);
			return;
		}
	}
#endif

	if (domain) {
		gpointer ip = MINI_FTNPTR_TO_ADDR (mono_arch_ip_from_context (ctx));
		ji = mono_jit_info_table_find_internal (ip, TRUE, TRUE);
	}

#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
	if (mono_handle_soft_stack_ovf (jit_tls, ji, ctx, info, (guint8*)info->si_addr))
		return;

	/* info->si_addr seems to be NULL on some kernels when handling stack overflows */
	fault_addr = info->si_addr;
	if (fault_addr == NULL) {
		fault_addr = MONO_CONTEXT_GET_SP (&mctx);
	}

	if (jit_tls && jit_tls->stack_size &&
		ABS ((guint8*)fault_addr - ((guint8*)jit_tls->end_of_stack - jit_tls->stack_size)) < 8192 * sizeof (gpointer)) {
		/*
		 * The hard-guard page has been hit: there is not much we can do anymore
		 * Print a hopefully clear message and abort.
		 */
		mono_handle_hard_stack_ovf (jit_tls, ji, &mctx, (guint8*)info->si_addr);
		g_assert_not_reached ();
	} else {
		/* The original handler might not like that it is executed on an altstack...
*/ if (!ji && mono_chain_signal (MONO_SIG_HANDLER_PARAMS)) return; #ifdef TARGET_AMD64 /* exceptions-amd64.c handles the check itself */ mono_arch_handle_altstack_exception (ctx, info, info->si_addr, FALSE); #else if (mono_is_addr_implicit_null_check (info->si_addr)) { mono_arch_handle_altstack_exception (ctx, info, info->si_addr, FALSE); } else { // FIXME: This shouldn't run on the altstack mono_handle_native_crash (mono_get_signame (SIGSEGV), &mctx, info); } #endif } #else if (!ji) { if (!mono_do_crash_chaining && mono_chain_signal (MONO_SIG_HANDLER_PARAMS)) return; mono_handle_native_crash (mono_get_signame (SIGSEGV), &mctx, (MONO_SIG_HANDLER_INFO_TYPE*)info); if (mono_do_crash_chaining) { mono_chain_signal (MONO_SIG_HANDLER_PARAMS); return; } } if (mono_is_addr_implicit_null_check (fault_addr)) { mono_arch_handle_exception (ctx, NULL); } else { mono_handle_native_crash (mono_get_signame (SIGSEGV), &mctx, (MONO_SIG_HANDLER_INFO_TYPE*)info); if (mono_do_crash_chaining) { mono_chain_signal (MONO_SIG_HANDLER_PARAMS); return; } } #endif } #ifdef MONO_SIG_HANDLER_DEBUG // This function is separate from mono_sigsegv_signal_handler_debug // so debug_fault_addr can be seen in debugger stacks. MONO_SIG_HANDLER_FUNC (, mono_sigsegv_signal_handler) { #ifdef HOST_WIN32 gpointer const debug_fault_addr = (gpointer)MONO_SIG_HANDLER_GET_INFO () ->ep->ExceptionRecord->ExceptionInformation [1]; #elif defined (HAVE_SIG_INFO) gpointer const debug_fault_addr = MONO_SIG_HANDLER_GET_INFO ()->si_addr; #else #error No extra parameter is passed, not even 0, to avoid any confusion. #endif mono_sigsegv_signal_handler_debug (MONO_SIG_HANDLER_PARAMS_DEBUG); } #endif // MONO_SIG_HANDLER_DEBUG MONO_SIG_HANDLER_FUNC (, mono_sigint_signal_handler) { MonoException *exc; MONO_SIG_HANDLER_GET_CONTEXT; MONO_ENTER_GC_UNSAFE_UNBALANCED; exc = mono_get_exception_execution_engine ("Interrupted (SIGINT)."); mono_arch_handle_exception (ctx, exc); MONO_EXIT_GC_UNSAFE_UNBALANCED; } static G_GNUC_UNUSED void no_imt_trampoline (void) { g_assert_not_reached (); } static G_GNUC_UNUSED void no_vcall_trampoline (void) { g_assert_not_reached (); } static gpointer *vtable_trampolines; static int vtable_trampolines_size; gpointer mini_get_vtable_trampoline (MonoVTable *vt, int slot_index) { int index = slot_index + MONO_IMT_SIZE; if (mono_llvm_only) return mini_llvmonly_get_vtable_trampoline (vt, slot_index, index); g_assert (slot_index >= - MONO_IMT_SIZE); if (!vtable_trampolines || slot_index + MONO_IMT_SIZE >= vtable_trampolines_size) { mono_jit_lock (); if (!vtable_trampolines || index >= vtable_trampolines_size) { int new_size; gpointer new_table; new_size = vtable_trampolines_size ? 
vtable_trampolines_size * 2 : 128; while (new_size <= index) new_size *= 2; new_table = g_new0 (gpointer, new_size); if (vtable_trampolines) memcpy (new_table, vtable_trampolines, vtable_trampolines_size * sizeof (gpointer)); g_free (vtable_trampolines); mono_memory_barrier (); vtable_trampolines = (void **)new_table; vtable_trampolines_size = new_size; } mono_jit_unlock (); } if (!vtable_trampolines [index]) vtable_trampolines [index] = mono_create_specific_trampoline (get_default_mem_manager (), GUINT_TO_POINTER (slot_index), MONO_TRAMPOLINE_VCALL, NULL); return vtable_trampolines [index]; } static gpointer mini_get_imt_trampoline (MonoVTable *vt, int slot_index) { return mini_get_vtable_trampoline (vt, slot_index - MONO_IMT_SIZE); } static gboolean mini_imt_entry_inited (MonoVTable *vt, int imt_slot_index) { if (mono_llvm_only) return FALSE; gpointer *imt = (gpointer*)vt; imt -= MONO_IMT_SIZE; return (imt [imt_slot_index] != mini_get_imt_trampoline (vt, imt_slot_index)); } static gpointer create_delegate_method_ptr (MonoMethod *method, MonoError *error) { gpointer func; if (method_is_dynamic (method)) { /* Creating a trampoline would leak memory */ func = mono_compile_method_checked (method, error); return_val_if_nok (error, NULL); } else { gpointer trampoline = mono_create_jump_trampoline (method, TRUE, error); return_val_if_nok (error, NULL); func = mono_create_ftnptr (trampoline); } return func; } static void mini_init_delegate (MonoDelegateHandle delegate, MonoObjectHandle target, gpointer addr, MonoMethod *method, MonoError *error) { MonoDelegate *del = MONO_HANDLE_RAW (delegate); if (!method && !addr) { // Multicast delegate init if (!mono_llvm_only) { MONO_HANDLE_SETVAL (delegate, invoke_impl, gpointer, mono_create_delegate_trampoline (mono_handle_class (delegate))); } else { mini_llvmonly_init_delegate (del, NULL); } return; } if (!method) { MonoJitInfo *ji; gpointer lookup_addr = MINI_FTNPTR_TO_ADDR (addr); g_assert (addr); ji = mono_jit_info_table_find_internal (mono_get_addr_from_ftnptr (lookup_addr), TRUE, TRUE); if (ji) { if (ji->is_trampoline) { /* Could be an unbox trampoline etc. */ method = ji->d.tramp_info->method; } else { method = mono_jit_info_get_method (ji); g_assert (!mono_class_is_gtd (method->klass)); } } } if (method) MONO_HANDLE_SETVAL (delegate, method, MonoMethod*, method); if (addr) MONO_HANDLE_SETVAL (delegate, method_ptr, gpointer, addr); MONO_HANDLE_SET (delegate, target, target); MONO_HANDLE_SETVAL (delegate, invoke_impl, gpointer, mono_create_delegate_trampoline (mono_handle_class (delegate))); MonoDelegateTrampInfo *info = NULL; if (mono_use_interpreter) { mini_get_interp_callbacks ()->init_delegate (del, &info, error); return_if_nok (error); } if (mono_llvm_only) { g_assert (del->method); mini_llvmonly_init_delegate (del, info); //del->method_ptr = mini_llvmonly_load_method_delegate (del->method, FALSE, FALSE, &del->extra_arg, error); } else if (!del->method_ptr) { del->method_ptr = create_delegate_method_ptr (del->method, error); return_if_nok (error); } } char* mono_get_delegate_virtual_invoke_impl_name (gboolean load_imt_reg, int offset) { int abs_offset; abs_offset = offset; if (abs_offset < 0) abs_offset = - abs_offset; return g_strdup_printf ("delegate_virtual_invoke%s_%s%d", load_imt_reg ? "_imt" : "", offset < 0 ? 
"m_" : "", abs_offset / TARGET_SIZEOF_VOID_P); } gpointer mono_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method) { gboolean is_virtual_generic, is_interface, load_imt_reg; int offset, idx; static guint8 **cache = NULL; static int cache_size = 0; if (!method) return NULL; if (MONO_TYPE_ISSTRUCT (sig->ret)) return NULL; is_virtual_generic = method->is_inflated && mono_method_get_declaring_generic_method (method)->is_generic; is_interface = mono_class_is_interface (method->klass); load_imt_reg = is_virtual_generic || is_interface; if (is_interface) offset = ((gint32)mono_method_get_imt_slot (method) - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P; else offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + ((mono_method_get_vtable_index (method)) * (TARGET_SIZEOF_VOID_P)); idx = (offset / TARGET_SIZEOF_VOID_P + MONO_IMT_SIZE) * 2 + (load_imt_reg ? 1 : 0); g_assert (idx >= 0); /* Resize the cache to idx + 1 */ if (cache_size < idx + 1) { mono_jit_lock (); if (cache_size < idx + 1) { guint8 **new_cache; int new_cache_size = idx + 1; new_cache = g_new0 (guint8*, new_cache_size); if (cache) memcpy (new_cache, cache, cache_size * sizeof (guint8*)); g_free (cache); mono_memory_barrier (); cache = new_cache; cache_size = new_cache_size; } mono_jit_unlock (); } if (cache [idx]) return cache [idx]; /* FIXME Support more cases */ if (mono_ee_features.use_aot_trampolines) { cache [idx] = (guint8 *)mono_aot_get_trampoline (mono_get_delegate_virtual_invoke_impl_name (load_imt_reg, offset)); g_assert (cache [idx]); } else { cache [idx] = (guint8 *)mono_arch_get_delegate_virtual_invoke_impl (sig, method, offset, load_imt_reg); } return cache [idx]; } /** * mini_parse_debug_option: * @option: The option to parse. * * Parses debug options for the mono runtime. The options are the same as for * the MONO_DEBUG environment variable. * */ gboolean mini_parse_debug_option (const char *option) { // Empty string is ok as consequence of appending ",foo" // without first checking for empty. 
if (*option == 0) return TRUE; if (!strcmp (option, "handle-sigint")) mini_debug_options.handle_sigint = TRUE; else if (!strcmp (option, "keep-delegates")) mini_debug_options.keep_delegates = TRUE; else if (!strcmp (option, "reverse-pinvoke-exceptions")) mini_debug_options.reverse_pinvoke_exceptions = TRUE; else if (!strcmp (option, "collect-pagefault-stats")) mini_debug_options.collect_pagefault_stats = TRUE; else if (!strcmp (option, "break-on-unverified")) mini_debug_options.break_on_unverified = TRUE; else if (!strcmp (option, "no-gdb-backtrace")) mini_debug_options.no_gdb_backtrace = TRUE; else if (!strcmp (option, "suspend-on-native-crash") || !strcmp (option, "suspend-on-sigsegv")) mini_debug_options.suspend_on_native_crash = TRUE; else if (!strcmp (option, "suspend-on-exception")) mini_debug_options.suspend_on_exception = TRUE; else if (!strcmp (option, "suspend-on-unhandled")) mini_debug_options.suspend_on_unhandled = TRUE; else if (!strcmp (option, "dont-free-domains")) mono_dont_free_domains = TRUE; else if (!strcmp (option, "dyn-runtime-invoke")) mini_debug_options.dyn_runtime_invoke = TRUE; else if (!strcmp (option, "gdb")) fprintf (stderr, "MONO_DEBUG=gdb is deprecated."); else if (!strcmp (option, "lldb")) mini_debug_options.lldb = TRUE; else if (!strcmp (option, "llvm-disable-inlining")) mini_debug_options.llvm_disable_inlining = TRUE; else if (!strcmp (option, "llvm-disable-implicit-null-checks")) mini_debug_options.llvm_disable_implicit_null_checks = TRUE; else if (!strcmp (option, "explicit-null-checks")) mini_debug_options.explicit_null_checks = TRUE; else if (!strcmp (option, "gen-seq-points")) mini_debug_options.gen_sdb_seq_points = TRUE; else if (!strcmp (option, "gen-compact-seq-points")) fprintf (stderr, "Mono Warning: option gen-compact-seq-points is deprecated.\n"); else if (!strcmp (option, "no-compact-seq-points")) mini_debug_options.no_seq_points_compact_data = TRUE; else if (!strcmp (option, "single-imm-size")) mini_debug_options.single_imm_size = TRUE; else if (!strcmp (option, "init-stacks")) mini_debug_options.init_stacks = TRUE; else if (!strcmp (option, "casts")) mini_debug_options.better_cast_details = TRUE; else if (!strcmp (option, "soft-breakpoints")) mini_debug_options.soft_breakpoints = TRUE; else if (!strcmp (option, "check-pinvoke-callconv")) mini_debug_options.check_pinvoke_callconv = TRUE; else if (!strcmp (option, "use-fallback-tls")) mini_debug_options.use_fallback_tls = TRUE; else if (!strcmp (option, "debug-domain-unload")) g_error ("MONO_DEBUG option debug-domain-unload is deprecated."); else if (!strcmp (option, "partial-sharing")) mono_set_partial_sharing_supported (TRUE); else if (!strcmp (option, "align-small-structs")) mono_align_small_structs = TRUE; else if (!strcmp (option, "native-debugger-break")) mini_debug_options.native_debugger_break = TRUE; else if (!strcmp (option, "disable_omit_fp")) mini_debug_options.disable_omit_fp = TRUE; // This is an internal testing feature. // Every tail. encountered is required to be optimized. // It is asserted. 
else if (!strcmp (option, "test-tailcall-require")) mini_debug_options.test_tailcall_require = TRUE; else if (!strcmp (option, "verbose-gdb")) mini_debug_options.verbose_gdb = TRUE; else if (!strcmp (option, "clr-memory-model")) // FIXME Kill this debug flag mini_debug_options.weak_memory_model = FALSE; else if (!strcmp (option, "weak-memory-model")) mini_debug_options.weak_memory_model = TRUE; else if (!strcmp (option, "top-runtime-invoke-unhandled")) mini_debug_options.top_runtime_invoke_unhandled = TRUE; else if (!strncmp (option, "thread-dump-dir=", 16)) mono_set_thread_dump_dir (g_strdup (option + 16)); else if (!strncmp (option, "aot-skip=", 9)) { mini_debug_options.aot_skip_set = TRUE; mini_debug_options.aot_skip = atoi (option + 9); } else return FALSE; return TRUE; } static void mini_parse_debug_options (void) { char *options = g_getenv ("MONO_DEBUG"); gchar **args, **ptr; if (!options) return; args = g_strsplit (options, ",", -1); g_free (options); for (ptr = args; ptr && *ptr; ptr++) { const char *arg = *ptr; if (!mini_parse_debug_option (arg)) { fprintf (stderr, "Invalid option for the MONO_DEBUG env variable: %s\n", arg); // test-tailcall-require is also accepted but not documented. // empty string is also accepted and ignored as a consequence // of appending ",foo" without checking for empty. fprintf (stderr, "Available options: 'handle-sigint', 'keep-delegates', 'reverse-pinvoke-exceptions', 'collect-pagefault-stats', 'break-on-unverified', 'no-gdb-backtrace', 'suspend-on-native-crash', 'suspend-on-sigsegv', 'suspend-on-exception', 'suspend-on-unhandled', 'dont-free-domains', 'dyn-runtime-invoke', 'gdb', 'explicit-null-checks', 'gen-seq-points', 'no-compact-seq-points', 'single-imm-size', 'init-stacks', 'casts', 'soft-breakpoints', 'check-pinvoke-callconv', 'use-fallback-tls', 'debug-domain-unload', 'partial-sharing', 'align-small-structs', 'native-debugger-break', 'thread-dump-dir=DIR', 'verbose-gdb', 'llvm-disable-inlining', 'llvm-disable-implicit-null-checks', 'weak-memory-model'.\n"); exit (1); } } g_strfreev (args); } MonoDebugOptions * mini_get_debug_options (void) { return &mini_debug_options; } static gpointer mini_create_ftnptr (gpointer addr) { #if defined(PPC_USES_FUNCTION_DESCRIPTOR) gpointer* desc = NULL; static GHashTable *ftnptrs_hash; if (!ftnptrs_hash) { GHashTable *hash = g_hash_table_new (mono_aligned_addr_hash, NULL); mono_memory_barrier (); ftnptrs_hash = hash; } // FIXME: MonoJitMemoryManager *jit_mm = get_default_jit_mm (); mono_jit_lock (); desc = (gpointer*)g_hash_table_lookup (ftnptrs_hash, addr); mono_jit_unlock (); if (desc) return desc; #if defined(__mono_ppc64__) desc = mono_mem_manager_alloc0 (jit_mm->mem_manager, 3 * sizeof (gpointer)); desc [0] = addr; desc [1] = NULL; desc [2] = NULL; #endif mono_jit_lock (); g_hash_table_insert (ftnptrs_hash, addr, desc); mono_jit_unlock (); return desc; #else return addr; #endif } static gpointer mini_get_addr_from_ftnptr (gpointer descr) { #if defined(PPC_USES_FUNCTION_DESCRIPTOR) return *(gpointer*)descr; #else return descr; #endif } static void register_counters (void) { mono_counters_register ("Compiled methods", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_compiled); mono_counters_register ("Methods from AOT", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_aot); mono_counters_register ("Methods from AOT+LLVM", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_aot_llvm); mono_counters_register ("Methods JITted using mono JIT",
MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_without_llvm); mono_counters_register ("Methods JITted using LLVM", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_with_llvm); mono_counters_register ("Methods using the interpreter", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_with_interp); } static void runtime_invoke_info_free (gpointer value); static gint class_method_pair_equal (gconstpointer ka, gconstpointer kb) { const MonoClassMethodPair *apair = (const MonoClassMethodPair *)ka; const MonoClassMethodPair *bpair = (const MonoClassMethodPair *)kb; return apair->klass == bpair->klass && apair->method == bpair->method ? 1 : 0; } static guint class_method_pair_hash (gconstpointer data) { const MonoClassMethodPair *pair = (const MonoClassMethodPair *)data; return (gsize)pair->klass ^ (gsize)pair->method; } static void init_jit_mem_manager (MonoMemoryManager *mem_manager) { MonoJitMemoryManager *info = g_new0 (MonoJitMemoryManager, 1); info->mem_manager = mem_manager; info->jump_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL); info->jump_target_hash = g_hash_table_new (NULL, NULL); info->jit_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL); info->delegate_trampoline_hash = g_hash_table_new (class_method_pair_hash, class_method_pair_equal); info->seq_points = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, mono_seq_point_info_free); info->runtime_invoke_hash = mono_conc_hashtable_new_full (mono_aligned_addr_hash, NULL, NULL, runtime_invoke_info_free); info->arch_seq_points = g_hash_table_new (mono_aligned_addr_hash, NULL); mono_jit_code_hash_init (&info->jit_code_hash); mono_jit_code_hash_init (&info->interp_code_hash); mono_os_mutex_init_recursive (&info->jit_code_hash_lock); mem_manager->runtime_info = info; } static void delete_jump_list (gpointer key, gpointer value, gpointer user_data) { MonoJumpList *jlist = (MonoJumpList *)value; g_slist_free ((GSList*)jlist->list); } static void delete_got_slot_list (gpointer key, gpointer value, gpointer user_data) { GSList *list = (GSList *)value; g_slist_free (list); } static void dynamic_method_info_free (gpointer key, gpointer value, gpointer user_data) { MonoJitDynamicMethodInfo *di = (MonoJitDynamicMethodInfo *)value; mono_code_manager_destroy (di->code_mp); g_free (di); } static void runtime_invoke_info_free (gpointer value) { RuntimeInvokeInfo *info = (RuntimeInvokeInfo*)value; #ifdef MONO_ARCH_DYN_CALL_SUPPORTED if (info->dyn_call_info) mono_arch_dyn_call_free (info->dyn_call_info); #endif g_free (info); } static void free_jit_callee_list (gpointer key, gpointer value, gpointer user_data) { g_slist_free ((GSList*)value); } static void free_jit_mem_manager (MonoMemoryManager *mem_manager) { MonoJitMemoryManager *info = (MonoJitMemoryManager*)mem_manager->runtime_info; g_hash_table_foreach (info->jump_target_hash, delete_jump_list, NULL); g_hash_table_destroy (info->jump_target_hash); if (info->jump_target_got_slot_hash) { g_hash_table_foreach (info->jump_target_got_slot_hash, delete_got_slot_list, NULL); g_hash_table_destroy (info->jump_target_got_slot_hash); } if (info->dynamic_code_hash) { g_hash_table_foreach (info->dynamic_code_hash, dynamic_method_info_free, NULL); g_hash_table_destroy (info->dynamic_code_hash); } g_hash_table_destroy (info->method_code_hash); g_hash_table_destroy (info->jump_trampoline_hash); g_hash_table_destroy (info->jit_trampoline_hash); g_hash_table_destroy (info->delegate_trampoline_hash); g_hash_table_destroy 
(info->static_rgctx_trampoline_hash); g_hash_table_destroy (info->mrgctx_hash); g_hash_table_destroy (info->method_rgctx_hash); g_hash_table_destroy (info->interp_method_pointer_hash); mono_conc_hashtable_destroy (info->runtime_invoke_hash); g_hash_table_destroy (info->seq_points); g_hash_table_destroy (info->arch_seq_points); if (info->agent_info) mono_component_debugger ()->free_mem_manager (info); g_hash_table_destroy (info->gsharedvt_arg_tramp_hash); if (info->llvm_jit_callees) { g_hash_table_foreach (info->llvm_jit_callees, free_jit_callee_list, NULL); g_hash_table_destroy (info->llvm_jit_callees); } mono_internal_hash_table_destroy (&info->interp_code_hash); #ifdef ENABLE_LLVM mono_llvm_free_mem_manager (info); #endif g_free (info); mem_manager->runtime_info = NULL; } #ifdef ENABLE_LLVM static gboolean llvm_init_inner (void) { mono_llvm_init (!mono_compile_aot); return TRUE; } #endif /* * mini_llvm_init: * * Load and initialize LLVM support. * Return TRUE on success. */ gboolean mini_llvm_init (void) { #ifdef ENABLE_LLVM static gboolean llvm_inited; static gboolean init_result; mono_loader_lock_if_inited (); if (!llvm_inited) { init_result = llvm_init_inner (); llvm_inited = TRUE; } mono_loader_unlock_if_inited (); return init_result; #else return FALSE; #endif } void mini_add_profiler_argument (const char *desc) { if (!profile_options) profile_options = g_ptr_array_new (); g_ptr_array_add (profile_options, (gpointer) g_strdup (desc)); } const MonoEECallbacks *mono_interp_callbacks_pointer; void mini_install_interp_callbacks (const MonoEECallbacks *cbs) { mono_interp_callbacks_pointer = cbs; } int mono_ee_api_version (void) { return MONO_EE_API_VERSION; } void mono_interp_entry_from_trampoline (gpointer ccontext, gpointer imethod) { mini_get_interp_callbacks ()->entry_from_trampoline (ccontext, imethod); } void mono_interp_to_native_trampoline (gpointer addr, gpointer ccontext) { mini_get_interp_callbacks ()->to_native_trampoline (addr, ccontext); } static gboolean mini_is_interpreter_enabled (void) { return mono_use_interpreter; } static const char* mono_get_runtime_build_version (void); MonoDomain * mini_init (const char *filename, const char *runtime_version) { ERROR_DECL (error); MonoDomain *domain; MonoRuntimeCallbacks callbacks; static const MonoThreadInfoRuntimeCallbacks ticallbacks = { MONO_THREAD_INFO_RUNTIME_CALLBACKS (MONO_INIT_CALLBACK, mono) }; mono_component_event_pipe_100ns_ticks_start (); MONO_VES_INIT_BEGIN (); CHECKED_MONO_INIT (); #if defined(__linux__) if (access ("/proc/self/maps", F_OK) != 0) { g_print ("Mono requires /proc to be mounted.\n"); exit (1); } #endif mono_interp_stub_init (); #ifndef DISABLE_INTERPRETER if (mono_use_interpreter) mono_ee_interp_init (mono_interp_opts_string); #endif mono_components_init (); mono_component_debugger ()->parse_options (mono_debugger_agent_get_sdb_options ()); mono_os_mutex_init_recursive (&jit_mutex); mono_cross_helpers_run (); mono_counters_init (); mini_jit_init (); mini_jit_init_job_control (); /* Happens when using the embedding interface */ if (!default_opt_set) default_opt = mono_parse_default_optimizations (NULL); #ifdef MONO_ARCH_GSHAREDVT_SUPPORTED if (mono_aot_only) mono_set_generic_sharing_vt_supported (TRUE); #else if (mono_llvm_only) mono_set_generic_sharing_vt_supported (TRUE); #endif mono_tls_init_runtime_keys (); if (!global_codeman) { if (!mono_compile_aot) global_codeman = mono_code_manager_new (); else global_codeman = mono_code_manager_new_aot (); } memset (&callbacks, 0, sizeof (callbacks)); 
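/* Descriptive note: the memset above zeroes the whole MonoRuntimeCallbacks table, so every field not explicitly assigned below simply stays NULL, i.e. unimplemented for this execution engine. */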
callbacks.create_ftnptr = mini_create_ftnptr; callbacks.get_addr_from_ftnptr = mini_get_addr_from_ftnptr; callbacks.get_runtime_build_info = mono_get_runtime_build_info; callbacks.get_runtime_build_version = mono_get_runtime_build_version; callbacks.set_cast_details = mono_set_cast_details; callbacks.debug_log = mono_component_debugger ()->debug_log; callbacks.debug_log_is_enabled = mono_component_debugger ()->debug_log_is_enabled; callbacks.get_vtable_trampoline = mini_get_vtable_trampoline; callbacks.get_imt_trampoline = mini_get_imt_trampoline; callbacks.imt_entry_inited = mini_imt_entry_inited; callbacks.init_delegate = mini_init_delegate; #define JIT_INVOKE_WORKS #ifdef JIT_INVOKE_WORKS callbacks.runtime_invoke = mono_jit_runtime_invoke; #endif #define JIT_TRAMPOLINES_WORK #ifdef JIT_TRAMPOLINES_WORK callbacks.compile_method = mono_jit_compile_method; callbacks.create_jit_trampoline = mono_create_jit_trampoline; callbacks.create_delegate_trampoline = mono_create_delegate_trampoline; callbacks.free_method = mono_jit_free_method; callbacks.get_ftnptr = get_ftnptr_for_method; #endif callbacks.is_interpreter_enabled = mini_is_interpreter_enabled; #if ENABLE_WEAK_ATTR callbacks.get_weak_field_indexes = mono_aot_get_weak_field_indexes; #endif callbacks.metadata_update_published = mini_invalidate_transformed_interp_methods; callbacks.interp_jit_info_foreach = mini_interp_jit_info_foreach; callbacks.interp_sufficient_stack = mini_interp_sufficient_stack; callbacks.init_mem_manager = init_jit_mem_manager; callbacks.free_mem_manager = free_jit_mem_manager; callbacks.get_jit_stats = get_jit_stats; callbacks.get_exception_stats = get_exception_stats; mono_install_callbacks (&callbacks); #ifndef HOST_WIN32 mono_w32handle_init (); #endif mono_thread_info_runtime_init (&ticallbacks); if (g_hasenv ("MONO_DEBUG")) { mini_parse_debug_options (); } mono_code_manager_init (mono_compile_aot); #ifdef MONO_ARCH_HAVE_CODE_CHUNK_TRACKING static const MonoCodeManagerCallbacks code_manager_callbacks = { #undef MONO_CODE_MANAGER_CALLBACK #define MONO_CODE_MANAGER_CALLBACK(ret, name, sig) mono_arch_code_ ## name, MONO_CODE_MANAGER_CALLBACKS }; mono_code_manager_install_callbacks (&code_manager_callbacks); #endif mono_hwcap_init (); mono_arch_cpu_init (); mono_arch_init (); mono_unwind_init (); if (mini_debug_options.lldb || g_hasenv ("MONO_LLDB")) { mono_lldb_init (""); mono_dont_free_domains = TRUE; } #ifdef ENABLE_LLVM if (mono_use_llvm) mono_llvm_init (!mono_compile_aot); #endif mono_trampolines_init (); if (default_opt & MONO_OPT_AOT) mono_aot_init (); mono_component_debugger ()->init (); #ifdef MONO_ARCH_GSHARED_SUPPORTED mono_set_generic_sharing_supported (TRUE); #endif mono_thread_info_signals_init (); mono_init_native_crash_info (); #ifndef MONO_CROSS_COMPILE mono_runtime_install_handlers (); #endif mono_threads_install_cleanup (mini_thread_cleanup); mono_install_get_cached_class_info (mono_aot_get_cached_class_info); mono_install_get_class_from_name (mono_aot_get_class_from_name); mono_install_jit_info_find_in_aot (mono_aot_find_jit_info); mono_profiler_state.context_enable = mini_profiler_context_enable; mono_profiler_state.context_get_this = mini_profiler_context_get_this; mono_profiler_state.context_get_argument = mini_profiler_context_get_argument; mono_profiler_state.context_get_local = mini_profiler_context_get_local; mono_profiler_state.context_get_result = mini_profiler_context_get_result; mono_profiler_state.context_free_buffer = mini_profiler_context_free_buffer; if (g_hasenv ("MONO_PROFILE")) 
{ gchar *profile_env = g_getenv ("MONO_PROFILE"); mini_add_profiler_argument (profile_env); g_free (profile_env); } if (profile_options) for (guint i = 0; i < profile_options->len; i++) mono_profiler_load ((const char *) g_ptr_array_index (profile_options, i)); mono_profiler_started (); if (mini_debug_options.collect_pagefault_stats) mono_aot_set_make_unreadable (TRUE); /* set no-exec before the default ALC is created */ if (mono_compile_aot) { /* * Avoid running managed code when AOT compiling, since the platform * might only support aot-only execution. */ mono_runtime_set_no_exec (TRUE); } if (runtime_version) domain = mono_init_version (filename, runtime_version); else domain = mono_init_from_assembly (filename, filename); if (mono_compile_aot) mono_component_diagnostics_server ()->disable (); mono_component_event_pipe ()->init (); // EventPipe is now up and running, so convert 100ns ticks since runtime init into an EventPipe compatible timestamp (using a negative delta to represent a timestamp in the past). // Add RuntimeInit execution checkpoint using the converted timestamp. mono_component_event_pipe ()->add_rundown_execution_checkpoint_2 ("RuntimeInit", mono_component_event_pipe ()->convert_100ns_ticks_to_timestamp_t (-mono_component_event_pipe_100ns_ticks_stop ())); if (mono_aot_only) { /* This helps catch code allocation requests */ mono_code_manager_set_read_only (mono_mem_manager_get_ambient ()->code_mp); mono_marshal_use_aot_wrappers (TRUE); } if (mono_llvm_only) { mono_install_imt_trampoline_builder (mini_llvmonly_get_imt_trampoline); mono_set_always_build_imt_trampolines (TRUE); } else if (mono_aot_only) { mono_install_imt_trampoline_builder (mono_aot_get_imt_trampoline); } else { mono_install_imt_trampoline_builder (mono_arch_build_imt_trampoline); } /* Init arch tls information only after the metadata side is inited to make sure we see dynamic appdomain tls keys */ mono_arch_finish_init (); /* This must come after mono_init () in the aot-only case */ mono_exceptions_init (); /* This should come after mono_init () too */ mini_gc_init (); mono_create_icall_signatures (); register_counters (); #define JIT_CALLS_WORK #ifdef JIT_CALLS_WORK /* Needs to be called here since register_jit_icall depends on it */ mono_marshal_init (); mono_arch_register_lowlevel_calls (); register_icalls (); mono_generic_sharing_init (); #endif #ifdef MONO_ARCH_SIMD_INTRINSICS mono_simd_intrinsics_init (); #endif register_trampolines (domain); mono_mem_account_register_counters (); #define JIT_RUNTIME_WORKS #ifdef JIT_RUNTIME_WORKS mono_install_runtime_cleanup (runtime_cleanup); mono_runtime_init_checked (domain, (MonoThreadStartCB)mono_thread_start_cb, mono_thread_attach_cb, error); mono_error_assert_ok (error); mono_thread_internal_attach (domain); MONO_PROFILER_RAISE (thread_name, (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ()), "Main")); #endif mono_threads_set_runtime_startup_finished (); mono_component_event_pipe ()->finish_init (); #ifdef ENABLE_EXPERIMENT_TIERED if (!mono_compile_aot) { /* create compilation thread in background */ mini_tiered_init (); } #endif if (mono_profiler_sampling_enabled ()) mono_runtime_setup_stat_profiler (); MONO_PROFILER_RAISE (runtime_initialized, ()); MONO_VES_INIT_END (); return domain; } static void register_icalls (void) { mono_add_internal_call_internal ("System.Diagnostics.StackFrame::get_frame_info", ves_icall_get_frame_info); mono_add_internal_call_internal ("System.Diagnostics.StackTrace::get_trace", ves_icall_get_trace); mono_add_internal_call_internal
("Mono.Runtime::mono_runtime_install_handlers", mono_runtime_install_handlers); /* * It's important that we pass `TRUE` as the last argument here, as * it causes the JIT to omit a wrapper for these icalls. If the JIT * *did* emit a wrapper, we'd be looking at infinite recursion since * the wrapper would call the icall which would call the wrapper and * so on. */ register_icall (mono_profiler_raise_method_enter, mono_icall_sig_void_ptr_ptr, TRUE); register_icall (mono_profiler_raise_method_leave, mono_icall_sig_void_ptr_ptr, TRUE); register_icall (mono_profiler_raise_method_tail_call, mono_icall_sig_void_ptr_ptr, TRUE); register_icall (mono_profiler_raise_exception_clause, mono_icall_sig_void_ptr_int_int_object, TRUE); register_icall (mono_trace_enter_method, mono_icall_sig_void_ptr_ptr_ptr, TRUE); register_icall (mono_trace_leave_method, mono_icall_sig_void_ptr_ptr_ptr, TRUE); register_icall (mono_trace_tail_method, mono_icall_sig_void_ptr_ptr_ptr, TRUE); g_assert (mono_get_lmf_addr == mono_tls_get_lmf_addr); register_icall (mono_domain_get, mono_icall_sig_ptr, TRUE); register_icall (mini_llvmonly_throw_exception, mono_icall_sig_void_object, TRUE); register_icall (mini_llvmonly_rethrow_exception, mono_icall_sig_void_object, TRUE); register_icall (mini_llvmonly_throw_corlib_exception, mono_icall_sig_void_int, TRUE); register_icall (mini_llvmonly_resume_exception, mono_icall_sig_void, TRUE); register_icall (mini_llvmonly_resume_exception_il_state, mono_icall_sig_void_ptr_ptr, TRUE); register_icall (mini_llvmonly_load_exception, mono_icall_sig_object, TRUE); register_icall (mini_llvmonly_clear_exception, NULL, TRUE); register_icall (mini_llvmonly_match_exception, mono_icall_sig_int_ptr_int_int_ptr_object, TRUE); #if defined(ENABLE_LLVM) && defined(HAVE_UNWIND_H) register_icall (mono_llvm_set_unhandled_exception_handler, NULL, TRUE); // FIXME: This is broken #ifndef TARGET_WASM register_icall (mono_debug_personality, mono_icall_sig_int_int_int_ptr_ptr_ptr, TRUE); #endif #endif if (!mono_llvm_only) { register_dyn_icall (mono_get_throw_exception (), mono_arch_throw_exception, mono_icall_sig_void_object, TRUE); register_dyn_icall (mono_get_rethrow_exception (), mono_arch_rethrow_exception, mono_icall_sig_void_object, TRUE); register_dyn_icall (mono_get_throw_corlib_exception (), mono_arch_throw_corlib_exception, mono_icall_sig_void_ptr, TRUE); } register_icall (mono_thread_get_undeniable_exception, mono_icall_sig_object, FALSE); register_icall (ves_icall_thread_finish_async_abort, mono_icall_sig_void, FALSE); register_icall (mono_thread_interruption_checkpoint, mono_icall_sig_object, FALSE); register_icall (mono_thread_force_interruption_checkpoint_noraise, mono_icall_sig_object, FALSE); register_icall (mono_threads_state_poll, mono_icall_sig_void, FALSE); #ifndef MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS register_opcode_emulation (OP_LMUL, __emul_lmul, mono_icall_sig_long_long_long, mono_llmult, FALSE); register_opcode_emulation (OP_LDIV, __emul_ldiv, mono_icall_sig_long_long_long, mono_lldiv, FALSE); register_opcode_emulation (OP_LDIV_UN, __emul_ldiv_un, mono_icall_sig_long_long_long, mono_lldiv_un, FALSE); register_opcode_emulation (OP_LREM, __emul_lrem, mono_icall_sig_long_long_long, mono_llrem, FALSE); register_opcode_emulation (OP_LREM_UN, __emul_lrem_un, mono_icall_sig_long_long_long, mono_llrem_un, FALSE); #endif #if !defined(MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS) || defined(MONO_ARCH_EMULATE_LONG_MUL_OVF_OPTS) register_opcode_emulation (OP_LMUL_OVF_UN, __emul_lmul_ovf_un, 
mono_icall_sig_long_long_long, mono_llmult_ovf_un, FALSE); register_opcode_emulation (OP_LMUL_OVF, __emul_lmul_ovf, mono_icall_sig_long_long_long, mono_llmult_ovf, FALSE); register_opcode_emulation (OP_LMUL_OVF_UN_OOM, __emul_lmul_ovf_un_oom, mono_icall_sig_long_long_long, mono_llmult_ovf_un_oom, FALSE); #endif #ifndef MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS register_opcode_emulation (OP_LSHL, __emul_lshl, mono_icall_sig_long_long_int32, mono_lshl, TRUE); register_opcode_emulation (OP_LSHR, __emul_lshr, mono_icall_sig_long_long_int32, mono_lshr, TRUE); register_opcode_emulation (OP_LSHR_UN, __emul_lshr_un, mono_icall_sig_long_long_int32, mono_lshr_un, TRUE); #endif #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV) register_opcode_emulation (OP_IDIV, __emul_op_idiv, mono_icall_sig_int32_int32_int32, mono_idiv, FALSE); register_opcode_emulation (OP_IDIV_UN, __emul_op_idiv_un, mono_icall_sig_int32_int32_int32, mono_idiv_un, FALSE); register_opcode_emulation (OP_IREM, __emul_op_irem, mono_icall_sig_int32_int32_int32, mono_irem, FALSE); register_opcode_emulation (OP_IREM_UN, __emul_op_irem_un, mono_icall_sig_int32_int32_int32, mono_irem_un, FALSE); #endif #ifdef MONO_ARCH_EMULATE_MUL_DIV register_opcode_emulation (OP_IMUL, __emul_op_imul, mono_icall_sig_int32_int32_int32, mono_imul, TRUE); #endif #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_MUL_OVF) register_opcode_emulation (OP_IMUL_OVF, __emul_op_imul_ovf, mono_icall_sig_int32_int32_int32, mono_imul_ovf, FALSE); register_opcode_emulation (OP_IMUL_OVF_UN, __emul_op_imul_ovf_un, mono_icall_sig_int32_int32_int32, mono_imul_ovf_un, FALSE); register_opcode_emulation (OP_IMUL_OVF_UN_OOM, __emul_op_imul_ovf_un_oom, mono_icall_sig_int32_int32_int32, mono_imul_ovf_un_oom, FALSE); #endif #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_SOFT_FLOAT_FALLBACK) register_opcode_emulation (OP_FDIV, __emul_fdiv, mono_icall_sig_double_double_double, mono_fdiv, FALSE); #endif #ifdef MONO_ARCH_EMULATE_FCONV_TO_U8 register_opcode_emulation (OP_FCONV_TO_U8, __emul_fconv_to_u8, mono_icall_sig_ulong_double, mono_fconv_u8, FALSE); register_opcode_emulation (OP_RCONV_TO_U8, __emul_rconv_to_u8, mono_icall_sig_ulong_float, mono_rconv_u8, FALSE); #endif #ifdef MONO_ARCH_EMULATE_FCONV_TO_U4 register_opcode_emulation (OP_FCONV_TO_U4, __emul_fconv_to_u4, mono_icall_sig_uint32_double, mono_fconv_u4, FALSE); register_opcode_emulation (OP_RCONV_TO_U4, __emul_rconv_to_u4, mono_icall_sig_uint32_float, mono_rconv_u4, FALSE); #endif register_opcode_emulation (OP_FCONV_TO_OVF_I8, __emul_fconv_to_ovf_i8, mono_icall_sig_long_double, mono_fconv_ovf_i8, FALSE); register_opcode_emulation (OP_FCONV_TO_OVF_U8, __emul_fconv_to_ovf_u8, mono_icall_sig_ulong_double, mono_fconv_ovf_u8, FALSE); register_opcode_emulation (OP_RCONV_TO_OVF_I8, __emul_rconv_to_ovf_i8, mono_icall_sig_long_float, mono_rconv_ovf_i8, FALSE); register_opcode_emulation (OP_RCONV_TO_OVF_U8, __emul_rconv_to_ovf_u8, mono_icall_sig_ulong_float, mono_rconv_ovf_u8, FALSE); #ifdef MONO_ARCH_EMULATE_FCONV_TO_I8 register_opcode_emulation (OP_FCONV_TO_I8, __emul_fconv_to_i8, mono_icall_sig_long_double, mono_fconv_i8, FALSE); register_opcode_emulation (OP_RCONV_TO_I8, __emul_rconv_to_i8, mono_icall_sig_long_float, mono_rconv_i8, FALSE); #endif #ifdef MONO_ARCH_EMULATE_CONV_R8_UN register_opcode_emulation (OP_ICONV_TO_R_UN, __emul_iconv_to_r_un, mono_icall_sig_double_int32, mono_conv_to_r8_un, FALSE); #endif #ifdef MONO_ARCH_EMULATE_LCONV_TO_R8 register_opcode_emulation 
(OP_LCONV_TO_R8, __emul_lconv_to_r8, mono_icall_sig_double_long, mono_lconv_to_r8, FALSE); #endif #ifdef MONO_ARCH_EMULATE_LCONV_TO_R4 register_opcode_emulation (OP_LCONV_TO_R4, __emul_lconv_to_r4, mono_icall_sig_float_long, mono_lconv_to_r4, FALSE); #endif #ifdef MONO_ARCH_EMULATE_LCONV_TO_R8_UN register_opcode_emulation (OP_LCONV_TO_R_UN, __emul_lconv_to_r8_un, mono_icall_sig_double_long, mono_lconv_to_r8_un, FALSE); #endif #ifdef MONO_ARCH_EMULATE_FREM register_opcode_emulation (OP_FREM, __emul_frem, mono_icall_sig_double_double_double, mono_fmod, FALSE); register_opcode_emulation (OP_RREM, __emul_rrem, mono_icall_sig_float_float_float, fmodf, FALSE); #endif #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK if (mono_arch_is_soft_float ()) { register_opcode_emulation (OP_FSUB, __emul_fsub, mono_icall_sig_double_double_double, mono_fsub, FALSE); register_opcode_emulation (OP_FADD, __emul_fadd, mono_icall_sig_double_double_double, mono_fadd, FALSE); register_opcode_emulation (OP_FMUL, __emul_fmul, mono_icall_sig_double_double_double, mono_fmul, FALSE); register_opcode_emulation (OP_FNEG, __emul_fneg, mono_icall_sig_double_double, mono_fneg, FALSE); register_opcode_emulation (OP_ICONV_TO_R8, __emul_iconv_to_r8, mono_icall_sig_double_int32, mono_conv_to_r8, FALSE); register_opcode_emulation (OP_ICONV_TO_R4, __emul_iconv_to_r4, mono_icall_sig_double_int32, mono_conv_to_r4, FALSE); register_opcode_emulation (OP_FCONV_TO_R4, __emul_fconv_to_r4, mono_icall_sig_double_double, mono_fconv_r4, FALSE); register_opcode_emulation (OP_FCONV_TO_I1, __emul_fconv_to_i1, mono_icall_sig_int8_double, mono_fconv_i1, FALSE); register_opcode_emulation (OP_FCONV_TO_I2, __emul_fconv_to_i2, mono_icall_sig_int16_double, mono_fconv_i2, FALSE); register_opcode_emulation (OP_FCONV_TO_I4, __emul_fconv_to_i4, mono_icall_sig_int32_double, mono_fconv_i4, FALSE); register_opcode_emulation (OP_FCONV_TO_U1, __emul_fconv_to_u1, mono_icall_sig_uint8_double, mono_fconv_u1, FALSE); register_opcode_emulation (OP_FCONV_TO_U2, __emul_fconv_to_u2, mono_icall_sig_uint16_double, mono_fconv_u2, FALSE); #if TARGET_SIZEOF_VOID_P == 4 register_opcode_emulation (OP_FCONV_TO_I, __emul_fconv_to_i, mono_icall_sig_int32_double, mono_fconv_i4, FALSE); #endif register_opcode_emulation (OP_FBEQ, __emul_fcmp_eq, mono_icall_sig_uint32_double_double, mono_fcmp_eq, FALSE); register_opcode_emulation (OP_FBLT, __emul_fcmp_lt, mono_icall_sig_uint32_double_double, mono_fcmp_lt, FALSE); register_opcode_emulation (OP_FBGT, __emul_fcmp_gt, mono_icall_sig_uint32_double_double, mono_fcmp_gt, FALSE); register_opcode_emulation (OP_FBLE, __emul_fcmp_le, mono_icall_sig_uint32_double_double, mono_fcmp_le, FALSE); register_opcode_emulation (OP_FBGE, __emul_fcmp_ge, mono_icall_sig_uint32_double_double, mono_fcmp_ge, FALSE); register_opcode_emulation (OP_FBNE_UN, __emul_fcmp_ne_un, mono_icall_sig_uint32_double_double, mono_fcmp_ne_un, FALSE); register_opcode_emulation (OP_FBLT_UN, __emul_fcmp_lt_un, mono_icall_sig_uint32_double_double, mono_fcmp_lt_un, FALSE); register_opcode_emulation (OP_FBGT_UN, __emul_fcmp_gt_un, mono_icall_sig_uint32_double_double, mono_fcmp_gt_un, FALSE); register_opcode_emulation (OP_FBLE_UN, __emul_fcmp_le_un, mono_icall_sig_uint32_double_double, mono_fcmp_le_un, FALSE); register_opcode_emulation (OP_FBGE_UN, __emul_fcmp_ge_un, mono_icall_sig_uint32_double_double, mono_fcmp_ge_un, FALSE); register_opcode_emulation (OP_FCEQ, __emul_fcmp_ceq, mono_icall_sig_uint32_double_double, mono_fceq, FALSE); register_opcode_emulation (OP_FCGT, __emul_fcmp_cgt, 
mono_icall_sig_uint32_double_double, mono_fcgt, FALSE); register_opcode_emulation (OP_FCGT_UN, __emul_fcmp_cgt_un, mono_icall_sig_uint32_double_double, mono_fcgt_un, FALSE); register_opcode_emulation (OP_FCLT, __emul_fcmp_clt, mono_icall_sig_uint32_double_double, mono_fclt, FALSE); register_opcode_emulation (OP_FCLT_UN, __emul_fcmp_clt_un, mono_icall_sig_uint32_double_double, mono_fclt_un, FALSE); register_icall (mono_fload_r4, mono_icall_sig_double_ptr, FALSE); register_icall (mono_fstore_r4, mono_icall_sig_void_double_ptr, FALSE); register_icall (mono_fload_r4_arg, mono_icall_sig_uint32_double, FALSE); register_icall (mono_isfinite_double, mono_icall_sig_int32_double, FALSE); } #endif register_icall (mono_ckfinite, mono_icall_sig_double_double, FALSE); #ifdef COMPRESSED_INTERFACE_BITMAP register_icall (mono_class_interface_match, mono_icall_sig_uint32_ptr_int32, TRUE); #endif /* other jit icalls */ register_icall (ves_icall_mono_delegate_ctor, mono_icall_sig_void_object_object_ptr, FALSE); register_icall (ves_icall_mono_delegate_ctor_interp, mono_icall_sig_void_object_object_ptr, FALSE); register_icall (mono_class_static_field_address, mono_icall_sig_ptr_ptr, FALSE); register_icall (mono_ldtoken_wrapper, mono_icall_sig_ptr_ptr_ptr_ptr, FALSE); register_icall (mono_ldtoken_wrapper_generic_shared, mono_icall_sig_ptr_ptr_ptr_ptr, FALSE); register_icall (mono_get_special_static_data, mono_icall_sig_ptr_int, FALSE); register_icall (mono_helper_stelem_ref_check, mono_icall_sig_void_object_object, FALSE); register_icall (ves_icall_object_new, mono_icall_sig_object_ptr, FALSE); register_icall (ves_icall_object_new_specific, mono_icall_sig_object_ptr, FALSE); register_icall (ves_icall_array_new_specific, mono_icall_sig_object_ptr_int32, FALSE); register_icall (ves_icall_runtime_class_init, mono_icall_sig_void_ptr, FALSE); register_icall (mono_ldftn, mono_icall_sig_ptr_ptr, FALSE); register_icall (mono_ldvirtfn, mono_icall_sig_ptr_object_ptr, FALSE); register_icall (mono_ldvirtfn_gshared, mono_icall_sig_ptr_object_ptr, FALSE); register_icall (mono_helper_compile_generic_method, mono_icall_sig_ptr_object_ptr_ptr, FALSE); register_icall (mono_helper_ldstr, mono_icall_sig_object_ptr_int, FALSE); register_icall (mono_helper_ldstr_mscorlib, mono_icall_sig_object_int, FALSE); register_icall (mono_helper_newobj_mscorlib, mono_icall_sig_object_int, FALSE); register_icall (mono_value_copy_internal, mono_icall_sig_void_ptr_ptr_ptr, FALSE); register_icall (mono_object_castclass_unbox, mono_icall_sig_object_object_ptr, FALSE); register_icall (mono_break, NULL, TRUE); register_icall (mono_create_corlib_exception_0, mono_icall_sig_object_int, TRUE); register_icall (mono_create_corlib_exception_1, mono_icall_sig_object_int_object, TRUE); register_icall (mono_create_corlib_exception_2, mono_icall_sig_object_int_object_object, TRUE); register_icall (mono_array_new_1, mono_icall_sig_object_ptr_int, FALSE); register_icall (mono_array_new_2, mono_icall_sig_object_ptr_int_int, FALSE); register_icall (mono_array_new_3, mono_icall_sig_object_ptr_int_int_int, FALSE); register_icall (mono_array_new_4, mono_icall_sig_object_ptr_int_int_int_int, FALSE); register_icall (mono_array_new_n_icall, mono_icall_sig_object_ptr_int_ptr, FALSE); register_icall (mono_get_native_calli_wrapper, mono_icall_sig_ptr_ptr_ptr_ptr, FALSE); register_icall (mono_resume_unwind, mono_icall_sig_void_ptr, TRUE); register_icall (mono_gsharedvt_constrained_call, mono_icall_sig_object_ptr_ptr_ptr_ptr_ptr, FALSE); register_icall 
(mono_gsharedvt_value_copy, mono_icall_sig_void_ptr_ptr_ptr, TRUE); //WARNING We do runtime selection here but the string *MUST* be to a fallback function that has same signature and behavior MonoRangeCopyFunction const mono_gc_wbarrier_range_copy = mono_gc_get_range_copy_func (); register_icall_no_wrapper (mono_gc_wbarrier_range_copy, mono_icall_sig_void_ptr_ptr_int); register_icall (mono_object_castclass_with_cache, mono_icall_sig_object_object_ptr_ptr, FALSE); register_icall (mono_object_isinst_with_cache, mono_icall_sig_object_object_ptr_ptr, FALSE); register_icall (mono_generic_class_init, mono_icall_sig_void_ptr, FALSE); register_icall (mono_fill_class_rgctx, mono_icall_sig_ptr_ptr_int, FALSE); register_icall (mono_fill_method_rgctx, mono_icall_sig_ptr_ptr_int, FALSE); register_dyn_icall (mono_component_debugger ()->user_break, mono_debugger_agent_user_break, mono_icall_sig_void, FALSE); register_icall (mini_llvm_init_method, mono_icall_sig_void_ptr_ptr_ptr_ptr, TRUE); register_icall_no_wrapper (mini_llvmonly_resolve_iface_call_gsharedvt, mono_icall_sig_ptr_object_int_ptr_ptr); register_icall_no_wrapper (mini_llvmonly_resolve_vcall_gsharedvt, mono_icall_sig_ptr_object_int_ptr_ptr); register_icall_no_wrapper (mini_llvmonly_resolve_vcall_gsharedvt_fast, mono_icall_sig_ptr_object_int); register_icall_no_wrapper (mini_llvmonly_resolve_generic_virtual_call, mono_icall_sig_ptr_ptr_int_ptr); register_icall_no_wrapper (mini_llvmonly_resolve_generic_virtual_iface_call, mono_icall_sig_ptr_ptr_int_ptr); /* This needs a wrapper so it can have a preserveall cconv */ register_icall (mini_llvmonly_init_vtable_slot, mono_icall_sig_ptr_ptr_int, FALSE); register_icall (mini_llvmonly_init_delegate, mono_icall_sig_void_object_ptr, TRUE); register_icall (mini_llvmonly_init_delegate_virtual, mono_icall_sig_void_object_object_ptr, TRUE); register_icall (mini_llvmonly_throw_nullref_exception, mono_icall_sig_void, TRUE); register_icall (mini_llvmonly_throw_aot_failed_exception, mono_icall_sig_void_ptr, TRUE); register_icall (mini_llvmonly_pop_lmf, mono_icall_sig_void_ptr, TRUE); register_icall (mini_llvmonly_interp_entry_gsharedvt, mono_icall_sig_void_ptr_ptr_ptr, TRUE); register_icall (mono_get_assembly_object, mono_icall_sig_object_ptr, TRUE); register_icall (mono_get_method_object, mono_icall_sig_object_ptr, TRUE); register_icall (mono_throw_method_access, mono_icall_sig_void_ptr_ptr, FALSE); register_icall (mono_throw_bad_image, mono_icall_sig_void, FALSE); register_icall (mono_throw_not_supported, mono_icall_sig_void, FALSE); register_icall (mono_throw_platform_not_supported, mono_icall_sig_void, FALSE); register_icall (mono_throw_invalid_program, mono_icall_sig_void_ptr, FALSE); register_icall_no_wrapper (mono_dummy_jit_icall, mono_icall_sig_void); //register_icall_no_wrapper (mono_dummy_jit_icall_val, mono_icall_sig_void_ptr); register_icall_with_wrapper (mono_monitor_enter_internal, mono_icall_sig_int32_obj); register_icall_with_wrapper (mono_monitor_enter_v4_internal, mono_icall_sig_void_obj_ptr); register_icall_no_wrapper (mono_monitor_enter_fast, mono_icall_sig_int_obj); register_icall_no_wrapper (mono_monitor_enter_v4_fast, mono_icall_sig_int_obj_ptr); #ifdef TARGET_IOS register_icall (pthread_getspecific, mono_icall_sig_ptr_ptr, TRUE); #endif /* Register tls icalls */ register_icall_no_wrapper (mono_tls_get_thread_extern, mono_icall_sig_ptr); register_icall_no_wrapper (mono_tls_get_jit_tls_extern, mono_icall_sig_ptr); register_icall_no_wrapper (mono_tls_get_domain_extern, mono_icall_sig_ptr); 
register_icall_no_wrapper (mono_tls_get_sgen_thread_info_extern, mono_icall_sig_ptr); register_icall_no_wrapper (mono_tls_get_lmf_addr_extern, mono_icall_sig_ptr); register_icall_no_wrapper (mono_interp_entry_from_trampoline, mono_icall_sig_void_ptr_ptr); register_icall_no_wrapper (mono_interp_to_native_trampoline, mono_icall_sig_void_ptr_ptr); #ifdef MONO_ARCH_HAS_REGISTER_ICALL mono_arch_register_icall (); #endif } MonoJitStats mono_jit_stats = {0}; /** * Counters of mono_stats and mono_jit_stats can be read without locking during shutdown. * For all other contexts, assumes that the domain lock is held. * MONO_NO_SANITIZE_THREAD tells Clang's ThreadSanitizer to hide all reports of these (known) races. */ MONO_NO_SANITIZE_THREAD void mono_runtime_print_stats (void) { if (mono_jit_stats.enabled) { g_print ("Mono Jit statistics\n"); g_print ("Max code size ratio: %.2f (%s)\n", mono_jit_stats.max_code_size_ratio / 100.0, mono_jit_stats.max_ratio_method); g_print ("Biggest method: %" G_GINT32_FORMAT " (%s)\n", mono_jit_stats.biggest_method_size, mono_jit_stats.biggest_method); g_print ("Delegates created: %" G_GINT32_FORMAT "\n", mono_stats.delegate_creations); g_print ("Initialized classes: %" G_GINT32_FORMAT "\n", mono_stats.initialized_class_count); g_print ("Used classes: %" G_GINT32_FORMAT "\n", mono_stats.used_class_count); g_print ("Generic vtables: %" G_GINT32_FORMAT "\n", mono_stats.generic_vtable_count); g_print ("Methods: %" G_GINT32_FORMAT "\n", mono_stats.method_count); g_print ("Static data size: %" G_GINT32_FORMAT "\n", mono_stats.class_static_data_size); g_print ("VTable data size: %" G_GINT32_FORMAT "\n", mono_stats.class_vtable_size); g_print ("Mscorlib mempool size: %d\n", mono_mempool_get_allocated (mono_defaults.corlib->mempool)); g_print ("\nInitialized classes: %" G_GINT32_FORMAT "\n", mono_stats.generic_class_count); g_print ("Inflated types: %" G_GINT32_FORMAT "\n", mono_stats.inflated_type_count); g_print ("Generics virtual invokes: %" G_GINT32_FORMAT "\n", mono_jit_stats.generic_virtual_invocations); g_print ("Sharable generic methods: %" G_GINT32_FORMAT "\n", mono_stats.generics_sharable_methods); g_print ("Unsharable generic methods: %" G_GINT32_FORMAT "\n", mono_stats.generics_unsharable_methods); g_print ("Shared generic methods: %" G_GINT32_FORMAT "\n", mono_stats.generics_shared_methods); g_print ("Shared vtype generic methods: %" G_GINT32_FORMAT "\n", mono_stats.gsharedvt_methods); g_print ("IMT tables size: %" G_GINT32_FORMAT "\n", mono_stats.imt_tables_size); g_print ("IMT number of tables: %" G_GINT32_FORMAT "\n", mono_stats.imt_number_of_tables); g_print ("IMT number of methods: %" G_GINT32_FORMAT "\n", mono_stats.imt_number_of_methods); g_print ("IMT used slots: %" G_GINT32_FORMAT "\n", mono_stats.imt_used_slots); g_print ("IMT colliding slots: %" G_GINT32_FORMAT "\n", mono_stats.imt_slots_with_collisions); g_print ("IMT max collisions: %" G_GINT32_FORMAT "\n", mono_stats.imt_max_collisions_in_slot); g_print ("IMT methods at max col: %" G_GINT32_FORMAT "\n", mono_stats.imt_method_count_when_max_collisions); g_print ("IMT trampolines size: %" G_GINT32_FORMAT "\n", mono_stats.imt_trampolines_size); g_print ("JIT info table inserts: %" G_GINT32_FORMAT "\n", mono_stats.jit_info_table_insert_count); g_print ("JIT info table removes: %" G_GINT32_FORMAT "\n", mono_stats.jit_info_table_remove_count); g_print ("JIT info table lookups: %" G_GINT32_FORMAT "\n", mono_stats.jit_info_table_lookup_count); mono_counters_dump (MONO_COUNTER_SECTION_MASK | 
MONO_COUNTER_MONOTONIC, NULL); g_print ("\n"); } } static void jit_stats_cleanup (void) { g_free (mono_jit_stats.max_ratio_method); mono_jit_stats.max_ratio_method = NULL; g_free (mono_jit_stats.biggest_method); mono_jit_stats.biggest_method = NULL; } static void runtime_cleanup (MonoDomain *domain, gpointer user_data) { mini_cleanup (domain); } void mini_cleanup (MonoDomain *domain) { if (mono_stats.enabled) g_printf ("Printing runtime stats at shutdown\n"); mono_runtime_print_stats (); jit_stats_cleanup (); mono_jit_dump_cleanup (); mini_get_interp_callbacks ()->cleanup (); mono_component_event_pipe ()->shutdown (); mono_component_diagnostics_server ()->shutdown (); } void mono_set_defaults (int verbose_level, guint32 opts) { mini_verbose = verbose_level; mono_set_optimizations (opts); } void mono_disable_optimizations (guint32 opts) { default_opt &= ~opts; } void mono_set_optimizations (guint32 opts) { if (opts & MONO_OPT_AGGRESSIVE_INLINING) opts |= MONO_OPT_INLINE; default_opt = opts; default_opt_set = TRUE; #ifdef MONO_ARCH_GSHAREDVT_SUPPORTED mono_set_generic_sharing_vt_supported (mono_aot_only || ((default_opt & MONO_OPT_GSHAREDVT) != 0)); #else if (mono_llvm_only) mono_set_generic_sharing_vt_supported (TRUE); #endif } void mono_set_verbose_level (guint32 level) { mini_verbose = level; } static const char* mono_get_runtime_build_version (void) { return FULL_VERSION; } /** * mono_get_runtime_build_info: * The returned string is owned by the caller. The returned string * format is <code>VERSION (FULL_VERSION BUILD_DATE)</code> and build date is optional. * \returns the runtime version + build date in string format. */ char* mono_get_runtime_build_info (void) { if (mono_build_date) return g_strdup_printf ("%s (%s %s)", VERSION, FULL_VERSION, mono_build_date); else return g_strdup_printf ("%s (%s)", VERSION, FULL_VERSION); } static void mono_precompile_assembly (MonoAssembly *ass, void *user_data) { GHashTable *assemblies = (GHashTable*)user_data; MonoImage *image = mono_assembly_get_image_internal (ass); MonoMethod *method, *invoke; int i, count = 0; if (g_hash_table_lookup (assemblies, ass)) return; g_hash_table_insert (assemblies, ass, ass); if (mini_verbose > 0) printf ("PRECOMPILE: %s.\n", mono_image_get_filename (image)); for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) { ERROR_DECL (error); method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error); if (!method) { mono_error_cleanup (error); /* FIXME don't swallow the error */ continue; } if (method->flags & METHOD_ATTRIBUTE_ABSTRACT) continue; if (method->is_generic || mono_class_is_gtd (method->klass)) continue; count++; if (mini_verbose > 1) { char * desc = mono_method_full_name (method, TRUE); g_print ("Compiling %d %s\n", count, desc); g_free (desc); } mono_compile_method_checked (method, error); if (!is_ok (error)) { mono_error_cleanup (error); /* FIXME don't swallow the error */ continue; } if (strcmp (method->name, "Finalize") == 0) { invoke = mono_marshal_get_runtime_invoke (method, FALSE); mono_compile_method_checked (invoke, error); mono_error_assert_ok (error); } } /* Load and precompile referenced assemblies as well */ for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_ASSEMBLYREF); ++i) { mono_assembly_load_reference (image, i); if (image->references [i]) mono_precompile_assembly (image->references [i], assemblies); } } void mono_precompile_assemblies () { GHashTable *assemblies = g_hash_table_new (NULL, NULL); mono_assembly_foreach 
((GFunc)mono_precompile_assembly, assemblies); g_hash_table_destroy (assemblies); } /* * Used by LLVM. * Have to export this for AOT. */ void mono_personality (void) { /* Not used */ g_assert_not_reached (); } static MonoBreakPolicy always_insert_breakpoint (MonoMethod *method) { return MONO_BREAK_POLICY_ALWAYS; } static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint; /** * mono_set_break_policy: * \param policy_callback the new callback function * * Allow embedders to decide whether to actually obey breakpoint instructions * (both break IL instructions and \c Debugger.Break method calls), for example * to not allow an app to be aborted by a perfectly valid IL opcode when executing * untrusted or semi-trusted code. * * \p policy_callback will be called every time a break point instruction needs to * be inserted with the method argument being the method that calls \c Debugger.Break * or has the IL \c break instruction. The callback should return \c MONO_BREAK_POLICY_NEVER * if it wants the breakpoint to not be effective in the given method. * \c MONO_BREAK_POLICY_ALWAYS is the default. */ void mono_set_break_policy (MonoBreakPolicyFunc policy_callback) { if (policy_callback) break_policy_func = policy_callback; else break_policy_func = always_insert_breakpoint; } gboolean mini_should_insert_breakpoint (MonoMethod *method) { switch (break_policy_func (method)) { case MONO_BREAK_POLICY_ALWAYS: return TRUE; case MONO_BREAK_POLICY_NEVER: return FALSE; case MONO_BREAK_POLICY_ON_DBG: g_warning ("mdb no longer supported"); return FALSE; default: g_warning ("Incorrect value returned from break policy callback"); return FALSE; } } // Custom handlers currently only implemented by Windows. #ifndef HOST_WIN32 gboolean mono_runtime_install_custom_handlers (const char *handlers) { return FALSE; } void mono_runtime_install_custom_handlers_usage (void) { fprintf (stdout, "Custom Handlers:\n" " --handlers=HANDLERS Enable handler support, HANDLERS is a comma\n" " separated list of available handlers to install.\n" "\n" "No handlers supported on current platform.\n"); } #endif /* HOST_WIN32 */ static void mini_invalidate_transformed_interp_methods (MonoAssemblyLoadContext *alc G_GNUC_UNUSED, uint32_t generation G_GNUC_UNUSED) { mini_get_interp_callbacks ()->invalidate_transformed (); } static void mini_interp_jit_info_foreach(InterpJitInfoFunc func, gpointer user_data) { mini_get_interp_callbacks ()->jit_info_foreach (func, user_data); } static gboolean mini_interp_sufficient_stack (gsize size) { return mini_get_interp_callbacks ()->sufficient_stack (size); } /* * mini_get_default_mem_manager: * * Return a memory manager which can be used for default allocation. * FIXME: Review all callers and change them to allocate from a * class/method/assembly specific memory manager. 
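* Illustrative use (hypothetical caller, not taken from this file): MonoMemoryManager *mm = mini_get_default_mem_manager (); gpointer p = mono_mem_manager_alloc0 (mm, 16);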
*/ MonoMemoryManager* mini_get_default_mem_manager (void) { return mono_mem_manager_get_ambient (); } gpointer mini_alloc_generic_virtual_trampoline (MonoVTable *vtable, int size) { static gboolean inited = FALSE; static int generic_virtual_trampolines_size = 0; if (!inited) { mono_counters_register ("Generic virtual trampoline bytes", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &generic_virtual_trampolines_size); inited = TRUE; } generic_virtual_trampolines_size += size; return mono_mem_manager_code_reserve (m_class_get_mem_manager (vtable->klass), size); } MonoException* mini_get_stack_overflow_ex (void) { return mono_get_root_domain ()->stack_overflow_ex; } const MonoEECallbacks* mini_get_interp_callbacks_api (void) { return mono_interp_callbacks_pointer; }
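/* Illustrative sketch (not part of the runtime source): an execution engine registers its callback table once at startup, after which mini_get_interp_callbacks_api () hands it back to callers. my_ee_cbs is a hypothetical table: static const MonoEECallbacks my_ee_cbs = { ... }; mini_install_interp_callbacks (&my_ee_cbs); g_assert (mini_get_interp_callbacks_api () == &my_ee_cbs); */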
/** * \file * Runtime code for the JIT * * Authors: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * * Copyright 2002-2003 Ximian, Inc. * Copyright 2003-2010 Novell, Inc. * Copyright 2011-2015 Xamarin, Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include <config.h> #ifdef HAVE_ALLOCA_H #include <alloca.h> #endif #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <math.h> #ifdef HAVE_SYS_TIME_H #include <sys/time.h> #endif #include <signal.h> #include <mono/utils/memcheck.h> #include <mono/metadata/assembly.h> #include <mono/metadata/assembly-internals.h> #include <mono/metadata/loader.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/class.h> #include <mono/metadata/object.h> #include <mono/metadata/tokentype.h> #include <mono/metadata/threads.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/domain-internals.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/mono-config.h> #include <mono/metadata/environment.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/gc-internals.h> #include <mono/metadata/threads-types.h> #include <mono/metadata/mempool-internals.h> #include <mono/metadata/runtime.h> #include <mono/metadata/reflection-internals.h> #include <mono/metadata/monitor.h> #include <mono/metadata/icall-internals.h> #include <mono/metadata/loader-internals.h> #define MONO_MATH_DECLARE_ALL 1 #include <mono/utils/mono-math.h> #include <mono/utils/mono-compiler.h> #include <mono/utils/mono-counters.h> #include <mono/utils/mono-error-internals.h> #include <mono/utils/mono-logger-internals.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-path.h> #include <mono/utils/mono-tls.h> #include <mono/utils/mono-hwcap.h> #include <mono/utils/dtrace.h> #include <mono/utils/mono-signal-handler.h> #include <mono/utils/mono-threads.h> #include <mono/utils/mono-threads-coop.h> #include <mono/utils/checked-build.h> #include <mono/utils/mono-proclib.h> #include <mono/utils/mono-time.h> #include <mono/metadata/w32handle.h> #include <mono/metadata/components.h> #include <mono/mini/debugger-agent-external.h> #include "mini.h" #include "seq-points.h" #include <string.h> #include <ctype.h> #include "trace.h" #include "aot-compiler.h" #include "aot-runtime.h" #include "llvmonly-runtime.h" #include "jit-icalls.h" #include "mini-gc.h" #include "mini-llvm.h" #include "llvm-runtime.h" #include "lldb.h" #include "mini-runtime.h" #include "interp/interp.h" #ifdef MONO_ARCH_LLVM_SUPPORTED #ifdef ENABLE_LLVM #include "mini-llvm-cpp.h" #include "llvm-jit.h" #endif #endif #include "mono/metadata/icall-signatures.h" #include "mono/utils/mono-tls-inline.h" static guint32 default_opt = 0; static gboolean default_opt_set = FALSE; MonoMethodDesc *mono_stats_method_desc; gboolean mono_compile_aot = FALSE; /* If this is set, no code is generated dynamically, everything is taken from AOT files */ gboolean mono_aot_only = FALSE; /* Same as mono_aot_only, but only LLVM compiled code is used, no trampolines */ gboolean mono_llvm_only = FALSE; /* By default, don't require AOT but attempt to probe */ MonoAotMode mono_aot_mode = MONO_AOT_MODE_NORMAL; MonoEEFeatures mono_ee_features; const char *mono_build_date; gboolean mono_do_signal_chaining; gboolean mono_do_crash_chaining; int mini_verbose = 0; /* * This flag controls whether
the runtime uses LLVM for JIT compilation, and whether * it can load AOT code compiled by LLVM. */ gboolean mono_use_llvm = FALSE; gboolean mono_use_fast_math = FALSE; // Lists of allowlisted and blocklisted CPU features MonoCPUFeatures mono_cpu_features_enabled = (MonoCPUFeatures)0; #ifdef DISABLE_SIMD MonoCPUFeatures mono_cpu_features_disabled = MONO_CPU_X86_FULL_SSEAVX_COMBINED; #else MonoCPUFeatures mono_cpu_features_disabled = (MonoCPUFeatures)0; #endif gboolean mono_use_interpreter = FALSE; const char *mono_interp_opts_string = NULL; #define mono_jit_lock() mono_os_mutex_lock (&jit_mutex) #define mono_jit_unlock() mono_os_mutex_unlock (&jit_mutex) static mono_mutex_t jit_mutex; static MonoCodeManager *global_codeman; MonoDebugOptions mini_debug_options; #ifdef VALGRIND_JIT_REGISTER_MAP int valgrind_register; #endif GList* mono_aot_paths; static GPtrArray *profile_options; static GSList *tramp_infos; GSList *mono_interp_only_classes; static void register_icalls (void); static void runtime_cleanup (MonoDomain *domain, gpointer user_data); static void mini_invalidate_transformed_interp_methods (MonoAssemblyLoadContext *alc, uint32_t generation); static void mini_interp_jit_info_foreach(InterpJitInfoFunc func, gpointer user_data); static gboolean mini_interp_sufficient_stack (gsize size); gboolean mono_running_on_valgrind (void) { #ifndef HOST_WIN32 if (RUNNING_ON_VALGRIND){ #ifdef VALGRIND_JIT_REGISTER_MAP valgrind_register = TRUE; #endif return TRUE; } else #endif return FALSE; } void mono_set_use_llvm (mono_bool use_llvm) { mono_use_llvm = (gboolean)use_llvm; } typedef struct { void *ip; MonoMethod *method; } FindTrampUserData; static void find_tramp (gpointer key, gpointer value, gpointer user_data) { FindTrampUserData *ud = (FindTrampUserData*)user_data; if (value == ud->ip) ud->method = (MonoMethod*)key; } static char* mono_get_method_from_ip_u (void *ip); /* debug function */ char* mono_get_method_from_ip (void *ip) { char *result; MONO_ENTER_GC_UNSAFE; result = mono_get_method_from_ip_u (ip); MONO_EXIT_GC_UNSAFE; return result; } /* debug function */ static char* mono_get_method_from_ip_u (void *ip) { MonoJitInfo *ji; MonoMethod *method; char *method_name; char *res; MonoDomain *domain = mono_domain_get (); MonoDebugSourceLocation *location; FindTrampUserData user_data; if (!domain) domain = mono_get_root_domain (); ji = mono_jit_info_table_find_internal (ip, TRUE, TRUE); if (!ji) { user_data.ip = ip; user_data.method = NULL; MonoJitMemoryManager *jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); g_hash_table_foreach (jit_mm->jit_trampoline_hash, find_tramp, &user_data); jit_mm_unlock (jit_mm); if (user_data.method) { char *mname = mono_method_full_name (user_data.method, TRUE); res = g_strdup_printf ("<%p - JIT trampoline for %s>", ip, mname); g_free (mname); return res; } else return NULL; } else if (ji->is_trampoline) { res = g_strdup_printf ("<%p - %s trampoline>", ip, ji->d.tramp_info->name); return res; } method = jinfo_get_method (ji); method_name = mono_method_get_name_full (method, TRUE, FALSE, MONO_TYPE_NAME_FORMAT_IL); location = mono_debug_lookup_source_location (method, (guint32)((guint8*)ip - (guint8*)ji->code_start), domain); char *file_loc = NULL; if (location) file_loc = g_strdup_printf ("[%s :: %du]", location->source_file, location->row); const char *in_interp = ji->is_interp ? " interp" : ""; res = g_strdup_printf (" %s [{%p} + 0x%x%s] %s (%p %p) [%p - %s]", method_name, method, (int)((char*)ip - (char*)ji->code_start), in_interp, file_loc ?
file_loc : "", ji->code_start, (char*)ji->code_start + ji->code_size, domain, domain->friendly_name); mono_debug_free_source_location (location); g_free (method_name); g_free (file_loc); return res; } /** * mono_pmip: * \param ip an instruction pointer address * * This method is used from a debugger to get the name of the * method at address \p ip. This routine is typically invoked from * a debugger like this: * * (gdb) print mono_pmip ($pc) * * \returns the name of the method at address \p ip. */ G_GNUC_UNUSED char * mono_pmip (void *ip) { return mono_get_method_from_ip (ip); } G_GNUC_UNUSED char * mono_pmip_u (void *ip) { return mono_get_method_from_ip_u (ip); } /** * mono_print_method_from_ip: * \param ip an instruction pointer address * * This method is used from a debugger to get the name of the * method at address \p ip. * * This prints the name of the method at address \p ip in the standard * output. Unlike \c mono_pmip which returns a string, this routine * prints the value on the standard output. */ MONO_ATTR_USED void mono_print_method_from_ip (void *ip) { MonoJitInfo *ji; char *method; MonoDebugSourceLocation *source; MonoDomain *domain = mono_domain_get (); MonoDomain *target_domain = mono_domain_get (); FindTrampUserData user_data; MonoGenericSharingContext *gsctx; const char *shared_type; if (!domain) domain = mono_get_root_domain (); ji = mini_jit_info_table_find_ext (ip, TRUE); if (ji && ji->is_trampoline) { MonoTrampInfo *tinfo = ji->d.tramp_info; printf ("IP %p is at offset 0x%x of trampoline '%s'.\n", ip, (int)((guint8*)ip - tinfo->code), tinfo->name); return; } if (!ji) { user_data.ip = ip; user_data.method = NULL; MonoJitMemoryManager *jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); g_hash_table_foreach (jit_mm->jit_trampoline_hash, find_tramp, &user_data); jit_mm_unlock (jit_mm); if (user_data.method) { char *mname = mono_method_full_name (user_data.method, TRUE); printf ("IP %p is a JIT trampoline for %s\n", ip, mname); g_free (mname); return; } g_print ("No method at %p\n", ip); fflush (stdout); return; } method = mono_method_full_name (jinfo_get_method (ji), TRUE); source = mono_debug_lookup_source_location (jinfo_get_method (ji), (guint32)((guint8*)ip - (guint8*)ji->code_start), target_domain); gsctx = mono_jit_info_get_generic_sharing_context (ji); shared_type = ""; if (gsctx) { if (gsctx->is_gsharedvt) shared_type = "gsharedvt "; else shared_type = "gshared "; } g_print ("IP %p at offset 0x%x of %smethod %s (%p %p)[domain %p - %s]\n", ip, (int)((char*)ip - (char*)ji->code_start), shared_type, method, ji->code_start, (char*)ji->code_start + ji->code_size, target_domain, target_domain->friendly_name); if (source) g_print ("%s:%d\n", source->source_file, source->row); fflush (stdout); mono_debug_free_source_location (source); g_free (method); } /* * mono_method_same_domain: * * Determine whether two compiled methods are in the same domain, so that * the address of the callee can be embedded in the caller. */ gboolean mono_method_same_domain (MonoJitInfo *caller, MonoJitInfo *callee) { if (!caller || caller->is_trampoline || !callee || callee->is_trampoline) return FALSE; return TRUE; } /* * mono_global_codeman_reserve: * * Allocate code memory from the global code manager.
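* Illustrative usage (hypothetical snippet; in aot-only mode the function aborts via g_error () instead): guint8 *code = (guint8 *) mono_global_codeman_reserve (64);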
*/ void *(mono_global_codeman_reserve) (int size) { void *ptr; if (mono_aot_only) g_error ("Attempting to allocate from the global code manager while running in aot-only mode.\n"); if (!global_codeman) { /* This can happen during startup */ if (!mono_compile_aot) global_codeman = mono_code_manager_new (); else global_codeman = mono_code_manager_new_aot (); return mono_code_manager_reserve (global_codeman, size); } else { mono_jit_lock (); ptr = mono_code_manager_reserve (global_codeman, size); mono_jit_unlock (); return ptr; } } /* The callback shouldn't take any locks */ void mono_global_codeman_foreach (MonoCodeManagerFunc func, void *user_data) { mono_jit_lock (); mono_code_manager_foreach (global_codeman, func, user_data); mono_jit_unlock (); } /** * mono_create_unwind_op: * * Create an unwind op with the given parameters. */ MonoUnwindOp* mono_create_unwind_op (int when, int tag, int reg, int val) { MonoUnwindOp *op = g_new0 (MonoUnwindOp, 1); op->op = tag; op->reg = reg; op->val = val; op->when = when; return op; } MonoJumpInfoToken * mono_jump_info_token_new2 (MonoMemPool *mp, MonoImage *image, guint32 token, MonoGenericContext *context) { MonoJumpInfoToken *res = (MonoJumpInfoToken *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoToken)); res->image = image; res->token = token; res->has_context = context != NULL; if (context) memcpy (&res->context, context, sizeof (MonoGenericContext)); return res; } MonoJumpInfoToken * mono_jump_info_token_new (MonoMemPool *mp, MonoImage *image, guint32 token) { return mono_jump_info_token_new2 (mp, image, token, NULL); } /* * mono_tramp_info_create: * * Create a MonoTrampInfo structure from the arguments. This function assumes ownership * of JI, and UNWIND_OPS. */ MonoTrampInfo* mono_tramp_info_create (const char *name, guint8 *code, guint32 code_size, MonoJumpInfo *ji, GSList *unwind_ops) { MonoTrampInfo *info = g_new0 (MonoTrampInfo, 1); info->name = g_strdup (name); info->code = code; info->code_size = code_size; info->ji = ji; info->unwind_ops = unwind_ops; return info; } void mono_tramp_info_free (MonoTrampInfo *info) { g_free (info->name); // FIXME: ji mono_free_unwind_info (info->unwind_ops); if (info->owns_uw_info) g_free (info->uw_info); g_free (info); } static void register_trampoline_jit_info (MonoMemoryManager *mem_manager, MonoTrampInfo *info) { MonoJitInfo *ji; ji = (MonoJitInfo *)mono_mem_manager_alloc0 (mem_manager, mono_jit_info_size ((MonoJitInfoFlags)0, 0, 0)); mono_jit_info_init (ji, NULL, (guint8*)MINI_FTNPTR_TO_ADDR (info->code), info->code_size, (MonoJitInfoFlags)0, 0, 0); ji->d.tramp_info = info; ji->is_trampoline = TRUE; ji->unwind_info = mono_cache_unwind_info (info->uw_info, info->uw_info_len); mono_jit_info_table_add (ji); } /* * mono_tramp_info_register: * * Remember INFO for use by xdebug, mono_print_method_from_ip (), jit maps, etc. * INFO can be NULL. * Frees INFO. */ static void mono_tramp_info_register_internal (MonoTrampInfo *info, MonoMemoryManager *mem_manager, gboolean aot) { MonoTrampInfo *copy; MonoDomain *domain = mono_get_root_domain (); if (!info) return; if (mem_manager) copy = mono_mem_manager_alloc0 (mem_manager, sizeof (MonoTrampInfo)); else copy = g_new0 (MonoTrampInfo, 1); copy->code = info->code; copy->code_size = info->code_size; copy->name = mem_manager ? 
mono_mem_manager_strdup (mem_manager, info->name) : g_strdup (info->name); copy->method = info->method; if (info->unwind_ops) { copy->uw_info = mono_unwind_ops_encode (info->unwind_ops, &copy->uw_info_len); copy->owns_uw_info = TRUE; if (mem_manager) { guint8 *temp = copy->uw_info; copy->uw_info = mono_mem_manager_alloc (mem_manager, copy->uw_info_len); memcpy (copy->uw_info, temp, copy->uw_info_len); g_free (temp); } } else { /* Trampolines from aot have the unwind ops already encoded */ copy->uw_info = info->uw_info; copy->uw_info_len = info->uw_info_len; } mono_lldb_save_trampoline_info (info); #ifdef MONO_ARCH_HAVE_UNWIND_TABLE if (!aot) mono_arch_unwindinfo_install_tramp_unwind_info (info->unwind_ops, info->code, info->code_size); #endif if (!domain) { /* If no domain has been created yet, postpone the registration. */ mono_jit_lock (); tramp_infos = g_slist_prepend (tramp_infos, copy); mono_jit_unlock (); } else if (copy->uw_info || info->method) { /* Only register trampolines that have unwind info */ register_trampoline_jit_info (mem_manager ? mem_manager : get_default_mem_manager (), copy); } if (mono_jit_map_is_enabled ()) mono_emit_jit_tramp (info->code, info->code_size, info->name); mono_tramp_info_free (info); } void mono_tramp_info_register (MonoTrampInfo *info, MonoMemoryManager *mem_manager) { mono_tramp_info_register_internal (info, mem_manager, FALSE); } void mono_aot_tramp_info_register (MonoTrampInfo *info, MonoMemoryManager *mem_manager) { mono_tramp_info_register_internal (info, mem_manager, TRUE); } /* Register trampolines created before the root domain was created in the jit info tables */ static void register_trampolines (MonoDomain *domain) { GSList *l; for (l = tramp_infos; l; l = l->next) { MonoTrampInfo *info = (MonoTrampInfo *)l->data; register_trampoline_jit_info (get_default_mem_manager (), info); } } G_GNUC_UNUSED static void break_count (void) { } /* * Runtime debugging tool, use if (debug_count ()) <x> else <y> to do <x> the first COUNT times, then do <y> afterwards. * Set a breakpoint in break_count () to break the last time <x> is done. 
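 *
 * Example (a sketch; new_path () and old_path () are hypothetical helpers):
 *
 *   if (mono_debug_count ())
 *       new_path ();
 *   else
 *       old_path ();
 *
 * With COUNT=N set in the environment, new_path () runs for the first N
 * hits and old_path () runs afterwards.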
*/ G_GNUC_UNUSED gboolean mono_debug_count (void) { static int count = 0, int_val = 0; static gboolean inited, has_value = FALSE; count ++; if (!inited) { char *value = g_getenv ("COUNT"); if (value) { int_val = atoi (value); g_free (value); has_value = TRUE; } inited = TRUE; } if (!has_value) return TRUE; if (count == int_val) break_count (); if (count > int_val) return FALSE; return TRUE; } MonoMethod* mono_icall_get_wrapper_method (MonoJitICallInfo* callinfo) { /* This icall is used to check for exceptions, so don't check in the wrapper */ gboolean check_exc = (callinfo != &mono_get_jit_icall_info ()->mono_thread_interruption_checkpoint); return mono_marshal_get_icall_wrapper (callinfo, check_exc); } gconstpointer mono_icall_get_wrapper_full (MonoJitICallInfo* callinfo, gboolean do_compile) { ERROR_DECL (error); MonoMethod *wrapper; gconstpointer addr, trampoline; if (callinfo->wrapper) return callinfo->wrapper; wrapper = mono_icall_get_wrapper_method (callinfo); if (do_compile) { addr = mono_compile_method_checked (wrapper, error); mono_error_assert_ok (error); mono_memory_barrier (); callinfo->wrapper = addr; return addr; } else { if (callinfo->trampoline) return callinfo->trampoline; trampoline = mono_create_jit_trampoline (wrapper, error); mono_error_assert_ok (error); trampoline = mono_create_ftnptr ((gpointer)trampoline); mono_loader_lock (); if (!callinfo->trampoline) { callinfo->trampoline = trampoline; } mono_loader_unlock (); return callinfo->trampoline; } } gconstpointer mono_icall_get_wrapper (MonoJitICallInfo* callinfo) { return mono_icall_get_wrapper_full (callinfo, FALSE); } static MonoJitDynamicMethodInfo* mono_dynamic_code_hash_lookup (MonoMethod *method) { MonoJitDynamicMethodInfo *res; MonoJitMemoryManager *jit_mm; jit_mm = jit_mm_for_method (method); jit_mm_lock (jit_mm); if (jit_mm->dynamic_code_hash) res = (MonoJitDynamicMethodInfo *)g_hash_table_lookup (jit_mm->dynamic_code_hash, method); else res = NULL; jit_mm_unlock (jit_mm); return res; } #ifdef __cplusplus template <typename T> static void register_opcode_emulation (int opcode, MonoJitICallInfo *jit_icall_info, const char *name, MonoMethodSignature *sig, T func, const char *symbol, gboolean no_wrapper) #else static void register_opcode_emulation (int opcode, MonoJitICallInfo *jit_icall_info, const char *name, MonoMethodSignature *sig, gpointer func, const char *symbol, gboolean no_wrapper) #endif { #ifndef DISABLE_JIT mini_register_opcode_emulation (opcode, jit_icall_info, name, sig, func, symbol, no_wrapper); #else // FIXME ifdef in mini_register_opcode_emulation and just call it. g_assert (!sig->hasthis); g_assert (sig->param_count < 3); mono_register_jit_icall_info (jit_icall_info, func, name, sig, no_wrapper, symbol); #endif } #define register_opcode_emulation(opcode, name, sig, func, no_wrapper) \ (register_opcode_emulation ((opcode), &mono_get_jit_icall_info ()->name, #name, (sig), func, #func, (no_wrapper))) /* * For JIT icalls implemented in C. * NAME should be the same as the name of the C function whose address is FUNC. * If @avoid_wrapper is TRUE, no wrapper is generated. This is for perf critical icalls which * can't throw exceptions. * * func is an identifier, that names a function, and is also in jit-icall-reg.h, * and therefore a field in mono_jit_icall_info and can be token pasted into an enum value. * * The name of func must be linkable for AOT, for example g_free does not work (monoeg_g_free instead), * nor does the C++ overload fmod (mono_fmod instead). 
These functions therefore
 * must be extern "C".
 */
#define register_icall(func, sig, avoid_wrapper) \
	(mono_register_jit_icall_info (&mono_get_jit_icall_info ()->func, func, #func, (sig), (avoid_wrapper), #func))

#define register_icall_no_wrapper(func, sig) register_icall (func, sig, TRUE)
#define register_icall_with_wrapper(func, sig) register_icall (func, sig, FALSE)

/*
 * Register an icall where FUNC is dynamically generated or otherwise not
 * possible to link to it using NAME during AOT.
 *
 * func is an expression, such as a local variable or a function call to get a function pointer.
 * name is an identifier.
 *
 * Providing func and name separately is what distinguishes "dyn" from regular.
 *
 * This also passes last parameter c_symbol=NULL since there is not a directly linkable symbol.
 */
#define register_dyn_icall(func, name, sig, save) \
	(mono_register_jit_icall_info (&mono_get_jit_icall_info ()->name, (func), #name, (sig), (save), NULL))

MonoLMF *
mono_get_lmf (void)
{
	MonoJitTlsData *jit_tls;

	if ((jit_tls = mono_tls_get_jit_tls ()))
		return jit_tls->lmf;
	/*
	 * We do not assert here because this function can be called from
	 * mini-gc.c on a thread that has not executed any managed code, yet
	 * (the thread object allocation can trigger a collection).
	 */
	return NULL;
}

void
mono_set_lmf (MonoLMF *lmf)
{
	(*mono_get_lmf_addr ()) = lmf;
}

static void
mono_set_jit_tls (MonoJitTlsData *jit_tls)
{
	MonoThreadInfo *info;

	mono_tls_set_jit_tls (jit_tls);

	/* Save it into MonoThreadInfo so it can be accessed by mono_thread_state_init_from_handle () */
	info = mono_thread_info_current ();
	if (info)
		mono_thread_info_tls_set (info, TLS_KEY_JIT_TLS, jit_tls);
}

static void
mono_set_lmf_addr (MonoLMF **lmf_addr)
{
	MonoThreadInfo *info;

	mono_tls_set_lmf_addr (lmf_addr);

	/* Save it into MonoThreadInfo so it can be accessed by mono_thread_state_init_from_handle () */
	info = mono_thread_info_current ();
	if (info)
		mono_thread_info_tls_set (info, TLS_KEY_LMF_ADDR, lmf_addr);
}

/*
 * mono_push_lmf:
 *
 * Push a MonoLMFExt frame on the LMF stack.
 */
void
mono_push_lmf (MonoLMFExt *ext)
{
	MonoLMF **lmf_addr;

	lmf_addr = mono_get_lmf_addr ();

	ext->lmf.previous_lmf = *lmf_addr;
	/* Mark that this is a MonoLMFExt */
	ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);

	mono_set_lmf ((MonoLMF*)ext);
}

/*
 * mono_pop_lmf:
 *
 * Pop the last frame from the LMF stack.
 */
void
mono_pop_lmf (MonoLMF *lmf)
{
	mono_set_lmf ((MonoLMF *)(((gssize)lmf->previous_lmf) & ~3));
}

/*
 * mono_jit_thread_attach:
 *
 * Called by Xamarin.Mac and other products. Attach thread to runtime if
 * needed and switch to @domain.
 *
 * This function is external-only and @deprecated; don't use it. Use mono_threads_attach_coop ().
 *
 * If the thread is newly attached, it is put into GC Safe mode.
 *
 * @return the original domain which needs to be restored, or NULL.
 */
MonoDomain*
mono_jit_thread_attach (MonoDomain *domain)
{
	gboolean attached;

	if (!domain) {
		/* Happens when called from AOTed code which is only used in the root domain. */
		domain = mono_get_root_domain ();
	}
	g_assert (domain);

	attached = mono_tls_get_jit_tls () != NULL;

	if (!attached) {
		// #678164
		gboolean background = TRUE;
		mono_thread_attach_external_native_thread (domain, background);

		/* mono_jit_thread_attach is external-only and not called by
		 * the runtime on any of our own threads.  So if we get here,
		 * the thread is running native code - leave it in GC Safe mode
		 * and leave it to the n2m invoke wrappers or MONO_API entry
		 * points to switch to GC Unsafe.
		 */
		MONO_STACKDATA (stackdata);
		mono_threads_enter_gc_safe_region_unbalanced_internal (&stackdata);
	}
	return NULL;
}

/*
 * mono_jit_set_domain:
 *
 * Set domain to @domain if @domain is not null.
 */
void
mono_jit_set_domain (MonoDomain *domain)
{
	g_assert (!mono_threads_is_blocking_transition_enabled ());

	if (domain)
		mono_domain_set_fast (domain);
}

/**
 * mono_thread_abort:
 * \param obj exception object
 * Abort the thread, print exception information and stack trace
 */
static void
mono_thread_abort (MonoObject *obj)
{
	/* MonoJitTlsData *jit_tls = mono_tls_get_jit_tls (); */

	/* handle_remove should be eventually called for this thread, too
	g_free (jit_tls);*/

	if ((obj->vtable->klass == mono_defaults.threadabortexception_class) ||
			((obj->vtable->klass) == mono_class_try_get_appdomain_unloaded_exception_class () &&
			mono_thread_info_current ()->runtime_thread)) {
		mono_thread_exit ();
	} else {
		mono_invoke_unhandled_exception_hook (obj);
	}
}

static MonoJitTlsData*
setup_jit_tls_data (gpointer stack_start, MonoAbortFunction abort_func)
{
	MonoJitTlsData *jit_tls;
	MonoLMF *lmf;

	jit_tls = mono_tls_get_jit_tls ();
	if (jit_tls)
		return jit_tls;

	jit_tls = g_new0 (MonoJitTlsData, 1);

	jit_tls->abort_func = abort_func;
	jit_tls->end_of_stack = stack_start;

	mono_set_jit_tls (jit_tls);

	lmf = g_new0 (MonoLMF, 1);
	MONO_ARCH_INIT_TOP_LMF_ENTRY (lmf);

	jit_tls->first_lmf = lmf;
	mono_set_lmf_addr (&jit_tls->lmf);
	jit_tls->lmf = lmf;

#ifdef MONO_ARCH_HAVE_TLS_INIT
	mono_arch_tls_init ();
#endif

	mono_setup_altstack (jit_tls);

	return jit_tls;
}

static void
free_jit_tls_data (MonoJitTlsData *jit_tls)
{
	//This happens during AOT because the thread is never attached
	if (!jit_tls)
		return;
	mono_free_altstack (jit_tls);

	if (jit_tls->interp_context)
		mini_get_interp_callbacks ()->free_context (jit_tls->interp_context);

	g_free (jit_tls->first_lmf);
	g_free (jit_tls);
}

static void
mono_thread_start_cb (intptr_t tid, gpointer stack_start, gpointer func)
{
	MonoThreadInfo *thread;
	MonoJitTlsData *jit_tls = setup_jit_tls_data (stack_start, mono_thread_abort);
	thread = mono_thread_info_current_unchecked ();
	if (thread)
		thread->jit_data = jit_tls;

	mono_arch_cpu_init ();
}

void (*mono_thread_attach_aborted_cb ) (MonoObject *obj) = NULL;

static void
mono_thread_abort_dummy (MonoObject *obj)
{
	if (mono_thread_attach_aborted_cb)
		mono_thread_attach_aborted_cb (obj);
	else
		mono_thread_abort (obj);
}

static void
mono_thread_attach_cb (intptr_t tid, gpointer stack_start)
{
	MonoThreadInfo *thread;
	MonoJitTlsData *jit_tls = setup_jit_tls_data (stack_start, mono_thread_abort_dummy);
	thread = mono_thread_info_current_unchecked ();
	if (thread)
		thread->jit_data = jit_tls;

	mono_arch_cpu_init ();
}

static void
mini_thread_cleanup (MonoNativeThreadId tid)
{
	MonoJitTlsData *jit_tls = NULL;
	MonoThreadInfo *info;

	info = mono_thread_info_current_unchecked ();

	/* We can't clean up tls information if we are on another thread; it would clean up the wrong stuff.
	 * It would be nice to issue a warning when this happens outside of the shutdown sequence, but it's
	 * not a trivial thing.
	 *
	 * The current offender is mono_thread_manage, which cleans up threads from the outside.
*/ if (info && mono_thread_info_get_tid (info) == tid) { jit_tls = info->jit_data; info->jit_data = NULL; mono_set_jit_tls (NULL); /* If we attach a thread but never call into managed land, we might never get an lmf.*/ if (mono_get_lmf ()) { mono_set_lmf (NULL); mono_set_lmf_addr (NULL); } } else { info = mono_thread_info_lookup (tid); if (info) { jit_tls = info->jit_data; info->jit_data = NULL; } mono_hazard_pointer_clear (mono_hazard_pointer_get (), 1); } if (jit_tls) free_jit_tls_data (jit_tls); } MonoJumpInfo * mono_patch_info_list_prepend (MonoJumpInfo *list, int ip, MonoJumpInfoType type, gconstpointer target) { MonoJumpInfo *ji = g_new0 (MonoJumpInfo, 1); ji->ip.i = ip; ji->type = type; ji->data.target = target; ji->next = list; return ji; } #if !defined(DISABLE_LOGGING) && !defined(DISABLE_JIT) static const char* const patch_info_str[] = { #define PATCH_INFO(a,b) "" #a, #include "patch-info.h" #undef PATCH_INFO }; const char* mono_ji_type_to_string (MonoJumpInfoType type) { return patch_info_str [type]; } void mono_print_ji (const MonoJumpInfo *ji) { const char *type = patch_info_str [ji->type]; switch (ji->type) { case MONO_PATCH_INFO_RGCTX_FETCH: case MONO_PATCH_INFO_RGCTX_SLOT_INDEX: { MonoJumpInfoRgctxEntry *entry = ji->data.rgctx_entry; printf ("[%s ", type); mono_print_ji (entry->data); printf (" -> %s]", mono_rgctx_info_type_to_str (entry->info_type)); break; } case MONO_PATCH_INFO_METHOD: case MONO_PATCH_INFO_METHODCONST: case MONO_PATCH_INFO_METHOD_FTNDESC: case MONO_PATCH_INFO_LLVMONLY_INTERP_ENTRY: { char *s = mono_method_get_full_name (ji->data.method); printf ("[%s %s]", type, s); g_free (s); break; } case MONO_PATCH_INFO_JIT_ICALL_ID: printf ("[JIT_ICALL %s]", mono_find_jit_icall_info (ji->data.jit_icall_id)->name); break; case MONO_PATCH_INFO_CLASS: case MONO_PATCH_INFO_VTABLE: { char *name = mono_class_full_name (ji->data.klass); printf ("[%s %s]", type, name); g_free (name); break; } default: printf ("[%s]", type); break; } } #else const char* mono_ji_type_to_string (MonoJumpInfoType type) { return ""; } void mono_print_ji (const MonoJumpInfo *ji) { } #endif /** * mono_patch_info_dup_mp: * * Make a copy of PATCH_INFO, allocating memory from the mempool MP. 
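 *
 * Usage sketch (MP and PATCH_INFO stand for a caller's mempool and patch):
 *
 *   MonoJumpInfo *copy = mono_patch_info_dup_mp (mp, patch_info);
 *
 * Type-specific payloads (tokens, switch tables, rgctx entries, ...) are
 * deep-copied into MP as well, so the copy stays valid for MP's lifetime.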
*/ MonoJumpInfo* mono_patch_info_dup_mp (MonoMemPool *mp, MonoJumpInfo *patch_info) { MonoJumpInfo *res = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo)); memcpy (res, patch_info, sizeof (MonoJumpInfo)); switch (patch_info->type) { case MONO_PATCH_INFO_RVA: case MONO_PATCH_INFO_LDSTR: case MONO_PATCH_INFO_TYPE_FROM_HANDLE: case MONO_PATCH_INFO_LDTOKEN: case MONO_PATCH_INFO_DECLSEC: res->data.token = (MonoJumpInfoToken *)mono_mempool_alloc (mp, sizeof (MonoJumpInfoToken)); memcpy (res->data.token, patch_info->data.token, sizeof (MonoJumpInfoToken)); break; case MONO_PATCH_INFO_SWITCH: res->data.table = (MonoJumpInfoBBTable *)mono_mempool_alloc (mp, sizeof (MonoJumpInfoBBTable)); memcpy (res->data.table, patch_info->data.table, sizeof (MonoJumpInfoBBTable)); res->data.table->table = (MonoBasicBlock **)mono_mempool_alloc (mp, sizeof (MonoBasicBlock*) * patch_info->data.table->table_size); memcpy (res->data.table->table, patch_info->data.table->table, sizeof (MonoBasicBlock*) * patch_info->data.table->table_size); break; case MONO_PATCH_INFO_RGCTX_FETCH: case MONO_PATCH_INFO_RGCTX_SLOT_INDEX: res->data.rgctx_entry = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc (mp, sizeof (MonoJumpInfoRgctxEntry)); memcpy (res->data.rgctx_entry, patch_info->data.rgctx_entry, sizeof (MonoJumpInfoRgctxEntry)); res->data.rgctx_entry->data = mono_patch_info_dup_mp (mp, res->data.rgctx_entry->data); break; case MONO_PATCH_INFO_DELEGATE_TRAMPOLINE: res->data.del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (mp, sizeof (MonoDelegateClassMethodPair)); memcpy (res->data.del_tramp, patch_info->data.del_tramp, sizeof (MonoDelegateClassMethodPair)); break; case MONO_PATCH_INFO_GSHAREDVT_CALL: res->data.gsharedvt = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc (mp, sizeof (MonoJumpInfoGSharedVtCall)); memcpy (res->data.gsharedvt, patch_info->data.gsharedvt, sizeof (MonoJumpInfoGSharedVtCall)); break; case MONO_PATCH_INFO_GSHAREDVT_METHOD: { MonoGSharedVtMethodInfo *info; MonoGSharedVtMethodInfo *oinfo; int i; oinfo = patch_info->data.gsharedvt_method; info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc (mp, sizeof (MonoGSharedVtMethodInfo)); res->data.gsharedvt_method = info; memcpy (info, oinfo, sizeof (MonoGSharedVtMethodInfo)); info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc (mp, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries); for (i = 0; i < oinfo->num_entries; ++i) { MonoRuntimeGenericContextInfoTemplate *otemplate = &oinfo->entries [i]; MonoRuntimeGenericContextInfoTemplate *template_ = &info->entries [i]; memcpy (template_, otemplate, sizeof (MonoRuntimeGenericContextInfoTemplate)); } //info->locals_types = mono_mempool_alloc0 (mp, info->nlocals * sizeof (MonoType*)); //memcpy (info->locals_types, oinfo->locals_types, info->nlocals * sizeof (MonoType*)); break; } case MONO_PATCH_INFO_VIRT_METHOD: { MonoJumpInfoVirtMethod *info; MonoJumpInfoVirtMethod *oinfo; oinfo = patch_info->data.virt_method; info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoVirtMethod)); res->data.virt_method = info; memcpy (info, oinfo, sizeof (MonoJumpInfoVirtMethod)); break; } default: break; } return res; } guint mono_patch_info_hash (gconstpointer data) { const MonoJumpInfo *ji = (MonoJumpInfo*)data; const MonoJumpInfoType type = ji->type; guint hash = type << 8; switch (type) { case MONO_PATCH_INFO_RVA: case MONO_PATCH_INFO_LDSTR: case MONO_PATCH_INFO_LDTOKEN: case MONO_PATCH_INFO_DECLSEC: return hash | ji->data.token->token; case 
	MONO_PATCH_INFO_TYPE_FROM_HANDLE:
		return hash | ji->data.token->token | (ji->data.token->has_context ? (gsize)ji->data.token->context.class_inst : 0);
	case MONO_PATCH_INFO_OBJC_SELECTOR_REF: // Hash on the selector name
	case MONO_PATCH_INFO_LDSTR_LIT:
		return g_str_hash (ji->data.name);
	case MONO_PATCH_INFO_VTABLE:
	case MONO_PATCH_INFO_CLASS:
	case MONO_PATCH_INFO_IID:
	case MONO_PATCH_INFO_ADJUSTED_IID:
	case MONO_PATCH_INFO_METHODCONST:
	case MONO_PATCH_INFO_METHOD:
	case MONO_PATCH_INFO_METHOD_JUMP:
	case MONO_PATCH_INFO_METHOD_FTNDESC:
	case MONO_PATCH_INFO_LLVMONLY_INTERP_ENTRY:
	case MONO_PATCH_INFO_IMAGE:
	case MONO_PATCH_INFO_ICALL_ADDR:
	case MONO_PATCH_INFO_ICALL_ADDR_CALL:
	case MONO_PATCH_INFO_FIELD:
	case MONO_PATCH_INFO_SFLDA:
	case MONO_PATCH_INFO_SEQ_POINT_INFO:
	case MONO_PATCH_INFO_METHOD_RGCTX:
	case MONO_PATCH_INFO_SIGNATURE:
	case MONO_PATCH_INFO_METHOD_CODE_SLOT:
	case MONO_PATCH_INFO_AOT_JIT_INFO:
	case MONO_PATCH_INFO_METHOD_PINVOKE_ADDR_CACHE:
		return hash | (gssize)ji->data.target;
	case MONO_PATCH_INFO_GSHAREDVT_CALL:
		return hash | (gssize)ji->data.gsharedvt->method;
	case MONO_PATCH_INFO_RGCTX_FETCH:
	case MONO_PATCH_INFO_RGCTX_SLOT_INDEX: {
		MonoJumpInfoRgctxEntry *e = ji->data.rgctx_entry;

		hash |= e->in_mrgctx | e->info_type | mono_patch_info_hash (e->data);
		if (e->in_mrgctx)
			return hash | (gssize)e->d.method;
		else
			return hash | (gssize)e->d.klass;
	}
	case MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG:
	case MONO_PATCH_INFO_MSCORLIB_GOT_ADDR:
	case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR:
	case MONO_PATCH_INFO_GC_NURSERY_START:
	case MONO_PATCH_INFO_GC_NURSERY_BITS:
	case MONO_PATCH_INFO_GOT_OFFSET:
	case MONO_PATCH_INFO_GC_SAFE_POINT_FLAG:
	case MONO_PATCH_INFO_AOT_MODULE:
	case MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT:
	case MONO_PATCH_INFO_PROFILER_CLAUSE_COUNT:
	case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINES:
	case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINES_GOT_SLOTS_BASE:
		return hash;
	case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR:
		return hash | ji->data.uindex;
	case MONO_PATCH_INFO_JIT_ICALL_ID:
	case MONO_PATCH_INFO_JIT_ICALL_ADDR:
	case MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL:
	case MONO_PATCH_INFO_CASTCLASS_CACHE:
		return hash | ji->data.index;
	case MONO_PATCH_INFO_SWITCH:
		return hash | ji->data.table->table_size;
	case MONO_PATCH_INFO_GSHAREDVT_METHOD:
		return hash | (gssize)ji->data.gsharedvt_method->method;
	case MONO_PATCH_INFO_DELEGATE_TRAMPOLINE:
		return hash | (gsize)ji->data.del_tramp->klass | (gsize)ji->data.del_tramp->method | (gsize)ji->data.del_tramp->is_virtual;
	case MONO_PATCH_INFO_VIRT_METHOD: {
		MonoJumpInfoVirtMethod *info = ji->data.virt_method;

		return hash | (gssize)info->klass | (gssize)info->method;
	}
	case MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER:
		return hash | mono_signature_hash (ji->data.sig);
	case MONO_PATCH_INFO_R8_GOT:
		return hash | (guint32)*(double*)ji->data.target;
	case MONO_PATCH_INFO_R4_GOT:
		return hash | (guint32)*(float*)ji->data.target;
	default:
		printf ("info type: %d\n", ji->type);
		mono_print_ji (ji); printf ("\n");
		g_assert_not_reached ();
	case MONO_PATCH_INFO_NONE:
		return 0;
	}
}

/*
 * mono_patch_info_equal:
 *
 * This might fail to recognize equivalent patches, i.e. floats, so it's only
 * usable in those cases where this is not a problem, i.e. sharing GOT slots
 * in AOT.
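 *
 * It is typically paired with mono_patch_info_hash () as a GHashTable
 * equality function, e.g. (sketch):
 *
 *   GHashTable *cache = g_hash_table_new (mono_patch_info_hash, mono_patch_info_equal);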
*/ gint mono_patch_info_equal (gconstpointer ka, gconstpointer kb) { const MonoJumpInfo *ji1 = (MonoJumpInfo*)ka; const MonoJumpInfo *ji2 = (MonoJumpInfo*)kb; MonoJumpInfoType const ji1_type = ji1->type; MonoJumpInfoType const ji2_type = ji2->type; if (ji1_type != ji2_type) return 0; switch (ji1_type) { case MONO_PATCH_INFO_RVA: case MONO_PATCH_INFO_LDSTR: case MONO_PATCH_INFO_TYPE_FROM_HANDLE: case MONO_PATCH_INFO_LDTOKEN: case MONO_PATCH_INFO_DECLSEC: return ji1->data.token->image == ji2->data.token->image && ji1->data.token->token == ji2->data.token->token && ji1->data.token->has_context == ji2->data.token->has_context && ji1->data.token->context.class_inst == ji2->data.token->context.class_inst && ji1->data.token->context.method_inst == ji2->data.token->context.method_inst; case MONO_PATCH_INFO_OBJC_SELECTOR_REF: case MONO_PATCH_INFO_LDSTR_LIT: return g_str_equal (ji1->data.name, ji2->data.name); case MONO_PATCH_INFO_RGCTX_FETCH: case MONO_PATCH_INFO_RGCTX_SLOT_INDEX: { MonoJumpInfoRgctxEntry *e1 = ji1->data.rgctx_entry; MonoJumpInfoRgctxEntry *e2 = ji2->data.rgctx_entry; return e1->d.method == e2->d.method && e1->d.klass == e2->d.klass && e1->in_mrgctx == e2->in_mrgctx && e1->info_type == e2->info_type && mono_patch_info_equal (e1->data, e2->data); } case MONO_PATCH_INFO_GSHAREDVT_CALL: { MonoJumpInfoGSharedVtCall *c1 = ji1->data.gsharedvt; MonoJumpInfoGSharedVtCall *c2 = ji2->data.gsharedvt; return c1->sig == c2->sig && c1->method == c2->method; } case MONO_PATCH_INFO_GSHAREDVT_METHOD: return ji1->data.gsharedvt_method->method == ji2->data.gsharedvt_method->method; case MONO_PATCH_INFO_DELEGATE_TRAMPOLINE: return ji1->data.del_tramp->klass == ji2->data.del_tramp->klass && ji1->data.del_tramp->method == ji2->data.del_tramp->method && ji1->data.del_tramp->is_virtual == ji2->data.del_tramp->is_virtual; case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR: return ji1->data.uindex == ji2->data.uindex; case MONO_PATCH_INFO_CASTCLASS_CACHE: return ji1->data.index == ji2->data.index; case MONO_PATCH_INFO_JIT_ICALL_ID: case MONO_PATCH_INFO_JIT_ICALL_ADDR: case MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL: return ji1->data.jit_icall_id == ji2->data.jit_icall_id; case MONO_PATCH_INFO_VIRT_METHOD: return ji1->data.virt_method->klass == ji2->data.virt_method->klass && ji1->data.virt_method->method == ji2->data.virt_method->method; case MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER: return mono_metadata_signature_equal (ji1->data.sig, ji2->data.sig); case MONO_PATCH_INFO_GC_SAFE_POINT_FLAG: case MONO_PATCH_INFO_NONE: return 1; default: break; } return ji1->data.target == ji2->data.target; } gpointer mono_resolve_patch_target_ext (MonoMemoryManager *mem_manager, MonoMethod *method, guint8 *code, MonoJumpInfo *patch_info, gboolean run_cctors, MonoError *error) { unsigned char *ip = patch_info->ip.i + code; gconstpointer target = NULL; error_init (error); switch (patch_info->type) { case MONO_PATCH_INFO_BB: /* * FIXME: This could be hit for methods without a prolog. Should use -1 * but too much code depends on a 0 initial value. 
*/ //g_assert (patch_info->data.bb->native_offset); target = patch_info->data.bb->native_offset + code; break; case MONO_PATCH_INFO_ABS: target = patch_info->data.target; break; case MONO_PATCH_INFO_LABEL: target = patch_info->data.inst->inst_c0 + code; break; case MONO_PATCH_INFO_IP: target = ip; break; case MONO_PATCH_INFO_JIT_ICALL_ID: { MonoJitICallInfo * const mi = mono_find_jit_icall_info (patch_info->data.jit_icall_id); target = mono_icall_get_wrapper (mi); break; } case MONO_PATCH_INFO_JIT_ICALL_ADDR: case MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL: { MonoJitICallInfo * const mi = mono_find_jit_icall_info (patch_info->data.jit_icall_id); target = mi->func; break; } case MONO_PATCH_INFO_METHOD_JUMP: target = mono_create_jump_trampoline (patch_info->data.method, FALSE, error); if (!is_ok (error)) return NULL; break; case MONO_PATCH_INFO_METHOD: if (patch_info->data.method == method) { target = code; } else { /* get the trampoline to the method from the domain */ target = mono_create_jit_trampoline (patch_info->data.method, error); if (!is_ok (error)) return NULL; } break; case MONO_PATCH_INFO_METHOD_FTNDESC: { /* * Return an ftndesc for either AOTed code, or for an interp entry. */ target = mini_llvmonly_load_method_ftndesc (patch_info->data.method, FALSE, FALSE, error); return_val_if_nok (error, NULL); break; } case MONO_PATCH_INFO_LLVMONLY_INTERP_ENTRY: { target = mini_get_interp_callbacks ()->create_method_pointer_llvmonly (patch_info->data.method, FALSE, error); mono_error_assert_ok (error); break; } case MONO_PATCH_INFO_METHOD_CODE_SLOT: { gpointer code_slot; MonoJitMemoryManager *jit_mm = jit_mm_for_method (patch_info->data.method); jit_mm_lock (jit_mm); if (!jit_mm->method_code_hash) jit_mm->method_code_hash = g_hash_table_new (NULL, NULL); code_slot = g_hash_table_lookup (jit_mm->method_code_hash, patch_info->data.method); if (!code_slot) { code_slot = mono_mem_manager_alloc0 (jit_mm->mem_manager, sizeof (gpointer)); g_hash_table_insert (jit_mm->method_code_hash, patch_info->data.method, code_slot); } jit_mm_unlock (jit_mm); target = code_slot; break; } case MONO_PATCH_INFO_METHOD_PINVOKE_ADDR_CACHE: { target = mono_mem_manager_alloc0 (mem_manager, sizeof (gpointer)); break; } case MONO_PATCH_INFO_GC_SAFE_POINT_FLAG: target = (gpointer)&mono_polling_required; break; case MONO_PATCH_INFO_SWITCH: { #ifndef MONO_ARCH_NO_CODEMAN gpointer *jump_table; int i; if (method && method->dynamic) { jump_table = (void **)mono_code_manager_reserve (mono_dynamic_code_hash_lookup (method)->code_mp, sizeof (gpointer) * patch_info->data.table->table_size); } else { MonoMemoryManager *method_mem_manager = method ? 
m_method_get_mem_manager (method) : mem_manager; if (mono_aot_only) { jump_table = (void **)mono_mem_manager_alloc (method_mem_manager, sizeof (gpointer) * patch_info->data.table->table_size); } else { jump_table = (void **)mono_mem_manager_code_reserve (method_mem_manager, sizeof (gpointer) * patch_info->data.table->table_size); } } mono_codeman_enable_write (); for (i = 0; i < patch_info->data.table->table_size; i++) { jump_table [i] = code + GPOINTER_TO_INT (patch_info->data.table->table [i]); } mono_codeman_disable_write (); target = jump_table; #else g_assert_not_reached (); target = NULL; #endif break; } case MONO_PATCH_INFO_METHODCONST: case MONO_PATCH_INFO_CLASS: case MONO_PATCH_INFO_IMAGE: case MONO_PATCH_INFO_FIELD: case MONO_PATCH_INFO_SIGNATURE: case MONO_PATCH_INFO_AOT_MODULE: target = patch_info->data.target; break; case MONO_PATCH_INFO_IID: mono_class_init_internal (patch_info->data.klass); target = GUINT_TO_POINTER (m_class_get_interface_id (patch_info->data.klass)); break; case MONO_PATCH_INFO_ADJUSTED_IID: mono_class_init_internal (patch_info->data.klass); target = GUINT_TO_POINTER ((guint32)(-((m_class_get_interface_id (patch_info->data.klass) + 1) * TARGET_SIZEOF_VOID_P))); break; case MONO_PATCH_INFO_VTABLE: target = mono_class_vtable_checked (patch_info->data.klass, error); mono_error_assert_ok (error); break; case MONO_PATCH_INFO_DELEGATE_TRAMPOLINE: { MonoDelegateClassMethodPair *del_tramp = patch_info->data.del_tramp; if (del_tramp->is_virtual) target = mono_create_delegate_virtual_trampoline (del_tramp->klass, del_tramp->method); else target = mono_create_delegate_trampoline_info (del_tramp->klass, del_tramp->method); break; } case MONO_PATCH_INFO_SFLDA: { MonoVTable *vtable = mono_class_vtable_checked (m_field_get_parent (patch_info->data.field), error); mono_error_assert_ok (error); if (mono_class_field_is_special_static (patch_info->data.field)) { gpointer addr = mono_special_static_field_get_offset (patch_info->data.field, error); mono_error_assert_ok (error); g_assert (addr); return addr; } if (!vtable->initialized && !mono_class_is_before_field_init (vtable->klass) && (!method || mono_class_needs_cctor_run (vtable->klass, method))) /* Done by the generated code */ ; else { if (run_cctors) { if (!mono_runtime_class_init_full (vtable, error)) { return NULL; } } } target = mono_static_field_get_addr (vtable, patch_info->data.field); break; } case MONO_PATCH_INFO_RVA: { guint32 field_index = mono_metadata_token_index (patch_info->data.token->token); guint32 rva; mono_metadata_field_info (patch_info->data.token->image, field_index - 1, NULL, &rva, NULL); target = mono_image_rva_map (patch_info->data.token->image, rva); break; } case MONO_PATCH_INFO_R4: case MONO_PATCH_INFO_R4_GOT: case MONO_PATCH_INFO_R8: case MONO_PATCH_INFO_R8_GOT: target = patch_info->data.target; break; case MONO_PATCH_INFO_EXC_NAME: target = patch_info->data.name; break; case MONO_PATCH_INFO_LDSTR: target = mono_ldstr_checked (patch_info->data.token->image, mono_metadata_token_index (patch_info->data.token->token), error); break; case MONO_PATCH_INFO_TYPE_FROM_HANDLE: { gpointer handle; MonoClass *handle_class; handle = mono_ldtoken_checked (patch_info->data.token->image, patch_info->data.token->token, &handle_class, patch_info->data.token->has_context ? 
&patch_info->data.token->context : NULL, error); if (!is_ok (error)) return NULL; mono_class_init_internal (handle_class); mono_class_init_internal (mono_class_from_mono_type_internal ((MonoType *)handle)); target = mono_type_get_object_checked ((MonoType *)handle, error); if (!is_ok (error)) return NULL; break; } case MONO_PATCH_INFO_LDTOKEN: { gpointer handle; MonoClass *handle_class; handle = mono_ldtoken_checked (patch_info->data.token->image, patch_info->data.token->token, &handle_class, patch_info->data.token->has_context ? &patch_info->data.token->context : NULL, error); mono_error_assert_msg_ok (error, "Could not patch ldtoken"); mono_class_init_internal (handle_class); target = handle; break; } case MONO_PATCH_INFO_DECLSEC: target = (mono_metadata_blob_heap (patch_info->data.token->image, patch_info->data.token->token) + 2); break; case MONO_PATCH_INFO_ICALL_ADDR: case MONO_PATCH_INFO_ICALL_ADDR_CALL: /* run_cctors == 0 -> AOT */ if (patch_info->data.method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) { if (run_cctors) { target = mono_lookup_pinvoke_call_internal (patch_info->data.method, error); if (!target) { if (mono_aot_only) return NULL; g_error ("Unable to resolve pinvoke method '%s' Re-run with MONO_LOG_LEVEL=debug for more information.\n", mono_method_full_name (patch_info->data.method, TRUE)); } } else { target = NULL; } } else { target = mono_lookup_internal_call (patch_info->data.method); if (mono_is_missing_icall_addr (target) && run_cctors) g_error ("Unregistered icall '%s'\n", mono_method_full_name (patch_info->data.method, TRUE)); } break; case MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG: target = &mono_thread_interruption_request_flag; break; case MONO_PATCH_INFO_METHOD_RGCTX: target = mini_method_get_rgctx (patch_info->data.method); break; case MONO_PATCH_INFO_RGCTX_SLOT_INDEX: { int slot = mini_get_rgctx_entry_slot (patch_info->data.rgctx_entry); target = GINT_TO_POINTER (MONO_RGCTX_SLOT_INDEX (slot)); break; } case MONO_PATCH_INFO_BB_OVF: case MONO_PATCH_INFO_EXC_OVF: case MONO_PATCH_INFO_GOT_OFFSET: case MONO_PATCH_INFO_NONE: break; case MONO_PATCH_INFO_RGCTX_FETCH: { int slot = mini_get_rgctx_entry_slot (patch_info->data.rgctx_entry); target = mono_create_rgctx_lazy_fetch_trampoline (slot); break; } #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED case MONO_PATCH_INFO_SEQ_POINT_INFO: if (!run_cctors) /* AOT, not needed */ target = NULL; else target = mono_arch_get_seq_point_info (code); break; #endif case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR: { int card_table_shift_bits; gpointer card_table_mask; target = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask); break; } case MONO_PATCH_INFO_GC_NURSERY_START: { int shift_bits; size_t size; target = mono_gc_get_nursery (&shift_bits, &size); break; } case MONO_PATCH_INFO_GC_NURSERY_BITS: { int shift_bits; size_t size; mono_gc_get_nursery (&shift_bits, &size); target = (gpointer)(gssize)shift_bits; break; } case MONO_PATCH_INFO_CASTCLASS_CACHE: { target = mono_mem_manager_alloc0 (mem_manager, sizeof (gpointer)); break; } case MONO_PATCH_INFO_OBJC_SELECTOR_REF: { target = NULL; break; } case MONO_PATCH_INFO_LDSTR_LIT: { int len; char *s; len = strlen ((const char *)patch_info->data.target); s = (char *)mono_mem_manager_alloc0 (mem_manager, len + 1); memcpy (s, patch_info->data.target, len); target = s; break; } case MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER: target = mini_get_gsharedvt_wrapper (TRUE, NULL, patch_info->data.sig, NULL, -1, FALSE); break; case MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT: { target = (gpointer) 
&mono_profiler_state.gc_allocation_count; break; } case MONO_PATCH_INFO_PROFILER_CLAUSE_COUNT: { target = (gpointer) &mono_profiler_state.exception_clause_count; break; } case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINES: case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINES_GOT_SLOTS_BASE: { /* Resolved in aot-runtime.c */ g_assert_not_reached (); target = NULL; break; } default: g_assert_not_reached (); } return (gpointer)target; } gpointer mono_resolve_patch_target (MonoMethod *method, guint8 *code, MonoJumpInfo *patch_info, gboolean run_cctors, MonoError *error) { return mono_resolve_patch_target_ext (get_default_mem_manager (), method, code, patch_info, run_cctors, error); } /* * mini_register_jump_site: * * Register IP as a jump/tailcall site which calls METHOD. * This is needed because common_call_trampoline () cannot patch * the call site because the caller ip is not available for jumps. */ void mini_register_jump_site (MonoMethod *method, gpointer ip) { MonoJumpList *jlist; MonoJitMemoryManager *jit_mm; MonoMethod *shared_method = mini_method_to_shared (method); method = shared_method ? shared_method : method; jit_mm = jit_mm_for_method (method); jit_mm_lock (jit_mm); jlist = (MonoJumpList *)g_hash_table_lookup (jit_mm->jump_target_hash, method); if (!jlist) { jlist = (MonoJumpList *)mono_mem_manager_alloc0 (jit_mm->mem_manager, sizeof (MonoJumpList)); g_hash_table_insert (jit_mm->jump_target_hash, method, jlist); } jlist->list = g_slist_prepend (jlist->list, ip); jit_mm_unlock (jit_mm); } /* * mini_patch_jump_sites: * * Patch jump/tailcall sites calling METHOD so the jump to ADDR. */ void mini_patch_jump_sites (MonoMethod *method, gpointer addr) { MonoJitMemoryManager *jit_mm; MonoJumpInfo patch_info; MonoJumpList *jlist; GSList *tmp; /* The caller/callee might use different instantiations */ MonoMethod *shared_method = mini_method_to_shared (method); method = shared_method ? shared_method : method; jit_mm = jit_mm_for_method (method); jit_mm_lock (jit_mm); jlist = (MonoJumpList *)g_hash_table_lookup (jit_mm->jump_target_hash, method); if (jlist) g_hash_table_remove (jit_mm->jump_target_hash, method); jit_mm_unlock (jit_mm); if (jlist) { patch_info.next = NULL; patch_info.ip.i = 0; patch_info.type = MONO_PATCH_INFO_METHOD_JUMP; patch_info.data.method = method; mono_codeman_enable_write (); for (tmp = jlist->list; tmp; tmp = tmp->next) mono_arch_patch_code_new (NULL, (guint8 *)tmp->data, &patch_info, addr); mono_codeman_disable_write (); } } /* * mini_patch_llvm_jit_callees: * * Patch function address slots used by llvm JITed code. 
*/ void mini_patch_llvm_jit_callees (MonoMethod *method, gpointer addr) { MonoJitMemoryManager *jit_mm; // FIXME: jit_mm = get_default_jit_mm (); if (!jit_mm->llvm_jit_callees) return; jit_mm_lock (jit_mm); GSList *callees = (GSList*)g_hash_table_lookup (jit_mm->llvm_jit_callees, method); GSList *l; for (l = callees; l; l = l->next) { gpointer *slot = (gpointer*)l->data; *slot = addr; } jit_mm_unlock (jit_mm); } void mini_init_gsctx (MonoMemPool *mp, MonoGenericContext *context, MonoGenericSharingContext *gsctx) { MonoGenericInst *inst; int i; memset (gsctx, 0, sizeof (MonoGenericSharingContext)); if (context && context->class_inst) { inst = context->class_inst; for (i = 0; i < inst->type_argc; ++i) { MonoType *type = inst->type_argv [i]; if (mini_is_gsharedvt_gparam (type)) gsctx->is_gsharedvt = TRUE; } } if (context && context->method_inst) { inst = context->method_inst; for (i = 0; i < inst->type_argc; ++i) { MonoType *type = inst->type_argv [i]; if (mini_is_gsharedvt_gparam (type)) gsctx->is_gsharedvt = TRUE; } } } /* * LOCKING: Acquires the jit code hash lock. */ MonoJitInfo* mini_lookup_method (MonoMethod *method, MonoMethod *shared) { MonoJitInfo *ji; MonoJitMemoryManager *jit_mm = jit_mm_for_method (method); static gboolean inited = FALSE; static int lookups = 0; static int failed_lookups = 0; jit_code_hash_lock (jit_mm); ji = (MonoJitInfo *)mono_internal_hash_table_lookup (&jit_mm->jit_code_hash, method); jit_code_hash_unlock (jit_mm); if (!ji && shared) { jit_mm = jit_mm_for_method (shared); jit_code_hash_lock (jit_mm); /* Try generic sharing */ ji = (MonoJitInfo *)mono_internal_hash_table_lookup (&jit_mm->jit_code_hash, shared); if (ji && !ji->has_generic_jit_info) ji = NULL; if (!inited) { mono_counters_register ("Shared generic lookups", MONO_COUNTER_INT|MONO_COUNTER_GENERICS, &lookups); mono_counters_register ("Failed shared generic lookups", MONO_COUNTER_INT|MONO_COUNTER_GENERICS, &failed_lookups); inited = TRUE; } ++lookups; if (!ji) ++failed_lookups; jit_code_hash_unlock (jit_mm); } return ji; } static MonoJitInfo* lookup_method (MonoMethod *method) { ERROR_DECL (error); MonoJitInfo *ji; MonoMethod *shared; ji = mini_lookup_method (method, NULL); if (!ji) { if (!mono_method_is_generic_sharable (method, FALSE)) return NULL; shared = mini_get_shared_method_full (method, SHARE_MODE_NONE, error); mono_error_assert_ok (error); ji = mini_lookup_method (method, shared); } return ji; } MonoClass* mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context) { ERROR_DECL (error); MonoClass *klass; if (method->wrapper_type != MONO_WRAPPER_NONE) { klass = (MonoClass *)mono_method_get_wrapper_data (method, token); if (context) { klass = mono_class_inflate_generic_class_checked (klass, context, error); mono_error_cleanup (error); /* FIXME don't swallow the error */ } } else { klass = mono_class_get_and_inflate_typespec_checked (m_class_get_image (method->klass), token, context, error); mono_error_cleanup (error); /* FIXME don't swallow the error */ } if (klass) mono_class_init_internal (klass); return klass; } #if ENABLE_JIT_MAP static FILE* perf_map_file; void mono_enable_jit_map (void) { if (!perf_map_file) { char name [64]; g_snprintf (name, sizeof (name), "/tmp/perf-%d.map", getpid ()); unlink (name); perf_map_file = fopen (name, "w"); } } void mono_emit_jit_tramp (void *start, int size, const char *desc) { if (perf_map_file) fprintf (perf_map_file, "%" PRIx64 " %x %s\n", (guint64)(gsize)start, size, desc); } void mono_emit_jit_map (MonoJitInfo *jinfo) { if 
(perf_map_file) { char *name = mono_method_full_name (jinfo_get_method (jinfo), TRUE); mono_emit_jit_tramp (jinfo->code_start, jinfo->code_size, name); g_free (name); } } gboolean mono_jit_map_is_enabled (void) { return perf_map_file != NULL; } #endif #ifdef ENABLE_JIT_DUMP #include <sys/mman.h> #include <sys/syscall.h> #include <elf.h> static FILE *perf_dump_file; static mono_mutex_t perf_dump_mutex; static void *perf_dump_mmap_addr = MAP_FAILED; static guint32 perf_dump_pid; static clockid_t clock_id = CLOCK_MONOTONIC; enum { JIT_DUMP_MAGIC = 0x4A695444, JIT_DUMP_VERSION = 2, #if HOST_X86 ELF_MACHINE = EM_386, #elif HOST_AMD64 ELF_MACHINE = EM_X86_64, #elif HOST_ARM ELF_MACHINE = EM_ARM, #elif HOST_ARM64 ELF_MACHINE = EM_AARCH64, #elif HOST_POWERPC64 ELF_MACHINE = EM_PPC64, #elif HOST_S390X ELF_MACHINE = EM_S390, #elif HOST_RISCV ELF_MACHINE = EM_RISCV, #elif HOST_MIPS ELF_MACHINE = EM_MIPS, #endif JIT_CODE_LOAD = 0 }; typedef struct { guint32 magic; guint32 version; guint32 total_size; guint32 elf_mach; guint32 pad1; guint32 pid; guint64 timestamp; guint64 flags; } FileHeader; typedef struct { guint32 id; guint32 total_size; guint64 timestamp; } RecordHeader; typedef struct { RecordHeader header; guint32 pid; guint32 tid; guint64 vma; guint64 code_addr; guint64 code_size; guint64 code_index; // Null terminated function name // Native code } JitCodeLoadRecord; static void add_file_header_info (FileHeader *header); static void add_basic_JitCodeLoadRecord_info (JitCodeLoadRecord *record); void mono_enable_jit_dump (void) { if (perf_dump_pid == 0) perf_dump_pid = getpid(); if (!perf_dump_file) { char name [64]; FileHeader header; memset (&header, 0, sizeof (header)); mono_os_mutex_init (&perf_dump_mutex); mono_os_mutex_lock (&perf_dump_mutex); g_snprintf (name, sizeof (name), "/tmp/jit-%d.dump", perf_dump_pid); unlink (name); perf_dump_file = fopen (name, "w"); add_file_header_info (&header); if (perf_dump_file) { fwrite (&header, sizeof (header), 1, perf_dump_file); //This informs perf of the presence of the jitdump file and support for the feature. perf_dump_mmap_addr = mmap (NULL, sizeof (header), PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno (perf_dump_file), 0); } mono_os_mutex_unlock (&perf_dump_mutex); } } static void add_file_header_info (FileHeader *header) { header->magic = JIT_DUMP_MAGIC; header->version = JIT_DUMP_VERSION; header->total_size = sizeof (header); header->elf_mach = ELF_MACHINE; header->pad1 = 0; header->pid = perf_dump_pid; header->timestamp = mono_clock_get_time_ns (clock_id); header->flags = 0; } void mono_emit_jit_dump (MonoJitInfo *jinfo, gpointer code) { static uint64_t code_index; if (perf_dump_file) { JitCodeLoadRecord record; size_t nameLen = strlen (jinfo->d.method->name); memset (&record, 0, sizeof (record)); add_basic_JitCodeLoadRecord_info (&record); record.header.total_size = sizeof (record) + nameLen + 1 + jinfo->code_size; record.vma = (guint64)jinfo->code_start; record.code_addr = (guint64)jinfo->code_start; record.code_size = (guint64)jinfo->code_size; mono_os_mutex_lock (&perf_dump_mutex); record.code_index = ++code_index; // TODO: write debugInfo and unwindInfo immediately before the JitCodeLoadRecord (while lock is held). 
		record.header.timestamp = mono_clock_get_time_ns (clock_id);

		fwrite (&record, sizeof (record), 1, perf_dump_file);
		fwrite (jinfo->d.method->name, nameLen + 1, 1, perf_dump_file);
		fwrite (code, jinfo->code_size, 1, perf_dump_file);

		mono_os_mutex_unlock (&perf_dump_mutex);
	}
}

static void
add_basic_JitCodeLoadRecord_info (JitCodeLoadRecord *record)
{
	record->header.id = JIT_CODE_LOAD;
	record->header.timestamp = mono_clock_get_time_ns (clock_id);
	record->pid = perf_dump_pid;
	record->tid = syscall (SYS_gettid);
}

void
mono_jit_dump_cleanup (void)
{
	if (perf_dump_mmap_addr != MAP_FAILED)
		munmap (perf_dump_mmap_addr, sizeof(FileHeader));
	if (perf_dump_file)
		fclose (perf_dump_file);
}

#else

void
mono_enable_jit_dump (void)
{
}

void
mono_emit_jit_dump (MonoJitInfo *jinfo, gpointer code)
{
}

void
mono_jit_dump_cleanup (void)
{
}

#endif

static void
no_gsharedvt_in_wrapper (void)
{
	g_assert_not_reached ();
}

/*
Overall algorithm:

When a JIT request is made, we check if there's an outstanding one for that method and, if it exists, put the thread to sleep.
	If the current thread is already JITing another method, don't wait as it might cause a deadlock.
	Dependency management in this case is too complex to justify implementing it.

	If there are no outstanding requests, the current thread is doing nothing and there are already mono_cpu_count threads JITing, go to sleep.

TODO:
	Get rid of cctor invocations from within the JIT, it increases JIT duration and complicates things A LOT.
	Can we get rid of ref_count and use `done && threads_waiting == 0` as the equivalent of `ref_count == 0`?
	Reduce the amount of dynamic allocation - possible once the JIT is no longer reentrant.
	Maybe pool JitCompilationEntry, especially those with an inited cond var.
*/
typedef struct {
	MonoMethod *method;
	int compilation_count; /* Number of threads compiling this method - This happens due to the JIT being reentrant */
	int ref_count; /* Number of threads using this JitCompilationEntry, roughly 1 + threads_waiting */
	int threads_waiting; /* Number of threads waiting on this job */
	gboolean has_cond; /* True if @cond was initialized */
	gboolean done; /* True if the method finished JIT'ing */
	MonoCoopCond cond; /* Cond sleeping threads wait on */
} JitCompilationEntry;

typedef struct {
	GPtrArray *in_flight_methods; //JitCompilationEntry*
	MonoCoopMutex lock;
} JitCompilationData;

/*
 * Timeout, in milliseconds, that we wait for other threads to finish JITing.
 * This value can't be too small or we won't see enough methods being reused,
 * and it can't be too big or it will cause massive stalls due to unforeseeable
 * circumstances.
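 *
 * The timeout is consumed by wait_or_register_method_to_compile (); the
 * calling protocol around it looks roughly like this (sketch of the logic
 * in mono_jit_compile_method_with_opt ()):
 *
 *   if (wait_or_register_method_to_compile (method))
 *       goto lookup_start; // another thread JITed it, redo the lookup
 *   code = mono_jit_compile_method_inner (method, opt, error);
 *   unregister_method_for_compile (method);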
 */
#define MAX_JIT_TIMEOUT_MS 1000

static JitCompilationData compilation_data;
static int jit_methods_waited, jit_methods_multiple, jit_methods_overload, jit_spurious_wakeups_or_timeouts;

static void
mini_jit_init_job_control (void)
{
	mono_coop_mutex_init (&compilation_data.lock);
	compilation_data.in_flight_methods = g_ptr_array_new ();
}

static void
lock_compilation_data (void)
{
	mono_coop_mutex_lock (&compilation_data.lock);
}

static void
unlock_compilation_data (void)
{
	mono_coop_mutex_unlock (&compilation_data.lock);
}

static JitCompilationEntry*
find_method (MonoMethod *method)
{
	int i;
	for (i = 0; i < compilation_data.in_flight_methods->len; ++i) {
		JitCompilationEntry *e = (JitCompilationEntry*)compilation_data.in_flight_methods->pdata [i];
		if (e->method == method)
			return e;
	}

	return NULL;
}

static void
add_current_thread (MonoJitTlsData *jit_tls)
{
	++jit_tls->active_jit_methods;
}

static void
unref_jit_entry (JitCompilationEntry *entry)
{
	--entry->ref_count;
	if (entry->ref_count)
		return;
	if (entry->has_cond)
		mono_coop_cond_destroy (&entry->cond);
	g_free (entry);
}

/*
 * Returns true if this method waited successfully for another thread to JIT it
 */
static gboolean
wait_or_register_method_to_compile (MonoMethod *method)
{
	MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
	JitCompilationEntry *entry;

	static gboolean inited;
	if (!inited) {
		mono_counters_register ("JIT compile waited others", MONO_COUNTER_INT|MONO_COUNTER_JIT, &jit_methods_waited);
		mono_counters_register ("JIT compile 1+ jobs", MONO_COUNTER_INT|MONO_COUNTER_JIT, &jit_methods_multiple);
		mono_counters_register ("JIT compile overload wait", MONO_COUNTER_INT|MONO_COUNTER_JIT, &jit_methods_overload);
		mono_counters_register ("JIT compile spurious wakeups or timeouts", MONO_COUNTER_INT|MONO_COUNTER_JIT, &jit_spurious_wakeups_or_timeouts);
		inited = TRUE;
	}

	lock_compilation_data ();

	if (!(entry = find_method (method))) {
		entry = g_new0 (JitCompilationEntry, 1);
		entry->method = method;
		entry->compilation_count = entry->ref_count = 1;
		g_ptr_array_add (compilation_data.in_flight_methods, entry);
		g_assert (find_method (method) == entry);
		add_current_thread (jit_tls);

		unlock_compilation_data ();
		return FALSE;
	} else if (jit_tls->active_jit_methods > 0 || mono_threads_is_current_thread_in_protected_block ()) {
		//We can't suspend the current thread if it's already JITing a method.
		//Dependency management is too complicated and we want to get rid of this anyways.

		//We can't suspend the current thread if it's running a protected block (such as a cctor)
		//We can't rely only on JIT nesting as cctor's can be run from outside the JIT.

		//Finally, if we hit a timeout or a spurious wakeup, we're better off just giving up and recompiling.
		++entry->compilation_count;
		++jit_methods_multiple;
		++jit_tls->active_jit_methods;

		unlock_compilation_data ();
		return FALSE;
	} else {
		++jit_methods_waited;
		++entry->ref_count;

		if (!entry->has_cond) {
			mono_coop_cond_init (&entry->cond);
			entry->has_cond = TRUE;
		}

		while (TRUE) {
			++entry->threads_waiting;

			g_assert (entry->has_cond);
			mono_coop_cond_timedwait (&entry->cond, &compilation_data.lock, MAX_JIT_TIMEOUT_MS);
			--entry->threads_waiting;

			if (entry->done) {
				unref_jit_entry (entry);
				unlock_compilation_data ();
				return TRUE;
			} else {
				//We hit the timeout or a spurious wakeup, fall back to JITing
				g_assert (entry->ref_count > 1);
				unref_jit_entry (entry);
				++jit_spurious_wakeups_or_timeouts;

				++entry->compilation_count;
				++jit_methods_multiple;
				++jit_tls->active_jit_methods;

				unlock_compilation_data ();
				return FALSE;
			}
		}
	}
}

static void
unregister_method_for_compile (MonoMethod *method)
{
	MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();

	lock_compilation_data ();

	g_assert (jit_tls->active_jit_methods > 0);
	--jit_tls->active_jit_methods;

	JitCompilationEntry *entry = find_method (method);
	g_assert (entry); // It would be weird to fail
	entry->done = TRUE;

	if (entry->threads_waiting) {
		g_assert (entry->has_cond);
		mono_coop_cond_broadcast (&entry->cond);
	}

	if (--entry->compilation_count == 0) {
		g_ptr_array_remove (compilation_data.in_flight_methods, entry);
		unref_jit_entry (entry);
	}

	unlock_compilation_data ();
}

static MonoJitInfo*
create_jit_info_for_trampoline (MonoMethod *wrapper, MonoTrampInfo *info)
{
	MonoJitInfo *jinfo;
	guint8 *uw_info;
	guint32 info_len;

	if (info->uw_info) {
		uw_info = info->uw_info;
		info_len = info->uw_info_len;
	} else {
		uw_info = mono_unwind_ops_encode (info->unwind_ops, &info_len);
	}

	jinfo = (MonoJitInfo *)mono_mem_manager_alloc0 (get_default_mem_manager (), MONO_SIZEOF_JIT_INFO);
	jinfo->d.method = wrapper;
	jinfo->code_start = MINI_FTNPTR_TO_ADDR (info->code);
	jinfo->code_size = info->code_size;
	jinfo->unwind_info = mono_cache_unwind_info (uw_info, info_len);

	if (!info->uw_info)
		g_free (uw_info);

	return jinfo;
}

static gpointer
compile_special (MonoMethod *method, MonoError *error)
{
	MonoJitInfo *jinfo;
	gpointer code;

	if (mono_llvm_only) {
		if (method->wrapper_type == MONO_WRAPPER_OTHER) {
			WrapperInfo *info = mono_marshal_get_wrapper_info (method);

			if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG) {
				/*
				 * These wrappers are only created for signatures which are in the program, but
				 * sometimes we load methods too eagerly and have to create them even if they
				 * will never be called.
				 */
				return (gpointer)no_gsharedvt_in_wrapper;
			}
		}
	}

	if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
		MonoMethodPInvoke* piinfo = (MonoMethodPInvoke *) method;

		if (!piinfo->addr) {
			if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) {
				guint32 flags = MONO_ICALL_FLAGS_NONE;
				gpointer icall_addr;
				icall_addr = (gpointer)mono_lookup_internal_call_full_with_flags (method, TRUE, (guint32 *)&flags);
				if (flags & MONO_ICALL_FLAGS_NO_WRAPPER) {
					piinfo->icflags = MONO_ICALL_FLAGS_NO_WRAPPER;
					mono_memory_write_barrier ();
				}
				piinfo->addr = icall_addr;
			} else if (method->iflags & METHOD_IMPL_ATTRIBUTE_NATIVE) {
#ifdef HOST_WIN32
				g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono in modules loaded from byte arrays. 
The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), m_class_get_image (method->klass)->name); #else g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono on this platform. The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), m_class_get_image (method->klass)->name); #endif } else { ERROR_DECL (ignored_error); mono_lookup_pinvoke_call_internal (method, ignored_error); mono_error_cleanup (ignored_error); } } mono_memory_read_barrier (); gpointer compiled_method = NULL; if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && (piinfo->icflags & MONO_ICALL_FLAGS_NO_WRAPPER)) { compiled_method = piinfo->addr; } else { MonoMethod *nm = mono_marshal_get_native_wrapper (method, TRUE, mono_aot_only); compiled_method = mono_jit_compile_method_jit_only (nm, error); return_val_if_nok (error, NULL); } code = mono_get_addr_from_ftnptr (compiled_method); jinfo = mini_jit_info_table_find (code); if (jinfo) MONO_PROFILER_RAISE (jit_done, (method, jinfo)); return code; } else if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) { const char *name = method->name; char *full_name; MonoMethod *nm; if (m_class_get_parent (method->klass) == mono_defaults.multicastdelegate_class) { if (*name == '.' && (strcmp (name, ".ctor") == 0)) { MonoJitICallInfo *mi = &mono_get_jit_icall_info ()->ves_icall_mono_delegate_ctor; /* * We need to make sure this wrapper * is compiled because it might end up * in an (M)RGCTX if generic sharing * is enabled, and would be called * indirectly. If it were a * trampoline we'd try to patch that * indirect call, which is not * possible. */ return mono_get_addr_from_ftnptr ((gpointer)mono_icall_get_wrapper_full (mi, TRUE)); } else if (*name == 'I' && (strcmp (name, "Invoke") == 0)) { if (mono_llvm_only) { nm = mono_marshal_get_delegate_invoke (method, NULL); gpointer compiled_ptr = mono_jit_compile_method_jit_only (nm, error); return_val_if_nok (error, NULL); return mono_get_addr_from_ftnptr (compiled_ptr); } /* HACK: missing gsharedvt_out wrappers to do transition to del tramp in interp-only mode */ if (mono_use_interpreter) return NULL; return mono_create_delegate_trampoline (method->klass); } else if (*name == 'B' && (strcmp (name, "BeginInvoke") == 0)) { nm = mono_marshal_get_delegate_begin_invoke (method); gpointer compiled_ptr = mono_jit_compile_method_jit_only (nm, error); return_val_if_nok (error, NULL); return mono_get_addr_from_ftnptr (compiled_ptr); } else if (*name == 'E' && (strcmp (name, "EndInvoke") == 0)) { nm = mono_marshal_get_delegate_end_invoke (method); gpointer compiled_ptr = mono_jit_compile_method_jit_only (nm, error); return_val_if_nok (error, NULL); return mono_get_addr_from_ftnptr (compiled_ptr); } } full_name = mono_method_full_name (method, TRUE); mono_error_set_invalid_program (error, "Unrecognizable runtime implemented method '%s'", full_name); g_free (full_name); return NULL; } if (method->wrapper_type == MONO_WRAPPER_OTHER) { WrapperInfo *info = mono_marshal_get_wrapper_info (method); if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT) { static MonoTrampInfo *in_tinfo, *out_tinfo; MonoTrampInfo *tinfo; MonoJitInfo *jinfo; gboolean is_in = info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN; if (is_in && in_tinfo) return in_tinfo->code; else if (!is_in && out_tinfo) return out_tinfo->code; /* * This is a special wrapper whose body is implemented in assembly, like a trampoline. 
We use a wrapper so EH * works. * FIXME: The caller signature doesn't match the callee, which might cause problems on some platforms */ if (mono_ee_features.use_aot_trampolines) mono_aot_get_trampoline_full (is_in ? "gsharedvt_trampoline" : "gsharedvt_out_trampoline", &tinfo); else mono_arch_get_gsharedvt_trampoline (&tinfo, FALSE); jinfo = create_jit_info_for_trampoline (method, tinfo); mono_jit_info_table_add (jinfo); if (is_in) in_tinfo = tinfo; else out_tinfo = tinfo; return tinfo->code; } } return NULL; } static gpointer mono_jit_compile_method_with_opt (MonoMethod *method, guint32 opt, gboolean jit_only, MonoError *error) { MonoJitInfo *info; gpointer code = NULL, p; MonoJitICallInfo *callinfo = NULL; WrapperInfo *winfo = NULL; gboolean use_interp = FALSE; error_init (error); if (mono_ee_features.force_use_interpreter && !jit_only) use_interp = TRUE; if (!use_interp && mono_interp_only_classes) { for (GSList *l = mono_interp_only_classes; l; l = l->next) { if (!strcmp (m_class_get_name (method->klass), (char*)l->data)) use_interp = TRUE; } } if (use_interp) { code = mini_get_interp_callbacks ()->create_method_pointer (method, TRUE, error); if (code) return code; return_val_if_nok (error, NULL); } if (mono_llvm_only) /* Should be handled by the caller */ g_assert (!(method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED)); /* * ICALL wrappers are handled specially, since there is only one copy of them * shared by all appdomains. */ if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) winfo = mono_marshal_get_wrapper_info (method); if (winfo && winfo->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER) callinfo = mono_find_jit_icall_info (winfo->d.icall.jit_icall_id); if (method->wrapper_type == MONO_WRAPPER_OTHER) { WrapperInfo *info = mono_marshal_get_wrapper_info (method); g_assert (info); if (info->subtype == WRAPPER_SUBTYPE_SYNCHRONIZED_INNER) { MonoGenericContext *ctx = NULL; if (method->is_inflated) ctx = mono_method_get_context (method); method = info->d.synchronized_inner.method; if (ctx) { method = mono_class_inflate_generic_method_checked (method, ctx, error); g_assert (is_ok (error)); /* FIXME don't swallow the error */ } } } lookup_start: info = lookup_method (method); if (info) { MonoVTable *vtable; mono_atomic_inc_i32 (&mono_jit_stats.methods_lookups); vtable = mono_class_vtable_checked (method->klass, error); if (!is_ok (error)) return NULL; g_assert (vtable); if (!mono_runtime_class_init_full (vtable, error)) return NULL; code = MINI_ADDR_TO_FTNPTR (info->code_start); return mono_create_ftnptr (code); } #ifdef MONO_USE_AOT_COMPILER if (opt & MONO_OPT_AOT) { mono_class_init_internal (method->klass); code = mono_aot_get_method (method, error); if (code) { MonoVTable *vtable; if (mono_gc_is_critical_method (method)) { /* * The suspend code needs to be able to lookup these methods by ip in async context, * so preload their jit info. */ MonoJitInfo *ji = mini_jit_info_table_find (code); g_assert (ji); } /* * In llvm-only mode, method might be a shared method, so we can't initialize its class. * This is not a problem, since it will be initialized when the method is first * called by init_method (). 
*/ if (!mono_llvm_only && !mono_class_is_open_constructed_type (m_class_get_byval_arg (method->klass))) { vtable = mono_class_vtable_checked (method->klass, error); mono_error_assert_ok (error); if (!mono_runtime_class_init_full (vtable, error)) return NULL; } } if (!is_ok (error)) return NULL; } #endif if (!code) { code = compile_special (method, error); if (!is_ok (error)) return NULL; } if (!jit_only && !code && mono_aot_only && mono_use_interpreter && method->wrapper_type != MONO_WRAPPER_OTHER) { if (mono_llvm_only) { /* Signal to the caller that AOTed code is not found */ return NULL; } code = mini_get_interp_callbacks ()->create_method_pointer (method, TRUE, error); if (!is_ok (error)) return NULL; } if (!code) { if (mono_class_is_open_constructed_type (m_class_get_byval_arg (method->klass))) { char *full_name = mono_type_get_full_name (method->klass); mono_error_set_invalid_operation (error, "Could not execute the method because the containing type '%s', is not fully instantiated.", full_name); g_free (full_name); return NULL; } if (mono_aot_only) { char *fullname = mono_method_get_full_name (method); mono_error_set_execution_engine (error, "Attempting to JIT compile method '%s' while running in aot-only mode. See https://docs.microsoft.com/xamarin/ios/internals/limitations for more information.\n", fullname); g_free (fullname); return NULL; } if (wait_or_register_method_to_compile (method)) goto lookup_start; code = mono_jit_compile_method_inner (method, opt, error); unregister_method_for_compile (method); } if (!is_ok (error)) return NULL; if (!code && mono_llvm_only) { printf ("AOT method not found in llvmonly mode: %s\n", mono_method_full_name (method, 1)); g_assert_not_reached (); } if (!code) return NULL; //FIXME mini_jit_info_table_find doesn't work yet under wasm due to code_start/code_end issues. #ifndef HOST_WASM if ((method->wrapper_type == MONO_WRAPPER_WRITE_BARRIER || method->wrapper_type == MONO_WRAPPER_ALLOC)) { /* * SGEN requires the JIT info for these methods to be registered, see is_ip_in_managed_allocator (). */ MonoJitInfo *ji = mini_jit_info_table_find (code); g_assert (ji); } #endif p = mono_create_ftnptr (code); if (callinfo) { // FIXME Locking here is somewhat historical due to mono_register_jit_icall_wrapper taking loader lock. // atomic_compare_exchange should suffice. mono_loader_lock (); mono_jit_lock (); if (!callinfo->wrapper) { callinfo->wrapper = p; } mono_jit_unlock (); mono_loader_unlock (); } // FIXME p or callinfo->wrapper or does not matter? return p; } typedef struct { MonoMethod *method; guint32 opt; gboolean jit_only; MonoError *error; gpointer code; } JitCompileMethodWithOptCallbackData; static void jit_compile_method_with_opt_cb (gpointer arg) { JitCompileMethodWithOptCallbackData *params = (JitCompileMethodWithOptCallbackData *)arg; params->code = mono_jit_compile_method_with_opt (params->method, params->opt, params->jit_only, params->error); } static gpointer jit_compile_method_with_opt (JitCompileMethodWithOptCallbackData *params) { MonoLMFExt ext; memset (&ext, 0, sizeof (MonoLMFExt)); ext.kind = MONO_LMFEXT_JIT_ENTRY; mono_push_lmf (&ext); gboolean thrown = FALSE; #if defined(ENABLE_LLVM_RUNTIME) || defined(ENABLE_LLVM) mono_llvm_cpp_catch_exception (jit_compile_method_with_opt_cb, params, &thrown); #else jit_compile_method_with_opt_cb (params); #endif mono_pop_lmf (&ext.lmf); return !thrown ? 
params->code : NULL; } gpointer mono_jit_compile_method (MonoMethod *method, MonoError *error) { JitCompileMethodWithOptCallbackData params; params.method = method; params.opt = mono_get_optimizations_for_method (method, default_opt); params.jit_only = FALSE; params.error = error; params.code = NULL; return jit_compile_method_with_opt (&params); } /* * mono_jit_compile_method_jit_only: * * Compile METHOD using the JIT/AOT, even in interpreted mode. */ gpointer mono_jit_compile_method_jit_only (MonoMethod *method, MonoError *error) { JitCompileMethodWithOptCallbackData params; params.method = method; params.opt = mono_get_optimizations_for_method (method, default_opt); params.jit_only = TRUE; params.error = error; params.code = NULL; return jit_compile_method_with_opt (&params); } /* * get_ftnptr_for_method: * * Return a function pointer for METHOD which is indirectly callable from managed code. * On llvmonly, this returns a MonoFtnDesc, otherwise it returns a normal function pointer. */ static gpointer get_ftnptr_for_method (MonoMethod *method, MonoError *error) { if (!mono_llvm_only) { return mono_jit_compile_method (method, error); } else { return mini_llvmonly_load_method_ftndesc (method, FALSE, FALSE, error); } } #ifdef MONO_ARCH_HAVE_INVALIDATE_METHOD static void invalidated_delegate_trampoline (char *desc) { g_error ("Unmanaged code called delegate of type %s which was already garbage collected.\n" "See http://www.mono-project.com/Diagnostic:Delegate for an explanation and ways to fix this.", desc); } #endif /* * mono_jit_free_method: * * Free all memory allocated by the JIT for METHOD. */ static void mono_jit_free_method (MonoMethod *method) { MonoJitDynamicMethodInfo *ji; gboolean destroy = TRUE, removed; GHashTableIter iter; MonoJumpList *jlist; MonoJitMemoryManager *jit_mm; g_assert (method->dynamic); if (mono_use_interpreter) mini_get_interp_callbacks ()->free_method (method); ji = mono_dynamic_code_hash_lookup (method); if (!ji) return; mono_debug_remove_method (method, NULL); mono_lldb_remove_method (method, ji); //seq_points are always on get_default_jit_mm jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); g_hash_table_remove (jit_mm->seq_points, method); jit_mm_unlock (jit_mm); jit_mm = jit_mm_for_method (method); jit_code_hash_lock (jit_mm); removed = mono_internal_hash_table_remove (&jit_mm->jit_code_hash, method); g_assert (removed); jit_code_hash_unlock (jit_mm); ji->ji->seq_points = NULL; jit_mm_lock (jit_mm); mono_conc_hashtable_remove (jit_mm->runtime_invoke_hash, method); g_hash_table_remove (jit_mm->dynamic_code_hash, method); g_hash_table_remove (jit_mm->jump_trampoline_hash, method); g_hash_table_remove (jit_mm->seq_points, method); g_hash_table_iter_init (&iter, jit_mm->jump_target_hash); while (g_hash_table_iter_next (&iter, NULL, (void**)&jlist)) { GSList *tmp, *remove; remove = NULL; for (tmp = jlist->list; tmp; tmp = tmp->next) { guint8 *ip = (guint8 *)tmp->data; if (ip >= (guint8*)ji->ji->code_start && ip < (guint8*)ji->ji->code_start + ji->ji->code_size) remove = g_slist_prepend (remove, tmp); } for (tmp = remove; tmp; tmp = tmp->next) { jlist->list = g_slist_delete_link ((GSList *)jlist->list, (GSList *)tmp->data); } g_slist_free (remove); } jit_mm_unlock (jit_mm); #ifdef MONO_ARCH_HAVE_INVALIDATE_METHOD if (mini_debug_options.keep_delegates && method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) { /* * Instead of freeing the code, change it to call an error routine * so people can fix their code. 
*/ char *type = mono_type_full_name (m_class_get_byval_arg (method->klass)); char *type_and_method = g_strdup_printf ("%s.%s", type, method->name); g_free (type); mono_arch_invalidate_method (ji->ji, (gpointer)invalidated_delegate_trampoline, (gpointer)type_and_method); destroy = FALSE; } #endif /* * This needs to be done before freeing code_mp, since the code address is the * key in the table, so if we free the code_mp first, another thread can grab the * same code address and replace our entry in the table. */ mono_jit_info_table_remove (ji->ji); if (destroy) mono_code_manager_destroy (ji->code_mp); g_free (ji); } gpointer mono_jit_search_all_backends_for_jit_info (MonoMethod *method, MonoJitInfo **out_ji) { gpointer code; MonoJitInfo *ji; code = mono_jit_find_compiled_method_with_jit_info (method, &ji); if (!code) { ERROR_DECL (oerror); /* Might be AOTed code */ mono_class_init_internal (method->klass); code = mono_aot_get_method (method, oerror); if (code) { mono_error_assert_ok (oerror); ji = mini_jit_info_table_find (code); } else { if (!is_ok (oerror)) mono_error_cleanup (oerror); /* Might be interpreted */ ji = mini_get_interp_callbacks ()->find_jit_info (method); } } *out_ji = ji; return code; } gpointer mono_jit_find_compiled_method_with_jit_info (MonoMethod *method, MonoJitInfo **ji) { MonoJitInfo *info; info = lookup_method (method); if (info) { mono_atomic_inc_i32 (&mono_jit_stats.methods_lookups); if (ji) *ji = info; return MINI_ADDR_TO_FTNPTR (info->code_start); } if (ji) *ji = NULL; return NULL; } static guint32 bisect_opt = 0; static GHashTable *bisect_methods_hash = NULL; void mono_set_bisect_methods (guint32 opt, const char *method_list_filename) { FILE *file; char method_name [2048]; bisect_opt = opt; bisect_methods_hash = g_hash_table_new (g_str_hash, g_str_equal); g_assert (bisect_methods_hash); file = fopen (method_list_filename, "r"); g_assert (file); while (fgets (method_name, sizeof (method_name), file)) { size_t len = strlen (method_name); g_assert (len > 0); g_assert (method_name [len - 1] == '\n'); method_name [len - 1] = 0; g_hash_table_insert (bisect_methods_hash, g_strdup (method_name), GINT_TO_POINTER (1)); } g_assert (feof (file)); } gboolean mono_do_single_method_regression = FALSE; guint32 mono_single_method_regression_opt = 0; MonoMethod *mono_current_single_method; GSList *mono_single_method_list; GHashTable *mono_single_method_hash; guint32 mono_get_optimizations_for_method (MonoMethod *method, guint32 opt) { g_assert (method); if (bisect_methods_hash) { char *name = mono_method_full_name (method, TRUE); void *res = g_hash_table_lookup (bisect_methods_hash, name); g_free (name); if (res) return opt | bisect_opt; } if (!mono_do_single_method_regression) return opt; if (!mono_current_single_method) { if (!mono_single_method_hash) mono_single_method_hash = g_hash_table_new (g_direct_hash, g_direct_equal); if (!g_hash_table_lookup (mono_single_method_hash, method)) { g_hash_table_insert (mono_single_method_hash, method, method); mono_single_method_list = g_slist_prepend (mono_single_method_list, method); } return opt; } if (method == mono_current_single_method) return mono_single_method_regression_opt; return opt; } gpointer mono_jit_find_compiled_method (MonoMethod *method) { return mono_jit_find_compiled_method_with_jit_info (method, NULL); } typedef struct { MonoMethod *method; gpointer compiled_method; gpointer runtime_invoke; MonoVTable *vtable; MonoDynCallInfo *dyn_call_info; MonoClass *ret_box_class; MonoMethodSignature *sig; gboolean 
gsharedvt_invoke; gboolean use_interp; gpointer *wrapper_arg; } RuntimeInvokeInfo; #define MONO_SIZEOF_DYN_CALL_RET_BUF TARGET_SIZEOF_VOID_P static RuntimeInvokeInfo* create_runtime_invoke_info (MonoMethod *method, gpointer compiled_method, gboolean callee_gsharedvt, gboolean use_interp, MonoError *error) { MonoMethod *invoke; RuntimeInvokeInfo *info = NULL; RuntimeInvokeInfo *ret = NULL; info = g_new0 (RuntimeInvokeInfo, 1); info->compiled_method = compiled_method; info->use_interp = use_interp; info->sig = mono_method_signature_internal (method); invoke = mono_marshal_get_runtime_invoke (method, FALSE); (void)invoke; info->vtable = mono_class_vtable_checked (method->klass, error); if (!is_ok (error)) goto exit; g_assert (info->vtable); MonoMethodSignature *sig; sig = info->sig; MonoType *ret_type; /* * We want to avoid AOTing 1000s of runtime-invoke wrappers when running * in full-aot mode, so we use a slower, but more generic wrapper if * possible, built on top of the OP_DYN_CALL opcode provided by the JIT. */ #ifdef MONO_ARCH_DYN_CALL_SUPPORTED if (!mono_llvm_only && (mono_aot_only || mini_debug_options.dyn_runtime_invoke)) { gboolean supported = TRUE; int i; if (method->string_ctor) sig = mono_marshal_get_string_ctor_signature (method); for (i = 0; i < sig->param_count; ++i) { MonoType *t = sig->params [i]; if (m_type_is_byref (t) && t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) supported = FALSE; } if (!info->compiled_method) supported = FALSE; if (supported) { info->dyn_call_info = mono_arch_dyn_call_prepare (sig); if (mini_debug_options.dyn_runtime_invoke) g_assert (info->dyn_call_info); } } #endif ret_type = sig->ret; switch (ret_type->type) { case MONO_TYPE_VOID: break; case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_I8: case MONO_TYPE_U8: case MONO_TYPE_BOOLEAN: case MONO_TYPE_CHAR: case MONO_TYPE_R4: case MONO_TYPE_R8: info->ret_box_class = mono_class_from_mono_type_internal (ret_type); break; case MONO_TYPE_PTR: info->ret_box_class = mono_defaults.int_class; break; case MONO_TYPE_STRING: case MONO_TYPE_CLASS: case MONO_TYPE_ARRAY: case MONO_TYPE_SZARRAY: case MONO_TYPE_OBJECT: break; case MONO_TYPE_GENERICINST: if (!MONO_TYPE_IS_REFERENCE (ret_type)) info->ret_box_class = mono_class_from_mono_type_internal (ret_type); break; case MONO_TYPE_VALUETYPE: info->ret_box_class = mono_class_from_mono_type_internal (ret_type); break; default: g_assert_not_reached (); break; } if (info->use_interp) { ret = info; info = NULL; goto exit; } if (!info->dyn_call_info) { /* * Can't use the normal llvmonly code for string ctors since the gsharedvt out wrapper passes * an extra arg, which the string ctor methods don't have, which causes signature mismatches * on wasm. Instead, call string ctors normally using a direct runtime invoke wrapper * which is AOTed for each ctor. 
*/ if (mono_llvm_only && !method->string_ctor) { #ifndef MONO_ARCH_GSHAREDVT_SUPPORTED g_assert_not_reached (); #endif info->gsharedvt_invoke = TRUE; if (!callee_gsharedvt) { /* Invoke a gsharedvt out wrapper instead */ MonoMethod *wrapper = mini_get_gsharedvt_out_sig_wrapper (sig); MonoMethodSignature *wrapper_sig = mini_get_gsharedvt_out_sig_wrapper_signature (sig->hasthis, sig->ret->type != MONO_TYPE_VOID, sig->param_count); info->wrapper_arg = g_malloc0 (2 * sizeof (gpointer)); info->wrapper_arg [0] = mini_llvmonly_add_method_wrappers (method, info->compiled_method, FALSE, FALSE, &(info->wrapper_arg [1])); /* Pass has_rgctx == TRUE since the wrapper has an extra arg */ invoke = mono_marshal_get_runtime_invoke_for_sig (wrapper_sig); g_free (wrapper_sig); info->compiled_method = mono_jit_compile_method (wrapper, error); if (!is_ok (error)) goto exit; } else { /* Gsharedvt methods can be invoked the same way */ /* The out wrapper has the same signature as the compiled gsharedvt method */ MonoMethodSignature *wrapper_sig = mini_get_gsharedvt_out_sig_wrapper_signature (sig->hasthis, sig->ret->type != MONO_TYPE_VOID, sig->param_count); info->wrapper_arg = (gpointer*)(mono_method_needs_static_rgctx_invoke (method, TRUE) ? mini_method_get_rgctx (method) : NULL); invoke = mono_marshal_get_runtime_invoke_for_sig (wrapper_sig); g_free (wrapper_sig); } } info->runtime_invoke = mono_jit_compile_method (invoke, error); if (!is_ok (error)) goto exit; } ret = info; info = NULL; exit: g_free (info); return ret; } static GENERATE_GET_CLASS_WITH_CACHE (nullbyrefreturn_ex, "Mono", "NullByRefReturnException"); static MonoObject* mono_llvmonly_runtime_invoke (MonoMethod *method, RuntimeInvokeInfo *info, void *obj, void **params, MonoObject **exc, MonoError *error) { MonoMethodSignature *sig = info->sig; MonoObject *(*runtime_invoke) (MonoObject *this_obj, void **params, MonoObject **exc, void* compiled_method); int32_t retval_size = MONO_SIZEOF_DYN_CALL_RET_BUF; gpointer retval = NULL; int i, pindex; error_init (error); g_assert (info->gsharedvt_invoke); /* * Instead of invoking the method directly, we invoke a gsharedvt out wrapper. * The advantage of this is that the gsharedvt out wrappers have a reduced set of * signatures, so we only have to generate runtime invoke wrappers for these * signatures. * This code also handles invocation of gsharedvt methods directly, no * out wrappers are used in that case. */ // allocate param_refs = param_count and args = param_count + hasthis + 2. int const param_count = sig->param_count; gpointer* const param_refs = g_newa (gpointer, param_count * 2 + sig->hasthis + 2); gpointer* const args = param_refs + param_count; pindex = 0; /* * The runtime invoke wrappers expect pointers to primitive types, so we have to * use indirections.
*/ if (sig->hasthis) args [pindex ++] = &obj; if (sig->ret->type != MONO_TYPE_VOID) { if (info->ret_box_class && !m_type_is_byref (sig->ret) && (sig->ret->type == MONO_TYPE_VALUETYPE || (sig->ret->type == MONO_TYPE_GENERICINST && !MONO_TYPE_IS_REFERENCE (sig->ret)))) { // if the return type is a struct, allocate enough stack space to hold it MonoClass *ret_klass = mono_class_from_mono_type_internal (sig->ret); g_assert (!mono_class_has_failure (ret_klass)); int32_t inst_size = mono_class_instance_size (ret_klass); if (inst_size > MONO_SIZEOF_DYN_CALL_RET_BUF) { retval_size = inst_size; } } } retval = g_alloca (retval_size); if (sig->ret->type != MONO_TYPE_VOID) { args [pindex ++] = &retval; } for (i = 0; i < sig->param_count; ++i) { MonoType *t = sig->params [i]; if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) { MonoClass *klass = mono_class_from_mono_type_internal (t); guint8 *nullable_buf; int size; size = mono_class_value_size (klass, NULL); nullable_buf = g_alloca (size); g_assert (nullable_buf); /* The argument pointed to by params [i] is either a boxed vtype or null */ mono_nullable_init (nullable_buf, (MonoObject*)params [i], klass); params [i] = nullable_buf; } if (!m_type_is_byref (t) && (MONO_TYPE_IS_REFERENCE (t) || t->type == MONO_TYPE_PTR)) { param_refs [i] = params [i]; params [i] = &(param_refs [i]); } args [pindex ++] = &params [i]; } /* The gsharedvt out wrapper has an extra argument which contains the method to call */ args [pindex ++] = &info->wrapper_arg; runtime_invoke = (MonoObject *(*)(MonoObject *, void **, MonoObject **, void *))info->runtime_invoke; runtime_invoke (NULL, args, exc, info->compiled_method); if (exc && *exc) return NULL; if (m_type_is_byref (sig->ret)) { if (*(gpointer*)retval == NULL) { MonoClass *klass = mono_class_get_nullbyrefreturn_ex_class (); MonoObject *ex = mono_object_new_checked (klass, error); mono_error_assert_ok (error); mono_error_set_exception_instance (error, (MonoException*)ex); return NULL; } } if (sig->ret->type != MONO_TYPE_VOID) { if (info->ret_box_class) { if (m_type_is_byref (sig->ret)) { return mono_value_box_checked (info->ret_box_class, *(gpointer*)retval, error); } else { MonoObject *ret = mono_value_box_checked (info->ret_box_class, retval, error); return ret; } } else { if (m_type_is_byref (sig->ret)) return **(MonoObject***)retval; else return *(MonoObject**)retval; } } else { return NULL; } } /** * mono_jit_runtime_invoke: * \param method: the method to invoke * \param obj: this pointer * \param params: array of parameter values. * \param exc: Set to the exception raised in the managed method. * \param error: error or caught exception object * If \p exc is NULL, \p error is thrown instead. * If coop is enabled, \p exc argument is ignored - * all exceptions are caught and propagated through \p error */ static MonoObject* mono_jit_runtime_invoke (MonoMethod *method, void *obj, void **params, MonoObject **exc, MonoError *error) { MonoMethod *callee; MonoObject *(*runtime_invoke) (MonoObject *this_obj, void **params, MonoObject **exc, void* compiled_method); RuntimeInvokeInfo *info, *info2; MonoJitInfo *ji = NULL; gboolean callee_gsharedvt = FALSE; MonoJitMemoryManager *jit_mm; if (mono_ee_features.force_use_interpreter) { // FIXME: On wasm, if the callee throws an exception, this will return NULL, and the // exception will be stored inside the interpreter, it won't show up in exc/error. 
return mini_get_interp_callbacks ()->runtime_invoke (method, obj, params, exc, error); } error_init (error); if (exc) *exc = NULL; if (obj == NULL && !(method->flags & METHOD_ATTRIBUTE_STATIC) && !method->string_ctor && (method->wrapper_type == 0)) { g_warning ("Ignoring invocation of an instance method on a NULL instance.\n"); return NULL; } jit_mm = jit_mm_for_method (method); info = (RuntimeInvokeInfo *)mono_conc_hashtable_lookup (jit_mm->runtime_invoke_hash, method); if (!info) { gpointer compiled_method; callee = method; if (m_class_get_rank (method->klass) && (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && (method->iflags & METHOD_IMPL_ATTRIBUTE_NATIVE)) { /* * Array Get/Set/Address methods. The JIT implements them using inline code * inside the runtime invoke wrappers, so no need to compile them. */ if (mono_aot_only) { /* * Call a wrapper, since the runtime invoke wrapper was not generated. */ MonoMethod *wrapper; wrapper = mono_marshal_get_array_accessor_wrapper (method); mono_marshal_get_runtime_invoke (wrapper, FALSE); callee = wrapper; } else { callee = NULL; } } gboolean use_interp = FALSE; if (mono_aot_mode == MONO_AOT_MODE_LLVMONLY_INTERP) /* The runtime invoke wrappers contain clauses so they are not AOTed */ use_interp = TRUE; if (callee) { compiled_method = mono_jit_compile_method_jit_only (callee, error); if (!compiled_method) { g_assert (!is_ok (error)); if (mono_use_interpreter) use_interp = TRUE; else return NULL; } else { if (mono_llvm_only) { ji = mini_jit_info_table_find (mono_get_addr_from_ftnptr (compiled_method)); callee_gsharedvt = mini_jit_info_is_gsharedvt (ji); if (callee_gsharedvt) callee_gsharedvt = mini_is_gsharedvt_variable_signature (mono_method_signature_internal (jinfo_get_method (ji))); } if (!callee_gsharedvt) compiled_method = mini_add_method_trampoline (callee, compiled_method, mono_method_needs_static_rgctx_invoke (callee, TRUE), FALSE); } } else { compiled_method = NULL; } info = create_runtime_invoke_info (method, compiled_method, callee_gsharedvt, use_interp, error); if (!is_ok (error)) return NULL; jit_mm_lock (jit_mm); info2 = (RuntimeInvokeInfo *)mono_conc_hashtable_insert (jit_mm->runtime_invoke_hash, method, info); jit_mm_unlock (jit_mm); if (info2) { g_free (info); info = info2; } } /* * We need this here because mono_marshal_get_runtime_invoke can place * the helper method in System.Object and not the target class. 
*/ if (!mono_runtime_class_init_full (info->vtable, error)) { if (exc) *exc = (MonoObject*) mono_error_convert_to_exception (error); return NULL; } /* If coop is enabled, and the caller didn't ask for the exception to be caught separately, we always catch the exception and propagate it through the MonoError */ gboolean catchExcInMonoError = (exc == NULL) && mono_threads_are_safepoints_enabled (); MonoObject *invoke_exc = NULL; if (catchExcInMonoError) exc = &invoke_exc; /* The wrappers expect this to be initialized to NULL */ if (exc) *exc = NULL; #ifdef MONO_ARCH_DYN_CALL_SUPPORTED static RuntimeInvokeDynamicFunction dyn_runtime_invoke = NULL; if (info->dyn_call_info) { if (!dyn_runtime_invoke) { MonoMethod *dynamic_invoke = mono_marshal_get_runtime_invoke_dynamic (); RuntimeInvokeDynamicFunction invoke_func = (RuntimeInvokeDynamicFunction)mono_jit_compile_method_jit_only (dynamic_invoke, error); mono_memory_barrier (); dyn_runtime_invoke = invoke_func; if (!dyn_runtime_invoke && mono_use_interpreter) { info->use_interp = TRUE; info->dyn_call_info = NULL; } else if (!is_ok (error)) { return NULL; } } } if (info->dyn_call_info) { MonoMethodSignature *sig = mono_method_signature_internal (method); gpointer *args; int i, pindex, buf_size; guint8 *buf; int32_t retval_size = MONO_SIZEOF_DYN_CALL_RET_BUF; guint8 *retval = NULL; /* if the return type is a struct and it's too big, allocate more space for it */ if (info->ret_box_class && !m_type_is_byref (sig->ret) && (sig->ret->type == MONO_TYPE_VALUETYPE || (sig->ret->type == MONO_TYPE_GENERICINST && !MONO_TYPE_IS_REFERENCE (sig->ret)))) { MonoClass *ret_klass = mono_class_from_mono_type_internal (sig->ret); g_assert (!mono_class_has_failure (ret_klass)); int32_t inst_size = mono_class_instance_size (ret_klass); if (inst_size > MONO_SIZEOF_DYN_CALL_RET_BUF) { retval_size = inst_size; } } retval = g_alloca (retval_size); /* Convert the arguments to the format expected by start_dyn_call () */ args = (void **)g_alloca ((sig->param_count + sig->hasthis) * sizeof (gpointer)); pindex = 0; if (sig->hasthis) args [pindex ++] = &obj; for (i = 0; i < sig->param_count; ++i) { MonoType *t = sig->params [i]; if (m_type_is_byref (t)) { args [pindex ++] = &params [i]; } else if (MONO_TYPE_IS_REFERENCE (t) || t->type == MONO_TYPE_PTR) { args [pindex ++] = &params [i]; } else { args [pindex ++] = params [i]; } } //printf ("M: %s\n", mono_method_full_name (method, TRUE)); buf_size = mono_arch_dyn_call_get_buf_size (info->dyn_call_info); buf = g_alloca (buf_size); memset (buf, 0, buf_size); g_assert (buf); mono_arch_start_dyn_call (info->dyn_call_info, (gpointer**)args, retval, buf); dyn_runtime_invoke (buf, exc, info->compiled_method); mono_arch_finish_dyn_call (info->dyn_call_info, buf); if (catchExcInMonoError && *exc != NULL) { mono_error_set_exception_instance (error, (MonoException*) *exc); return NULL; } if (m_type_is_byref (sig->ret)) { if (*(gpointer*)retval == NULL) { MonoClass *klass = mono_class_get_nullbyrefreturn_ex_class (); MonoObject *ex = mono_object_new_checked (klass, error); mono_error_assert_ok (error); mono_error_set_exception_instance (error, (MonoException*)ex); return NULL; } } if (info->ret_box_class) { if (m_type_is_byref (sig->ret)) { return mono_value_box_checked (info->ret_box_class, *(gpointer*)retval, error); } else { MonoObject *boxed_ret = mono_value_box_checked (info->ret_box_class, retval, error); return boxed_ret; } } else { if (m_type_is_byref (sig->ret)) return **(MonoObject***)retval; else return *(MonoObject**)retval; } } 
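/*
 * Illustrative sketch (hypothetical embedder code, not part of this file's
 * build): how the invoke paths above are typically reached. An embedder
 * calls mono_runtime_invoke (), which funnels into mono_jit_runtime_invoke ();
 * value-type arguments are passed as pointers to their unboxed data, and a
 * managed exception comes back through the last argument instead of being
 * thrown across native frames.
 */
#if 0
MonoObject *exc = NULL;
int arg = 42;
void *args [1];
args [0] = &arg; /* value types: pointer to the raw data */
MonoObject *res = mono_runtime_invoke (method, NULL /* static method */, args, &exc);
if (exc)
	/* the managed exception is handed back rather than unwinding native frames */
	mono_print_unhandled_exception (exc);
#endif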
#endif MonoObject *result; if (info->use_interp) { result = mini_get_interp_callbacks ()->runtime_invoke (method, obj, params, exc, error); return_val_if_nok (error, NULL); } else if (mono_llvm_only && !method->string_ctor) { result = mono_llvmonly_runtime_invoke (method, info, obj, params, exc, error); if (!is_ok (error)) return NULL; } else { runtime_invoke = (MonoObject *(*)(MonoObject *, void **, MonoObject **, void *))info->runtime_invoke; result = runtime_invoke ((MonoObject *)obj, params, exc, info->compiled_method); } if (catchExcInMonoError && *exc != NULL) { ((MonoException *)(*exc))->caught_in_unmanaged = TRUE; mono_error_set_exception_instance (error, (MonoException*) *exc); } return result; } MONO_SIG_HANDLER_FUNC (, mono_sigfpe_signal_handler) { MonoException *exc = NULL; MonoJitInfo *ji; MonoContext mctx; MONO_SIG_HANDLER_INFO_TYPE *info = MONO_SIG_HANDLER_GET_INFO (); MONO_SIG_HANDLER_GET_CONTEXT; ji = mono_jit_info_table_find_internal (mono_arch_ip_from_context (ctx), TRUE, TRUE); MONO_ENTER_GC_UNSAFE_UNBALANCED; #if defined(MONO_ARCH_HAVE_IS_INT_OVERFLOW) if (mono_arch_is_int_overflow (ctx, info)) /* * The spec says this throws ArithmeticException, but MS throws the derived * OverflowException. */ exc = mono_get_exception_overflow (); else exc = mono_get_exception_divide_by_zero (); #else exc = mono_get_exception_divide_by_zero (); #endif if (!ji) { if (!mono_do_crash_chaining && mono_chain_signal (MONO_SIG_HANDLER_PARAMS)) goto exit; mono_sigctx_to_monoctx (ctx, &mctx); mono_handle_native_crash (mono_get_signame (SIGFPE), &mctx, info); if (mono_do_crash_chaining) { mono_chain_signal (MONO_SIG_HANDLER_PARAMS); goto exit; } } mono_arch_handle_exception (ctx, exc); exit: MONO_EXIT_GC_UNSAFE_UNBALANCED; } MONO_SIG_HANDLER_FUNC (, mono_crashing_signal_handler) { MonoContext mctx; MONO_SIG_HANDLER_INFO_TYPE *info = MONO_SIG_HANDLER_GET_INFO (); MONO_SIG_HANDLER_GET_CONTEXT; if (mono_runtime_get_no_exec ()) exit (1); mono_sigctx_to_monoctx (ctx, &mctx); #if defined(HAVE_SIG_INFO) && !defined(HOST_WIN32) // info is a siginfo_t mono_handle_native_crash (mono_get_signame (info->si_signo), &mctx, info); #else mono_handle_native_crash (mono_get_signame (SIGTERM), &mctx, info); #endif if (mono_do_crash_chaining) { mono_chain_signal (MONO_SIG_HANDLER_PARAMS); return; } } #if defined(MONO_ARCH_USE_SIGACTION) || defined(HOST_WIN32) #define HAVE_SIG_INFO #define MONO_SIG_HANDLER_DEBUG 1 // "with_fault_addr" but could be extended in future, so "debug" #ifdef MONO_SIG_HANDLER_DEBUG // Same as MONO_SIG_HANDLER_FUNC but debug_fault_addr is added to params, and no_optimize. // The Krait workaround is not needed here, due to this not actually being the signal handler, // so MONO_SIGNAL_HANDLER_FUNC is combined into it. #define MONO_SIG_HANDLER_FUNC_DEBUG(access, ftn) access MONO_NO_OPTIMIZATION void ftn \ (int _dummy, MONO_SIG_HANDLER_INFO_TYPE *_info, void *context, void * volatile debug_fault_addr G_GNUC_UNUSED) #define MONO_SIG_HANDLER_PARAMS_DEBUG MONO_SIG_HANDLER_PARAMS, debug_fault_addr #endif #endif gboolean mono_is_addr_implicit_null_check (void *addr) { /* implicit null checks are only expected to work on the first page. larger * offsets are expected to have an explicit null check */ return addr <= GUINT_TO_POINTER (mono_target_pagesize ()); } // This function is separate from mono_sigsegv_signal_handler // so debug_fault_addr can be seen in debugger stacks. 
#ifdef MONO_SIG_HANDLER_DEBUG MONO_NEVER_INLINE MONO_SIG_HANDLER_FUNC_DEBUG (static, mono_sigsegv_signal_handler_debug) #else MONO_SIG_HANDLER_FUNC (, mono_sigsegv_signal_handler) #endif { MonoJitInfo *ji = NULL; MonoDomain *domain = mono_domain_get (); gpointer fault_addr = NULL; MonoContext mctx; #if defined(HAVE_SIG_INFO) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK) MonoJitTlsData *jit_tls = mono_tls_get_jit_tls (); #endif #ifdef HAVE_SIG_INFO MONO_SIG_HANDLER_INFO_TYPE *info = MONO_SIG_HANDLER_GET_INFO (); #else void *info = NULL; #endif MONO_SIG_HANDLER_GET_CONTEXT; mono_sigctx_to_monoctx (ctx, &mctx); #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED) && defined(HAVE_SIG_INFO) if (mono_arch_is_single_step_event (info, ctx)) { mono_component_debugger ()->single_step_event (ctx); return; } else if (mono_arch_is_breakpoint_event (info, ctx)) { mono_component_debugger ()->breakpoint_hit (ctx); return; } #endif #if defined(HAVE_SIG_INFO) #if !defined(HOST_WIN32) fault_addr = info->si_addr; if (mono_aot_is_pagefault (info->si_addr)) { mono_aot_handle_pagefault (info->si_addr); return; } int signo = info->si_signo; #else int signo = SIGSEGV; #endif /* The thread might not be registered with the runtime */ if (!mono_domain_get () || !jit_tls) { if (!mono_do_crash_chaining && mono_chain_signal (MONO_SIG_HANDLER_PARAMS)) return; mono_handle_native_crash (mono_get_signame (signo), &mctx, info); if (mono_do_crash_chaining) { mono_chain_signal (MONO_SIG_HANDLER_PARAMS); return; } } #endif if (domain) { gpointer ip = MINI_FTNPTR_TO_ADDR (mono_arch_ip_from_context (ctx)); ji = mono_jit_info_table_find_internal (ip, TRUE, TRUE); } #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK if (mono_handle_soft_stack_ovf (jit_tls, ji, ctx, info, (guint8*)info->si_addr)) return; /* info->si_addr seems to be NULL on some kernels when handling stack overflows */ fault_addr = info->si_addr; if (fault_addr == NULL) { fault_addr = MONO_CONTEXT_GET_SP (&mctx); } if (jit_tls && jit_tls->stack_size && ABS ((guint8*)fault_addr - ((guint8*)jit_tls->end_of_stack - jit_tls->stack_size)) < 8192 * sizeof (gpointer)) { /* * The hard-guard page has been hit: there is not much we can do anymore. * Print a hopefully clear message and abort. */ mono_handle_hard_stack_ovf (jit_tls, ji, &mctx, (guint8*)info->si_addr); g_assert_not_reached (); } else { /* The original handler might not like that it is executed on an altstack...
*/ if (!ji && mono_chain_signal (MONO_SIG_HANDLER_PARAMS)) return; #ifdef TARGET_AMD64 /* exceptions-amd64.c handles the check itself */ mono_arch_handle_altstack_exception (ctx, info, info->si_addr, FALSE); #else if (mono_is_addr_implicit_null_check (info->si_addr)) { mono_arch_handle_altstack_exception (ctx, info, info->si_addr, FALSE); } else { // FIXME: This shouldn't run on the altstack mono_handle_native_crash (mono_get_signame (SIGSEGV), &mctx, info); } #endif } #else if (!ji) { if (!mono_do_crash_chaining && mono_chain_signal (MONO_SIG_HANDLER_PARAMS)) return; mono_handle_native_crash (mono_get_signame (SIGSEGV), &mctx, (MONO_SIG_HANDLER_INFO_TYPE*)info); if (mono_do_crash_chaining) { mono_chain_signal (MONO_SIG_HANDLER_PARAMS); return; } } if (mono_is_addr_implicit_null_check (fault_addr)) { mono_arch_handle_exception (ctx, NULL); } else { mono_handle_native_crash (mono_get_signame (SIGSEGV), &mctx, (MONO_SIG_HANDLER_INFO_TYPE*)info); if (mono_do_crash_chaining) { mono_chain_signal (MONO_SIG_HANDLER_PARAMS); return; } } #endif } #ifdef MONO_SIG_HANDLER_DEBUG // This function is separate from mono_sigsegv_signal_handler_debug // so debug_fault_addr can be seen in debugger stacks. MONO_SIG_HANDLER_FUNC (, mono_sigsegv_signal_handler) { #ifdef HOST_WIN32 gpointer const debug_fault_addr = (gpointer)MONO_SIG_HANDLER_GET_INFO () ->ep->ExceptionRecord->ExceptionInformation [1]; #elif defined (HAVE_SIG_INFO) gpointer const debug_fault_addr = MONO_SIG_HANDLER_GET_INFO ()->si_addr; #else #error No extra parameter is passed, not even 0, to avoid any confusion. #endif mono_sigsegv_signal_handler_debug (MONO_SIG_HANDLER_PARAMS_DEBUG); } #endif // MONO_SIG_HANDLER_DEBUG MONO_SIG_HANDLER_FUNC (, mono_sigint_signal_handler) { MonoException *exc; MONO_SIG_HANDLER_GET_CONTEXT; MONO_ENTER_GC_UNSAFE_UNBALANCED; exc = mono_get_exception_execution_engine ("Interrupted (SIGINT)."); mono_arch_handle_exception (ctx, exc); MONO_EXIT_GC_UNSAFE_UNBALANCED; } static G_GNUC_UNUSED void no_imt_trampoline (void) { g_assert_not_reached (); } static G_GNUC_UNUSED void no_vcall_trampoline (void) { g_assert_not_reached (); } static gpointer *vtable_trampolines; static int vtable_trampolines_size; gpointer mini_get_vtable_trampoline (MonoVTable *vt, int slot_index) { int index = slot_index + MONO_IMT_SIZE; if (mono_llvm_only) return mini_llvmonly_get_vtable_trampoline (vt, slot_index, index); g_assert (slot_index >= - MONO_IMT_SIZE); if (!vtable_trampolines || slot_index + MONO_IMT_SIZE >= vtable_trampolines_size) { mono_jit_lock (); if (!vtable_trampolines || index >= vtable_trampolines_size) { int new_size; gpointer new_table; new_size = vtable_trampolines_size ? 
vtable_trampolines_size * 2 : 128; while (new_size <= index) new_size *= 2; new_table = g_new0 (gpointer, new_size); if (vtable_trampolines) memcpy (new_table, vtable_trampolines, vtable_trampolines_size * sizeof (gpointer)); g_free (vtable_trampolines); mono_memory_barrier (); vtable_trampolines = (void **)new_table; vtable_trampolines_size = new_size; } mono_jit_unlock (); } if (!vtable_trampolines [index]) vtable_trampolines [index] = mono_create_specific_trampoline (get_default_mem_manager (), GUINT_TO_POINTER (slot_index), MONO_TRAMPOLINE_VCALL, NULL); return vtable_trampolines [index]; } static gpointer mini_get_imt_trampoline (MonoVTable *vt, int slot_index) { return mini_get_vtable_trampoline (vt, slot_index - MONO_IMT_SIZE); } static gboolean mini_imt_entry_inited (MonoVTable *vt, int imt_slot_index) { if (mono_llvm_only) return FALSE; gpointer *imt = (gpointer*)vt; imt -= MONO_IMT_SIZE; return (imt [imt_slot_index] != mini_get_imt_trampoline (vt, imt_slot_index)); } static gpointer create_delegate_method_ptr (MonoMethod *method, MonoError *error) { gpointer func; if (method_is_dynamic (method)) { /* Creating a trampoline would leak memory */ func = mono_compile_method_checked (method, error); return_val_if_nok (error, NULL); } else { gpointer trampoline = mono_create_jump_trampoline (method, TRUE, error); return_val_if_nok (error, NULL); func = mono_create_ftnptr (trampoline); } return func; } static void mini_init_delegate (MonoDelegateHandle delegate, MonoObjectHandle target, gpointer addr, MonoMethod *method, MonoError *error) { MonoDelegate *del = MONO_HANDLE_RAW (delegate); if (!method && !addr) { // Multicast delegate init if (!mono_llvm_only) { MONO_HANDLE_SETVAL (delegate, invoke_impl, gpointer, mono_create_delegate_trampoline (mono_handle_class (delegate))); } else { mini_llvmonly_init_delegate (del, NULL); } return; } if (!method) { MonoJitInfo *ji; gpointer lookup_addr = MINI_FTNPTR_TO_ADDR (addr); g_assert (addr); ji = mono_jit_info_table_find_internal (mono_get_addr_from_ftnptr (lookup_addr), TRUE, TRUE); if (ji) { if (ji->is_trampoline) { /* Could be an unbox trampoline etc. */ method = ji->d.tramp_info->method; } else { method = mono_jit_info_get_method (ji); g_assert (!mono_class_is_gtd (method->klass)); } } } if (method) MONO_HANDLE_SETVAL (delegate, method, MonoMethod*, method); if (addr) MONO_HANDLE_SETVAL (delegate, method_ptr, gpointer, addr); MONO_HANDLE_SET (delegate, target, target); MONO_HANDLE_SETVAL (delegate, invoke_impl, gpointer, mono_create_delegate_trampoline (mono_handle_class (delegate))); MonoDelegateTrampInfo *info = NULL; if (mono_use_interpreter) { mini_get_interp_callbacks ()->init_delegate (del, &info, error); return_if_nok (error); } if (mono_llvm_only) { g_assert (del->method); mini_llvmonly_init_delegate (del, info); //del->method_ptr = mini_llvmonly_load_method_delegate (del->method, FALSE, FALSE, &del->extra_arg, error); } else if (!del->method_ptr) { del->method_ptr = create_delegate_method_ptr (del->method, error); return_if_nok (error); } } char* mono_get_delegate_virtual_invoke_impl_name (gboolean load_imt_reg, int offset) { int abs_offset; abs_offset = offset; if (abs_offset < 0) abs_offset = - abs_offset; return g_strdup_printf ("delegate_virtual_invoke%s_%s%d", load_imt_reg ? "_imt" : "", offset < 0 ? 
"m_" : "", abs_offset / TARGET_SIZEOF_VOID_P); } gpointer mono_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method) { gboolean is_virtual_generic, is_interface, load_imt_reg; int offset, idx; static guint8 **cache = NULL; static int cache_size = 0; if (!method) return NULL; if (MONO_TYPE_ISSTRUCT (sig->ret)) return NULL; is_virtual_generic = method->is_inflated && mono_method_get_declaring_generic_method (method)->is_generic; is_interface = mono_class_is_interface (method->klass); load_imt_reg = is_virtual_generic || is_interface; if (is_interface) offset = ((gint32)mono_method_get_imt_slot (method) - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P; else offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + ((mono_method_get_vtable_index (method)) * (TARGET_SIZEOF_VOID_P)); idx = (offset / TARGET_SIZEOF_VOID_P + MONO_IMT_SIZE) * 2 + (load_imt_reg ? 1 : 0); g_assert (idx >= 0); /* Resize the cache to idx + 1 */ if (cache_size < idx + 1) { mono_jit_lock (); if (cache_size < idx + 1) { guint8 **new_cache; int new_cache_size = idx + 1; new_cache = g_new0 (guint8*, new_cache_size); if (cache) memcpy (new_cache, cache, cache_size * sizeof (guint8*)); g_free (cache); mono_memory_barrier (); cache = new_cache; cache_size = new_cache_size; } mono_jit_unlock (); } if (cache [idx]) return cache [idx]; /* FIXME Support more cases */ if (mono_ee_features.use_aot_trampolines) { cache [idx] = (guint8 *)mono_aot_get_trampoline (mono_get_delegate_virtual_invoke_impl_name (load_imt_reg, offset)); g_assert (cache [idx]); } else { cache [idx] = (guint8 *)mono_arch_get_delegate_virtual_invoke_impl (sig, method, offset, load_imt_reg); } return cache [idx]; } /** * mini_parse_debug_option: * @option: The option to parse. * * Parses debug options for the mono runtime. The options are the same as for * the MONO_DEBUG environment variable. * */ gboolean mini_parse_debug_option (const char *option) { // Empty string is ok as consequence of appending ",foo" // without first checking for empty. 
if (*option == 0) return TRUE; if (!strcmp (option, "handle-sigint")) mini_debug_options.handle_sigint = TRUE; else if (!strcmp (option, "keep-delegates")) mini_debug_options.keep_delegates = TRUE; else if (!strcmp (option, "reverse-pinvoke-exceptions")) mini_debug_options.reverse_pinvoke_exceptions = TRUE; else if (!strcmp (option, "collect-pagefault-stats")) mini_debug_options.collect_pagefault_stats = TRUE; else if (!strcmp (option, "break-on-unverified")) mini_debug_options.break_on_unverified = TRUE; else if (!strcmp (option, "no-gdb-backtrace")) mini_debug_options.no_gdb_backtrace = TRUE; else if (!strcmp (option, "suspend-on-native-crash") || !strcmp (option, "suspend-on-sigsegv")) mini_debug_options.suspend_on_native_crash = TRUE; else if (!strcmp (option, "suspend-on-exception")) mini_debug_options.suspend_on_exception = TRUE; else if (!strcmp (option, "suspend-on-unhandled")) mini_debug_options.suspend_on_unhandled = TRUE; else if (!strcmp (option, "dont-free-domains")) mono_dont_free_domains = TRUE; else if (!strcmp (option, "dyn-runtime-invoke")) mini_debug_options.dyn_runtime_invoke = TRUE; else if (!strcmp (option, "gdb")) fprintf (stderr, "MONO_DEBUG=gdb is deprecated."); else if (!strcmp (option, "lldb")) mini_debug_options.lldb = TRUE; else if (!strcmp (option, "llvm-disable-inlining")) mini_debug_options.llvm_disable_inlining = TRUE; else if (!strcmp (option, "llvm-disable-implicit-null-checks")) mini_debug_options.llvm_disable_implicit_null_checks = TRUE; else if (!strcmp (option, "explicit-null-checks")) mini_debug_options.explicit_null_checks = TRUE; else if (!strcmp (option, "gen-seq-points")) mini_debug_options.gen_sdb_seq_points = TRUE; else if (!strcmp (option, "gen-compact-seq-points")) fprintf (stderr, "Mono Warning: option gen-compact-seq-points is deprecated.\n"); else if (!strcmp (option, "no-compact-seq-points")) mini_debug_options.no_seq_points_compact_data = TRUE; else if (!strcmp (option, "single-imm-size")) mini_debug_options.single_imm_size = TRUE; else if (!strcmp (option, "init-stacks")) mini_debug_options.init_stacks = TRUE; else if (!strcmp (option, "casts")) mini_debug_options.better_cast_details = TRUE; else if (!strcmp (option, "soft-breakpoints")) mini_debug_options.soft_breakpoints = TRUE; else if (!strcmp (option, "check-pinvoke-callconv")) mini_debug_options.check_pinvoke_callconv = TRUE; else if (!strcmp (option, "use-fallback-tls")) mini_debug_options.use_fallback_tls = TRUE; else if (!strcmp (option, "debug-domain-unload")) g_error ("MONO_DEBUG option debug-domain-unload is deprecated."); else if (!strcmp (option, "partial-sharing")) mono_set_partial_sharing_supported (TRUE); else if (!strcmp (option, "align-small-structs")) mono_align_small_structs = TRUE; else if (!strcmp (option, "native-debugger-break")) mini_debug_options.native_debugger_break = TRUE; else if (!strcmp (option, "disable_omit_fp")) mini_debug_options.disable_omit_fp = TRUE; // This is an internal testing feature. // Every tail. encountered is required to be optimized. // It is asserted. 
else if (!strcmp (option, "test-tailcall-require")) mini_debug_options.test_tailcall_require = TRUE; else if (!strcmp (option, "verbose-gdb")) mini_debug_options.verbose_gdb = TRUE; else if (!strcmp (option, "clr-memory-model")) // FIXME Kill this debug flag mini_debug_options.weak_memory_model = FALSE; else if (!strcmp (option, "weak-memory-model")) mini_debug_options.weak_memory_model = TRUE; else if (!strcmp (option, "top-runtime-invoke-unhandled")) mini_debug_options.top_runtime_invoke_unhandled = TRUE; else if (!strncmp (option, "thread-dump-dir=", 16)) mono_set_thread_dump_dir(g_strdup(option + 16)); else if (!strncmp (option, "aot-skip=", 9)) { mini_debug_options.aot_skip_set = TRUE; mini_debug_options.aot_skip = atoi (option + 9); } else return FALSE; return TRUE; } static void mini_parse_debug_options (void) { char *options = g_getenv ("MONO_DEBUG"); gchar **args, **ptr; if (!options) return; args = g_strsplit (options, ",", -1); g_free (options); for (ptr = args; ptr && *ptr; ptr++) { const char *arg = *ptr; if (!mini_parse_debug_option (arg)) { fprintf (stderr, "Invalid option for the MONO_DEBUG env variable: %s\n", arg); // test-tailcall-require is also accepted but not documented. // empty string is also accepted and ignored as a consequence // of appending ",foo" without checking for empty. fprintf (stderr, "Available options: 'handle-sigint', 'keep-delegates', 'reverse-pinvoke-exceptions', 'collect-pagefault-stats', 'break-on-unverified', 'no-gdb-backtrace', 'suspend-on-native-crash', 'suspend-on-sigsegv', 'suspend-on-exception', 'suspend-on-unhandled', 'dont-free-domains', 'dyn-runtime-invoke', 'gdb', 'explicit-null-checks', 'gen-seq-points', 'no-compact-seq-points', 'single-imm-size', 'init-stacks', 'casts', 'soft-breakpoints', 'check-pinvoke-callconv', 'use-fallback-tls', 'debug-domain-unload', 'partial-sharing', 'align-small-structs', 'native-debugger-break', 'thread-dump-dir=DIR', 'no-verbose-gdb', 'llvm_disable_inlining', 'llvm-disable-self-init', 'llvm-disable-implicit-null-checks', 'weak-memory-model'.\n"); exit (1); } } g_strfreev (args); } MonoDebugOptions * mini_get_debug_options (void) { return &mini_debug_options; } static gpointer mini_create_ftnptr (gpointer addr) { #if defined(PPC_USES_FUNCTION_DESCRIPTOR) gpointer* desc = NULL; static GHashTable *ftnptrs_hash; if (!ftnptrs_hash) { GHashTable *hash = g_hash_table_new (mono_aligned_addr_hash, NULL); mono_memory_barrier (); ftnptrs_hash = hash; } // FIXME: MonoJitMemoryManager *jit_mm = get_default_jit_mm (); mono_jit_lock (); desc = (gpointer*)g_hash_table_lookup (ftnptrs_hash, addr); mono_jit_unlock (); if (desc) return desc; #if defined(__mono_ppc64__) desc = mono_mem_manager_alloc0 (jit_mm->mem_manager, 3 * sizeof (gpointer)); desc [0] = addr; desc [1] = NULL; desc [2] = NULL; # endif mono_jit_lock (); g_hash_table_insert (ftnptrs_hash, addr, desc); mono_jit_unlock (); return desc; #else return addr; #endif } static gpointer mini_get_addr_from_ftnptr (gpointer descr) { #if defined(PPC_USES_FUNCTION_DESCRIPTOR) return *(gpointer*)descr; #else return descr; #endif } static void register_counters (void) { mono_counters_register ("Compiled methods", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_compiled); mono_counters_register ("Methods from AOT", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_aot); mono_counters_register ("Methods from AOT+LLVM", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_aot_llvm); mono_counters_register ("Methods JITted using mono JIT", 
MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_without_llvm); mono_counters_register ("Methods JITted using LLVM", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_with_llvm); mono_counters_register ("Methods using the interpreter", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_with_interp); } static void runtime_invoke_info_free (gpointer value); static gint class_method_pair_equal (gconstpointer ka, gconstpointer kb) { const MonoClassMethodPair *apair = (const MonoClassMethodPair *)ka; const MonoClassMethodPair *bpair = (const MonoClassMethodPair *)kb; return apair->klass == bpair->klass && apair->method == bpair->method ? 1 : 0; } static guint class_method_pair_hash (gconstpointer data) { const MonoClassMethodPair *pair = (const MonoClassMethodPair *)data; return (gsize)pair->klass ^ (gsize)pair->method; } static void init_jit_mem_manager (MonoMemoryManager *mem_manager) { MonoJitMemoryManager *info = g_new0 (MonoJitMemoryManager, 1); info->mem_manager = mem_manager; info->jump_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL); info->jump_target_hash = g_hash_table_new (NULL, NULL); info->jit_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL); info->delegate_trampoline_hash = g_hash_table_new (class_method_pair_hash, class_method_pair_equal); info->seq_points = g_hash_table_new_full (mono_aligned_addr_hash, NULL, NULL, mono_seq_point_info_free); info->runtime_invoke_hash = mono_conc_hashtable_new_full (mono_aligned_addr_hash, NULL, NULL, runtime_invoke_info_free); info->arch_seq_points = g_hash_table_new (mono_aligned_addr_hash, NULL); mono_jit_code_hash_init (&info->jit_code_hash); mono_jit_code_hash_init (&info->interp_code_hash); mono_os_mutex_init_recursive (&info->jit_code_hash_lock); mem_manager->runtime_info = info; } static void delete_jump_list (gpointer key, gpointer value, gpointer user_data) { MonoJumpList *jlist = (MonoJumpList *)value; g_slist_free ((GSList*)jlist->list); } static void delete_got_slot_list (gpointer key, gpointer value, gpointer user_data) { GSList *list = (GSList *)value; g_slist_free (list); } static void dynamic_method_info_free (gpointer key, gpointer value, gpointer user_data) { MonoJitDynamicMethodInfo *di = (MonoJitDynamicMethodInfo *)value; mono_code_manager_destroy (di->code_mp); g_free (di); } static void runtime_invoke_info_free (gpointer value) { RuntimeInvokeInfo *info = (RuntimeInvokeInfo*)value; #ifdef MONO_ARCH_DYN_CALL_SUPPORTED if (info->dyn_call_info) mono_arch_dyn_call_free (info->dyn_call_info); #endif g_free (info); } static void free_jit_callee_list (gpointer key, gpointer value, gpointer user_data) { g_slist_free ((GSList*)value); } static void free_jit_mem_manager (MonoMemoryManager *mem_manager) { MonoJitMemoryManager *info = (MonoJitMemoryManager*)mem_manager->runtime_info; g_hash_table_foreach (info->jump_target_hash, delete_jump_list, NULL); g_hash_table_destroy (info->jump_target_hash); if (info->jump_target_got_slot_hash) { g_hash_table_foreach (info->jump_target_got_slot_hash, delete_got_slot_list, NULL); g_hash_table_destroy (info->jump_target_got_slot_hash); } if (info->dynamic_code_hash) { g_hash_table_foreach (info->dynamic_code_hash, dynamic_method_info_free, NULL); g_hash_table_destroy (info->dynamic_code_hash); } g_hash_table_destroy (info->method_code_hash); g_hash_table_destroy (info->jump_trampoline_hash); g_hash_table_destroy (info->jit_trampoline_hash); g_hash_table_destroy (info->delegate_trampoline_hash); g_hash_table_destroy 
(info->static_rgctx_trampoline_hash); g_hash_table_destroy (info->mrgctx_hash); g_hash_table_destroy (info->method_rgctx_hash); g_hash_table_destroy (info->interp_method_pointer_hash); mono_conc_hashtable_destroy (info->runtime_invoke_hash); g_hash_table_destroy (info->seq_points); g_hash_table_destroy (info->arch_seq_points); if (info->agent_info) mono_component_debugger ()->free_mem_manager (info); g_hash_table_destroy (info->gsharedvt_arg_tramp_hash); if (info->llvm_jit_callees) { g_hash_table_foreach (info->llvm_jit_callees, free_jit_callee_list, NULL); g_hash_table_destroy (info->llvm_jit_callees); } mono_internal_hash_table_destroy (&info->interp_code_hash); #ifdef ENABLE_LLVM mono_llvm_free_mem_manager (info); #endif g_free (info); mem_manager->runtime_info = NULL; } #ifdef ENABLE_LLVM static gboolean llvm_init_inner (void) { mono_llvm_init (!mono_compile_aot); return TRUE; } #endif /* * mini_llvm_init: * * Load and initialize LLVM support. * Return TRUE on success. */ gboolean mini_llvm_init (void) { #ifdef ENABLE_LLVM static gboolean llvm_inited; static gboolean init_result; mono_loader_lock_if_inited (); if (!llvm_inited) { init_result = llvm_init_inner (); llvm_inited = TRUE; } mono_loader_unlock_if_inited (); return init_result; #else return FALSE; #endif } void mini_add_profiler_argument (const char *desc) { if (!profile_options) profile_options = g_ptr_array_new (); g_ptr_array_add (profile_options, (gpointer) g_strdup (desc)); } const MonoEECallbacks *mono_interp_callbacks_pointer; void mini_install_interp_callbacks (const MonoEECallbacks *cbs) { mono_interp_callbacks_pointer = cbs; } int mono_ee_api_version (void) { return MONO_EE_API_VERSION; } void mono_interp_entry_from_trampoline (gpointer ccontext, gpointer imethod) { mini_get_interp_callbacks ()->entry_from_trampoline (ccontext, imethod); } void mono_interp_to_native_trampoline (gpointer addr, gpointer ccontext) { mini_get_interp_callbacks ()->to_native_trampoline (addr, ccontext); } static gboolean mini_is_interpreter_enabled (void) { return mono_use_interpreter; } static const char* mono_get_runtime_build_version (void); MonoDomain * mini_init (const char *filename, const char *runtime_version) { ERROR_DECL (error); MonoDomain *domain; MonoRuntimeCallbacks callbacks; static const MonoThreadInfoRuntimeCallbacks ticallbacks = { MONO_THREAD_INFO_RUNTIME_CALLBACKS (MONO_INIT_CALLBACK, mono) }; mono_component_event_pipe_100ns_ticks_start (); MONO_VES_INIT_BEGIN (); CHECKED_MONO_INIT (); #if defined(__linux__) if (access ("/proc/self/maps", F_OK) != 0) { g_print ("Mono requires /proc to be mounted.\n"); exit (1); } #endif mono_interp_stub_init (); #ifndef DISABLE_INTERPRETER if (mono_use_interpreter) mono_ee_interp_init (mono_interp_opts_string); #endif mono_components_init (); mono_component_debugger ()->parse_options (mono_debugger_agent_get_sdb_options ()); mono_os_mutex_init_recursive (&jit_mutex); mono_cross_helpers_run (); mono_counters_init (); mini_jit_init (); mini_jit_init_job_control (); /* Happens when using the embedding interface */ if (!default_opt_set) default_opt = mono_parse_default_optimizations (NULL); #ifdef MONO_ARCH_GSHAREDVT_SUPPORTED if (mono_aot_only) mono_set_generic_sharing_vt_supported (TRUE); #else if (mono_llvm_only) mono_set_generic_sharing_vt_supported (TRUE); #endif mono_tls_init_runtime_keys (); if (!global_codeman) { if (!mono_compile_aot) global_codeman = mono_code_manager_new (); else global_codeman = mono_code_manager_new_aot (); } memset (&callbacks, 0, sizeof (callbacks)); 
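/*
 * Illustrative note (a sketch of the pattern, not a definitive reference):
 * the metadata layer has no direct link-time dependency on the JIT and
 * reaches it through this callback vtable instead, roughly along the
 * lines of
 *
 *   gpointer code = mono_get_runtime_callbacks ()->compile_method (method, error);
 *
 * so every callback assigned below must be wired up before any managed
 * code can execute.
 */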
callbacks.create_ftnptr = mini_create_ftnptr; callbacks.get_addr_from_ftnptr = mini_get_addr_from_ftnptr; callbacks.get_runtime_build_info = mono_get_runtime_build_info; callbacks.get_runtime_build_version = mono_get_runtime_build_version; callbacks.set_cast_details = mono_set_cast_details; callbacks.debug_log = mono_component_debugger ()->debug_log; callbacks.debug_log_is_enabled = mono_component_debugger ()->debug_log_is_enabled; callbacks.get_vtable_trampoline = mini_get_vtable_trampoline; callbacks.get_imt_trampoline = mini_get_imt_trampoline; callbacks.imt_entry_inited = mini_imt_entry_inited; callbacks.init_delegate = mini_init_delegate; #define JIT_INVOKE_WORKS #ifdef JIT_INVOKE_WORKS callbacks.runtime_invoke = mono_jit_runtime_invoke; #endif #define JIT_TRAMPOLINES_WORK #ifdef JIT_TRAMPOLINES_WORK callbacks.compile_method = mono_jit_compile_method; callbacks.create_jit_trampoline = mono_create_jit_trampoline; callbacks.create_delegate_trampoline = mono_create_delegate_trampoline; callbacks.free_method = mono_jit_free_method; callbacks.get_ftnptr = get_ftnptr_for_method; #endif callbacks.is_interpreter_enabled = mini_is_interpreter_enabled; #if ENABLE_WEAK_ATTR callbacks.get_weak_field_indexes = mono_aot_get_weak_field_indexes; #endif callbacks.metadata_update_published = mini_invalidate_transformed_interp_methods; callbacks.interp_jit_info_foreach = mini_interp_jit_info_foreach; callbacks.interp_sufficient_stack = mini_interp_sufficient_stack; callbacks.init_mem_manager = init_jit_mem_manager; callbacks.free_mem_manager = free_jit_mem_manager; callbacks.get_jit_stats = get_jit_stats; callbacks.get_exception_stats = get_exception_stats; mono_install_callbacks (&callbacks); #ifndef HOST_WIN32 mono_w32handle_init (); #endif mono_thread_info_runtime_init (&ticallbacks); if (g_hasenv ("MONO_DEBUG")) { mini_parse_debug_options (); } mono_code_manager_init (mono_compile_aot); #ifdef MONO_ARCH_HAVE_CODE_CHUNK_TRACKING static const MonoCodeManagerCallbacks code_manager_callbacks = { #undef MONO_CODE_MANAGER_CALLBACK #define MONO_CODE_MANAGER_CALLBACK(ret, name, sig) mono_arch_code_ ## name, MONO_CODE_MANAGER_CALLBACKS }; mono_code_manager_install_callbacks (&code_manager_callbacks); #endif mono_hwcap_init (); mono_arch_cpu_init (); mono_arch_init (); mono_unwind_init (); if (mini_debug_options.lldb || g_hasenv ("MONO_LLDB")) { mono_lldb_init (""); mono_dont_free_domains = TRUE; } #ifdef ENABLE_LLVM if (mono_use_llvm) mono_llvm_init (!mono_compile_aot); #endif mono_trampolines_init (); if (default_opt & MONO_OPT_AOT) mono_aot_init (); mono_component_debugger ()->init (); #ifdef MONO_ARCH_GSHARED_SUPPORTED mono_set_generic_sharing_supported (TRUE); #endif mono_thread_info_signals_init (); mono_init_native_crash_info (); #ifndef MONO_CROSS_COMPILE mono_runtime_install_handlers (); #endif mono_threads_install_cleanup (mini_thread_cleanup); mono_install_get_cached_class_info (mono_aot_get_cached_class_info); mono_install_get_class_from_name (mono_aot_get_class_from_name); mono_install_jit_info_find_in_aot (mono_aot_find_jit_info); mono_profiler_state.context_enable = mini_profiler_context_enable; mono_profiler_state.context_get_this = mini_profiler_context_get_this; mono_profiler_state.context_get_argument = mini_profiler_context_get_argument; mono_profiler_state.context_get_local = mini_profiler_context_get_local; mono_profiler_state.context_get_result = mini_profiler_context_get_result; mono_profiler_state.context_free_buffer = mini_profiler_context_free_buffer; if (g_hasenv ("MONO_PROFILE")) 
{ gchar *profile_env = g_getenv ("MONO_PROFILE"); mini_add_profiler_argument (profile_env); g_free (profile_env); } if (profile_options) for (guint i = 0; i < profile_options->len; i++) mono_profiler_load ((const char *) g_ptr_array_index (profile_options, i)); mono_profiler_started (); if (mini_debug_options.collect_pagefault_stats) mono_aot_set_make_unreadable (TRUE); /* set no-exec before the default ALC is created */ if (mono_compile_aot) { /* * Avoid running managed code when AOT compiling, since the platform * might only support aot-only execution. */ mono_runtime_set_no_exec (TRUE); } if (runtime_version) domain = mono_init_version (filename, runtime_version); else domain = mono_init_from_assembly (filename, filename); if (mono_compile_aot) mono_component_diagnostics_server ()->disable (); mono_component_event_pipe ()->init (); // EventPipe is now up and running, convert 100ns ticks since runtime init into EventPipe compatible timestamp (using negative delta to represent timestamp in past). // Add RuntimeInit execution checkpoint using converted timestamp. mono_component_event_pipe ()->add_rundown_execution_checkpoint_2 ("RuntimeInit", mono_component_event_pipe ()->convert_100ns_ticks_to_timestamp_t (-mono_component_event_pipe_100ns_ticks_stop ())); if (mono_aot_only) { /* This helps catch code allocation requests */ mono_code_manager_set_read_only (mono_mem_manager_get_ambient ()->code_mp); mono_marshal_use_aot_wrappers (TRUE); } if (mono_llvm_only) { mono_install_imt_trampoline_builder (mini_llvmonly_get_imt_trampoline); mono_set_always_build_imt_trampolines (TRUE); } else if (mono_aot_only) { mono_install_imt_trampoline_builder (mono_aot_get_imt_trampoline); } else { mono_install_imt_trampoline_builder (mono_arch_build_imt_trampoline); } /* Init arch tls information only after the metadata side is inited to make sure we see dynamic appdomain tls keys */ mono_arch_finish_init (); /* This must come after mono_init () in the aot-only case */ mono_exceptions_init (); /* This should come after mono_init () too */ mini_gc_init (); mono_create_icall_signatures (); register_counters (); #define JIT_CALLS_WORK #ifdef JIT_CALLS_WORK /* Needs to be called here since register_jit_icall depends on it */ mono_marshal_init (); mono_arch_register_lowlevel_calls (); register_icalls (); mono_generic_sharing_init (); #endif #ifdef MONO_ARCH_SIMD_INTRINSICS mono_simd_intrinsics_init (); #endif register_trampolines (domain); mono_mem_account_register_counters (); #define JIT_RUNTIME_WORKS #ifdef JIT_RUNTIME_WORKS mono_install_runtime_cleanup (runtime_cleanup); mono_runtime_init_checked (domain, (MonoThreadStartCB)mono_thread_start_cb, mono_thread_attach_cb, error); mono_error_assert_ok (error); mono_thread_internal_attach (domain); MONO_PROFILER_RAISE (thread_name, (MONO_NATIVE_THREAD_ID_TO_UINT (mono_native_thread_id_get ()), "Main")); #endif mono_threads_set_runtime_startup_finished (); mono_component_event_pipe ()->finish_init (); #ifdef ENABLE_EXPERIMENT_TIERED if (!mono_compile_aot) { /* create compilation thread in background */ mini_tiered_init (); } #endif if (mono_profiler_sampling_enabled ()) mono_runtime_setup_stat_profiler (); MONO_PROFILER_RAISE (runtime_initialized, ()); MONO_VES_INIT_END (); return domain; } static void register_icalls (void) { mono_add_internal_call_internal ("System.Diagnostics.StackFrame::get_frame_info", ves_icall_get_frame_info); mono_add_internal_call_internal ("System.Diagnostics.StackTrace::get_trace", ves_icall_get_trace); mono_add_internal_call_internal
("Mono.Runtime::mono_runtime_install_handlers", mono_runtime_install_handlers); /* * It's important that we pass `TRUE` as the last argument here, as * it causes the JIT to omit a wrapper for these icalls. If the JIT * *did* emit a wrapper, we'd be looking at infinite recursion since * the wrapper would call the icall which would call the wrapper and * so on. */ register_icall (mono_profiler_raise_method_enter, mono_icall_sig_void_ptr_ptr, TRUE); register_icall (mono_profiler_raise_method_leave, mono_icall_sig_void_ptr_ptr, TRUE); register_icall (mono_profiler_raise_method_tail_call, mono_icall_sig_void_ptr_ptr, TRUE); register_icall (mono_profiler_raise_exception_clause, mono_icall_sig_void_ptr_int_int_object, TRUE); register_icall (mono_trace_enter_method, mono_icall_sig_void_ptr_ptr_ptr, TRUE); register_icall (mono_trace_leave_method, mono_icall_sig_void_ptr_ptr_ptr, TRUE); register_icall (mono_trace_tail_method, mono_icall_sig_void_ptr_ptr_ptr, TRUE); g_assert (mono_get_lmf_addr == mono_tls_get_lmf_addr); register_icall (mono_domain_get, mono_icall_sig_ptr, TRUE); register_icall (mini_llvmonly_throw_exception, mono_icall_sig_void_object, TRUE); register_icall (mini_llvmonly_rethrow_exception, mono_icall_sig_void_object, TRUE); register_icall (mini_llvmonly_throw_corlib_exception, mono_icall_sig_void_int, TRUE); register_icall (mini_llvmonly_resume_exception, mono_icall_sig_void, TRUE); register_icall (mini_llvmonly_resume_exception_il_state, mono_icall_sig_void_ptr_ptr, TRUE); register_icall (mini_llvmonly_load_exception, mono_icall_sig_object, TRUE); register_icall (mini_llvmonly_clear_exception, NULL, TRUE); register_icall (mini_llvmonly_match_exception, mono_icall_sig_int_ptr_int_int_ptr_object, TRUE); #if defined(ENABLE_LLVM) && defined(HAVE_UNWIND_H) register_icall (mono_llvm_set_unhandled_exception_handler, NULL, TRUE); // FIXME: This is broken #ifndef TARGET_WASM register_icall (mono_debug_personality, mono_icall_sig_int_int_int_ptr_ptr_ptr, TRUE); #endif #endif if (!mono_llvm_only) { register_dyn_icall (mono_get_throw_exception (), mono_arch_throw_exception, mono_icall_sig_void_object, TRUE); register_dyn_icall (mono_get_rethrow_exception (), mono_arch_rethrow_exception, mono_icall_sig_void_object, TRUE); register_dyn_icall (mono_get_throw_corlib_exception (), mono_arch_throw_corlib_exception, mono_icall_sig_void_ptr, TRUE); } register_icall (mono_thread_get_undeniable_exception, mono_icall_sig_object, FALSE); register_icall (ves_icall_thread_finish_async_abort, mono_icall_sig_void, FALSE); register_icall (mono_thread_interruption_checkpoint, mono_icall_sig_object, FALSE); register_icall (mono_thread_force_interruption_checkpoint_noraise, mono_icall_sig_object, FALSE); register_icall (mono_threads_state_poll, mono_icall_sig_void, FALSE); #ifndef MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS register_opcode_emulation (OP_LMUL, __emul_lmul, mono_icall_sig_long_long_long, mono_llmult, FALSE); register_opcode_emulation (OP_LDIV, __emul_ldiv, mono_icall_sig_long_long_long, mono_lldiv, FALSE); register_opcode_emulation (OP_LDIV_UN, __emul_ldiv_un, mono_icall_sig_long_long_long, mono_lldiv_un, FALSE); register_opcode_emulation (OP_LREM, __emul_lrem, mono_icall_sig_long_long_long, mono_llrem, FALSE); register_opcode_emulation (OP_LREM_UN, __emul_lrem_un, mono_icall_sig_long_long_long, mono_llrem_un, FALSE); #endif #if !defined(MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS) || defined(MONO_ARCH_EMULATE_LONG_MUL_OVF_OPTS) register_opcode_emulation (OP_LMUL_OVF_UN, __emul_lmul_ovf_un, 
mono_icall_sig_long_long_long, mono_llmult_ovf_un, FALSE); register_opcode_emulation (OP_LMUL_OVF, __emul_lmul_ovf, mono_icall_sig_long_long_long, mono_llmult_ovf, FALSE); register_opcode_emulation (OP_LMUL_OVF_UN_OOM, __emul_lmul_ovf_un_oom, mono_icall_sig_long_long_long, mono_llmult_ovf_un_oom, FALSE); #endif #ifndef MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS register_opcode_emulation (OP_LSHL, __emul_lshl, mono_icall_sig_long_long_int32, mono_lshl, TRUE); register_opcode_emulation (OP_LSHR, __emul_lshr, mono_icall_sig_long_long_int32, mono_lshr, TRUE); register_opcode_emulation (OP_LSHR_UN, __emul_lshr_un, mono_icall_sig_long_long_int32, mono_lshr_un, TRUE); #endif #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV) register_opcode_emulation (OP_IDIV, __emul_op_idiv, mono_icall_sig_int32_int32_int32, mono_idiv, FALSE); register_opcode_emulation (OP_IDIV_UN, __emul_op_idiv_un, mono_icall_sig_int32_int32_int32, mono_idiv_un, FALSE); register_opcode_emulation (OP_IREM, __emul_op_irem, mono_icall_sig_int32_int32_int32, mono_irem, FALSE); register_opcode_emulation (OP_IREM_UN, __emul_op_irem_un, mono_icall_sig_int32_int32_int32, mono_irem_un, FALSE); #endif #ifdef MONO_ARCH_EMULATE_MUL_DIV register_opcode_emulation (OP_IMUL, __emul_op_imul, mono_icall_sig_int32_int32_int32, mono_imul, TRUE); #endif #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_MUL_OVF) register_opcode_emulation (OP_IMUL_OVF, __emul_op_imul_ovf, mono_icall_sig_int32_int32_int32, mono_imul_ovf, FALSE); register_opcode_emulation (OP_IMUL_OVF_UN, __emul_op_imul_ovf_un, mono_icall_sig_int32_int32_int32, mono_imul_ovf_un, FALSE); register_opcode_emulation (OP_IMUL_OVF_UN_OOM, __emul_op_imul_ovf_un_oom, mono_icall_sig_int32_int32_int32, mono_imul_ovf_un_oom, FALSE); #endif #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_SOFT_FLOAT_FALLBACK) register_opcode_emulation (OP_FDIV, __emul_fdiv, mono_icall_sig_double_double_double, mono_fdiv, FALSE); #endif #ifdef MONO_ARCH_EMULATE_FCONV_TO_U8 register_opcode_emulation (OP_FCONV_TO_U8, __emul_fconv_to_u8, mono_icall_sig_ulong_double, mono_fconv_u8, FALSE); register_opcode_emulation (OP_RCONV_TO_U8, __emul_rconv_to_u8, mono_icall_sig_ulong_float, mono_rconv_u8, FALSE); #endif #ifdef MONO_ARCH_EMULATE_FCONV_TO_U4 register_opcode_emulation (OP_FCONV_TO_U4, __emul_fconv_to_u4, mono_icall_sig_uint32_double, mono_fconv_u4, FALSE); register_opcode_emulation (OP_RCONV_TO_U4, __emul_rconv_to_u4, mono_icall_sig_uint32_float, mono_rconv_u4, FALSE); #endif register_opcode_emulation (OP_FCONV_TO_OVF_I8, __emul_fconv_to_ovf_i8, mono_icall_sig_long_double, mono_fconv_ovf_i8, FALSE); register_opcode_emulation (OP_FCONV_TO_OVF_U8, __emul_fconv_to_ovf_u8, mono_icall_sig_ulong_double, mono_fconv_ovf_u8, FALSE); register_opcode_emulation (OP_RCONV_TO_OVF_I8, __emul_rconv_to_ovf_i8, mono_icall_sig_long_float, mono_rconv_ovf_i8, FALSE); register_opcode_emulation (OP_RCONV_TO_OVF_U8, __emul_rconv_to_ovf_u8, mono_icall_sig_ulong_float, mono_rconv_ovf_u8, FALSE); #ifdef MONO_ARCH_EMULATE_FCONV_TO_I8 register_opcode_emulation (OP_FCONV_TO_I8, __emul_fconv_to_i8, mono_icall_sig_long_double, mono_fconv_i8, FALSE); register_opcode_emulation (OP_RCONV_TO_I8, __emul_rconv_to_i8, mono_icall_sig_long_float, mono_rconv_i8, FALSE); #endif #ifdef MONO_ARCH_EMULATE_CONV_R8_UN register_opcode_emulation (OP_ICONV_TO_R_UN, __emul_iconv_to_r_un, mono_icall_sig_double_int32, mono_conv_to_r8_un, FALSE); #endif #ifdef MONO_ARCH_EMULATE_LCONV_TO_R8 register_opcode_emulation 
(OP_LCONV_TO_R8, __emul_lconv_to_r8, mono_icall_sig_double_long, mono_lconv_to_r8, FALSE); #endif #ifdef MONO_ARCH_EMULATE_LCONV_TO_R4 register_opcode_emulation (OP_LCONV_TO_R4, __emul_lconv_to_r4, mono_icall_sig_float_long, mono_lconv_to_r4, FALSE); #endif #ifdef MONO_ARCH_EMULATE_LCONV_TO_R8_UN register_opcode_emulation (OP_LCONV_TO_R_UN, __emul_lconv_to_r8_un, mono_icall_sig_double_long, mono_lconv_to_r8_un, FALSE); #endif #ifdef MONO_ARCH_EMULATE_FREM register_opcode_emulation (OP_FREM, __emul_frem, mono_icall_sig_double_double_double, mono_fmod, FALSE); register_opcode_emulation (OP_RREM, __emul_rrem, mono_icall_sig_float_float_float, fmodf, FALSE); #endif #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK if (mono_arch_is_soft_float ()) { register_opcode_emulation (OP_FSUB, __emul_fsub, mono_icall_sig_double_double_double, mono_fsub, FALSE); register_opcode_emulation (OP_FADD, __emul_fadd, mono_icall_sig_double_double_double, mono_fadd, FALSE); register_opcode_emulation (OP_FMUL, __emul_fmul, mono_icall_sig_double_double_double, mono_fmul, FALSE); register_opcode_emulation (OP_FNEG, __emul_fneg, mono_icall_sig_double_double, mono_fneg, FALSE); register_opcode_emulation (OP_ICONV_TO_R8, __emul_iconv_to_r8, mono_icall_sig_double_int32, mono_conv_to_r8, FALSE); register_opcode_emulation (OP_ICONV_TO_R4, __emul_iconv_to_r4, mono_icall_sig_double_int32, mono_conv_to_r4, FALSE); register_opcode_emulation (OP_FCONV_TO_R4, __emul_fconv_to_r4, mono_icall_sig_double_double, mono_fconv_r4, FALSE); register_opcode_emulation (OP_FCONV_TO_I1, __emul_fconv_to_i1, mono_icall_sig_int8_double, mono_fconv_i1, FALSE); register_opcode_emulation (OP_FCONV_TO_I2, __emul_fconv_to_i2, mono_icall_sig_int16_double, mono_fconv_i2, FALSE); register_opcode_emulation (OP_FCONV_TO_I4, __emul_fconv_to_i4, mono_icall_sig_int32_double, mono_fconv_i4, FALSE); register_opcode_emulation (OP_FCONV_TO_U1, __emul_fconv_to_u1, mono_icall_sig_uint8_double, mono_fconv_u1, FALSE); register_opcode_emulation (OP_FCONV_TO_U2, __emul_fconv_to_u2, mono_icall_sig_uint16_double, mono_fconv_u2, FALSE); register_opcode_emulation (OP_FBEQ, __emul_fcmp_eq, mono_icall_sig_uint32_double_double, mono_fcmp_eq, FALSE); register_opcode_emulation (OP_FBLT, __emul_fcmp_lt, mono_icall_sig_uint32_double_double, mono_fcmp_lt, FALSE); register_opcode_emulation (OP_FBGT, __emul_fcmp_gt, mono_icall_sig_uint32_double_double, mono_fcmp_gt, FALSE); register_opcode_emulation (OP_FBLE, __emul_fcmp_le, mono_icall_sig_uint32_double_double, mono_fcmp_le, FALSE); register_opcode_emulation (OP_FBGE, __emul_fcmp_ge, mono_icall_sig_uint32_double_double, mono_fcmp_ge, FALSE); register_opcode_emulation (OP_FBNE_UN, __emul_fcmp_ne_un, mono_icall_sig_uint32_double_double, mono_fcmp_ne_un, FALSE); register_opcode_emulation (OP_FBLT_UN, __emul_fcmp_lt_un, mono_icall_sig_uint32_double_double, mono_fcmp_lt_un, FALSE); register_opcode_emulation (OP_FBGT_UN, __emul_fcmp_gt_un, mono_icall_sig_uint32_double_double, mono_fcmp_gt_un, FALSE); register_opcode_emulation (OP_FBLE_UN, __emul_fcmp_le_un, mono_icall_sig_uint32_double_double, mono_fcmp_le_un, FALSE); register_opcode_emulation (OP_FBGE_UN, __emul_fcmp_ge_un, mono_icall_sig_uint32_double_double, mono_fcmp_ge_un, FALSE); register_opcode_emulation (OP_FCEQ, __emul_fcmp_ceq, mono_icall_sig_uint32_double_double, mono_fceq, FALSE); register_opcode_emulation (OP_FCGT, __emul_fcmp_cgt, mono_icall_sig_uint32_double_double, mono_fcgt, FALSE); register_opcode_emulation (OP_FCGT_UN, __emul_fcmp_cgt_un, mono_icall_sig_uint32_double_double, 
mono_fcgt_un, FALSE); register_opcode_emulation (OP_FCLT, __emul_fcmp_clt, mono_icall_sig_uint32_double_double, mono_fclt, FALSE); register_opcode_emulation (OP_FCLT_UN, __emul_fcmp_clt_un, mono_icall_sig_uint32_double_double, mono_fclt_un, FALSE); register_icall (mono_fload_r4, mono_icall_sig_double_ptr, FALSE); register_icall (mono_fstore_r4, mono_icall_sig_void_double_ptr, FALSE); register_icall (mono_fload_r4_arg, mono_icall_sig_uint32_double, FALSE); register_icall (mono_isfinite_double, mono_icall_sig_int32_double, FALSE); } #endif register_icall (mono_ckfinite, mono_icall_sig_double_double, FALSE); #ifdef COMPRESSED_INTERFACE_BITMAP register_icall (mono_class_interface_match, mono_icall_sig_uint32_ptr_int32, TRUE); #endif /* other jit icalls */ register_icall (ves_icall_mono_delegate_ctor, mono_icall_sig_void_object_object_ptr, FALSE); register_icall (ves_icall_mono_delegate_ctor_interp, mono_icall_sig_void_object_object_ptr, FALSE); register_icall (mono_class_static_field_address, mono_icall_sig_ptr_ptr, FALSE); register_icall (mono_ldtoken_wrapper, mono_icall_sig_ptr_ptr_ptr_ptr, FALSE); register_icall (mono_ldtoken_wrapper_generic_shared, mono_icall_sig_ptr_ptr_ptr_ptr, FALSE); register_icall (mono_get_special_static_data, mono_icall_sig_ptr_int, FALSE); register_icall (mono_helper_stelem_ref_check, mono_icall_sig_void_object_object, FALSE); register_icall (ves_icall_object_new, mono_icall_sig_object_ptr, FALSE); register_icall (ves_icall_object_new_specific, mono_icall_sig_object_ptr, FALSE); register_icall (ves_icall_array_new_specific, mono_icall_sig_object_ptr_int32, FALSE); register_icall (ves_icall_runtime_class_init, mono_icall_sig_void_ptr, FALSE); register_icall (mono_ldftn, mono_icall_sig_ptr_ptr, FALSE); register_icall (mono_ldvirtfn, mono_icall_sig_ptr_object_ptr, FALSE); register_icall (mono_ldvirtfn_gshared, mono_icall_sig_ptr_object_ptr, FALSE); register_icall (mono_helper_compile_generic_method, mono_icall_sig_ptr_object_ptr_ptr, FALSE); register_icall (mono_helper_ldstr, mono_icall_sig_object_ptr_int, FALSE); register_icall (mono_helper_ldstr_mscorlib, mono_icall_sig_object_int, FALSE); register_icall (mono_helper_newobj_mscorlib, mono_icall_sig_object_int, FALSE); register_icall (mono_value_copy_internal, mono_icall_sig_void_ptr_ptr_ptr, FALSE); register_icall (mono_object_castclass_unbox, mono_icall_sig_object_object_ptr, FALSE); register_icall (mono_break, NULL, TRUE); register_icall (mono_create_corlib_exception_0, mono_icall_sig_object_int, TRUE); register_icall (mono_create_corlib_exception_1, mono_icall_sig_object_int_object, TRUE); register_icall (mono_create_corlib_exception_2, mono_icall_sig_object_int_object_object, TRUE); register_icall (mono_array_new_1, mono_icall_sig_object_ptr_int, FALSE); register_icall (mono_array_new_2, mono_icall_sig_object_ptr_int_int, FALSE); register_icall (mono_array_new_3, mono_icall_sig_object_ptr_int_int_int, FALSE); register_icall (mono_array_new_4, mono_icall_sig_object_ptr_int_int_int_int, FALSE); register_icall (mono_array_new_n_icall, mono_icall_sig_object_ptr_int_ptr, FALSE); register_icall (mono_get_native_calli_wrapper, mono_icall_sig_ptr_ptr_ptr_ptr, FALSE); register_icall (mono_resume_unwind, mono_icall_sig_void_ptr, TRUE); register_icall (mono_gsharedvt_constrained_call, mono_icall_sig_object_ptr_ptr_ptr_ptr_ptr, FALSE); register_icall (mono_gsharedvt_value_copy, mono_icall_sig_void_ptr_ptr_ptr, TRUE); //WARNING We do runtime selection here but the string *MUST* be to a fallback function that has same 
signature and behavior MonoRangeCopyFunction const mono_gc_wbarrier_range_copy = mono_gc_get_range_copy_func (); register_icall_no_wrapper (mono_gc_wbarrier_range_copy, mono_icall_sig_void_ptr_ptr_int); register_icall (mono_object_castclass_with_cache, mono_icall_sig_object_object_ptr_ptr, FALSE); register_icall (mono_object_isinst_with_cache, mono_icall_sig_object_object_ptr_ptr, FALSE); register_icall (mono_generic_class_init, mono_icall_sig_void_ptr, FALSE); register_icall (mono_fill_class_rgctx, mono_icall_sig_ptr_ptr_int, FALSE); register_icall (mono_fill_method_rgctx, mono_icall_sig_ptr_ptr_int, FALSE); register_dyn_icall (mono_component_debugger ()->user_break, mono_debugger_agent_user_break, mono_icall_sig_void, FALSE); register_icall (mini_llvm_init_method, mono_icall_sig_void_ptr_ptr_ptr_ptr, TRUE); register_icall_no_wrapper (mini_llvmonly_resolve_iface_call_gsharedvt, mono_icall_sig_ptr_object_int_ptr_ptr); register_icall_no_wrapper (mini_llvmonly_resolve_vcall_gsharedvt, mono_icall_sig_ptr_object_int_ptr_ptr); register_icall_no_wrapper (mini_llvmonly_resolve_vcall_gsharedvt_fast, mono_icall_sig_ptr_object_int); register_icall_no_wrapper (mini_llvmonly_resolve_generic_virtual_call, mono_icall_sig_ptr_ptr_int_ptr); register_icall_no_wrapper (mini_llvmonly_resolve_generic_virtual_iface_call, mono_icall_sig_ptr_ptr_int_ptr); /* This needs a wrapper so it can have a preserveall cconv */ register_icall (mini_llvmonly_init_vtable_slot, mono_icall_sig_ptr_ptr_int, FALSE); register_icall (mini_llvmonly_init_delegate, mono_icall_sig_void_object_ptr, TRUE); register_icall (mini_llvmonly_init_delegate_virtual, mono_icall_sig_void_object_object_ptr, TRUE); register_icall (mini_llvmonly_throw_nullref_exception, mono_icall_sig_void, TRUE); register_icall (mini_llvmonly_throw_aot_failed_exception, mono_icall_sig_void_ptr, TRUE); register_icall (mini_llvmonly_pop_lmf, mono_icall_sig_void_ptr, TRUE); register_icall (mini_llvmonly_interp_entry_gsharedvt, mono_icall_sig_void_ptr_ptr_ptr, TRUE); register_icall (mono_get_assembly_object, mono_icall_sig_object_ptr, TRUE); register_icall (mono_get_method_object, mono_icall_sig_object_ptr, TRUE); register_icall (mono_throw_method_access, mono_icall_sig_void_ptr_ptr, FALSE); register_icall (mono_throw_bad_image, mono_icall_sig_void, FALSE); register_icall (mono_throw_not_supported, mono_icall_sig_void, FALSE); register_icall (mono_throw_platform_not_supported, mono_icall_sig_void, FALSE); register_icall (mono_throw_invalid_program, mono_icall_sig_void_ptr, FALSE); register_icall_no_wrapper (mono_dummy_jit_icall, mono_icall_sig_void); //register_icall_no_wrapper (mono_dummy_jit_icall_val, mono_icall_sig_void_ptr); register_icall_with_wrapper (mono_monitor_enter_internal, mono_icall_sig_int32_obj); register_icall_with_wrapper (mono_monitor_enter_v4_internal, mono_icall_sig_void_obj_ptr); register_icall_no_wrapper (mono_monitor_enter_fast, mono_icall_sig_int_obj); register_icall_no_wrapper (mono_monitor_enter_v4_fast, mono_icall_sig_int_obj_ptr); #ifdef TARGET_IOS register_icall (pthread_getspecific, mono_icall_sig_ptr_ptr, TRUE); #endif /* Register tls icalls */ register_icall_no_wrapper (mono_tls_get_thread_extern, mono_icall_sig_ptr); register_icall_no_wrapper (mono_tls_get_jit_tls_extern, mono_icall_sig_ptr); register_icall_no_wrapper (mono_tls_get_domain_extern, mono_icall_sig_ptr); register_icall_no_wrapper (mono_tls_get_sgen_thread_info_extern, mono_icall_sig_ptr); register_icall_no_wrapper (mono_tls_get_lmf_addr_extern, mono_icall_sig_ptr); 
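/*
 * Editor's note (illustrative, not in the original source): the boolean
 * last argument of register_icall () is the "avoid wrapper" flag referred
 * to earlier - e.g. register_icall (mono_break, NULL, TRUE) makes the JIT
 * call mono_break directly - while register_icall_no_wrapper () never
 * creates a wrapper and register_icall_with_wrapper () presumably always
 * does. The TLS getters above are registered without wrappers.
 */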
register_icall_no_wrapper (mono_interp_entry_from_trampoline, mono_icall_sig_void_ptr_ptr); register_icall_no_wrapper (mono_interp_to_native_trampoline, mono_icall_sig_void_ptr_ptr); #ifdef MONO_ARCH_HAS_REGISTER_ICALL mono_arch_register_icall (); #endif } MonoJitStats mono_jit_stats = {0}; /** * Counters of mono_stats and mono_jit_stats can be read without locking during shutdown. * For all other contexts, assumes that the domain lock is held. * MONO_NO_SANITIZE_THREAD tells Clang's ThreadSanitizer to hide all reports of these (known) races. */ MONO_NO_SANITIZE_THREAD void mono_runtime_print_stats (void) { if (mono_jit_stats.enabled) { g_print ("Mono Jit statistics\n"); g_print ("Max code size ratio: %.2f (%s)\n", mono_jit_stats.max_code_size_ratio / 100.0, mono_jit_stats.max_ratio_method); g_print ("Biggest method: %" G_GINT32_FORMAT " (%s)\n", mono_jit_stats.biggest_method_size, mono_jit_stats.biggest_method); g_print ("Delegates created: %" G_GINT32_FORMAT "\n", mono_stats.delegate_creations); g_print ("Initialized classes: %" G_GINT32_FORMAT "\n", mono_stats.initialized_class_count); g_print ("Used classes: %" G_GINT32_FORMAT "\n", mono_stats.used_class_count); g_print ("Generic vtables: %" G_GINT32_FORMAT "\n", mono_stats.generic_vtable_count); g_print ("Methods: %" G_GINT32_FORMAT "\n", mono_stats.method_count); g_print ("Static data size: %" G_GINT32_FORMAT "\n", mono_stats.class_static_data_size); g_print ("VTable data size: %" G_GINT32_FORMAT "\n", mono_stats.class_vtable_size); g_print ("Mscorlib mempool size: %d\n", mono_mempool_get_allocated (mono_defaults.corlib->mempool)); g_print ("\nInitialized classes: %" G_GINT32_FORMAT "\n", mono_stats.generic_class_count); g_print ("Inflated types: %" G_GINT32_FORMAT "\n", mono_stats.inflated_type_count); g_print ("Generics virtual invokes: %" G_GINT32_FORMAT "\n", mono_jit_stats.generic_virtual_invocations); g_print ("Sharable generic methods: %" G_GINT32_FORMAT "\n", mono_stats.generics_sharable_methods); g_print ("Unsharable generic methods: %" G_GINT32_FORMAT "\n", mono_stats.generics_unsharable_methods); g_print ("Shared generic methods: %" G_GINT32_FORMAT "\n", mono_stats.generics_shared_methods); g_print ("Shared vtype generic methods: %" G_GINT32_FORMAT "\n", mono_stats.gsharedvt_methods); g_print ("IMT tables size: %" G_GINT32_FORMAT "\n", mono_stats.imt_tables_size); g_print ("IMT number of tables: %" G_GINT32_FORMAT "\n", mono_stats.imt_number_of_tables); g_print ("IMT number of methods: %" G_GINT32_FORMAT "\n", mono_stats.imt_number_of_methods); g_print ("IMT used slots: %" G_GINT32_FORMAT "\n", mono_stats.imt_used_slots); g_print ("IMT colliding slots: %" G_GINT32_FORMAT "\n", mono_stats.imt_slots_with_collisions); g_print ("IMT max collisions: %" G_GINT32_FORMAT "\n", mono_stats.imt_max_collisions_in_slot); g_print ("IMT methods at max col: %" G_GINT32_FORMAT "\n", mono_stats.imt_method_count_when_max_collisions); g_print ("IMT trampolines size: %" G_GINT32_FORMAT "\n", mono_stats.imt_trampolines_size); g_print ("JIT info table inserts: %" G_GINT32_FORMAT "\n", mono_stats.jit_info_table_insert_count); g_print ("JIT info table removes: %" G_GINT32_FORMAT "\n", mono_stats.jit_info_table_remove_count); g_print ("JIT info table lookups: %" G_GINT32_FORMAT "\n", mono_stats.jit_info_table_lookup_count); mono_counters_dump (MONO_COUNTER_SECTION_MASK | MONO_COUNTER_MONOTONIC, NULL); g_print ("\n"); } } static void jit_stats_cleanup (void) { g_free (mono_jit_stats.max_ratio_method); mono_jit_stats.max_ratio_method = NULL; g_free 
(mono_jit_stats.biggest_method); mono_jit_stats.biggest_method = NULL; } static void runtime_cleanup (MonoDomain *domain, gpointer user_data) { mini_cleanup (domain); } void mini_cleanup (MonoDomain *domain) { if (mono_stats.enabled) g_printf ("Printing runtime stats at shutdown\n"); mono_runtime_print_stats (); jit_stats_cleanup (); mono_jit_dump_cleanup (); mini_get_interp_callbacks ()->cleanup (); mono_component_event_pipe ()->shutdown (); mono_component_diagnostics_server ()->shutdown (); } void mono_set_defaults (int verbose_level, guint32 opts) { mini_verbose = verbose_level; mono_set_optimizations (opts); } void mono_disable_optimizations (guint32 opts) { default_opt &= ~opts; } void mono_set_optimizations (guint32 opts) { if (opts & MONO_OPT_AGGRESSIVE_INLINING) opts |= MONO_OPT_INLINE; default_opt = opts; default_opt_set = TRUE; #ifdef MONO_ARCH_GSHAREDVT_SUPPORTED mono_set_generic_sharing_vt_supported (mono_aot_only || ((default_opt & MONO_OPT_GSHAREDVT) != 0)); #else if (mono_llvm_only) mono_set_generic_sharing_vt_supported (TRUE); #endif } void mono_set_verbose_level (guint32 level) { mini_verbose = level; } static const char* mono_get_runtime_build_version (void) { return FULL_VERSION; } /** * mono_get_runtime_build_info: * The returned string is owned by the caller. The returned string * format is <code>VERSION (FULL_VERSION BUILD_DATE)</code> and build date is optional. * \returns the runtime version + build date in string format. */ char* mono_get_runtime_build_info (void) { if (mono_build_date) return g_strdup_printf ("%s (%s %s)", VERSION, FULL_VERSION, mono_build_date); else return g_strdup_printf ("%s (%s)", VERSION, FULL_VERSION); } static void mono_precompile_assembly (MonoAssembly *ass, void *user_data) { GHashTable *assemblies = (GHashTable*)user_data; MonoImage *image = mono_assembly_get_image_internal (ass); MonoMethod *method, *invoke; int i, count = 0; if (g_hash_table_lookup (assemblies, ass)) return; g_hash_table_insert (assemblies, ass, ass); if (mini_verbose > 0) printf ("PRECOMPILE: %s.\n", mono_image_get_filename (image)); for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) { ERROR_DECL (error); method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error); if (!method) { mono_error_cleanup (error); /* FIXME don't swallow the error */ continue; } if (method->flags & METHOD_ATTRIBUTE_ABSTRACT) continue; if (method->is_generic || mono_class_is_gtd (method->klass)) continue; count++; if (mini_verbose > 1) { char * desc = mono_method_full_name (method, TRUE); g_print ("Compiling %d %s\n", count, desc); g_free (desc); } mono_compile_method_checked (method, error); if (!is_ok (error)) { mono_error_cleanup (error); /* FIXME don't swallow the error */ continue; } if (strcmp (method->name, "Finalize") == 0) { invoke = mono_marshal_get_runtime_invoke (method, FALSE); mono_compile_method_checked (invoke, error); mono_error_assert_ok (error); } } /* Load and precompile referenced assemblies as well */ for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_ASSEMBLYREF); ++i) { mono_assembly_load_reference (image, i); if (image->references [i]) mono_precompile_assembly (image->references [i], assemblies); } } void mono_precompile_assemblies () { GHashTable *assemblies = g_hash_table_new (NULL, NULL); mono_assembly_foreach ((GFunc)mono_precompile_assembly, assemblies); g_hash_table_destroy (assemblies); } /* * Used by LLVM. * Have to export this for AOT. 
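* Note (editor's addition): the function below is never expected to run;
* its body simply calls g_assert_not_reached ().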
*/ void mono_personality (void) { /* Not used */ g_assert_not_reached (); } static MonoBreakPolicy always_insert_breakpoint (MonoMethod *method) { return MONO_BREAK_POLICY_ALWAYS; } static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint; /** * mono_set_break_policy: * \param policy_callback the new callback function * * Allow embedders to decide whether to actually obey breakpoint instructions * (both break IL instructions and \c Debugger.Break method calls), for example * to not allow an app to be aborted by a perfectly valid IL opcode when executing * untrusted or semi-trusted code. * * \p policy_callback will be called every time a break point instruction needs to * be inserted with the method argument being the method that calls \c Debugger.Break * or has the IL \c break instruction. The callback should return \c MONO_BREAK_POLICY_NEVER * if it wants the breakpoint to not be effective in the given method. * \c MONO_BREAK_POLICY_ALWAYS is the default. */ void mono_set_break_policy (MonoBreakPolicyFunc policy_callback) { if (policy_callback) break_policy_func = policy_callback; else break_policy_func = always_insert_breakpoint; } gboolean mini_should_insert_breakpoint (MonoMethod *method) { switch (break_policy_func (method)) { case MONO_BREAK_POLICY_ALWAYS: return TRUE; case MONO_BREAK_POLICY_NEVER: return FALSE; case MONO_BREAK_POLICY_ON_DBG: g_warning ("mdb no longer supported"); return FALSE; default: g_warning ("Incorrect value returned from break policy callback"); return FALSE; } } // Custom handlers currently only implemented by Windows. #ifndef HOST_WIN32 gboolean mono_runtime_install_custom_handlers (const char *handlers) { return FALSE; } void mono_runtime_install_custom_handlers_usage (void) { fprintf (stdout, "Custom Handlers:\n" " --handlers=HANDLERS Enable handler support, HANDLERS is a comma\n" " separated list of available handlers to install.\n" "\n" "No handlers supported on current platform.\n"); } #endif /* HOST_WIN32 */ static void mini_invalidate_transformed_interp_methods (MonoAssemblyLoadContext *alc G_GNUC_UNUSED, uint32_t generation G_GNUC_UNUSED) { mini_get_interp_callbacks ()->invalidate_transformed (); } static void mini_interp_jit_info_foreach(InterpJitInfoFunc func, gpointer user_data) { mini_get_interp_callbacks ()->jit_info_foreach (func, user_data); } static gboolean mini_interp_sufficient_stack (gsize size) { return mini_get_interp_callbacks ()->sufficient_stack (size); } /* * mini_get_default_mem_manager: * * Return a memory manager which can be used for default allocation. * FIXME: Review all callers and change them to allocate from a * class/method/assembly specific memory manager. */ MonoMemoryManager* mini_get_default_mem_manager (void) { return mono_mem_manager_get_ambient (); } gpointer mini_alloc_generic_virtual_trampoline (MonoVTable *vtable, int size) { static gboolean inited = FALSE; static int generic_virtual_trampolines_size = 0; if (!inited) { mono_counters_register ("Generic virtual trampoline bytes", MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &generic_virtual_trampolines_size); inited = TRUE; } generic_virtual_trampolines_size += size; return mono_mem_manager_code_reserve (m_class_get_mem_manager (vtable->klass), size); } MonoException* mini_get_stack_overflow_ex (void) { return mono_get_root_domain ()->stack_overflow_ex; } const MonoEECallbacks* mini_get_interp_callbacks_api (void) { return mono_interp_callbacks_pointer; }
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/mini-s390x.c
/** * @file * @author - Neale Ferguson ([email protected]) * * @section description * Function - S/390 backend for the Mono code generator. * * Date - January, 2004 * * Derivation - From mini-x86 & mini-ppc by - * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * */ /*------------------------------------------------------------------*/ /* D e f i n e s */ /*------------------------------------------------------------------*/ #define MAX_ARCH_DELEGATE_PARAMS 10 #define EMIT_COND_BRANCH(ins,cond) \ { \ if (ins->inst_true_bb->native_offset) { \ int displace; \ displace = ((cfg->native_code + \ ins->inst_true_bb->native_offset) - code) / 2; \ if (s390_is_imm16(displace)) { \ s390_brc (code, cond, displace); \ } else { \ s390_jcl (code, cond, displace); \ } \ } else { \ mono_add_patch_info_rel (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_BB, ins->inst_true_bb, \ MONO_R_S390_RELINS); \ s390_jcl (code, cond, 0); \ } \ } #define EMIT_UNCOND_BRANCH(ins) \ { \ if (ins->inst_target_bb->native_offset) { \ int displace; \ displace = ((cfg->native_code + \ ins->inst_target_bb->native_offset) - code) / 2; \ if (s390_is_imm16(displace)) { \ s390_brc (code, S390_CC_UN, displace); \ } else { \ s390_jcl (code, S390_CC_UN, displace); \ } \ } else { \ mono_add_patch_info_rel (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_BB, ins->inst_target_bb, \ MONO_R_S390_RELINS); \ s390_jcl (code, S390_CC_UN, 0); \ } \ } #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) \ do { \ mono_add_patch_info (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_EXC, exc_name); \ s390_jcl (code, cond, 0); \ } while (0); #define EMIT_COMP_AND_BRANCH(ins, cab, cmp) \ { \ if (ins->inst_true_bb->native_offset) { \ int displace; \ displace = ((cfg->native_code + \ ins->inst_true_bb->native_offset) - code) / 2; \ if (s390_is_imm16(displace)) { \ s390_##cab (code, ins->sreg1, ins->sreg2, \ ins->sreg3, displace); \ } else { \ s390_##cmp (code, ins->sreg1, ins->sreg2); \ displace = ((cfg->native_code + \ ins->inst_true_bb->native_offset) - code) / 2; \ s390_jcl (code, ins->sreg3, displace); \ } \ } else { \ s390_##cmp (code, ins->sreg1, ins->sreg2); \ mono_add_patch_info_rel (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_BB, ins->inst_true_bb, \ MONO_R_S390_RELINS); \ s390_jcl (code, ins->sreg3, 0); \ } \ } #define EMIT_COMP_AND_BRANCH_IMM(ins, cab, cmp, lat, logical) \ { \ if (ins->inst_true_bb->native_offset) { \ int displace; \ if ((ins->backend.data == 0) && (!logical)) { \ s390_##lat (code, ins->sreg1, ins->sreg1); \ displace = ((cfg->native_code + \ ins->inst_true_bb->native_offset) - code) / 2; \ if (s390_is_imm16(displace)) { \ s390_brc (code, ins->sreg3, displace); \ } else { \ s390_jcl (code, ins->sreg3, displace); \ } \ } else { \ S390_SET (code, s390_r0, ins->backend.data); \ displace = ((cfg->native_code + \ ins->inst_true_bb->native_offset) - code) / 2; \ if (s390_is_imm16(displace)) { \ s390_##cab (code, ins->sreg1, s390_r0, \ ins->sreg3, displace); \ } else { \ s390_##cmp (code, ins->sreg1, s390_r0); \ displace = ((cfg->native_code + \ ins->inst_true_bb->native_offset) - code) / 2; \ s390_jcl (code, ins->sreg3, displace); \ } \ } \ } else { \ if ((ins->backend.data == 0) && (!logical)) { \ s390_##lat (code, ins->sreg1, ins->sreg1); \ } else { \ S390_SET (code, s390_r0, ins->backend.data); \ s390_##cmp (code, ins->sreg1, s390_r0); \ } \ mono_add_patch_info_rel (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_BB, ins->inst_true_bb, \ MONO_R_S390_RELINS); \ s390_jcl (code, ins->sreg3, 0); \ } \ } #define 
CHECK_SRCDST_COM \ if (ins->dreg == ins->sreg2) { \ src2 = ins->sreg1; \ } else { \ src2 = ins->sreg2; \ if (ins->dreg != ins->sreg1) { \ s390_lgr (code, ins->dreg, ins->sreg1); \ } \ } #define CHECK_SRCDST_NCOM \ if (ins->dreg == ins->sreg2) { \ src2 = s390_r13; \ s390_lgr (code, s390_r13, ins->sreg2); \ } else { \ src2 = ins->sreg2; \ } \ if (ins->dreg != ins->sreg1) { \ s390_lgr (code, ins->dreg, ins->sreg1); \ } #define CHECK_SRCDST_COM_I \ if (ins->dreg == ins->sreg2) { \ src2 = ins->sreg1; \ } else { \ src2 = ins->sreg2; \ if (ins->dreg != ins->sreg1) { \ s390_lgfr (code, ins->dreg, ins->sreg1); \ } \ } #define CHECK_SRCDST_NCOM_I \ if (ins->dreg == ins->sreg2) { \ src2 = s390_r13; \ s390_lgfr (code, s390_r13, ins->sreg2); \ } else { \ src2 = ins->sreg2; \ } \ if (ins->dreg != ins->sreg1) { \ s390_lgfr (code, ins->dreg, ins->sreg1); \ } #define CHECK_SRCDST_COM_F \ if (ins->dreg == ins->sreg2) { \ src2 = ins->sreg1; \ } else { \ src2 = ins->sreg2; \ if (ins->dreg != ins->sreg1) { \ s390_ldr (code, ins->dreg, ins->sreg1); \ } \ } #define CHECK_SRCDST_NCOM_F(op) \ if (ins->dreg == ins->sreg2) { \ s390_lgdr (code, s390_r0, s390_f15); \ s390_ldr (code, s390_f15, ins->sreg2); \ if (ins->dreg != ins->sreg1) { \ s390_ldr (code, ins->dreg, ins->sreg1); \ } \ s390_ ## op (code, ins->dreg, s390_f15); \ s390_ldgr (code, s390_f15, s390_r0); \ } else { \ if (ins->dreg != ins->sreg1) { \ s390_ldr (code, ins->dreg, ins->sreg1); \ } \ s390_ ## op (code, ins->dreg, ins->sreg2); \ } #define CHECK_SRCDST_NCOM_FR(op, m) \ s390_lgdr (code, s390_r1, s390_f14); \ if (ins->dreg == ins->sreg2) { \ s390_lgdr (code, s390_r0, s390_f15); \ s390_ldr (code, s390_f15, ins->sreg2); \ if (ins->dreg != ins->sreg1) { \ s390_ldr (code, ins->dreg, ins->sreg1); \ } \ s390_ ## op (code, ins->dreg, s390_f15, m, s390_f14); \ s390_ldgr (code, s390_f15, s390_r0); \ } else { \ if (ins->dreg != ins->sreg1) { \ s390_ldr (code, ins->dreg, ins->sreg1); \ } \ s390_ ## op (code, ins->dreg, ins->sreg2, m, s390_f14); \ } \ s390_ldgr (code, s390_f14, s390_r1); #undef DEBUG #define DEBUG(a) if (cfg->verbose_level > 1) a #define MAX_EXC 16 #define S390_TRACE_STACK_SIZE (5*sizeof(gpointer)+4*sizeof(gdouble)) #define MAX(a, b) ((a) > (b) ? 
(a) : (b)) /* * imt trampoline size values */ #define CMP_SIZE 24 #define LOADCON_SIZE 20 #define LOAD_SIZE 6 #define BR_SIZE 2 #define JUMP_SIZE 6 #define ENABLE_WRONG_METHOD_CHECK 0 /*========================= End of Defines =========================*/ /*------------------------------------------------------------------*/ /* I n c l u d e s */ /*------------------------------------------------------------------*/ #include "mini.h" #include <string.h> #include <sys/types.h> #include <unistd.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/profiler-private.h> #include <mono/utils/mono-error.h> #include <mono/utils/mono-error-internals.h> #include <mono/utils/mono-math.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-hwcap.h> #include <mono/utils/mono-threads.h> #include <mono/utils/unlocked.h> #include "mini-s390x.h" #include "cpu-s390x.h" #include "jit-icalls.h" #include "ir-emit.h" #include "mini-gc.h" #include "aot-runtime.h" #include "mini-runtime.h" /*========================= End of Includes ========================*/ /*------------------------------------------------------------------*/ /* T y p e d e f s */ /*------------------------------------------------------------------*/ /** * Track stack use */ typedef struct { guint stack_size, code_size, parm_size, retStruct; } size_data; /** * ABI - register use in calls etc. */ typedef enum { RegTypeGeneral, RegTypeBase, RegTypeFP, RegTypeFPR4, RegTypeStructByVal, RegTypeStructByValInFP, RegTypeStructByAddr } ArgStorage; /** * Track method arguments */ typedef struct { gint32 offset; /* offset from caller's stack */ guint16 vtsize; /* in param area */ guint8 reg; ArgStorage regtype; guint32 size; /* Size of structure used by RegTypeStructByVal */ gint32 type; /* Data type of argument */ } ArgInfo; /** * Call information - parameters and stack use for s390x ABI */ struct CallInfo { int nargs; int lastgr; guint32 stack_usage; guint32 struct_ret; ArgInfo ret; ArgInfo sigCookie; size_data sz; int vret_arg_index; MonoMethodSignature *sig; ArgInfo args [1]; }; /** * Registers used in parameter passing */ typedef struct { gint64 gr[5]; /* R2-R6 */ gdouble fp[3]; /* F0-F2 */ } __attribute__ ((__packed__)) RegParm; /*========================= End of Typedefs ========================*/ /*------------------------------------------------------------------*/ /* P r o t o t y p e s */ /*------------------------------------------------------------------*/ static guint8 * backUpStackPtr(MonoCompile *, guint8 *); static void add_general (guint *, size_data *, ArgInfo *); static void add_stackParm (guint *, size_data *, ArgInfo *, gint, ArgStorage); static void add_float (guint *, size_data *, ArgInfo *, gboolean); static CallInfo * get_call_info (MonoMemPool *, MonoMethodSignature *); static guchar * emit_float_to_int (MonoCompile *, guchar *, int, int, int, gboolean); static __inline__ void emit_unwind_regs(MonoCompile *, guint8 *, int, int, long); static void compare_and_branch(MonoBasicBlock *, MonoInst *, int, gboolean); static __inline__ guint8 * emit_call(MonoCompile *, guint8 *, MonoJumpInfoType, gconstpointer); static guint8 * emit_thunk(guint8 *, gconstpointer); static void create_thunk(MonoCompile *, guint8 *, guint8 *, gpointer); static void update_thunk(MonoCompile *, guint8 *, gpointer); static void emit_patch_full (MonoCompile *, MonoJumpInfo *, guint8 *, gpointer, int); /*========================= End of Prototypes 
======================*/ /*------------------------------------------------------------------*/ /* G l o b a l V a r i a b l e s */ /*------------------------------------------------------------------*/ /** * The single-step trampoline */ static gpointer ss_trampoline; /** * The breakpoint trampoline */ static gpointer bp_trampoline; /** * Constants used in debugging - map general register names */ static const char * grNames[] = { "s390_r0", "s390_sp", "s390_r2", "s390_r3", "s390_r4", "s390_r5", "s390_r6", "s390_r7", "s390_r8", "s390_r9", "s390_r10", "s390_r11", "s390_r12", "s390_r13", "s390_r14", "s390_r15" }; /** * Constants used in debugging - map floating point register names */ static const char * fpNames[] = { "s390_f0", "s390_f1", "s390_f2", "s390_f3", "s390_f4", "s390_f5", "s390_f6", "s390_f7", "s390_f8", "s390_f9", "s390_f10", "s390_f11", "s390_f12", "s390_f13", "s390_f14", "s390_f15" }; /** * Constants used in debugging - map vector register names */ static const char * vrNames[] = { "vr0", "vr1", "vr2", "vr3", "vr4", "vr5", "vr6", "vr7", "vr8", "vr9", "vr10", "vr11", "vr12", "vr13", "vr14", "vr15", "vr16", "vr17", "vr18", "vr19", "vr20", "vr21", "vr22", "vr23", "vr24", "vr25", "vr26", "vr27", "vr28", "vr29", "vr30", "vr31" }; #if 0 /** * Constants used in debugging - ABI register types */ static const char *typeParm[] = { "General", "Base", "FPR8", "FPR4", "StructByVal", "StructByValInFP", "ByAddr"}; #endif /*====================== End of Global Variables ===================*/ static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math") static GENERATE_TRY_GET_CLASS_WITH_CACHE (mathf, "System", "MathF") /** * * @brief Return general register name * * @param[in] register number * @returns Name of register * * Returns the name of the general register specified by the input parameter. */ const char* mono_arch_regname (int reg) { if (reg >= 0 && reg < 16) return grNames [reg]; else return "unknown"; } /*========================= End of Function ========================*/ /** * * @brief Return floating point register name * * @param[in] register number * @returns Name of register * * Returns the name of the FP register specified by the input parameter. */ const char* mono_arch_fregname (int reg) { if (reg >= 0 && reg < 16) return fpNames [reg]; else return "unknown"; } /*========================= End of Function ========================*/ /** * * @brief Return vector register name * * @param[in] register number * @returns Name of register * * Returns the name of the vector register specified by the input parameter. */ const char * mono_arch_xregname (int reg) { if (reg < s390_VR_NREG) return vrNames [reg]; else return "unknown"; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific return argument information * * @param[in] @csig - Method signature * @param[in] @param_count - Number of parameters to consider * @param[out] @arg_info - An array in which to store results * @returns Size of the activation frame * * Gathers information on parameters such as size, alignment, and padding. * arg_info should be large * enough to hold param_count + 1 entries. 
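* Illustrative example (editor's note, not in the original header): with
* param_count == 2 the caller must supply an arg_info array of three
* entries; arg_info [0] describes the implicit this/vret area while
* arg_info [1..2] carry the size, offset and padding of the two
* declared parameters.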
*/ int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) { int k, frame_size = 0; int size, align, pad; int offset = 8; if (MONO_TYPE_ISSTRUCT (csig->ret)) { frame_size += sizeof (target_mgreg_t); offset += 8; } arg_info [0].offset = offset; if (csig->hasthis) { frame_size += sizeof (target_mgreg_t); offset += 8; } arg_info [0].size = frame_size; for (k = 0; k < param_count; k++) { if (csig->pinvoke && !csig->marshalling_disabled) size = mono_type_native_stack_size (csig->params [k], (guint32 *) &align); else size = mini_type_stack_size (csig->params [k], &align); frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; frame_size += size; arg_info [k + 1].pad = 0; arg_info [k + 1].size = size; offset += pad; arg_info [k + 1].offset = offset; offset += size; } align = MONO_ARCH_FRAME_ALIGNMENT; frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; return frame_size; } /*========================= End of Function ========================*/ /** * * @brief Emit an s390x move operation * * @param[in] @cfg - MonoCompile control block * @param[in] @dr - Destination register * @param[in] @ins - Current instruction * @param[in] @src - Instruction representing the source of move * * Emit a move instruction for VT parameters */ static void __inline__ emit_new_move(MonoCompile *cfg, int dr, MonoInst *ins, MonoInst *src) { MonoCallInst *call = (MonoCallInst *) ins->inst_p0; ArgInfo *ainfo = (ArgInfo *) ins->inst_p1; MonoInst *vtcopy = mono_compile_create_var (cfg, m_class_get_byval_arg (src->klass), OP_LOCAL); MonoInst *load; MonoInst *move; int size; if (call->signature->pinvoke && !call->signature->marshalling_disabled) { size = mono_type_native_stack_size (m_class_get_byval_arg (src->klass), NULL); vtcopy->backend.is_pinvoke = 1; } else { size = ins->backend.size; } EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype); MONO_INST_NEW (cfg, move, OP_S390_MOVE); move->sreg2 = load->dreg; move->inst_offset = 0; move->sreg1 = src->dreg; move->inst_imm = 0; move->backend.size = size; MONO_ADD_INS (cfg->cbb, move); if (dr != 0) MONO_EMIT_NEW_UNALU(cfg, OP_MOVE, dr, load->dreg); else MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, ainfo->reg, ainfo->offset, load->dreg); } /*========================= End of Function ========================*/ /** * * @brief Generate output sequence for VT register parameters * * @param[in] @cfg - MonoCompile control block * @param[in] @dr - Destination register * @param[in] @ins - Current instruction * @param[in] @src - Instruction representing the source * * Emit the output of structures for calls whose address is placed in a register. 
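* Illustrative note (editor's addition): small structures (0, 1, 2, 4 or
* 8 bytes) are widened to a plain register load - e.g. a 4-byte struct
* becomes an OP_LOADI4_MEMBASE into the argument register - while any
* other size falls back to emit_new_move (), which passes the address
* of a local copy instead.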
*/ static void __inline__ emit_outarg_vtr(MonoCompile *cfg, MonoInst *ins, MonoInst *src) { MonoCallInst *call = (MonoCallInst *) ins->inst_p0; ArgInfo *ainfo = (ArgInfo *) ins->inst_p1; int reg = mono_alloc_preg (cfg); switch (ins->backend.size) { case 0: MONO_EMIT_NEW_ICONST(cfg, reg, 0); break; case 1: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU1_MEMBASE, reg, src->dreg, 0); break; case 2: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU2_MEMBASE, reg, src->dreg, 0); break; case 4: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI4_MEMBASE, reg, src->dreg, 0); break; case 8: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI8_MEMBASE, reg, src->dreg, 0); break; default: emit_new_move (cfg, reg, ins, src); } mono_call_inst_add_outarg_reg(cfg, call, reg, ainfo->reg, FALSE); } /*========================= End of Function ========================*/ /** * * @brief Generate output sequence for VT stack parameters * * @param[in] @cfg - MonoCompile control block * @param[in] @dr - Destination register * @param[in] @ins - Current instruction * @param[in] @src - Instruction representing the source * * Emit the output of structures for calls whose address is placed on the stack */ static void __inline__ emit_outarg_vts(MonoCompile *cfg, MonoInst *ins, MonoInst *src) { ArgInfo *ainfo = (ArgInfo *) ins->inst_p1; int tmpr = mono_alloc_preg (cfg); switch (ins->backend.size) { case 0: MONO_EMIT_NEW_ICONST(cfg, tmpr, 0); MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, ainfo->reg, ainfo->offset, tmpr); break; case 1: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU1_MEMBASE, tmpr, src->dreg, 0); MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, ainfo->reg, ainfo->offset, tmpr); break; case 2: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU2_MEMBASE, tmpr, src->dreg, 0); MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, ainfo->reg, ainfo->offset, tmpr); break; case 4: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI4_MEMBASE, tmpr, src->dreg, 0); MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, ainfo->reg, ainfo->offset, tmpr); break; case 8: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI8_MEMBASE, tmpr, src->dreg, 0); MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, ainfo->reg, ainfo->offset, tmpr); break; default: { emit_new_move (cfg, 0, ins, src); } } } /*========================= End of Function ========================*/ /** * * @brief Generate unwind information for range of registers * * @param[in] @cfg - MonoCompile control block * @param[in] @code - Location of code * @param[in] @start - Starting register * @param[in] @end - Ending register * @param[in] @offset - Offset in stack * * Emit unwind information for a range of registers. 
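* Illustrative note (editor's addition): one CFA offset record is emitted
* per register in [start, end], with @offset advancing by sizeof(gulong)
* each time, so covering s390_r6..s390_r14 produces nine consecutive
* stack-slot entries.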
*/ static void __inline__ emit_unwind_regs(MonoCompile *cfg, guint8 *code, int start, int end, long offset) { int i; for (i = start; i <= end; i++) { mono_emit_unwind_op_offset (cfg, code, i, offset); mini_gc_set_slot_type_from_cfa (cfg, offset, SLOT_NOREF); offset += sizeof(gulong); } } /*========================= End of Function ========================*/ /** * * @brief Get previous stack frame pointer * * @param[in] @cfg - MonoCompile control block * @param[in] @code - Location of code * @returns Previous stack pointer * * Retrieve the stack pointer of the previous frame */ static guint8 * backUpStackPtr(MonoCompile *cfg, guint8 *code) { int stackSize = cfg->stack_usage; if (cfg->flags & MONO_CFG_HAS_ALLOCA) { s390_lg (code, STK_BASE, 0, STK_BASE, 0); } else { if (cfg->frame_reg != STK_BASE) s390_lgr (code, STK_BASE, cfg->frame_reg); if (s390_is_imm16 (stackSize)) { s390_aghi (code, STK_BASE, stackSize); } else if (s390_is_imm32 (stackSize)) { s390_agfi (code, STK_BASE, stackSize); } else { while (stackSize > INT_MAX) { s390_aghi (code, STK_BASE, INT_MAX); stackSize -= INT_MAX; } s390_agfi (code, STK_BASE, stackSize); } } return (code); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific CPU initialization * * Perform CPU specific initialization to execute managed code. */ void mono_arch_cpu_init (void) { } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific initialization * * * Initialize architecture specific code: * - Define trigger pages for debugger * - Generate breakpoint code stub */ void mono_arch_init (void) { mono_set_partial_sharing_supported (FALSE); if (!mono_aot_only) bp_trampoline = mini_get_breakpoint_trampoline(); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific cleanup code * * * Clean up before termination: * - Free the trigger pages */ void mono_arch_cleanup (void) { } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific check for fast TLS access * * @returns True * * Returns whether we use fast inlined thread local storage managed access, * instead of falling back to native code.
*/ gboolean mono_arch_have_fast_tls (void) { return TRUE; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific check of mono optimizations * * @param[out] @exclude_mask - Optimization exclusion mask * @returns Optimizations supported on this CPU * * Returns the optimizations supported on this CPU */ guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask) { guint32 opts = 0; /* * No s390-specific optimizations yet */ *exclude_mask = 0; return opts; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific allocation of integer variables * * @param[in] @cfg - MonoCompile control block * @returns A list of integer variables * * Returns a list of allocatable integer variables */ GList * mono_arch_get_allocatable_int_vars (MonoCompile *cfg) { GList *vars = NULL; int i; for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); /* unused vars */ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos) continue; if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG)) continue; /* we can only allocate 32 bit values */ if (mono_is_regsize_var(ins->inst_vtype)) { g_assert (MONO_VARINFO (cfg, i)->reg == -1); g_assert (i == vmv->idx); vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE); } } return vars; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific determination of usable integer registers * * @param[in] @cfg - MonoCompile control block * @returns A list of allocatable registers * * Returns a list of usable integer registers */ GList * mono_arch_get_global_int_regs (MonoCompile *cfg) { GList *regs = NULL; MonoMethodHeader *header; int i, top = 13; header = cfg->header; if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses) cfg->frame_reg = s390_r11; /* FIXME: s390_r12 is reserved for bkchain_reg. Only reserve it if needed */ top = 12; for (i = 8; i < top; ++i) { if ((cfg->frame_reg != i) && //!((cfg->uses_rgctx_reg) && (i == MONO_ARCH_IMT_REG))) (i != MONO_ARCH_IMT_REG)) regs = g_list_prepend (regs, GUINT_TO_POINTER (i)); } return regs; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific flush of instruction cache * * @param[in] @code - Start of code * @param[in] @size - Amount to be flushed * * Flush the CPU icache. 
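* Note (editor's addition): the body below is empty, so no explicit
* flush is performed on this backend.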
*/ void mono_arch_flush_icache (guint8 *code, gint size) { } /*========================= End of Function ========================*/ /** * * @brief Add an integer register parameter * * @param[in] @gr - Address of current register number * @param[in] @sz - Stack size data * @param[in] @ainfo - Parameter information * * Assign a parameter to a general register or spill it onto the stack */ static void inline add_general (guint *gr, size_data *sz, ArgInfo *ainfo) { if (*gr > S390_LAST_ARG_REG) { sz->stack_size = S390_ALIGN(sz->stack_size, sizeof(long)); ainfo->offset = sz->stack_size; ainfo->reg = STK_BASE; ainfo->regtype = RegTypeBase; sz->stack_size += sizeof(long); sz->code_size += 12; } else { ainfo->reg = *gr; ainfo->regtype = RegTypeGeneral; sz->code_size += 8; } (*gr) ++; } /*========================= End of Function ========================*/ /** * * @brief Add a structure variable to parameter list * * @param[in] @gr - Address of current register number * @param[in] @sz - Stack size data * @param[in] @ainfo - Parameter information * @param[in] @size - Size of parameter * @param[in] @type - Type of stack parameter (reference or value) * * Assign a structure address to a register or spill it onto the stack */ static void inline add_stackParm (guint *gr, size_data *sz, ArgInfo *ainfo, gint size, ArgStorage type) { if (*gr > S390_LAST_ARG_REG) { sz->stack_size = S390_ALIGN(sz->stack_size, sizeof(long)); ainfo->reg = STK_BASE; ainfo->offset = sz->stack_size; sz->stack_size += sizeof (target_mgreg_t); sz->parm_size += sizeof(gpointer); } else { ainfo->reg = *gr; } (*gr) ++; ainfo->regtype = type; ainfo->size = size; ainfo->vtsize = size; sz->parm_size += size; } /*========================= End of Function ========================*/ /** * * @brief Add a floating point register parameter * * @param[in] @fr - Address of current register number * @param[in] @sz - Stack size data * @param[in] @ainfo - Parameter information * @param[in] @isDouble - Precision of parameter * * Assign a parameter to a FP register or spill it onto the stack */ static void inline add_float (guint *fr, size_data *sz, ArgInfo *ainfo, gboolean isDouble) { if ((*fr) <= S390_LAST_FPARG_REG) { if (isDouble) ainfo->regtype = RegTypeFP; else ainfo->regtype = RegTypeFPR4; ainfo->reg = *fr; sz->code_size += 4; (*fr) += 2; } else { ainfo->offset = sz->stack_size; ainfo->reg = STK_BASE; sz->code_size += 4; sz->stack_size += sizeof(double); ainfo->regtype = RegTypeBase; } } /*========================= End of Function ========================*/ /** * * @brief Extract information about call parameters and stack use * * @param[in] @mp - Mono Memory Pool * @param[in] @sig - Mono Method Signature * @returns Information about the parameters and stack usage for a call * * Determine the amount of space required for code and stack. In addition * determine starting points for stack-based parameters, and area for * structures being returned on the stack. 
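* Illustrative summary (editor's addition, derived from the code below):
* integer and pointer arguments are assigned to r2..S390_LAST_ARG_REG by
* add_general (), floating point arguments to every other FP register up
* to S390_LAST_FPARG_REG by add_float (), and anything beyond those
* limits is spilled to the stack as RegTypeBase.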
*/ static CallInfo * get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { guint i, fr, gr, size, pstart; int nParm = sig->hasthis + sig->param_count; MonoType *ret_type; guint32 simpleType, align; gboolean is_pinvoke = sig->pinvoke; CallInfo *cinfo; size_data *sz; if (mp) cinfo = (CallInfo *) mono_mempool_alloc0 (mp, sizeof (CallInfo) + sizeof (ArgInfo) * nParm); else cinfo = (CallInfo *) g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * nParm); fr = 0; gr = s390_r2; nParm = 0; cinfo->struct_ret = 0; cinfo->sig = sig; sz = &cinfo->sz; sz->retStruct = 0; sz->stack_size = S390_MINIMAL_STACK_SIZE; sz->code_size = 0; sz->parm_size = 0; align = 0; size = 0; /*----------------------------------------------------------*/ /* We determine the size of the return code/stack in case we*/ /* need to reserve a register to be used to address a stack */ /* area that the callee will use. */ /*----------------------------------------------------------*/ ret_type = mini_get_underlying_type (sig->ret); simpleType = ret_type->type; enum_retvalue: switch (simpleType) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_OBJECT: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_CLASS: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: case MONO_TYPE_STRING: cinfo->ret.reg = s390_r2; sz->code_size += 4; break; case MONO_TYPE_R4: case MONO_TYPE_R8: cinfo->ret.reg = s390_f0; sz->code_size += 4; break; case MONO_TYPE_I8: case MONO_TYPE_U8: cinfo->ret.reg = s390_r2; sz->code_size += 4; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (sig->ret)) { cinfo->ret.reg = s390_r2; sz->code_size += 4; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: { MonoClass *klass = mono_class_from_mono_type_internal (sig->ret); if (m_class_is_enumtype (klass)) { simpleType = mono_class_enum_basetype_internal (klass)->type; goto enum_retvalue; } size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, sig->pinvoke && !sig->marshalling_disabled); cinfo->struct_ret = 1; cinfo->ret.size = size; cinfo->ret.vtsize = size; break; } case MONO_TYPE_TYPEDBYREF: { MonoClass *klass = mono_class_from_mono_type_internal (sig->ret); size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, sig->pinvoke && !sig->marshalling_disabled); cinfo->struct_ret = 1; cinfo->ret.size = size; cinfo->ret.vtsize = size; } break; case MONO_TYPE_VOID: break; default: g_error ("Can't handle as return value 0x%x", sig->ret->type); } pstart = 0; /* * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after * the first argument, allowing 'this' to be always passed in the first arg reg. * Also do this if the first argument is a reference type, since virtual calls * are sometimes made using calli without sig->hasthis set, like in the delegate * invoke wrappers. 
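* Illustrative note (editor's addition): for an instance method that
* returns a structure by value this yields the register order
* r2 = this, r3 = vret address, r4 onwards = remaining arguments.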
*/ if (cinfo->struct_ret && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) { if (sig->hasthis) { cinfo->args[nParm].size = sizeof (target_mgreg_t); add_general (&gr, sz, cinfo->args + nParm); } else { cinfo->args[nParm].size = sizeof (target_mgreg_t); add_general (&gr, sz, &cinfo->args [sig->hasthis + nParm]); pstart = 1; } nParm ++; cinfo->vret_arg_index = 1; cinfo->ret.reg = gr; gr ++; } else { /* this */ if (sig->hasthis) { cinfo->args[nParm].size = sizeof (target_mgreg_t); add_general (&gr, sz, cinfo->args + nParm); nParm ++; } if (cinfo->struct_ret) { cinfo->ret.reg = gr; gr++; } } if ((sig->call_convention == MONO_CALL_VARARG) && (sig->param_count == 0)) { gr = S390_LAST_ARG_REG + 1; fr = S390_LAST_FPARG_REG + 1; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, sz, &cinfo->sigCookie); } /*----------------------------------------------------------*/ /* We determine the size of the parameter code and stack */ /* requirements by checking the types and sizes of the */ /* parameters. */ /*----------------------------------------------------------*/ for (i = pstart; i < sig->param_count; ++i) { MonoType *ptype; /*--------------------------------------------------*/ /* Handle vararg type calls. All args are put on */ /* the stack. */ /*--------------------------------------------------*/ if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { gr = S390_LAST_ARG_REG + 1; fr = S390_LAST_FPARG_REG + 1; add_general (&gr, sz, &cinfo->sigCookie); } if (m_type_is_byref (sig->params [i])) { add_general (&gr, sz, cinfo->args+nParm); cinfo->args[nParm].size = sizeof(gpointer); nParm++; continue; } ptype = mini_get_underlying_type (sig->params [i]); simpleType = ptype->type; cinfo->args[nParm].type = simpleType; switch (simpleType) { case MONO_TYPE_I1: case MONO_TYPE_U1: cinfo->args[nParm].size = sizeof(char); add_general (&gr, sz, cinfo->args+nParm); nParm++; break; case MONO_TYPE_I2: case MONO_TYPE_U2: cinfo->args[nParm].size = sizeof(short); add_general (&gr, sz, cinfo->args+nParm); nParm++; break; case MONO_TYPE_I4: case MONO_TYPE_U4: cinfo->args[nParm].size = sizeof(int); add_general (&gr, sz, cinfo->args+nParm); nParm++; break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: cinfo->args[nParm].size = sizeof(gpointer); add_general (&gr, sz, cinfo->args+nParm); nParm++; break; case MONO_TYPE_I8: case MONO_TYPE_U8: cinfo->args[nParm].size = sizeof(long long); add_general (&gr, sz, cinfo->args+nParm); nParm++; break; case MONO_TYPE_R4: cinfo->args[nParm].size = sizeof(float); add_float (&fr, sz, cinfo->args+nParm, FALSE); nParm++; break; case MONO_TYPE_R8: cinfo->args[nParm].size = sizeof(double); add_float (&fr, sz, cinfo->args+nParm, TRUE); nParm++; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ptype)) { cinfo->args[nParm].size = sizeof(gpointer); add_general (&gr, sz, cinfo->args+nParm); nParm++; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: { MonoMarshalType *info; MonoClass *klass = mono_class_from_mono_type_internal (ptype); if (sig->pinvoke && !sig->marshalling_disabled) size = mono_class_native_size(klass, NULL); else size = mono_class_value_size(klass, NULL); if (simpleType != MONO_TYPE_GENERICINST) { info = mono_marshal_load_type_info(klass); if ((info->native_size == sizeof(float)) && 
(info->num_fields == 1) && (info->fields[0].field->type->type == MONO_TYPE_R4)) { cinfo->args[nParm].size = sizeof(float); add_float(&fr, sz, cinfo->args+nParm, FALSE); nParm ++; break; } if ((info->native_size == sizeof(double)) && (info->num_fields == 1) && (info->fields[0].field->type->type == MONO_TYPE_R8)) { cinfo->args[nParm].size = sizeof(double); add_float(&fr, sz, cinfo->args+nParm, TRUE); nParm ++; break; } } cinfo->args[nParm].vtsize = 0; cinfo->args[nParm].size = 0; switch (size) { /*----------------------------------*/ /* On S/390, structures of size 1, */ /* 2, 4, and 8 bytes are passed in */ /* (a) register(s). */ /*----------------------------------*/ case 0: case 1: case 2: case 4: case 8: add_general(&gr, sz, cinfo->args+nParm); cinfo->args[nParm].size = size; cinfo->args[nParm].regtype = RegTypeStructByVal; nParm++; break; default: add_stackParm(&gr, sz, cinfo->args+nParm, size, RegTypeStructByVal); nParm++; } } break; case MONO_TYPE_TYPEDBYREF: { add_stackParm(&gr, sz, cinfo->args+nParm, sizeof(uintptr_t), RegTypeStructByAddr); nParm++; } break; default: g_error ("Can't trampoline 0x%x", ptype); } } /*----------------------------------------------------------*/ /* Handle the case where there are no implicit arguments */ /*----------------------------------------------------------*/ if ((sig->call_convention == MONO_CALL_VARARG) && (nParm > 0) && (!sig->pinvoke) && (sig->param_count == sig->sentinelpos)) { gr = S390_LAST_ARG_REG + 1; fr = S390_LAST_FPARG_REG + 1; add_general (&gr, sz, &cinfo->sigCookie); } /* * If we are passing a structure back then we make room at * the end of the parameters that may have been placed on * the stack */ if (cinfo->struct_ret) { cinfo->ret.offset = sz->stack_size; sz->stack_size += S390_ALIGN(cinfo->ret.size, align); } cinfo->lastgr = gr; sz->stack_size = sz->stack_size + sz->parm_size; sz->stack_size = S390_ALIGN(sz->stack_size, sizeof(long)); return (cinfo); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific allocation of variables * * @param[in] @cfg - Compile control block * * Set var information according to the calling convention for s390x. * */ void mono_arch_allocate_vars (MonoCompile *cfg) { MonoMethodSignature *sig; MonoMethodHeader *header; MonoInst *inst; CallInfo *cinfo; int iParm, iVar, offset, align, size, curinst; int frame_reg = STK_BASE; int sArg, eArg; header = cfg->header; cfg->flags |= MONO_CFG_HAS_SPILLUP; /*---------------------------------------------------------*/ /* We use the frame register also for any method that has */ /* filter clauses. This way, when the handlers are called, */ /* the code will reference local variables using the frame */ /* reg instead of the stack pointer: if we had to restore */ /* the stack pointer, we'd corrupt the method frames that */ /* are already on the stack (since filters get called */ /* before stack unwinding happens) when the filter code */ /* would call any method. 
*/
	/*---------------------------------------------------------*/
	if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
		frame_reg = s390_r11;

	cfg->frame_reg = frame_reg;
	cfg->arch.bkchain_reg = -1;

	if (frame_reg != STK_BASE)
		cfg->used_int_regs |= (1LL << frame_reg);

	sig = mono_method_signature_internal (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	/*--------------------------------------------------------------*/
	/* local vars are at a positive offset from the stack pointer   */
	/* also note that if the function uses alloca, we use s390_r11  */
	/* to point at the local variables.                             */
	/* add parameter area size for called functions                 */
	/*--------------------------------------------------------------*/
	if (cfg->param_area == 0)
		offset = S390_MINIMAL_STACK_SIZE;
	else
		offset = cfg->param_area;

	cfg->sig_cookie = 0;

	if (MONO_TYPE_ISSTRUCT(sig->ret)) {
		cfg->ret->opcode = OP_REGVAR;
		cfg->ret->inst_c0 = cfg->ret->dreg = cinfo->ret.reg;
	} else {
		switch (mini_get_underlying_type (sig->ret)->type) {
		case MONO_TYPE_VOID:
			break;
		default:
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = cfg->ret->dreg = cinfo->ret.reg;
		}
	}

	if (sig->hasthis) {
		inst = cfg->args [0];
		if (inst->opcode != OP_REGVAR) {
			inst->opcode = OP_REGOFFSET;
			inst->inst_basereg = frame_reg;
			offset = S390_ALIGN(offset, sizeof(gpointer));
			inst->inst_offset = offset;
			offset += sizeof (target_mgreg_t);
		}
		curinst = sArg = 1;
	} else {
		curinst = sArg = 0;
	}

	eArg = sig->param_count + sArg;

	if (sig->call_convention == MONO_CALL_VARARG)
		cfg->sig_cookie += S390_MINIMAL_STACK_SIZE;

	for (iParm = sArg; iParm < eArg; ++iParm) {
		inst = cfg->args [curinst];
		if (inst->opcode != OP_REGVAR) {
			switch (cinfo->args[iParm].regtype) {
			case RegTypeStructByAddr : {
				MonoInst *indir;

				size = sizeof (target_mgreg_t);
				if (cinfo->args [iParm].reg == STK_BASE) {
					/* Similar to the == STK_BASE case below */
					cfg->arch.bkchain_reg = s390_r12;
					cfg->used_int_regs |= 1 << cfg->arch.bkchain_reg;

					inst->opcode = OP_REGOFFSET;
					inst->dreg = mono_alloc_preg (cfg);
					inst->inst_basereg = cfg->arch.bkchain_reg;
					inst->inst_offset = cinfo->args [iParm].offset;
				} else {
					inst->opcode = OP_REGOFFSET;
					inst->dreg = mono_alloc_preg (cfg);
					inst->inst_basereg = cfg->frame_reg;
					// inst->inst_offset = cinfo->args [iParm].offset;
					inst->inst_offset = offset;
				}

				/* Add a level of indirection */
				MONO_INST_NEW (cfg, indir, 0);
				*indir = *inst;
				inst->opcode = OP_VTARG_ADDR;
				inst->inst_left = indir;
			}
				break;
			case RegTypeStructByVal : {
				MonoInst *indir;

				cfg->arch.bkchain_reg = s390_r12;
				cfg->used_int_regs |= 1 << cfg->arch.bkchain_reg;
				size = cinfo->args[iParm].size;

				if (cinfo->args [iParm].reg == STK_BASE) {
					int offStruct = 0;
					switch(size) {
					case 0: case 1: case 2: case 4: case 8:
						offStruct = (size < 8 ?
sizeof(uintptr_t) - size : 0); default: inst->opcode = OP_REGOFFSET; inst->dreg = mono_alloc_preg (cfg); inst->inst_basereg = cfg->arch.bkchain_reg; inst->inst_offset = cinfo->args [iParm].offset + offStruct; } } else { offset = S390_ALIGN(offset, sizeof(uintptr_t)); inst->opcode = OP_REGOFFSET; inst->inst_basereg = cfg->frame_reg; inst->inst_offset = offset; } switch (size) { case 0 : case 1 : case 2 : case 4 : case 8 : break; default : /* Add a level of indirection */ MONO_INST_NEW (cfg, indir, 0); *indir = *inst; inst->opcode = OP_VTARG_ADDR; inst->inst_left = indir; } } break; default : if (cinfo->args [iParm].reg == STK_BASE) { /* * These arguments are in the previous frame, so we can't * compute their offset from the current frame pointer right * now, since cfg->stack_offset is not yet known, so dedicate a * register holding the previous frame pointer. */ cfg->arch.bkchain_reg = s390_r12; cfg->used_int_regs |= 1 << cfg->arch.bkchain_reg; inst->opcode = OP_REGOFFSET; inst->inst_basereg = cfg->arch.bkchain_reg; size = (cinfo->args[iParm].size < 8 ? 8 - cinfo->args[iParm].size : 0); inst->inst_offset = cinfo->args [iParm].offset + size; size = sizeof (long); } else { inst->opcode = OP_REGOFFSET; inst->inst_basereg = frame_reg; size = (cinfo->args[iParm].size < 8 ? sizeof(int) : sizeof(long)); offset = S390_ALIGN(offset, size); if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) inst->inst_offset = offset; else inst->inst_offset = offset + (8 - size); } } offset += MAX(size, 8); } curinst++; } cfg->locals_min_stack_offset = offset; curinst = cfg->locals_start; for (iVar = curinst; iVar < cfg->num_varinfo; ++iVar) { inst = cfg->varinfo [iVar]; if ((inst->flags & MONO_INST_IS_DEAD) || (inst->opcode == OP_REGVAR)) continue; /*--------------------------------------------------*/ /* inst->backend.is_pinvoke indicates native sized */ /* value types this is used by the pinvoke wrappers */ /* when they call functions returning structure */ /*--------------------------------------------------*/ if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype)) size = mono_class_native_size (mono_class_from_mono_type_internal (inst->inst_vtype), (guint32 *) &align); else size = mono_type_size (inst->inst_vtype, &align); offset = S390_ALIGN(offset, align); inst->inst_offset = offset; inst->opcode = OP_REGOFFSET; inst->inst_basereg = frame_reg; offset += size; DEBUG (g_print("allocating local %d to %ld, size: %d\n", iVar, inst->inst_offset, size)); } offset = S390_ALIGN(offset, sizeof(uintptr_t)); cfg->locals_max_stack_offset = offset; /*------------------------------------------------------*/ /* Reserve space to save LMF and caller saved registers */ /*------------------------------------------------------*/ if (cfg->method->save_lmf) offset += sizeof (MonoLMF); /*------------------------------------------------------*/ /* align the offset */ /*------------------------------------------------------*/ cfg->stack_offset = S390_ALIGN(offset, S390_STACK_ALIGNMENT); /*------------------------------------------------------*/ /* Fix offsets for args whose value is in parent frame */ /*------------------------------------------------------*/ for (iParm = sArg; iParm < eArg; ++iParm) { inst = cfg->args [iParm]; if (inst->opcode == OP_S390_STKARG) { inst->opcode = OP_REGOFFSET; inst->inst_offset += cfg->stack_offset; } } } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific creation of variables * * @param[in] @cfg - Compile control 
block
 *
 * Create variables for the method.
 *
 */
void
mono_arch_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig = mono_method_signature_internal (cfg->method);
	CallInfo *cinfo;

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	if (cinfo->struct_ret) {
		cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			printf ("vret_addr = ");
			mono_print_ins (cfg->vret_addr);
		}
	}

	if (cfg->gen_sdb_seq_points) {
		MonoInst *ins;

		ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
		ins->flags |= MONO_INST_VOLATILE;
		cfg->arch.ss_tramp_var = ins;

		ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
		ins->flags |= MONO_INST_VOLATILE;
		cfg->arch.bp_tramp_var = ins;
	}
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Add a register to the call operation
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @call - Call Instruction
 * @param[in] @storage - Register use type
 * @param[in] @reg - Register number
 * @param[in] @tree - Call arguments
 *
 * Add register use information to the call sequence
 */
static void
add_outarg_reg2 (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
{
	MonoInst *ins;

	switch (storage) {
	case RegTypeGeneral:
		MONO_INST_NEW (cfg, ins, OP_MOVE);
		ins->dreg = mono_alloc_ireg (cfg);
		ins->sreg1 = tree->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
		break;
	case RegTypeFP:
		MONO_INST_NEW (cfg, ins, OP_FMOVE);
		ins->dreg = mono_alloc_freg (cfg);
		ins->sreg1 = tree->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
		break;
	case RegTypeFPR4:
		MONO_INST_NEW (cfg, ins, OP_S390_SETF4RET);
		ins->dreg = mono_alloc_freg (cfg);
		ins->sreg1 = tree->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
		break;
	default:
		g_assert_not_reached ();
	}
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Emit a signature cookie
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @call - Call Instruction
 * @param[in] @cinfo - Call Information
 *
 * Emit the signature cookie as a parameter
 */
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
	MonoMethodSignature *tmpSig;
	MonoInst *sig_arg;

	cfg->disable_aot = TRUE;

	/*
	 * mono_ArgIterator_Setup assumes the signature cookie is
	 * passed first and all the arguments which were before it are
	 * passed on the stack after the signature. So compensate
	 * by passing a different signature.
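	 *
	 * As a hypothetical illustration: for a vararg call with three declared
	 * parameters and sentinelpos = 1, the substitute signature keeps only
	 * the two variadic parameters, so the iterator sees the cookie followed
	 * by just the varargs.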
*/ tmpSig = mono_metadata_signature_dup (call->signature); tmpSig->param_count -= call->signature->sentinelpos; tmpSig->sentinelpos = 0; if (tmpSig->param_count > 0) memcpy (tmpSig->params, call->signature->params + call->signature->sentinelpos, tmpSig->param_count * sizeof(MonoType *)); MONO_INST_NEW (cfg, sig_arg, OP_ICONST); sig_arg->dreg = mono_alloc_ireg (cfg); sig_arg->inst_p0 = tmpSig; MONO_ADD_INS (cfg->cbb, sig_arg); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, STK_BASE, cinfo->sigCookie.offset, sig_arg->dreg); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific emission of a call operation * * @param[in] @cfg - Compile control block * @param[in] @call - Call Instruction * * Process all parameters for a call and generate the sequence of * operations to perform the call according to the s390x ABI. */ void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call) { MonoInst *in; MonoMethodSignature *sig; MonoInst *ins; int i, n, lParamArea; CallInfo *cinfo; ArgInfo *ainfo = NULL; int stackSize; sig = call->signature; n = sig->param_count + sig->hasthis; DEBUG (g_print ("Call requires: %d parameters\n",n)); cinfo = get_call_info (cfg->mempool, sig); stackSize = cinfo->sz.stack_size + cinfo->sz.parm_size; call->stack_usage = MAX(stackSize, call->stack_usage); lParamArea = MAX((call->stack_usage-S390_MINIMAL_STACK_SIZE-cinfo->sz.parm_size), 0); cfg->param_area = MAX(((signed) cfg->param_area), lParamArea); /* FIXME */ cfg->flags |= MONO_CFG_HAS_CALLS; if (cinfo->struct_ret) { MONO_INST_NEW (cfg, ins, OP_MOVE); ins->sreg1 = call->vret_var->dreg; ins->dreg = mono_alloc_preg (cfg); MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, cinfo->ret.reg, FALSE); } for (i = 0; i < n; ++i) { MonoType *t; ainfo = cinfo->args + i; if (i >= sig->hasthis) t = sig->params [i - sig->hasthis]; else t = mono_get_int_type (); t = mini_get_underlying_type (t); in = call->args [i]; if ((sig->call_convention == MONO_CALL_VARARG) && (!sig->pinvoke) && (i == sig->sentinelpos)) { emit_sig_cookie (cfg, call, cinfo); } switch (ainfo->regtype) { case RegTypeGeneral : add_outarg_reg2 (cfg, call, ainfo->regtype, ainfo->reg, in); break; case RegTypeFP : case RegTypeFPR4 : if (MONO_TYPE_ISSTRUCT (t)) { /* Valuetype passed in one fp register */ ainfo->regtype = RegTypeStructByValInFP; /* Fall through */ } else { add_outarg_reg2 (cfg, call, ainfo->regtype, ainfo->reg, in); break; } case RegTypeStructByVal : case RegTypeStructByAddr : { g_assert (in->klass); MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->sreg1 = in->dreg; ins->klass = in->klass; ins->backend.size = ainfo->size; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); break; } case RegTypeBase : if (!m_type_is_byref (t) && t->type == MONO_TYPE_R4) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, STK_BASE, ainfo->offset + 4, in->dreg); } else if (!m_type_is_byref (t) && (t->type == MONO_TYPE_R8)) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, STK_BASE, ainfo->offset, in->dreg); } else { MONO_INST_NEW (cfg, ins, OP_STORE_MEMBASE_REG); ins->inst_destbasereg = STK_BASE; ins->inst_offset = ainfo->offset; ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); } break; default: g_assert_not_reached (); break; } } /* * Handle the case where there are no implicit arguments */ if ((sig->call_convention == MONO_CALL_VARARG) && 
(!sig->pinvoke) &&
	    (i == sig->sentinelpos)) {
		emit_sig_cookie (cfg, call, cinfo);
	}
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific Value Type parameter processing
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @ins - Outgoing value type argument instruction
 * @param[in] @src - Source parameter
 *
 * Process value type parameters for a call operation
 */
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
	MonoCallInst *call = (MonoCallInst*) ins->inst_p0;
	ArgInfo *ainfo = (ArgInfo *) ins->inst_p1;

	if (ainfo->regtype == RegTypeStructByVal) {
		if (ainfo->reg != STK_BASE) {
			emit_outarg_vtr (cfg, ins, src);
		} else {
			emit_outarg_vts (cfg, ins, src);
		}
	} else if (ainfo->regtype == RegTypeStructByValInFP) {
		int dreg = mono_alloc_freg (cfg);

		if (ainfo->size == 4) {
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, dreg, src->dreg, 0);
			MONO_EMIT_NEW_UNALU (cfg, OP_S390_SETF4RET, dreg, dreg);
		} else {
			g_assert (ainfo->size == 8);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, dreg, src->dreg, 0);
		}
		mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE);
	} else {
		ERROR_DECL (error);
		MonoMethodHeader *header;
		MonoInst *vtcopy = mono_compile_create_var (cfg, m_class_get_byval_arg (src->klass), OP_LOCAL);
		MonoInst *load;
		int ovf_size = ainfo->vtsize, srcReg;
		guint32 size;

		/* FIXME: alignment? */
		if (call->signature->pinvoke && !call->signature->marshalling_disabled) {
			size = mono_type_native_stack_size (m_class_get_byval_arg (src->klass), NULL);
			vtcopy->backend.is_pinvoke = 1;
		} else {
			size = mini_type_stack_size (m_class_get_byval_arg (src->klass), NULL);
		}
		if (size > 0)
			g_assert (ovf_size > 0);

		header = mono_method_get_header_checked (cfg->method, error);
		mono_error_assert_ok (error); /* FIXME don't swallow the error */
		if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
			srcReg = s390_r11;
		else
			srcReg = STK_BASE;

		EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype);
		mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, TARGET_SIZEOF_VOID_P);

		if (ainfo->reg == STK_BASE) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, srcReg, ainfo->offset, load->dreg);

			if (cfg->compute_gc_maps) {
				MonoInst *def;

				EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, m_class_get_byval_arg (ins->klass));
			}
		} else
			mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg, FALSE);
	}
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific call value return processing
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @method - Method
 * @param[in] @val - Instruction representing the result returned to method
 *
 * Create the sequence to unload the value returned from a call
 */
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
	MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);

	if (!m_type_is_byref (ret)) {
		if (ret->type == MONO_TYPE_R4) {
			MONO_EMIT_NEW_UNALU (cfg, OP_S390_SETF4RET, s390_f0, val->dreg);
			return;
		} else if (ret->type == MONO_TYPE_R8) {
			MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, s390_f0, val->dreg);
			return;
		}
	}
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Replace compound compare/branch operations with single operation
 *
 * @param[in] @bb - Basic block
 * @param[in] @ins - Current instruction
 * @param[in] @cc - Condition code of branch
 * @param[in]
@logical - Whether comparison is signed or logical
 *
 * Perform a peephole pass over the code, combining eligible compare and
 * branch instruction pairs into a single compare-and-branch operation.
 */
static void
compare_and_branch(MonoBasicBlock *bb, MonoInst *ins, int cc, gboolean logical)
{
	MonoInst *last;

	if (mono_hwcap_s390x_has_gie) {
		last = mono_inst_prev (ins, FILTER_IL_SEQ_POINT);
		ins->sreg1 = last->sreg1;
		ins->sreg2 = last->sreg2;
		ins->sreg3 = cc;
		switch(last->opcode) {
		case OP_ICOMPARE:
			if (logical)
				ins->opcode = OP_S390_CLRJ;
			else
				ins->opcode = OP_S390_CRJ;
			MONO_DELETE_INS(bb, last);
			break;
		case OP_COMPARE:
		case OP_LCOMPARE:
			if (logical)
				ins->opcode = OP_S390_CLGRJ;
			else
				ins->opcode = OP_S390_CGRJ;
			MONO_DELETE_INS(bb, last);
			break;
		case OP_ICOMPARE_IMM:
			ins->backend.data = (gpointer) last->inst_imm;
			if (logical)
				ins->opcode = OP_S390_CLIJ;
			else
				ins->opcode = OP_S390_CIJ;
			MONO_DELETE_INS(bb, last);
			break;
		case OP_COMPARE_IMM:
		case OP_LCOMPARE_IMM:
			ins->backend.data = (gpointer) last->inst_imm;
			if (logical)
				ins->opcode = OP_S390_CLGIJ;
			else
				ins->opcode = OP_S390_CGIJ;
			MONO_DELETE_INS(bb, last);
			break;
		}
	}
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific peephole pass 1 processing
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @bb - Basic block
 *
 * Perform a peephole pass over the code looking for compare and branch
 * optimizations.
 */
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_IBEQ:
		case OP_LBEQ:
			compare_and_branch(bb, ins, S390_CC_EQ, FALSE);
			break;
		case OP_LBNE_UN:
		case OP_IBNE_UN:
			compare_and_branch(bb, ins, S390_CC_NE, TRUE);
			break;
		case OP_LBLT:
		case OP_IBLT:
			compare_and_branch(bb, ins, S390_CC_LT, FALSE);
			break;
		case OP_LBLT_UN:
		case OP_IBLT_UN:
			compare_and_branch(bb, ins, S390_CC_LT, TRUE);
			break;
		case OP_LBGT:
		case OP_IBGT:
			compare_and_branch(bb, ins, S390_CC_GT, FALSE);
			break;
		case OP_LBGT_UN:
		case OP_IBGT_UN:
			compare_and_branch(bb, ins, S390_CC_GT, TRUE);
			break;
		case OP_LBGE:
		case OP_IBGE:
			compare_and_branch(bb, ins, S390_CC_GE, FALSE);
			break;
		case OP_LBGE_UN:
		case OP_IBGE_UN:
			compare_and_branch(bb, ins, S390_CC_GE, TRUE);
			break;
		case OP_LBLE:
		case OP_IBLE:
			compare_and_branch(bb, ins, S390_CC_LE, FALSE);
			break;
		case OP_LBLE_UN:
		case OP_IBLE_UN:
			compare_and_branch(bb, ins, S390_CC_LE, TRUE);
			break;
		// default:
		//	mono_peephole_ins (bb, ins);
		}
	}
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific peephole pass 2 processing
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @bb - Basic block
 *
 * Perform a peephole pass over the code looking for simple optimizations.
 */
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n, *last_ins = NULL;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_LOADU4_MEMBASE:
		case OP_LOADI4_MEMBASE:
			if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				ins->opcode = (ins->opcode == OP_LOADI4_MEMBASE) ?
OP_ICONV_TO_I4 : OP_ICONV_TO_U4;
				ins->sreg1 = last_ins->sreg1;
			}
			break;
		}
		mono_peephole_ins (bb, ins);
	}
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific lowering pass processing
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @bb - Basic block
 *
 * Perform a lowering pass over the code, decomposing immediate-form
 * operations that the hardware cannot encode directly.
 */
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *next;

	MONO_BB_FOR_EACH_INS_SAFE (bb, next, ins) {
		switch (ins->opcode) {
		case OP_DIV_IMM:
		case OP_REM_IMM:
		case OP_IDIV_IMM:
		case OP_IREM_IMM:
		case OP_IDIV_UN_IMM:
		case OP_IREM_UN_IMM:
		case OP_LAND_IMM:
		case OP_LOR_IMM:
		case OP_LREM_IMM:
		case OP_LXOR_IMM:
		case OP_LOCALLOC_IMM:
			mono_decompose_op_imm (cfg, bb, ins);
			break;
		case OP_LADD_IMM:
			if (!s390_is_imm16 (ins->inst_imm))
				/* This is created by the memcpy code which ignores is_inst_imm */
				mono_decompose_op_imm (cfg, bb, ins);
			break;
		default:
			break;
		}
	}
	bb->max_vreg = cfg->next_vreg;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Emit float-to-int sequence
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @code - Current instruction area
 * @param[in] @dreg - Destination general register
 * @param[in] @sreg - Source floating point register
 * @param[in] @size - Size of destination
 * @param[in] @is_signed - Destination is signed/unsigned
 * @returns Next instruction location
 *
 * Emit instructions to convert a single precision floating point value to an integer
 */
static guchar *
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
	/* sreg is a float, dreg is an integer reg. */
	if (is_signed) {
		s390_cgebr (code, dreg, 5, sreg);
		switch (size) {
		case 1:
			s390_ltgr (code, dreg, dreg);
			s390_jnl  (code, 4);
			s390_oill (code, dreg, 0x80);
			s390_lghi (code, s390_r0, 0xff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		case 2:
			s390_ltgr (code, dreg, dreg);
			s390_jnl  (code, 4);
			s390_oill (code, dreg, 0x8000);
			s390_llill(code, s390_r0, 0xffff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		}
	} else {
		short *o[1];
		s390_lgdr  (code, s390_r14, s390_f14);
		s390_lgdr  (code, s390_r13, s390_f15);
		S390_SET   (code, s390_r0, 0x4f000000u);
		s390_ldgr  (code, s390_f14, s390_r0);
		s390_ler   (code, s390_f15, sreg);
		s390_cebr  (code, s390_f15, s390_f14);
		s390_jl    (code, 0); CODEPTR (code, o[0]);
		S390_SET   (code, s390_r0, 0x4f800000u);
		s390_ldgr  (code, s390_f14, s390_r0);
		s390_sebr  (code, s390_f15, s390_f14);
		s390_cfebr (code, dreg, 7, s390_f15);
		s390_j     (code, 4);
		PTRSLOT    (code, o[0]);
		s390_cfebr (code, dreg, 5, sreg);
		switch (size) {
		case 1:
			s390_lghi (code, s390_r0, 0xff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		case 2:
			s390_llill(code, s390_r0, 0xffff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		}
		s390_ldgr (code, s390_f14, s390_r14);
		s390_ldgr (code, s390_f15, s390_r13);
	}
	return code;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Emit double-to-int sequence
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @code - Current instruction area
 * @param[in] @dreg - Destination general register
 * @param[in] @sreg - Source floating point register
 * @param[in] @size - Size of destination
 * @param[in] @is_signed - Destination is signed/unsigned
 * @returns Next instruction location
 *
 * Emit instructions to convert a double precision floating point value to an integer
 */
static guchar*
emit_double_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
	/* sreg is a
double, dreg is an integer reg. */
	if (is_signed) {
		s390_cgdbr (code, dreg, 5, sreg);
		switch (size) {
		case 1:
			s390_ltgr (code, dreg, dreg);
			s390_jnl  (code, 4);
			s390_oill (code, dreg, 0x80);
			s390_lghi (code, s390_r0, 0xff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		case 2:
			s390_ltgr (code, dreg, dreg);
			s390_jnl  (code, 4);
			s390_oill (code, dreg, 0x8000);
			s390_llill(code, s390_r0, 0xffff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		}
	} else {
		short *o[1];
		s390_lgdr  (code, s390_r14, s390_f14);
		s390_lgdr  (code, s390_r13, s390_f15);
		S390_SET   (code, s390_r0, 0x41e0000000000000llu);
		s390_ldgr  (code, s390_f14, s390_r0);
		s390_ldr   (code, s390_f15, sreg);
		s390_cdbr  (code, s390_f15, s390_f14);
		s390_jl    (code, 0); CODEPTR (code, o[0]);
		S390_SET   (code, s390_r0, 0x41f0000000000000llu);
		s390_ldgr  (code, s390_f14, s390_r0);
		s390_sdbr  (code, s390_f15, s390_f14);
		s390_cfdbr (code, dreg, 7, s390_f15);
		s390_j     (code, 4);
		PTRSLOT    (code, o[0]);
		s390_cfdbr (code, dreg, 5, sreg);
		switch (size) {
		case 1:
			s390_lghi (code, s390_r0, 0xff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		case 2:
			s390_llill(code, s390_r0, 0xffff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		}
		s390_ldgr (code, s390_f14, s390_r14);
		s390_ldgr (code, s390_f15, s390_r13);
	}
	return code;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Check if the next instruction treats its operands as unsigned
 *
 * @param[in] @next - Next instruction
 * @returns True if the instruction is an unsigned branch, compare or conditional exception
 *
 * Determine if the next instruction is a branch, compare or conditional
 * exception that operates on unsigned values
 */
static gboolean
is_unsigned (MonoInst *next)
{
	if ((next) &&
	    (((next->opcode >= OP_IBNE_UN) && (next->opcode <= OP_IBLT_UN)) ||
	     ((next->opcode >= OP_LBNE_UN) && (next->opcode <= OP_LBLT_UN)) ||
	     ((next->opcode >= OP_COND_EXC_NE_UN) && (next->opcode <= OP_COND_EXC_LT_UN)) ||
	     ((next->opcode >= OP_COND_EXC_INE_UN) && (next->opcode <= OP_COND_EXC_ILT_UN)) ||
	     ((next->opcode == OP_CLT_UN) || (next->opcode == OP_CGT_UN) ||
	      (next->opcode == OP_ICGE_UN) || (next->opcode == OP_ICLE_UN)) ||
	     ((next->opcode == OP_ICLT_UN) || (next->opcode == OP_ICGT_UN) ||
	      (next->opcode == OP_LCLT_UN) || (next->opcode == OP_LCGT_UN))))
		return TRUE;
	else
		return FALSE;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific processing of a basic block
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @bb - Basic block
 *
 * Process instructions within basic block emitting s390x instructions
 * based on the VM operation codes
 */
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins;
	MonoCallInst *call;
	guint8 *code = cfg->native_code + cfg->code_len;
	int src2;

	/* we don't align basic blocks of loops on s390 */

	if (cfg->verbose_level > 2)
		g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);

	MONO_BB_FOR_EACH_INS (bb, ins) {
		const guint offset = code - cfg->native_code;
		set_code_cursor (cfg, code);
		int max_len = ins_get_size (ins->opcode);
		code = realloc_code (cfg, max_len);

		mono_debug_record_line_number (cfg, ins, offset);

		switch (ins->opcode) {
		case OP_STOREI1_MEMBASE_IMM: {
			s390_lghi (code, s390_r0, ins->inst_imm);
			S390_LONG (code, stcy, stc, s390_r0, 0,
				   ins->inst_destbasereg, ins->inst_offset);
		}
			break;
		case OP_STOREI2_MEMBASE_IMM: {
			s390_lghi (code, s390_r0, ins->inst_imm);
			S390_LONG (code, sthy, sth, s390_r0, 0,
				   ins->inst_destbasereg, ins->inst_offset);
		}
			break;
		case OP_STOREI4_MEMBASE_IMM: {
			s390_lgfi (code, s390_r0, ins->inst_imm);
			S390_LONG (code, sty, st, s390_r0, 0,
ins->inst_destbasereg, ins->inst_offset); } break; case OP_STORE_MEMBASE_IMM: case OP_STOREI8_MEMBASE_IMM: { S390_SET (code, s390_r0, ins->inst_imm); S390_LONG (code, stg, stg, s390_r0, 0, ins->inst_destbasereg, ins->inst_offset); } break; case OP_STOREI1_MEMBASE_REG: { S390_LONG (code, stcy, stc, ins->sreg1, 0, ins->inst_destbasereg, ins->inst_offset); } break; case OP_STOREI2_MEMBASE_REG: { S390_LONG (code, sthy, sth, ins->sreg1, 0, ins->inst_destbasereg, ins->inst_offset); } break; case OP_STOREI4_MEMBASE_REG: { S390_LONG (code, sty, st, ins->sreg1, 0, ins->inst_destbasereg, ins->inst_offset); } break; case OP_STORE_MEMBASE_REG: case OP_STOREI8_MEMBASE_REG: { S390_LONG (code, stg, stg, ins->sreg1, 0, ins->inst_destbasereg, ins->inst_offset); } break; case OP_LOADU4_MEM: g_assert_not_reached (); break; case OP_LOAD_MEMBASE: case OP_LOADI8_MEMBASE: { S390_LONG (code, lg, lg, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_LOADI4_MEMBASE: { S390_LONG (code, lgf, lgf, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_LOADU4_MEMBASE: { S390_LONG (code, llgf, llgf, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_LOADU1_MEMBASE: { S390_LONG (code, llgc, llgc, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_LOADI1_MEMBASE: { S390_LONG (code, lgb, lgb, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_LOADU2_MEMBASE: { S390_LONG (code, llgh, llgh, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_LOADI2_MEMBASE: { S390_LONG (code, lgh, lgh, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_LCONV_TO_I1: { s390_lgbr (code, ins->dreg, ins->sreg1); } break; case OP_LCONV_TO_I2: { s390_lghr (code, ins->dreg, ins->sreg1); } break; case OP_LCONV_TO_U1: { s390_llgcr (code, ins->dreg, ins->sreg1); } break; case OP_LCONV_TO_U2: { s390_llghr (code, ins->dreg, ins->sreg1); } break; case OP_ICONV_TO_I1: { s390_lgbr (code, ins->dreg, ins->sreg1); } break; case OP_ICONV_TO_I2: { s390_lghr (code, ins->dreg, ins->sreg1); } break; case OP_ICONV_TO_U1: { s390_llgcr (code, ins->dreg, ins->sreg1); } break; case OP_ICONV_TO_U2: { s390_llghr (code, ins->dreg, ins->sreg1); } break; case OP_ICONV_TO_U4: { s390_llgfr (code, ins->dreg, ins->sreg1); } break; case OP_ICONV_TO_I4: { s390_lgfr (code, ins->dreg, ins->sreg1); } break; case OP_COMPARE: case OP_LCOMPARE: { if (is_unsigned (ins->next)) s390_clgr (code, ins->sreg1, ins->sreg2); else s390_cgr (code, ins->sreg1, ins->sreg2); } break; case OP_ICOMPARE: { if (is_unsigned (ins->next)) s390_clr (code, ins->sreg1, ins->sreg2); else s390_cr (code, ins->sreg1, ins->sreg2); } break; case OP_COMPARE_IMM: case OP_LCOMPARE_IMM: { gboolean branchUn = is_unsigned (ins->next); if ((ins->inst_imm == 0) && (!branchUn)) { s390_ltgr (code, ins->sreg1, ins->sreg1); } else { S390_SET (code, s390_r0, ins->inst_imm); if (branchUn) s390_clgr (code, ins->sreg1, s390_r0); else s390_cgr (code, ins->sreg1, s390_r0); } } break; case OP_ICOMPARE_IMM: { gboolean branchUn = is_unsigned (ins->next); if ((ins->inst_imm == 0) && (!branchUn)) { s390_ltr (code, ins->sreg1, ins->sreg1); } else { S390_SET (code, s390_r0, ins->inst_imm); if (branchUn) s390_clr (code, ins->sreg1, s390_r0); else s390_cr (code, ins->sreg1, s390_r0); } } break; case OP_BREAK: { code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break)); } break; case OP_ADDCC: { if (mono_hwcap_s390x_has_mlt) { s390_agrk (code, ins->dreg, ins->sreg1, 
ins->sreg2); } else { CHECK_SRCDST_COM; s390_agr (code, ins->dreg, src2); } } break; case OP_LADD: { if (mono_hwcap_s390x_has_mlt) { s390_agrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_COM; s390_agr (code, ins->dreg, src2); } } break; case OP_ADC: { CHECK_SRCDST_COM; s390_alcgr (code, ins->dreg, src2); } break; case OP_ADD_IMM: { if (mono_hwcap_s390x_has_mlt) { if (s390_is_imm16 (ins->inst_imm)) { s390_aghik(code, ins->dreg, ins->sreg1, ins->inst_imm); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_agrk (code, ins->dreg, ins->sreg1, s390_r0); } } else { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } if (s390_is_imm16 (ins->inst_imm)) { s390_aghi (code, ins->dreg, ins->inst_imm); } else if (s390_is_imm32 (ins->inst_imm)) { s390_agfi (code, ins->dreg, ins->inst_imm); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_agr (code, ins->dreg, s390_r0); } } } break; case OP_LADD_IMM: { if (mono_hwcap_s390x_has_mlt) { if (s390_is_imm16 (ins->inst_imm)) { s390_aghik(code, ins->dreg, ins->sreg1, ins->inst_imm); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_agrk (code, ins->dreg, ins->sreg1, s390_r0); } } else { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } if (s390_is_imm32 (ins->inst_imm)) { s390_agfi (code, ins->dreg, ins->inst_imm); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_agr (code, ins->dreg, s390_r0); } } } break; case OP_ADC_IMM: { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } if (s390_is_imm16 (ins->inst_imm)) { s390_lghi (code, s390_r0, ins->inst_imm); s390_alcgr (code, ins->dreg, s390_r0); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_alcgr (code, ins->dreg, s390_r0); } } break; case OP_IADD_OVF: case OP_S390_IADD_OVF: { CHECK_SRCDST_COM; s390_ar (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); s390_lgfr (code, ins->dreg, ins->dreg); } break; case OP_IADD_OVF_UN: case OP_S390_IADD_OVF_UN: { CHECK_SRCDST_COM; s390_alr (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException"); s390_llgfr (code, ins->dreg, ins->dreg); } break; case OP_ADD_OVF_CARRY: { CHECK_SRCDST_COM; s390_lghi (code, s390_r0, 0); s390_lgr (code, s390_r1, s390_r0); s390_alcgr (code, s390_r0, s390_r1); s390_agr (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); s390_agr (code, ins->dreg, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); } break; case OP_ADD_OVF_UN_CARRY: { CHECK_SRCDST_COM; s390_alcgr (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException"); } break; case OP_SUBCC: { if (mono_hwcap_s390x_has_mlt) { s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_NCOM; s390_sgr (code, ins->dreg, src2); } } break; case OP_LSUB: { if (mono_hwcap_s390x_has_mlt) { s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_NCOM; s390_sgr (code, ins->dreg, src2); } } break; case OP_SBB: { CHECK_SRCDST_NCOM; s390_slbgr(code, ins->dreg, src2); } break; case OP_SUB_IMM: { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } if (s390_is_imm16 (-ins->inst_imm)) { s390_aghi (code, ins->dreg, -ins->inst_imm); } else if (s390_is_imm32 (-ins->inst_imm)) { s390_slgfi (code, ins->dreg, ins->inst_imm); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_slgr (code, ins->dreg, s390_r0); } } break; case OP_LSUB_IMM: { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); 
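/* Note on the sequence that follows: a subtract-immediate is emitted as an
   add of the negated value when -imm fits in 16 bits (aghi); larger values
   are subtracted directly via slgfi or a full 64-bit constant in r0. */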
} if (s390_is_imm16 (-ins->inst_imm)) { s390_aghi (code, ins->dreg, -ins->inst_imm); } else if (s390_is_imm32 (-ins->inst_imm)) { s390_slgfi (code, ins->dreg, ins->inst_imm); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_slgr (code, ins->dreg, s390_r0); } } break; case OP_SBB_IMM: { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } if (s390_is_imm16 (-ins->inst_imm)) { s390_lghi (code, s390_r0, ins->inst_imm); s390_slbgr (code, ins->dreg, s390_r0); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_slbgr(code, ins->dreg, s390_r0); } } break; case OP_SUB_OVF_CARRY: { CHECK_SRCDST_NCOM; s390_lghi (code, s390_r0, 0); s390_lgr (code, s390_r1, s390_r0); s390_slbgr (code, s390_r0, s390_r1); s390_sgr (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); s390_agr (code, ins->dreg, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); } break; case OP_SUB_OVF_UN_CARRY: { CHECK_SRCDST_NCOM; s390_slbgr (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, "OverflowException"); } break; case OP_LAND: { if (mono_hwcap_s390x_has_mlt) { s390_ngrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { if (ins->sreg1 == ins->dreg) { s390_ngr (code, ins->dreg, ins->sreg2); } else { if (ins->sreg2 == ins->dreg) { s390_ngr (code, ins->dreg, ins->sreg1); } else { s390_lgr (code, ins->dreg, ins->sreg1); s390_ngr (code, ins->dreg, ins->sreg2); } } } } break; case OP_AND_IMM: { S390_SET_MASK (code, s390_r0, ins->inst_imm); if (mono_hwcap_s390x_has_mlt) { s390_ngrk (code, ins->dreg, ins->sreg1, s390_r0); } else { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } s390_ngr (code, ins->dreg, s390_r0); } } break; case OP_LDIV: { s390_lgr (code, s390_r1, ins->sreg1); s390_dsgr (code, s390_r0, ins->sreg2); s390_lgr (code, ins->dreg, s390_r1); } break; case OP_LDIV_UN: { s390_lgr (code, s390_r1, ins->sreg1); s390_lghi (code, s390_r0, 0); s390_dlgr (code, s390_r0, ins->sreg2); s390_lgr (code, ins->dreg, s390_r1); } break; case OP_LREM: { s390_lgr (code, s390_r1, ins->sreg1); s390_dsgr (code, s390_r0, ins->sreg2); s390_lgr (code, ins->dreg, s390_r0); break; } case OP_LREM_IMM: { if (s390_is_imm16 (ins->inst_imm)) { s390_lghi (code, s390_r13, ins->inst_imm); } else { s390_lgfi (code, s390_r13, ins->inst_imm); } s390_lgr (code, s390_r0, ins->sreg1); s390_dsgr (code, s390_r0, s390_r13); s390_lgfr (code, ins->dreg, s390_r0); } break; case OP_LREM_UN: { s390_lgr (code, s390_r1, ins->sreg1); s390_lghi (code, s390_r0, 0); s390_dlgr (code, s390_r0, ins->sreg2); s390_lgr (code, ins->dreg, s390_r0); } break; case OP_LOR: { if (mono_hwcap_s390x_has_mlt) { s390_ogrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { if (ins->sreg1 == ins->dreg) { s390_ogr (code, ins->dreg, ins->sreg2); } else { if (ins->sreg2 == ins->dreg) { s390_ogr (code, ins->dreg, ins->sreg1); } else { s390_lgr (code, ins->dreg, ins->sreg1); s390_ogr (code, ins->dreg, ins->sreg2); } } } } break; case OP_OR_IMM: { S390_SET_MASK(code, s390_r0, ins->inst_imm); if (mono_hwcap_s390x_has_mlt) { s390_ogrk (code, ins->dreg, ins->sreg1, s390_r0); } else { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } s390_ogr (code, ins->dreg, s390_r0); } } break; case OP_LXOR: { if (mono_hwcap_s390x_has_mlt) { s390_xgrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { if (ins->sreg1 == ins->dreg) { s390_xgr (code, ins->dreg, ins->sreg2); } else { if (ins->sreg2 == ins->dreg) { s390_xgr (code, ins->dreg, ins->sreg1); } else { s390_lgr (code, 
ins->dreg, ins->sreg1); s390_xgr (code, ins->dreg, ins->sreg2); } } } } break; case OP_XOR_IMM: { S390_SET_MASK(code, s390_r0, ins->inst_imm); if (mono_hwcap_s390x_has_mlt) { s390_xgrk (code, ins->dreg, ins->sreg1, s390_r0); } else { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } s390_xgr (code, ins->dreg, s390_r0); } } break; case OP_LSHL: { CHECK_SRCDST_NCOM; s390_sllg (code, ins->dreg, ins->dreg, src2, 0); } break; case OP_SHL_IMM: case OP_LSHL_IMM: { if (ins->sreg1 != ins->dreg) { s390_lgr (code, ins->dreg, ins->sreg1); } s390_sllg (code, ins->dreg, ins->dreg, 0, (ins->inst_imm & 0x3f)); } break; case OP_LSHR: { CHECK_SRCDST_NCOM; s390_srag (code, ins->dreg, ins->dreg, src2, 0); } break; case OP_SHR_IMM: case OP_LSHR_IMM: { if (ins->sreg1 != ins->dreg) { s390_lgr (code, ins->dreg, ins->sreg1); } s390_srag (code, ins->dreg, ins->dreg, 0, (ins->inst_imm & 0x3f)); } break; case OP_SHR_UN_IMM: case OP_LSHR_UN_IMM: { if (ins->sreg1 != ins->dreg) { s390_lgr (code, ins->dreg, ins->sreg1); } s390_srlg (code, ins->dreg, ins->dreg, 0, (ins->inst_imm & 0x3f)); } break; case OP_LSHR_UN: { CHECK_SRCDST_NCOM; s390_srlg (code, ins->dreg, ins->dreg, src2, 0); } break; case OP_LNOT: { if (ins->sreg1 != ins->dreg) { s390_lgr (code, ins->dreg, ins->sreg1); } s390_lghi (code, s390_r0, -1); s390_xgr (code, ins->dreg, s390_r0); } break; case OP_LNEG: { s390_lcgr (code, ins->dreg, ins->sreg1); } break; case OP_LMUL: { CHECK_SRCDST_COM; s390_msgr (code, ins->dreg, src2); } break; case OP_MUL_IMM: case OP_LMUL_IMM: { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } if ((mono_hwcap_s390x_has_gie) && (s390_is_imm32 (ins->inst_imm))) { s390_msgfi (code, ins->dreg, ins->inst_imm); } else { if (s390_is_imm16 (ins->inst_imm)) { s390_lghi (code, s390_r13, ins->inst_imm); } else if (s390_is_imm32 (ins->inst_imm)) { s390_lgfi (code, s390_r13, ins->inst_imm); } else { S390_SET (code, s390_r13, ins->inst_imm); } s390_msgr (code, ins->dreg, s390_r13); } } break; case OP_LMUL_OVF: { short int *o[2]; if (mono_hwcap_s390x_has_mie2) { s390_msgrkc (code, ins->dreg, ins->sreg1, ins->sreg2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); } else { s390_ltgr (code, s390_r1, ins->sreg1); s390_jz (code, 0); CODEPTR(code, o[0]); s390_ltgr (code, s390_r0, ins->sreg2); s390_jnz (code, 6); s390_lghi (code, s390_r1, 0); s390_j (code, 0); CODEPTR(code, o[1]); s390_xgr (code, s390_r0, s390_r1); s390_msgr (code, s390_r1, ins->sreg2); s390_xgr (code, s390_r0, s390_r1); s390_srlg (code, s390_r0, s390_r0, 0, 63); s390_ltgr (code, s390_r0, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException"); PTRSLOT (code, o[0]); PTRSLOT (code, o[1]); s390_lgr (code, ins->dreg, s390_r1); } } break; case OP_LMUL_OVF_UN: { s390_lghi (code, s390_r0, 0); s390_lgr (code, s390_r1, ins->sreg1); s390_mlgr (code, s390_r0, ins->sreg2); s390_ltgr (code, s390_r0, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException"); s390_lgr (code, ins->dreg, s390_r1); } break; case OP_IADDCC: { g_assert_not_reached (); CHECK_SRCDST_COM_I; s390_algr (code, ins->dreg, src2); } break; case OP_IADD: { CHECK_SRCDST_COM_I; s390_agr (code, ins->dreg, src2); } break; case OP_IADC: { g_assert_not_reached (); CHECK_SRCDST_COM_I; s390_alcgr (code, ins->dreg, src2); } break; case OP_IADD_IMM: { if (ins->dreg != ins->sreg1) { s390_lgfr (code, ins->dreg, ins->sreg1); } if (s390_is_imm16 (ins->inst_imm)) { s390_aghi (code, ins->dreg, ins->inst_imm); } else { s390_afi (code, ins->dreg, ins->inst_imm); } 
} break; case OP_IADC_IMM: { if (ins->dreg != ins->sreg1) { s390_lgfr (code, ins->dreg, ins->sreg1); } if (s390_is_imm16 (ins->inst_imm)) { s390_lghi (code, s390_r0, ins->inst_imm); s390_alcgr (code, ins->dreg, s390_r0); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_alcgr (code, ins->dreg, s390_r0); } } break; case OP_LADD_OVF: case OP_S390_LADD_OVF: { if (mono_hwcap_s390x_has_mlt) { s390_agrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_COM; s390_agr (code, ins->dreg, src2); } EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); } break; case OP_LADD_OVF_UN: case OP_S390_LADD_OVF_UN: { if (mono_hwcap_s390x_has_mlt) { s390_algrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_COM; s390_algr (code, ins->dreg, src2); } EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException"); } break; case OP_ISUBCC: { if (mono_hwcap_s390x_has_mlt) { s390_slgrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_NCOM_I; s390_slgr (code, ins->dreg, src2); } } break; case OP_ISUB: { if (mono_hwcap_s390x_has_mlt) { s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_NCOM_I; s390_sgr (code, ins->dreg, src2); } } break; case OP_ISBB: { CHECK_SRCDST_NCOM_I; s390_slbgr (code, ins->dreg, src2); } break; case OP_ISUB_IMM: { if (ins->dreg != ins->sreg1) { s390_lgfr (code, ins->dreg, ins->sreg1); } if (s390_is_imm16 (-ins->inst_imm)) { s390_aghi (code, ins->dreg, -ins->inst_imm); } else { s390_agfi (code, ins->dreg, -ins->inst_imm); } } break; case OP_ISBB_IMM: { S390_SET (code, s390_r0, ins->inst_imm); s390_slgfr (code, ins->dreg, s390_r0); } break; case OP_ISUB_OVF: case OP_S390_ISUB_OVF: { if (mono_hwcap_s390x_has_mlt) { s390_srk (code, ins->dreg, ins->sreg1, ins->sreg2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); } else { CHECK_SRCDST_NCOM; s390_sr (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); s390_lgfr (code, ins->dreg, ins->dreg); } } break; case OP_ISUB_OVF_UN: case OP_S390_ISUB_OVF_UN: { if (mono_hwcap_s390x_has_mlt) { s390_slrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_NCOM; s390_slr (code, ins->dreg, src2); } EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, "OverflowException"); s390_llgfr(code, ins->dreg, ins->dreg); } break; case OP_LSUB_OVF: case OP_S390_LSUB_OVF: { if (mono_hwcap_s390x_has_mlt) { s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_NCOM; s390_sgr (code, ins->dreg, src2); } EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); } break; case OP_LSUB_OVF_UN: case OP_S390_LSUB_OVF_UN: { CHECK_SRCDST_NCOM; s390_slgr (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, "OverflowException"); } break; case OP_IAND: { if (mono_hwcap_s390x_has_mlt) { s390_ngrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_NCOM_I; s390_ngr (code, ins->dreg, src2); } } break; case OP_IAND_IMM: { S390_SET_MASK (code, s390_r0, ins->inst_imm); if (mono_hwcap_s390x_has_mlt) { s390_ngrk (code, ins->dreg, ins->sreg1, s390_r0); } else { if (ins->dreg != ins->sreg1) { s390_lgfr (code, ins->dreg, ins->sreg1); } s390_ngr (code, ins->dreg, s390_r0); } } break; case OP_IDIV: { s390_lgfr (code, s390_r0, ins->sreg1); s390_srda (code, s390_r0, 0, 32); s390_dr (code, s390_r0, ins->sreg2); s390_lgfr (code, ins->dreg, s390_r1); } break; case OP_IDIV_UN: { s390_lgfr (code, s390_r0, ins->sreg1); s390_srdl (code, s390_r0, 0, 32); s390_dlr (code, s390_r0, ins->sreg2); s390_lgfr (code, ins->dreg, s390_r1); } 
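/* In the 32-bit divide cases here (dr/dlr), srda/srdl shift the dividend
   into the even/odd register pair r0:r1; the divide then leaves the
   quotient in r1 and the remainder in r0. */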
break;
		case OP_IDIV_IMM: {
			if (s390_is_imm16 (ins->inst_imm)) {
				s390_lghi (code, s390_r13, ins->inst_imm);
			} else {
				s390_lgfi (code, s390_r13, ins->inst_imm);
			}
			s390_lgfr (code, s390_r0, ins->sreg1);
			s390_srda (code, s390_r0, 0, 32);
			s390_dr   (code, s390_r0, s390_r13);	/* divide by the immediate staged in r13 */
			s390_lgfr (code, ins->dreg, s390_r1);
		}
			break;
		case OP_IREM: {
			s390_lgfr (code, s390_r0, ins->sreg1);
			s390_srda (code, s390_r0, 0, 32);
			s390_dr   (code, s390_r0, ins->sreg2);
			s390_lgfr (code, ins->dreg, s390_r0);
		}
			break;
		case OP_IREM_UN: {
			s390_lgfr (code, s390_r0, ins->sreg1);
			s390_srdl (code, s390_r0, 0, 32);
			s390_dlr  (code, s390_r0, ins->sreg2);
			s390_lgfr (code, ins->dreg, s390_r0);
		}
			break;
		case OP_IREM_IMM: {
			if (s390_is_imm16 (ins->inst_imm)) {
				s390_lghi (code, s390_r13, ins->inst_imm);
			} else {
				s390_lgfi (code, s390_r13, ins->inst_imm);
			}
			s390_lgfr (code, s390_r0, ins->sreg1);
			s390_srda (code, s390_r0, 0, 32);
			s390_dr   (code, s390_r0, s390_r13);	/* divide by the immediate staged in r13 */
			s390_lgfr (code, ins->dreg, s390_r0);
		}
			break;
		case OP_IOR: {
			if (mono_hwcap_s390x_has_mlt) {
				s390_ogrk (code, ins->dreg, ins->sreg1, ins->sreg2);
			} else {
				CHECK_SRCDST_COM_I;
				s390_ogr (code, ins->dreg, src2);
			}
		}
			break;
		case OP_IOR_IMM: {
			S390_SET_MASK (code, s390_r0, ins->inst_imm);
			if (mono_hwcap_s390x_has_mlt) {
				s390_ogrk (code, ins->dreg, ins->sreg1, s390_r0);
			} else {
				if (ins->dreg != ins->sreg1) {
					s390_lgfr (code, ins->dreg, ins->sreg1);
				}
				s390_ogr (code, ins->dreg, s390_r0);
			}
		}
			break;
		case OP_IXOR: {
			if (mono_hwcap_s390x_has_mlt) {
				s390_xgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
			} else {
				CHECK_SRCDST_COM_I;
				s390_xgr (code, ins->dreg, src2);
			}
		}
			break;
		case OP_IXOR_IMM: {
			S390_SET_MASK (code, s390_r0, ins->inst_imm);
			if (mono_hwcap_s390x_has_mlt) {
				s390_xgrk (code, ins->dreg, ins->sreg1, s390_r0);
			} else {
				if (ins->dreg != ins->sreg1) {
					s390_lgfr (code, ins->dreg, ins->sreg1);
				}
				s390_xgr (code, ins->dreg, s390_r0);
			}
		}
			break;
		case OP_ISHL: {
			CHECK_SRCDST_NCOM;
			s390_sll (code, ins->dreg, src2, 0);
		}
			break;
		case OP_ISHL_IMM: {
			if (ins->sreg1 != ins->dreg) {
				s390_lgfr (code, ins->dreg, ins->sreg1);
			}
			s390_sll (code, ins->dreg, 0, (ins->inst_imm & 0x1f));
		}
			break;
		case OP_ISHR: {
			CHECK_SRCDST_NCOM;
			s390_sra (code, ins->dreg, src2, 0);
		}
			break;
		case OP_ISHR_IMM: {
			if (ins->sreg1 != ins->dreg) {
				s390_lgfr (code, ins->dreg, ins->sreg1);
			}
			s390_sra (code, ins->dreg, 0, (ins->inst_imm & 0x1f));
		}
			break;
		case OP_ISHR_UN_IMM: {
			if (ins->sreg1 != ins->dreg) {
				s390_lgfr (code, ins->dreg, ins->sreg1);
			}
			s390_srl (code, ins->dreg, 0, (ins->inst_imm & 0x1f));
		}
			break;
		case OP_ISHR_UN: {
			CHECK_SRCDST_NCOM;
			s390_srl (code, ins->dreg, src2, 0);
		}
			break;
		case OP_INOT: {
			if (ins->sreg1 != ins->dreg) {
				s390_lgfr (code, ins->dreg, ins->sreg1);
			}
			s390_lghi (code, s390_r0, -1);
			s390_xgr (code, ins->dreg, s390_r0);
		}
			break;
		case OP_INEG: {
			s390_lcgr (code, ins->dreg, ins->sreg1);
		}
			break;
		case OP_IMUL: {
			CHECK_SRCDST_COM_I;
			s390_msr (code, ins->dreg, src2);
		}
			break;
		case OP_IMUL_IMM: {
			if (ins->dreg != ins->sreg1) {
				s390_lgfr (code, ins->dreg, ins->sreg1);
			}
			if (s390_is_imm16 (ins->inst_imm)) {
				s390_lghi (code, s390_r0, ins->inst_imm);
			} else {
				s390_lgfi (code, s390_r0, ins->inst_imm);
			}
			s390_msr (code, ins->dreg, s390_r0);
		}
			break;
		case OP_IMUL_OVF: {
			short int *o[2];
			if (mono_hwcap_s390x_has_mie2) {
				s390_msrkc (code, ins->dreg, ins->sreg1, ins->sreg2);
				EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
				s390_lgfr (code, ins->dreg, ins->dreg);
			} else {
				s390_ltr (code, s390_r1, ins->sreg1);
				s390_jz (code, 0);
				CODEPTR(code, o[0]);
				s390_ltr (code, s390_r0, ins->sreg2);
				s390_jnz (code,
6); s390_lhi (code, s390_r1, 0); s390_j (code, 0); CODEPTR(code, o[1]); s390_xr (code, s390_r0, s390_r1); s390_msr (code, s390_r1, ins->sreg2); s390_xr (code, s390_r0, s390_r1); s390_srl (code, s390_r0, 0, 31); s390_ltr (code, s390_r0, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException"); PTRSLOT (code, o[0]); PTRSLOT (code, o[1]); s390_lgfr (code, ins->dreg, s390_r1); } } break; case OP_IMUL_OVF_UN: { s390_lhi (code, s390_r0, 0); s390_lr (code, s390_r1, ins->sreg1); s390_mlr (code, s390_r0, ins->sreg2); s390_ltr (code, s390_r0, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException"); s390_lgfr (code, ins->dreg, s390_r1); } break; case OP_ICONST: case OP_I8CONST: { S390_SET (code, ins->dreg, ins->inst_c0); } break; case OP_AOTCONST: { mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0); S390_LOAD_TEMPLATE (code, ins->dreg); } break; case OP_JUMP_TABLE: { mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0); S390_LOAD_TEMPLATE (code, ins->dreg); } break; case OP_MOVE: if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } break; case OP_LCONV_TO_I: case OP_LCONV_TO_I8: case OP_SEXT_I4: s390_lgfr (code, ins->dreg, ins->sreg1); break; case OP_LCONV_TO_I4: s390_lgfr (code, ins->dreg, ins->sreg1); break; case OP_LCONV_TO_U: case OP_LCONV_TO_U8: case OP_LCONV_TO_U4: case OP_ZEXT_I4: s390_llgfr (code, ins->dreg, ins->sreg1); break; case OP_LCONV_TO_OVF_U4: S390_SET (code, s390_r0, 4294967295); s390_clgr (code, ins->sreg1, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GT, "OverflowException"); s390_ltgr (code, ins->sreg1, ins->sreg1); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, "OverflowException"); s390_llgfr(code, ins->dreg, ins->sreg1); break; case OP_LCONV_TO_OVF_I4_UN: S390_SET (code, s390_r0, 2147483647); s390_cgr (code, ins->sreg1, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GT, "OverflowException"); s390_ltgr (code, ins->sreg1, ins->sreg1); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, "OverflowException"); s390_lgfr (code, ins->dreg, ins->sreg1); break; case OP_RCONV_TO_R4: if (ins->dreg != ins->sreg1) s390_ler (code, ins->dreg, ins->sreg1); break; case OP_RCONV_TO_R8: s390_ldebr (code, ins->dreg, ins->sreg1); break; case OP_FMOVE: if (ins->dreg != ins->sreg1) s390_ldr (code, ins->dreg, ins->sreg1); break; case OP_RMOVE: if (ins->dreg != ins->sreg1) s390_ldr (code, ins->dreg, ins->sreg1); break; case OP_MOVE_F_TO_I8: s390_lgdr (code, ins->dreg, ins->sreg1); break; case OP_MOVE_I8_TO_F: s390_ldgr (code, ins->dreg, ins->sreg1); break; case OP_MOVE_F_TO_I4: if (!cfg->r4fp) { s390_ledbr (code, s390_f0, ins->sreg1); s390_lgdr (code, ins->dreg, s390_f0); } else { s390_lgdr (code, ins->dreg, ins->sreg1); } s390_srag (code, ins->dreg, ins->dreg, 0, 32); break; case OP_MOVE_I4_TO_F: s390_slag (code, s390_r0, ins->sreg1, 0, 32); s390_ldgr (code, ins->dreg, s390_r0); if (!cfg->r4fp) s390_ldebr (code, ins->dreg, ins->dreg); break; case OP_FCONV_TO_R4: s390_ledbr (code, ins->dreg, ins->sreg1); if (!cfg->r4fp) s390_ldebr (code, ins->dreg, ins->dreg); break; case OP_S390_SETF4RET: if (!cfg->r4fp) s390_ledbr (code, ins->dreg, ins->sreg1); else s390_ldr (code, ins->dreg, ins->sreg1); break; case OP_TLS_GET: { if (s390_is_imm16 (ins->inst_offset)) { s390_lghi (code, s390_r13, ins->inst_offset); } else if (s390_is_imm32 (ins->inst_offset)) { s390_lgfi (code, s390_r13, ins->inst_offset); } else { S390_SET (code, s390_r13, ins->inst_offset); } s390_ear (code, s390_r1, 0); 
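/* Presumably, as elsewhere on Linux s390x, the thread pointer is kept in
   access registers a0 (high half) and a1 (low half); the ear/sllg/ear
   sequence reassembles it in r1, which is then indexed by the TLS offset
   already staged in r13. */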
s390_sllg(code, s390_r1, s390_r1, 0, 32); s390_ear (code, s390_r1, 1); s390_lg (code, ins->dreg, s390_r13, s390_r1, 0); } break; case OP_TLS_SET: { if (s390_is_imm16 (ins->inst_offset)) { s390_lghi (code, s390_r13, ins->inst_offset); } else if (s390_is_imm32 (ins->inst_offset)) { s390_lgfi (code, s390_r13, ins->inst_offset); } else { S390_SET (code, s390_r13, ins->inst_offset); } s390_ear (code, s390_r1, 0); s390_sllg(code, s390_r1, s390_r1, 0, 32); s390_ear (code, s390_r1, 1); s390_stg (code, ins->sreg1, s390_r13, s390_r1, 0); } break; case OP_TAILCALL_PARAMETER : // This opcode helps compute sizes, i.e. // of the subsequent OP_TAILCALL, but contributes no code. g_assert (ins->next); break; case OP_TAILCALL : case OP_TAILCALL_REG : case OP_TAILCALL_MEMBASE : { call = (MonoCallInst *) ins; /* * Restore SP to caller's SP */ code = backUpStackPtr(cfg, code); /* * If the destination is specified as a register or membase then * save destination so it doesn't get overwritten by the restores */ if (ins->opcode != OP_TAILCALL) s390_lgr (code, s390_r1, ins->sreg1); /* * We have to restore R6, so it cannot be used as argument register. * This is ensured by mono_arch_tailcall_supported, but verify here. */ g_assert (!(call->used_iregs & (1 << S390_LAST_ARG_REG))); /* * Likewise for the IMT/RGCTX register */ g_assert (!(call->used_iregs & (1 << MONO_ARCH_RGCTX_REG))); g_assert (!(call->rgctx_reg)); /* * Restore all general registers */ s390_lmg (code, s390_r6, s390_r14, STK_BASE, S390_REG_SAVE_OFFSET); /* * Restore any FP registers that have been altered */ if (cfg->arch.fpSize != 0) { int fpOffset = -cfg->arch.fpSize; for (int i = 8; i < 16; i++) { if (cfg->arch.used_fp_regs & (1 << i)) { s390_ldy (code, i, 0, STK_BASE, fpOffset); fpOffset += sizeof(double); } } } if (ins->opcode == OP_TAILCALL_REG) { s390_br (code, s390_r1); } else { if (ins->opcode == OP_TAILCALL_MEMBASE) { if (mono_hwcap_s390x_has_mie2) { s390_bi (code, 0, s390_r1, ins->inst_offset); } else { s390_lg (code, s390_r1, 0, s390_r1, ins->inst_offset); s390_br (code, s390_r1); } } else { mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method, MONO_R_S390_THUNKED); S390_BR_TEMPLATE (code, s390_r1); cfg->thunk_area += THUNK_SIZE; } } } break; case OP_CHECK_THIS: { /* ensure ins->sreg1 is not NULL */ s390_lg (code, s390_r0, 0, ins->sreg1, 0); s390_ltgr (code, s390_r0, s390_r0); } break; case OP_ARGLIST: { const int offset = cfg->sig_cookie + cfg->stack_usage; S390_SET (code, s390_r0, offset); s390_agr (code, s390_r0, cfg->frame_reg); s390_stg (code, s390_r0, 0, ins->sreg1, 0); } break; case OP_FCALL: { call = (MonoCallInst *) ins; const MonoJumpInfoTarget patch = mono_call_to_patch (call); code = emit_call (cfg, code, patch.type, patch.target); if (!cfg->r4fp && call->signature->ret->type == MONO_TYPE_R4) s390_ldebr (code, s390_f0, s390_f0); } break; case OP_RCALL: { call = (MonoCallInst *) ins; const MonoJumpInfoTarget patch = mono_call_to_patch (call); code = emit_call (cfg, code, patch.type, patch.target); if (ins->dreg != s390_f0) s390_ldr (code, ins->dreg, s390_f0); break; } case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: case OP_CALL: { call = (MonoCallInst *) ins; const MonoJumpInfoTarget patch = mono_call_to_patch (call); code = emit_call (cfg, code, patch.type, patch.target); } break; case OP_FCALL_REG: call = (MonoCallInst*)ins; s390_lgr (code, s390_r1, ins->sreg1); s390_basr (code, s390_r14, s390_r1); if (!cfg->r4fp && call->signature->ret->type == MONO_TYPE_R4) 
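/* widen the float (R4) result in f0 to double when the caller does not use native r4 mode */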
s390_ldebr (code, s390_f0, s390_f0); break; case OP_RCALL_REG: call = (MonoCallInst*)ins; s390_lgr (code, s390_r1, ins->sreg1); s390_basr (code, s390_r14, s390_r1); if (ins->dreg != s390_f0) s390_ldr (code, ins->dreg, s390_f0); break; case OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: { s390_lgr (code, s390_r1, ins->sreg1); s390_basr (code, s390_r14, s390_r1); } break; case OP_FCALL_MEMBASE: call = (MonoCallInst*)ins; s390_lg (code, s390_r1, 0, ins->sreg1, ins->inst_offset); s390_basr (code, s390_r14, s390_r1); if (!cfg->r4fp && call->signature->ret->type == MONO_TYPE_R4) s390_ldebr (code, s390_f0, s390_f0); break; case OP_RCALL_MEMBASE: call = (MonoCallInst*)ins; s390_lg (code, s390_r1, 0, ins->sreg1, ins->inst_offset); s390_basr (code, s390_r14, s390_r1); if (ins->dreg != s390_f0) s390_ldr (code, ins->dreg, s390_f0); break; case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: { s390_lg (code, s390_r1, 0, ins->sreg1, ins->inst_offset); s390_basr (code, s390_r14, s390_r1); } break; case OP_LOCALLOC: { int area_offset; if (cfg->param_area == 0) area_offset = S390_MINIMAL_STACK_SIZE; else area_offset = cfg->param_area; area_offset = S390_ALIGN(area_offset, S390_STACK_ALIGNMENT); /* Get current backchain pointer */ s390_lg (code, s390_r13, 0, STK_BASE, 0); /* * Round object size to doubleword */ s390_lgr (code, s390_r1, ins->sreg1); s390_aghi (code, s390_r1, 7); s390_srlg (code, s390_r1, s390_r1, 0, 3); s390_sllg (code, s390_r1, s390_r1, 0, 3); if (mono_hwcap_s390x_has_gie) { if (ins->flags & MONO_INST_INIT) s390_lgr (code, s390_r0, s390_r1); s390_risbg (code, ins->dreg, s390_r1, 0, 0xb3, 0); s390_sgrk (code, ins->dreg, STK_BASE, ins->dreg); s390_cgr (code, STK_BASE, ins->dreg); /* L0: */ s390_je (code, 9); /* je L1 */ s390_aghi (code, STK_BASE, -4096); s390_mvghi (code, s390_r15, 0, 0); s390_j (code, -9); /* j L0 */ s390_risbg (code, ins->dreg, s390_r1, 0x34, 0xbf, 0); /* L1: */ s390_ltgr (code, ins->dreg, ins->dreg); s390_jz (code, 13); /* jz L2: */ s390_sgr (code, STK_BASE, ins->dreg); s390_risbg (code, s390_r1, s390_r1, 0x34, 0xbf, 0); s390_lay (code, s390_r1, s390_r1, STK_BASE, -8); s390_mvghi (code, s390_r1, 0, 0); /* L2: */ } else { s390_lgr (code, ins->dreg, s390_r1); s390_nill (code, ins->dreg, 0xf000); s390_lgr (code, s390_r0, STK_BASE); s390_sgr (code, s390_r0, ins->dreg); s390_lgr (code, ins->dreg, s390_r0); s390_cgr (code, STK_BASE, ins->dreg); /* L0: */ s390_je (code, 11); /* je L1 */ s390_aghi (code, STK_BASE, -4096); s390_lghi (code, s390_r0, 0); s390_stg (code, s390_r0, 0, STK_BASE, 4088); s390_j (code, -11); /* j L0 */ s390_lghi (code, ins->dreg, 4095); /* L1: */ s390_ngr (code, ins->dreg, s390_r1); s390_ltgr (code, ins->dreg, ins->dreg); s390_jz (code, 7); /* jz L2 */ s390_sgr (code, STK_BASE, ins->dreg); s390_stg (code, ins->dreg, s390_r1, STK_BASE, -8); /* L2: */ if (ins->flags & MONO_INST_INIT) s390_lgr (code, s390_r0, s390_r1); } /* * Compute address of localloc'd object */ s390_lgr (code, s390_r1, STK_BASE); if (s390_is_imm16(area_offset)) s390_aghi (code, s390_r1, area_offset); else s390_agfi (code, s390_r1, area_offset); s390_aghi (code, s390_r1, 7); s390_srlg (code, s390_r1, s390_r1, 0, 3); s390_sllg (code, s390_r1, s390_r1, 0, 3); s390_lgr (code, ins->dreg, s390_r1); /* Save backchain pointer */ s390_stg (code, s390_r13, 0, STK_BASE, 0); /* * If we need to zero the area then clear from localloc start * using the length we saved earlier */ if (ins->flags & 
MONO_INST_INIT) { s390_lgr (code, s390_r1, s390_r0); s390_lgr (code, s390_r0, ins->dreg); s390_lgr (code, s390_r14, s390_r12); s390_lghi (code, s390_r13, 0); s390_mvcle(code, s390_r0, s390_r12, 0, 0); s390_jo (code, -2); s390_lgr (code, s390_r12, s390_r14); } /* * If we have an LMF then we have to adjust its BP */ if (cfg->method->save_lmf) { int lmfOffset = cfg->stack_usage - sizeof(MonoLMF); if (s390_is_imm16(lmfOffset)) { s390_lghi (code, s390_r13, lmfOffset); } else if (s390_is_imm32(lmfOffset)) { s390_lgfi (code, s390_r13, lmfOffset); } else { S390_SET (code, s390_r13, lmfOffset); } s390_stg (code, s390_r15, s390_r13, cfg->frame_reg, MONO_STRUCT_OFFSET(MonoLMF, ebp)); } } break; case OP_THROW: { s390_lgr (code, s390_r2, ins->sreg1); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception)); } break; case OP_RETHROW: { s390_lgr (code, s390_r2, ins->sreg1); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception)); } break; case OP_START_HANDLER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); S390_LONG (code, stg, stg, s390_r14, 0, spvar->inst_basereg, spvar->inst_offset); } break; case OP_ENDFILTER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); if (ins->sreg1 != s390_r2) s390_lgr(code, s390_r2, ins->sreg1); S390_LONG (code, lg, lg, s390_r14, 0, spvar->inst_basereg, spvar->inst_offset); s390_br (code, s390_r14); } break; case OP_ENDFINALLY: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); S390_LONG (code, lg, lg, s390_r14, 0, spvar->inst_basereg, spvar->inst_offset); s390_br (code, s390_r14); } break; case OP_CALL_HANDLER: { mono_add_patch_info_rel (cfg, code-cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb, MONO_R_S390_DIRECT); s390_brasl (code, s390_r14, 0); for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev) mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb); } break; case OP_LABEL: { ins->inst_c0 = code - cfg->native_code; } break; case OP_RELAXED_NOP: case OP_NOP: case OP_DUMMY_USE: case OP_DUMMY_ICONST: case OP_DUMMY_I8CONST: case OP_DUMMY_R8CONST: case OP_DUMMY_R4CONST: case OP_NOT_REACHED: case OP_NOT_NULL: { } break; case OP_IL_SEQ_POINT: mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); break; case OP_SEQ_POINT: { MonoInst *var; RI_Format *o[2]; guint16 displace; if (cfg->compile_aot) NOT_IMPLEMENTED; if (ins->flags & MONO_INST_SINGLE_STEP_LOC) { var = cfg->arch.ss_tramp_var; s390_lg (code, s390_r1, 0, var->inst_basereg, var->inst_offset); if (mono_hwcap_s390x_has_eif) { s390_ltg (code, s390_r14, 0, s390_r1, 0); } else { s390_lg (code, s390_r14, 0, s390_r1, 0); s390_ltgr (code, s390_r14, s390_r14); } o[0] = (RI_Format *) code; s390_jz (code, 4); s390_lgr (code, s390_r1, cfg->frame_reg); s390_basr (code, s390_r14, s390_r14); displace = ((uintptr_t) code - (uintptr_t) o[0]) / 2; o[0]->i2 = displace; } /* * This is the address which is saved in seq points, */ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); var = cfg->arch.bp_tramp_var; s390_lghi (code, s390_r1, 0); s390_ltgr (code, s390_r1, s390_r1); o[0] = (RI_Format *) code; s390_jz (code, 0); s390_lg (code, s390_r1, 0, var->inst_basereg, var->inst_offset); if (mono_hwcap_s390x_has_eif) { s390_ltg (code, s390_r14, 0, s390_r1, 0); } else { s390_lg (code, s390_r1, 0, s390_r1, 0); s390_ltgr (code, s390_r14, s390_r1); } o[1] = (RI_Format *) code; s390_jz (code, 4); 
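/* an active breakpoint trampoline was found: call it with the frame pointer in r1 */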
s390_lgr (code, s390_r1, cfg->frame_reg); s390_basr (code, s390_r14, s390_r14); displace = ((uintptr_t) code - (uintptr_t) o[0]) / 2; o[0]->i2 = displace; displace = ((uintptr_t) code - (uintptr_t) o[1]) / 2; o[1]->i2 = displace; /* * Add an additional nop so skipping the bp doesn't cause the ip to point * to another IL offset. */ s390_nop (code); break; } case OP_GENERIC_CLASS_INIT: { static int byte_offset = -1; static guint8 bitmask; short int *jump; g_assert (ins->sreg1 == S390_FIRST_ARG_REG); if (byte_offset < 0) mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask); s390_tm (code, ins->sreg1, byte_offset, bitmask); s390_jo (code, 0); CODEPTR(code, jump); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init)); PTRSLOT (code, jump); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; break; } case OP_BR: EMIT_UNCOND_BRANCH(ins); break; case OP_BR_REG: { s390_br (code, ins->sreg1); } break; case OP_CEQ: case OP_ICEQ: case OP_LCEQ: { s390_lghi(code, ins->dreg, 1); s390_jz (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_CLT: case OP_ICLT: case OP_LCLT: { s390_lghi(code, ins->dreg, 1); s390_jl (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_CLT_UN: case OP_ICLT_UN: case OP_LCLT_UN: { s390_lghi(code, ins->dreg, 1); s390_jlo (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_CGT: case OP_ICGT: case OP_LCGT: { s390_lghi(code, ins->dreg, 1); s390_jh (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_CGT_UN: case OP_ICGT_UN: case OP_LCGT_UN: { s390_lghi(code, ins->dreg, 1); s390_jho (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_ICNEQ: { s390_lghi(code, ins->dreg, 1); s390_jne (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_ICGE: { s390_lghi(code, ins->dreg, 1); s390_jhe (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_ICLE: { s390_lghi(code, ins->dreg, 1); s390_jle (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_ICGE_UN: { s390_lghi(code, ins->dreg, 1); s390_jhe (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_ICLE_UN: { s390_lghi(code, ins->dreg, 1); s390_jle (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_COND_EXC_EQ: case OP_COND_EXC_IEQ: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_EQ, ins->inst_p1); break; case OP_COND_EXC_NE_UN: case OP_COND_EXC_INE_UN: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NE, ins->inst_p1); break; case OP_COND_EXC_LT: case OP_COND_EXC_ILT: case OP_COND_EXC_LT_UN: case OP_COND_EXC_ILT_UN: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, ins->inst_p1); break; case OP_COND_EXC_GT: case OP_COND_EXC_IGT: case OP_COND_EXC_GT_UN: case OP_COND_EXC_IGT_UN: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GT, ins->inst_p1); break; case OP_COND_EXC_GE: case OP_COND_EXC_IGE: case OP_COND_EXC_GE_UN: case OP_COND_EXC_IGE_UN: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GE, ins->inst_p1); break; case OP_COND_EXC_LE: case OP_COND_EXC_ILE: case OP_COND_EXC_LE_UN: case OP_COND_EXC_ILE_UN: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LE, ins->inst_p1); break; case OP_COND_EXC_OV: case OP_COND_EXC_IOV: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, ins->inst_p1); break; case OP_COND_EXC_NO: case OP_COND_EXC_INO: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NO, ins->inst_p1); break; case OP_COND_EXC_C: case OP_COND_EXC_IC: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, ins->inst_p1); break; case OP_COND_EXC_NC: case OP_COND_EXC_INC: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, ins->inst_p1); break; case OP_LBEQ: case OP_IBEQ: 
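/* equality branch: taken when the condition code indicates equal */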
EMIT_COND_BRANCH (ins, S390_CC_EQ); break; case OP_LBNE_UN: case OP_IBNE_UN: EMIT_COND_BRANCH (ins, S390_CC_NE); break; case OP_LBLT: case OP_LBLT_UN: case OP_IBLT: case OP_IBLT_UN: EMIT_COND_BRANCH (ins, S390_CC_LT); break; case OP_LBGT: case OP_LBGT_UN: case OP_IBGT: case OP_IBGT_UN: EMIT_COND_BRANCH (ins, S390_CC_GT); break; case OP_LBGE: case OP_LBGE_UN: case OP_IBGE: case OP_IBGE_UN: EMIT_COND_BRANCH (ins, S390_CC_GE); break; case OP_LBLE: case OP_LBLE_UN: case OP_IBLE: case OP_IBLE_UN: EMIT_COND_BRANCH (ins, S390_CC_LE); break; case OP_S390_CRJ: EMIT_COMP_AND_BRANCH(ins, crj, cr); break; case OP_S390_CLRJ: EMIT_COMP_AND_BRANCH(ins, clrj, clr); break; case OP_S390_CGRJ: EMIT_COMP_AND_BRANCH(ins, cgrj, cgr); break; case OP_S390_CLGRJ: EMIT_COMP_AND_BRANCH(ins, clgrj, clgr); break; case OP_S390_CIJ: EMIT_COMP_AND_BRANCH_IMM(ins, crj, cr, ltr, FALSE); break; case OP_S390_CLIJ: EMIT_COMP_AND_BRANCH_IMM(ins, clrj, clr, ltr, TRUE); break; case OP_S390_CGIJ: EMIT_COMP_AND_BRANCH_IMM(ins, cgrj, cgr, ltgr, FALSE); break; case OP_S390_CLGIJ: EMIT_COMP_AND_BRANCH_IMM(ins, clgrj, clgr, ltgr, TRUE); break; /* floating point opcodes */ case OP_R8CONST: { double d = *(double *) ins->inst_p0; if (d == 0) { s390_lzdr (code, ins->dreg); if (mono_signbit (d) != 0) s390_lndbr (code, ins->dreg, ins->dreg); } else { S390_SET (code, s390_r13, ins->inst_p0); s390_ld (code, ins->dreg, 0, s390_r13, 0); } } break; case OP_R4CONST: { float f = *(float *) ins->inst_p0; if (f == 0) { if (cfg->r4fp) { s390_lzer (code, ins->dreg); if (mono_signbit (f) != 0) s390_lnebr (code, ins->dreg, ins->dreg); } else { s390_lzdr (code, ins->dreg); if (mono_signbit (f) != 0) s390_lndbr (code, ins->dreg, ins->dreg); } } else { S390_SET (code, s390_r13, ins->inst_p0); s390_le (code, ins->dreg, 0, s390_r13, 0); if (!cfg->r4fp) s390_ldebr (code, ins->dreg, ins->dreg); else s390_le (code, ins->dreg, 0, s390_r13, 0); } } break; case OP_STORER8_MEMBASE_REG: { S390_LONG (code, stdy, std, ins->sreg1, 0, ins->inst_destbasereg, ins->inst_offset); } break; case OP_LOADR8_MEMBASE: { S390_LONG (code, ldy, ld, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_STORER4_MEMBASE_REG: { if (cfg->r4fp) { S390_LONG (code, stey, ste, ins->sreg1, 0, ins->inst_destbasereg, ins->inst_offset); } else { s390_ledbr (code, ins->sreg1, ins->sreg1); S390_LONG (code, stey, ste, ins->sreg1, 0, ins->inst_destbasereg, ins->inst_offset); s390_ldebr (code, ins->sreg1, ins->sreg1); } } break; case OP_LOADR4_MEMBASE: { if (cfg->r4fp) { S390_LONG (code, ley, le, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } else { S390_LONG (code, ley, le, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); s390_ldebr (code, ins->dreg, ins->dreg); } } break; case OP_ICONV_TO_R_UN: { if (mono_hwcap_s390x_has_fpe) { s390_cdlfbr (code, ins->dreg, 5, ins->sreg1, 0); } else { s390_llgfr (code, s390_r0, ins->sreg1); s390_cdgbr (code, ins->dreg, s390_r0); } } break; case OP_LCONV_TO_R_UN: { if (mono_hwcap_s390x_has_fpe) { s390_cdlgbr (code, ins->dreg, 6, ins->sreg1, 0); } else { short int *jump; s390_lgdr (code, s390_r0, s390_r15); s390_lgdr (code, s390_r1, s390_r13); s390_lgdr (code, s390_r14, s390_r12); s390_cxgbr (code, s390_f12, ins->sreg1); s390_ltgr (code, ins->sreg1, ins->sreg1); s390_jnl (code, 0); CODEPTR(code, jump); S390_SET (code, s390_r13, 0x403f000000000000llu); s390_lgdr (code, s390_f13, s390_r13); s390_lzdr (code, s390_f15); s390_axbr (code, s390_f12, s390_f13); PTRSLOT(code, jump); s390_ldxbr (code, s390_f13, s390_f12); s390_ldr (code, ins->dreg, 
s390_f13); s390_ldgr (code, s390_f12, s390_r14); s390_ldgr (code, s390_f13, s390_r1); s390_ldgr (code, s390_f15, s390_r0); } } break; case OP_ICONV_TO_R4: s390_cefbr (code, ins->dreg, ins->sreg1); if (!cfg->r4fp) s390_ldebr (code, ins->dreg, ins->dreg); break; case OP_LCONV_TO_R4: s390_cegbr (code, ins->dreg, ins->sreg1); if (!cfg->r4fp) s390_ldebr (code, ins->dreg, ins->dreg); break; case OP_ICONV_TO_R8: s390_cdfbr (code, ins->dreg, ins->sreg1); break; case OP_LCONV_TO_R8: s390_cdgbr (code, ins->dreg, ins->sreg1); break; case OP_FCONV_TO_I1: s390_cgdbr (code, ins->dreg, 5, ins->sreg1); s390_ltgr (code, ins->dreg, ins->dreg); s390_jnl (code, 4); s390_oill (code, ins->dreg, 0x80); s390_lghi (code, s390_r0, 0xff); s390_ngr (code, ins->dreg, s390_r0); break; case OP_FCONV_TO_U1: if (mono_hwcap_s390x_has_fpe) { s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0); s390_lghi (code, s390_r0, 0xff); s390_ngr (code, ins->dreg, s390_r0); } else { code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE); } break; case OP_FCONV_TO_I2: s390_cgdbr (code, ins->dreg, 5, ins->sreg1); s390_ltgr (code, ins->dreg, ins->dreg); s390_jnl (code, 4); s390_oill (code, ins->dreg, 0x8000); s390_llill (code, s390_r0, 0xffff); s390_ngr (code, ins->dreg, s390_r0); break; case OP_FCONV_TO_U2: if (mono_hwcap_s390x_has_fpe) { s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0); s390_llill (code, s390_r0, 0xffff); s390_ngr (code, ins->dreg, s390_r0); } else { code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE); } break; case OP_FCONV_TO_I4: s390_cfdbr (code, ins->dreg, 5, ins->sreg1); break; case OP_FCONV_TO_U4: if (mono_hwcap_s390x_has_fpe) { s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0); } else { code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE); } break; case OP_FCONV_TO_I8: case OP_FCONV_TO_I: s390_cgdbr (code, ins->dreg, 5, ins->sreg1); break; case OP_FCONV_TO_U8: if (mono_hwcap_s390x_has_fpe) { s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0); } else { code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 8, FALSE); } break; case OP_RCONV_TO_I1: s390_cgebr (code, ins->dreg, 5, ins->sreg1); s390_ltgr (code, ins->dreg, ins->dreg); s390_jnl (code, 4); s390_oill (code, ins->dreg, 0x80); s390_lghi (code, s390_r0, 0xff); s390_ngr (code, ins->dreg, s390_r0); break; case OP_RCONV_TO_U1: if (mono_hwcap_s390x_has_fpe) { s390_clgebr (code, ins->dreg, 5, ins->sreg1, 0); s390_lghi (code, s390_r0, 0xff); s390_ngr (code, ins->dreg, s390_r0); } else { code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE); } break; case OP_RCONV_TO_I2: s390_cgebr (code, ins->dreg, 5, ins->sreg1); s390_ltgr (code, ins->dreg, ins->dreg); s390_jnl (code, 4); s390_oill (code, ins->dreg, 0x8000); s390_llill (code, s390_r0, 0xffff); s390_ngr (code, ins->dreg, s390_r0); break; case OP_RCONV_TO_U2: if (mono_hwcap_s390x_has_fpe) { s390_clgebr (code, ins->dreg, 5, ins->sreg1, 0); s390_llill (code, s390_r0, 0xffff); s390_ngr (code, ins->dreg, s390_r0); } else { code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE); } break; case OP_RCONV_TO_I4: s390_cfebr (code, ins->dreg, 5, ins->sreg1); break; case OP_RCONV_TO_U4: if (mono_hwcap_s390x_has_fpe) { s390_clfebr (code, ins->dreg, 5, ins->sreg1, 0); } else { code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE); } break; case OP_RCONV_TO_I8: case OP_RCONV_TO_I: s390_cgebr (code, ins->dreg, 5, ins->sreg1); break; case OP_RCONV_TO_U8: if (mono_hwcap_s390x_has_fpe) { s390_clgebr (code, ins->dreg, 5, ins->sreg1, 0); } 
else { code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, FALSE); } break; case OP_LCONV_TO_OVF_I: { /* Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000 */ short int *o[5]; s390_ltgr (code, ins->sreg2, ins->sreg2); s390_jnl (code, 0); CODEPTR(code, o[0]); s390_ltgr (code, ins->sreg1, ins->sreg1); s390_jnl (code, 0); CODEPTR(code, o[1]); s390_lhi (code, s390_r13, -1); s390_cgr (code, ins->sreg1, s390_r13); s390_jnz (code, 0); CODEPTR(code, o[2]); if (ins->dreg != ins->sreg2) s390_lgr (code, ins->dreg, ins->sreg2); s390_j (code, 0); CODEPTR(code, o[3]); PTRSLOT(code, o[0]); s390_jz (code, 0); CODEPTR(code, o[4]); PTRSLOT(code, o[1]); PTRSLOT(code, o[2]); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException"); s390_brasl (code, s390_r14, 0); PTRSLOT(code, o[3]); PTRSLOT(code, o[4]); } break; case OP_ABS: s390_lpdbr (code, ins->dreg, ins->sreg1); break; case OP_ABSF: s390_lpebr (code, ins->dreg, ins->sreg1); break; case OP_CEIL: s390_fidbra (code, ins->dreg, 6, ins->sreg1, 4); break; case OP_CEILF: s390_fiebra (code, ins->dreg, 6, ins->sreg1, 4); break; case OP_FLOOR: s390_fidbra (code, ins->dreg, 7, ins->sreg1, 4); break; case OP_FLOORF: s390_fiebra (code, ins->dreg, 7, ins->sreg1, 4); break; case OP_FCOPYSIGN: s390_cpsdr (code, ins->dreg, ins->sreg2, ins->sreg1); break; case OP_ROUND: s390_fidbra (code, ins->dreg, 4, ins->sreg1, 4); break; case OP_SQRT: s390_sqdbr (code, ins->dreg, ins->sreg1); break; case OP_SQRTF: s390_sqebr (code, ins->dreg, ins->sreg1); break; case OP_TRUNC: s390_fidbra (code, ins->dreg, 5, ins->sreg1, 4); break; case OP_TRUNCF: s390_fiebra (code, ins->dreg, 5, ins->sreg1, 4); break; case OP_FADD: { CHECK_SRCDST_COM_F; s390_adbr (code, ins->dreg, src2); } break; case OP_RADD: { CHECK_SRCDST_COM_F; s390_aebr (code, ins->dreg, src2); } break; case OP_FSUB: { CHECK_SRCDST_NCOM_F(sdbr); } break; case OP_RSUB: { CHECK_SRCDST_NCOM_F(sebr); } break; case OP_FMUL: { CHECK_SRCDST_COM_F; s390_mdbr (code, ins->dreg, src2); } break; case OP_RMUL: { CHECK_SRCDST_COM_F; s390_meer (code, ins->dreg, src2); } break; case OP_FDIV: { CHECK_SRCDST_NCOM_F(ddbr); } break; case OP_RDIV: { CHECK_SRCDST_NCOM_F(debr); } break; case OP_FNEG: { s390_lcdbr (code, ins->dreg, ins->sreg1); } break; case OP_RNEG: { s390_lcebr (code, ins->dreg, ins->sreg1); } break; case OP_FREM: { CHECK_SRCDST_NCOM_FR(didbr, 5); } break; case OP_RREM: { CHECK_SRCDST_NCOM_FR(diebr, 5); } break; case OP_FCOMPARE: { s390_cdbr (code, ins->sreg1, ins->sreg2); } break; case OP_RCOMPARE: { s390_cebr (code, ins->sreg1, ins->sreg2); } break; case OP_FCEQ: { s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_je (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FCLT: { s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jl (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FCLT_UN: { s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jlo (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FCGT: { s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jh (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FCGT_UN: { s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jho (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FCNEQ: { s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jne (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FCGE: { s390_cdbr (code, 
ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jhe (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FCLE: { s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jle (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCEQ: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_je (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCLT: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jl (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCLT_UN: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jlo (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCGT: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jh (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCGT_UN: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jho (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCNEQ: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jne (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCGE: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jhe (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCLE: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jle (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FBEQ: { short *o; s390_jo (code, 0); CODEPTR(code, o); EMIT_COND_BRANCH (ins, S390_CC_EQ); PTRSLOT (code, o); } break; case OP_FBNE_UN: EMIT_COND_BRANCH (ins, S390_CC_NE|S390_CC_OV); break; case OP_FBLT: { short *o; s390_jo (code, 0); CODEPTR(code, o); EMIT_COND_BRANCH (ins, S390_CC_LT); PTRSLOT (code, o); } break; case OP_FBLT_UN: EMIT_COND_BRANCH (ins, S390_CC_LT|S390_CC_OV); break; case OP_FBGT: { short *o; s390_jo (code, 0); CODEPTR(code, o); EMIT_COND_BRANCH (ins, S390_CC_GT); PTRSLOT (code, o); } break; case OP_FBGT_UN: EMIT_COND_BRANCH (ins, S390_CC_GT|S390_CC_OV); break; case OP_FBGE: { short *o; s390_jo (code, 0); CODEPTR(code, o); EMIT_COND_BRANCH (ins, S390_CC_GE); PTRSLOT (code, o); } break; case OP_FBGE_UN: EMIT_COND_BRANCH (ins, S390_CC_GE|S390_CC_OV); break; case OP_FBLE: { short *o; s390_jo (code, 0); CODEPTR(code, o); EMIT_COND_BRANCH (ins, S390_CC_LE); PTRSLOT (code, o); } break; case OP_FBLE_UN: EMIT_COND_BRANCH (ins, S390_CC_LE|S390_CC_OV); break; case OP_CKFINITE: { short *o; s390_lhi (code, s390_r13, 0x7f); s390_tcdb (code, ins->sreg1, 0, s390_r13, 0); s390_jz (code, 0); CODEPTR(code, o); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException"); s390_brasl (code, s390_r14,0); PTRSLOT(code, o); } break; case OP_S390_MOVE: { if (ins->backend.size > 0) { if (ins->backend.size <= 256) { s390_mvc (code, ins->backend.size, ins->sreg2, ins->inst_offset, ins->sreg1, ins->inst_imm); } else { s390_lgr (code, s390_r0, ins->sreg2); if (ins->inst_offset > 0) { if (s390_is_imm16 (ins->inst_offset)) { 
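/* long move (> 256 bytes): set up the MVCLE operand registers, starting with the destination (r0) biased by inst_offset */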
s390_aghi (code, s390_r0, ins->inst_offset); } else if (s390_is_imm32 (ins->inst_offset)) { s390_agfi (code, s390_r0, ins->inst_offset); } else { S390_SET (code, s390_r13, ins->inst_offset); s390_agr (code, s390_r0, s390_r13); } } s390_lgr (code, s390_r12, ins->sreg1); if (ins->inst_imm > 0) { if (s390_is_imm16 (ins->inst_imm)) { s390_aghi (code, s390_r12, ins->inst_imm); } else if (s390_is_imm32 (ins->inst_imm)) { s390_agfi (code, s390_r12, ins->inst_imm); } else { S390_SET (code, s390_r13, ins->inst_imm); s390_agr (code, s390_r12, s390_r13); } } if (s390_is_imm16 (ins->backend.size)) { s390_lghi (code, s390_r1, ins->backend.size); } else if (s390_is_imm32 (ins->backend.size)) { s390_lgfi (code, s390_r1, ins->backend.size); } else { S390_SET (code, s390_r13, ins->backend.size); s390_lgr (code, s390_r1, s390_r13); } s390_lgr (code, s390_r13, s390_r1); s390_mvcle(code, s390_r0, s390_r12, 0, 0); s390_jo (code, -2); } } } break; case OP_ATOMIC_ADD_I8: { if (mono_hwcap_s390x_has_ia) { s390_laag(code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset); if (mono_hwcap_s390x_has_mlt) { s390_agrk(code, ins->dreg, s390_r0, ins->sreg2); } else { s390_agr (code, s390_r0, ins->sreg2); s390_lgr (code, ins->dreg, s390_r0); } } else { s390_lgr (code, s390_r1, ins->sreg2); s390_lg (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset); s390_agr (code, s390_r1, s390_r0); s390_csg (code, s390_r0, s390_r1, ins->inst_basereg, ins->inst_offset); s390_jnz (code, -10); s390_lgr (code, ins->dreg, s390_r1); } } break; case OP_ATOMIC_EXCHANGE_I8: { s390_lg (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset); s390_csg (code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset); s390_jnz (code, -6); s390_lgr (code, ins->dreg, s390_r0); } break; case OP_ATOMIC_ADD_I4: { if (mono_hwcap_s390x_has_ia) { s390_laa (code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset); s390_ar (code, s390_r0, ins->sreg2); s390_lgfr(code, ins->dreg, s390_r0); } else { s390_lgfr(code, s390_r1, ins->sreg2); s390_lgf (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset); s390_agr (code, s390_r1, s390_r0); s390_cs (code, s390_r0, s390_r1, ins->inst_basereg, ins->inst_offset); s390_jnz (code, -9); s390_lgfr(code, ins->dreg, s390_r1); } } break; case OP_ATOMIC_EXCHANGE_I4: { s390_l (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset); s390_cs (code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset); s390_jnz (code, -4); s390_lgfr(code, ins->dreg, s390_r0); } break; case OP_S390_BKCHAIN: { s390_lgr (code, ins->dreg, ins->sreg1); if (s390_is_imm16 (cfg->stack_offset)) { s390_aghi (code, ins->dreg, cfg->stack_offset); } else if (s390_is_imm32 (cfg->stack_offset)) { s390_agfi (code, ins->dreg, cfg->stack_offset); } else { S390_SET (code, s390_r13, cfg->stack_offset); s390_agr (code, ins->dreg, s390_r13); } } break; case OP_MEMORY_BARRIER: s390_mem (code); break; case OP_POPCNT32: s390_llgfr (code, s390_r1, ins->sreg1); if (mono_hwcap_s390x_has_mie3) { s390_popcnt (code, ins->dreg, 0x80, s390_r1); } else { s390_popcnt (code, s390_r0, 0, s390_r1); s390_ahhlr (code, s390_r0, s390_r0, s390_r0); s390_sllg (code, s390_r1, s390_r0, 0, 16); s390_algr (code, s390_r0, s390_r1); s390_sllg (code, s390_r1, s390_r0, 0, 8); s390_algr (code, s390_r0, s390_r1); s390_srlg (code, ins->dreg, s390_r0, 0, 56); } break; case OP_POPCNT64: if (mono_hwcap_s390x_has_mie3) { s390_popcnt (code, ins->dreg, 0x80, ins->sreg1); } else { s390_popcnt (code, s390_r0, 0, ins->sreg1); s390_ahhlr (code, s390_r0, s390_r0, s390_r0); s390_sllg (code, s390_r1, s390_r0, 0, 16); s390_algr (code,
s390_r0, s390_r1); s390_sllg (code, s390_r1, s390_r0, 0, 8); s390_algr (code, s390_r0, s390_r1); s390_srlg (code, ins->dreg, s390_r0, 0, 56); } break; case OP_LIVERANGE_START: { if (cfg->verbose_level > 1) printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code; break; } case OP_LIVERANGE_END: { if (cfg->verbose_level > 1) printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code; break; } case OP_GC_SAFE_POINT: { short *br; s390_ltg (code, s390_r0, 0, ins->sreg1, 0); s390_jz (code, 0); CODEPTR(code, br); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll)); PTRSLOT (code, br); break; } case OP_GC_LIVENESS_DEF: case OP_GC_LIVENESS_USE: case OP_GC_PARAM_SLOT_LIVENESS_DEF: ins->backend.pc_offset = code - cfg->native_code; break; case OP_GC_SPILL_SLOT_LIVENESS_DEF: ins->backend.pc_offset = code - cfg->native_code; bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins); break; #ifdef MONO_ARCH_SIMD_INTRINSICS case OP_ADDPS: s390x_addps (code, ins->sreg1, ins->sreg2); break; case OP_DIVPS: s390x_divps (code, ins->sreg1, ins->sreg2); break; case OP_MULPS: s390x_mulps (code, ins->sreg1, ins->sreg2); break; case OP_SUBPS: s390x_subps (code, ins->sreg1, ins->sreg2); break; case OP_MAXPS: s390x_maxps (code, ins->sreg1, ins->sreg2); break; case OP_MINPS: s390x_minps (code, ins->sreg1, ins->sreg2); break; case OP_COMPPS: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7); s390x_cmpps_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ANDPS: s390x_andps (code, ins->sreg1, ins->sreg2); break; case OP_ANDNPS: s390x_andnps (code, ins->sreg1, ins->sreg2); break; case OP_ORPS: s390x_orps (code, ins->sreg1, ins->sreg2); break; case OP_XORPS: s390x_xorps (code, ins->sreg1, ins->sreg2); break; case OP_SQRTPS: s390x_sqrtps (code, ins->dreg, ins->sreg1); break; case OP_RSQRTPS: s390x_rsqrtps (code, ins->dreg, ins->sreg1); break; case OP_RCPPS: s390x_rcpps (code, ins->dreg, ins->sreg1); break; case OP_ADDSUBPS: s390x_addsubps (code, ins->sreg1, ins->sreg2); break; case OP_HADDPS: s390x_haddps (code, ins->sreg1, ins->sreg2); break; case OP_HSUBPS: s390x_hsubps (code, ins->sreg1, ins->sreg2); break; case OP_DUPPS_HIGH: s390x_movshdup (code, ins->dreg, ins->sreg1); break; case OP_DUPPS_LOW: s390x_movsldup (code, ins->dreg, ins->sreg1); break; case OP_PSHUFLEW_HIGH: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); s390x_pshufhw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_PSHUFLEW_LOW: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); s390x_pshuflw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_PSHUFLED: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); s390x_pshufd_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_SHUFPS: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); s390x_shufps_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_SHUFPD: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0x3); s390x_shufpd_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ADDPD: s390x_addpd (code, ins->sreg1, ins->sreg2); break; case OP_DIVPD: s390x_divpd (code, ins->sreg1, ins->sreg2); break; case OP_MULPD: s390x_mulpd (code, ins->sreg1, ins->sreg2); break; case OP_SUBPD: s390x_subpd (code, 
ins->sreg1, ins->sreg2); break; case OP_MAXPD: s390x_maxpd (code, ins->sreg1, ins->sreg2); break; case OP_MINPD: s390x_minpd (code, ins->sreg1, ins->sreg2); break; case OP_COMPPD: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7); s390x_cmppd_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ANDPD: s390x_andpd (code, ins->sreg1, ins->sreg2); break; case OP_ANDNPD: s390x_andnpd (code, ins->sreg1, ins->sreg2); break; case OP_ORPD: s390x_orpd (code, ins->sreg1, ins->sreg2); break; case OP_XORPD: s390x_xorpd (code, ins->sreg1, ins->sreg2); break; case OP_SQRTPD: s390x_sqrtpd (code, ins->dreg, ins->sreg1); break; case OP_ADDSUBPD: s390x_addsubpd (code, ins->sreg1, ins->sreg2); break; case OP_HADDPD: s390x_haddpd (code, ins->sreg1, ins->sreg2); break; case OP_HSUBPD: s390x_hsubpd (code, ins->sreg1, ins->sreg2); break; case OP_DUPPD: s390x_movddup (code, ins->dreg, ins->sreg1); break; case OP_EXTRACT_MASK: s390x_pmovmskb (code, ins->dreg, ins->sreg1); break; case OP_PAND: s390x_pand (code, ins->sreg1, ins->sreg2); break; case OP_POR: s390x_por (code, ins->sreg1, ins->sreg2); break; case OP_PXOR: s390x_pxor (code, ins->sreg1, ins->sreg2); break; case OP_PADDB: s390x_paddb (code, ins->sreg1, ins->sreg2); break; case OP_PADDW: s390x_paddw (code, ins->sreg1, ins->sreg2); break; case OP_PADDD: s390x_paddd (code, ins->sreg1, ins->sreg2); break; case OP_PADDQ: s390x_paddq (code, ins->sreg1, ins->sreg2); break; case OP_PSUBB: s390x_psubb (code, ins->sreg1, ins->sreg2); break; case OP_PSUBW: s390x_psubw (code, ins->sreg1, ins->sreg2); break; case OP_PSUBD: s390x_psubd (code, ins->sreg1, ins->sreg2); break; case OP_PSUBQ: s390x_psubq (code, ins->sreg1, ins->sreg2); break; case OP_PMAXB_UN: s390x_pmaxub (code, ins->sreg1, ins->sreg2); break; case OP_PMAXW_UN: s390x_pmaxuw (code, ins->sreg1, ins->sreg2); break; case OP_PMAXD_UN: s390x_pmaxud (code, ins->sreg1, ins->sreg2); break; case OP_PMAXB: s390x_pmaxsb (code, ins->sreg1, ins->sreg2); break; case OP_PMAXW: s390x_pmaxsw (code, ins->sreg1, ins->sreg2); break; case OP_PMAXD: s390x_pmaxsd (code, ins->sreg1, ins->sreg2); break; case OP_PAVGB_UN: s390x_pavgb (code, ins->sreg1, ins->sreg2); break; case OP_PAVGW_UN: s390x_pavgw (code, ins->sreg1, ins->sreg2); break; case OP_PMINB_UN: s390x_pminub (code, ins->sreg1, ins->sreg2); break; case OP_PMINW_UN: s390x_pminuw (code, ins->sreg1, ins->sreg2); break; case OP_PMIND_UN: s390x_pminud (code, ins->sreg1, ins->sreg2); break; case OP_PMINB: s390x_pminsb (code, ins->sreg1, ins->sreg2); break; case OP_PMINW: s390x_pminsw (code, ins->sreg1, ins->sreg2); break; case OP_PMIND: s390x_pminsd (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQB: s390x_pcmpeqb (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQW: s390x_pcmpeqw (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQD: s390x_pcmpeqd (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQQ: s390x_pcmpeqq (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTB: s390x_pcmpgtb (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTW: s390x_pcmpgtw (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTD: s390x_pcmpgtd (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTQ: s390x_pcmpgtq (code, ins->sreg1, ins->sreg2); break; case OP_PSUM_ABS_DIFF: s390x_psadbw (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWB: s390x_punpcklbw (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWW: s390x_punpcklwd (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWD: s390x_punpckldq (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWQ: 
s390x_punpcklqdq (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWPS: s390x_unpcklps (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWPD: s390x_unpcklpd (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHB: s390x_punpckhbw (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHW: s390x_punpckhwd (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHD: s390x_punpckhdq (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHQ: s390x_punpckhqdq (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHPS: s390x_unpckhps (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHPD: s390x_unpckhpd (code, ins->sreg1, ins->sreg2); break; case OP_PACKW: s390x_packsswb (code, ins->sreg1, ins->sreg2); break; case OP_PACKD: s390x_packssdw (code, ins->sreg1, ins->sreg2); break; case OP_PACKW_UN: s390x_packuswb (code, ins->sreg1, ins->sreg2); break; case OP_PACKD_UN: s390x_packusdw (code, ins->sreg1, ins->sreg2); break; case OP_PADDB_SAT_UN: s390x_paddusb (code, ins->sreg1, ins->sreg2); break; case OP_PSUBB_SAT_UN: s390x_psubusb (code, ins->sreg1, ins->sreg2); break; case OP_PADDW_SAT_UN: s390x_paddusw (code, ins->sreg1, ins->sreg2); break; case OP_PSUBW_SAT_UN: s390x_psubusw (code, ins->sreg1, ins->sreg2); break; case OP_PADDB_SAT: s390x_paddsb (code, ins->sreg1, ins->sreg2); break; case OP_PSUBB_SAT: s390x_psubsb (code, ins->sreg1, ins->sreg2); break; case OP_PADDW_SAT: s390x_paddsw (code, ins->sreg1, ins->sreg2); break; case OP_PSUBW_SAT: s390x_psubsw (code, ins->sreg1, ins->sreg2); break; case OP_PMULW: s390x_pmullw (code, ins->sreg1, ins->sreg2); break; case OP_PMULD: s390x_pmulld (code, ins->sreg1, ins->sreg2); break; case OP_PMULQ: s390x_pmuludq (code, ins->sreg1, ins->sreg2); break; case OP_PMULW_HIGH_UN: s390x_pmulhuw (code, ins->sreg1, ins->sreg2); break; case OP_PMULW_HIGH: s390x_pmulhw (code, ins->sreg1, ins->sreg2); break; case OP_PSHRW: s390x_psrlw_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHRW_REG: s390x_psrlw (code, ins->dreg, ins->sreg2); break; case OP_PSARW: s390x_psraw_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSARW_REG: s390x_psraw (code, ins->dreg, ins->sreg2); break; case OP_PSHLW: s390x_psllw_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHLW_REG: s390x_psllw (code, ins->dreg, ins->sreg2); break; case OP_PSHRD: s390x_psrld_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHRD_REG: s390x_psrld (code, ins->dreg, ins->sreg2); break; case OP_PSARD: s390x_psrad_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSARD_REG: s390x_psrad (code, ins->dreg, ins->sreg2); break; case OP_PSHLD: s390x_pslld_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHLD_REG: s390x_pslld (code, ins->dreg, ins->sreg2); break; case OP_PSHRQ: s390x_psrlq_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHRQ_REG: s390x_psrlq (code, ins->dreg, ins->sreg2); break; /*TODO: This is a part of the SSE spec but not added case OP_PSARQ: s390x_psraq_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSARQ_REG: s390x_psraq (code, ins->dreg, ins->sreg2); break; */ case OP_PSHLQ: s390x_psllq_reg_imm (code, ins->dreg, ins->inst_imm); break; case OP_PSHLQ_REG: s390x_psllq (code, ins->dreg, ins->sreg2); break; case OP_CVTDQ2PD: s390x_cvtdq2pd (code, ins->dreg, ins->sreg1); break; case OP_CVTDQ2PS: s390x_cvtdq2ps (code, ins->dreg, ins->sreg1); break; case OP_CVTPD2DQ: s390x_cvtpd2dq (code, ins->dreg, ins->sreg1); break; case OP_CVTPD2PS: s390x_cvtpd2ps (code, ins->dreg, ins->sreg1); break; case OP_CVTPS2DQ:
s390x_cvtps2dq (code, ins->dreg, ins->sreg1); break; case OP_CVTPS2PD: s390x_cvtps2pd (code, ins->dreg, ins->sreg1); break; case OP_CVTTPD2DQ: s390x_cvttpd2dq (code, ins->dreg, ins->sreg1); break; case OP_CVTTPS2DQ: s390x_cvttps2dq (code, ins->dreg, ins->sreg1); break; case OP_ICONV_TO_X: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_EXTRACT_I4: amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_EXTRACT_I8: if (ins->inst_c0) { amd64_movhlps (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1); amd64_movd_reg_xreg_size (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG, 8); } else { amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8); } break; case OP_EXTRACT_I1: case OP_EXTRACT_U1: amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4); if (ins->inst_c0) amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_c0 * 8); amd64_widen_reg (code, ins->dreg, ins->dreg, ins->inst_c1 == OP_EXTRACT_I1, FALSE); break; case OP_EXTRACT_I2: case OP_EXTRACT_U2: /*amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4); if (ins->inst_c0) amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, 16, 4);*/ s390x_pextrw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); amd64_widen_reg_size (code, ins->dreg, ins->dreg, ins->inst_c1 == OP_EXTRACT_I2, TRUE, 4); break; case OP_EXTRACT_R8: if (ins->inst_c0) amd64_movhlps (code, ins->dreg, ins->sreg1); else s390x_movsd (code, ins->dreg, ins->sreg1); break; case OP_INSERT_I2: s390x_pinsrw_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_EXTRACTX_U2: s390x_pextrw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_INSERTX_U1_SLOW: /*sreg1 is the extracted ireg (scratch) /sreg2 is the to be inserted ireg (scratch) /dreg is the xreg to receive the value*/ /*clear the bits from the extracted word*/ amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_c0 & 1 ? 
0x00FF : 0xFF00); /*shift the value to insert if needed*/ if (ins->inst_c0 & 1) amd64_shift_reg_imm_size (code, X86_SHL, ins->sreg2, 8, 4); /*join them together*/ amd64_alu (code, X86_OR, ins->sreg1, ins->sreg2); s390x_pinsrw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0 / 2); break; case OP_INSERTX_I4_SLOW: s390x_pinsrw_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2); amd64_shift_reg_imm (code, X86_SHR, ins->sreg2, 16); s390x_pinsrw_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2 + 1); break; case OP_INSERTX_I8_SLOW: amd64_movd_xreg_reg_size(code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg2, 8); if (ins->inst_c0) amd64_movlhps (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG); else s390x_movsd (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG); break; case OP_INSERTX_R4_SLOW: switch (ins->inst_c0) { case 0: if (cfg->r4fp) s390x_movss (code, ins->dreg, ins->sreg2); else s390x_cvtsd2ss (code, ins->dreg, ins->sreg2); break; case 1: s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3)); if (cfg->r4fp) s390x_movss (code, ins->dreg, ins->sreg2); else s390x_cvtsd2ss (code, ins->dreg, ins->sreg2); s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3)); break; case 2: s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3)); if (cfg->r4fp) s390x_movss (code, ins->dreg, ins->sreg2); else s390x_cvtsd2ss (code, ins->dreg, ins->sreg2); s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3)); break; case 3: s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0)); if (cfg->r4fp) s390x_movss (code, ins->dreg, ins->sreg2); else s390x_cvtsd2ss (code, ins->dreg, ins->sreg2); s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0)); break; } break; case OP_INSERTX_R8_SLOW: if (ins->inst_c0) amd64_movlhps (code, ins->dreg, ins->sreg2); else s390x_movsd (code, ins->dreg, ins->sreg2); break; case OP_STOREX_MEMBASE_REG: case OP_STOREX_MEMBASE: s390x_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1); break; case OP_LOADX_MEMBASE: s390x_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_LOADX_ALIGNED_MEMBASE: s390x_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_STOREX_ALIGNED_MEMBASE_REG: s390x_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1); break; case OP_STOREX_NTA_MEMBASE_REG: s390x_movntps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_PREFETCH_MEMBASE: s390x_prefetch_reg_membase (code, ins->backend.arg_info, ins->sreg1, ins->inst_offset); break; case OP_XMOVE: /*FIXME the peephole pass should have killed this*/ if (ins->dreg != ins->sreg1) s390x_movaps (code, ins->dreg, ins->sreg1); break; case OP_XZERO: s390x_pxor (code, ins->dreg, ins->dreg); break; case OP_ICONV_TO_R4_RAW: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_FCONV_TO_R8_X: s390x_movsd (code, ins->dreg, ins->sreg1); break; case OP_XCONV_R8_TO_I4: s390x_cvttsd2si_reg_xreg_size (code, ins->dreg, ins->sreg1, 4); switch (ins->backend.source_opcode) { case OP_FCONV_TO_I1: amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE); break; case OP_FCONV_TO_U1: amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); break; case OP_FCONV_TO_I2: amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE); break; case OP_FCONV_TO_U2: amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE); break; } break; case OP_EXPAND_I2: s390x_pinsrw_imm (code, 
ins->dreg, ins->sreg1, 0); s390x_pinsrw_imm (code, ins->dreg, ins->sreg1, 1); s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0); break; case OP_EXPAND_I4: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4); s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0); break; case OP_EXPAND_I8: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8); s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0x44); break; case OP_EXPAND_R4: if (cfg->r4fp) { s390x_movsd (code, ins->dreg, ins->sreg1); } else { s390x_movsd (code, ins->dreg, ins->sreg1); s390x_cvtsd2ss (code, ins->dreg, ins->dreg); } s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0); break; case OP_EXPAND_R8: s390x_movsd (code, ins->dreg, ins->sreg1); s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0x44); break; #endif default: g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__); g_assert_not_reached (); } if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) { g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)", mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset); g_assert_not_reached (); } } set_code_cursor (cfg, code); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific registration of lowlevel calls * * Register routines to register optimized lowlevel operations */ void mono_arch_register_lowlevel_calls (void) { } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific patching * @param[in] @cfg - Compilation control block * @param[in] @code - Start of code * @param[in] @target - Target of patch * @param[in] @relo - Relocation type * * Perform patching action */ static void emit_patch_full (MonoCompile *cfg, MonoJumpInfo *ji, guint8 *code, gpointer target, int relo) { guint8 *ip = ji->ip.i + code; switch (relo) { case MONO_R_S390_RELINS : target = S390_RELATIVE(target, ip); ip += 2; s390_patch_rel (ip, (guint64) target); break; case MONO_R_S390_THUNKED : if (cfg) create_thunk(cfg, ip, code, target); else update_thunk(cfg, code, target); break; case MONO_R_S390_DIRECT : S390_EMIT_CALL (ip, target); break; case MONO_R_S390_ADDR : s390_patch_addr (ip, (guint64) target); break; case MONO_R_S390_SWITCH : S390_EMIT_LOAD (ip, target); break; case MONO_R_S390_REL : target = S390_RELATIVE(target, ip); s390_patch_rel (ip, (guint64) target); break; default : g_assert_not_reached(); } } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific patching of instructions and data * * @param[in] @cfg - Compile control block * @param[in] @method - Current method * @param[in] @code - Current code block * @param[in] @ji - Jump information * @param[in] @target - Target of patch * * Process the patch data created during the instruction build process. * This resolves jumps, calls, variables etc. 
*/ void mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target) { switch (ji->type) { case MONO_PATCH_INFO_IP: case MONO_PATCH_INFO_LDSTR: case MONO_PATCH_INFO_TYPE_FROM_HANDLE: case MONO_PATCH_INFO_LDTOKEN: case MONO_PATCH_INFO_EXC: emit_patch_full (cfg, ji, code, target, MONO_R_S390_ADDR); break; case MONO_PATCH_INFO_BB: case MONO_PATCH_INFO_JIT_ICALL_ADDR: case MONO_PATCH_INFO_JIT_ICALL_ID: case MONO_PATCH_INFO_METHOD: emit_patch_full (cfg, ji, code, target, ji->relocation); break; case MONO_PATCH_INFO_METHOD_JUMP: case MONO_PATCH_INFO_RGCTX_FETCH: case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR: case MONO_PATCH_INFO_ABS: emit_patch_full (cfg, ji, code, target, MONO_R_S390_THUNKED); break; case MONO_PATCH_INFO_SWITCH: emit_patch_full(cfg, ji, code, target, MONO_R_S390_SWITCH); break; case MONO_PATCH_INFO_METHODCONST: case MONO_PATCH_INFO_CLASS: case MONO_PATCH_INFO_IMAGE: case MONO_PATCH_INFO_FIELD: case MONO_PATCH_INFO_IID: case MONO_PATCH_INFO_EXC_NAME: emit_patch_full(cfg, ji, code, target, MONO_R_S390_REL); break; case MONO_PATCH_INFO_NONE: break; default: emit_patch_full (cfg, ji, code, target, MONO_R_S390_RELINS); } } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific prolog generation * * @param[in] @cfg - Compile control block * @returns Location of code generated * * Create the instruction sequence for entry into a method: * - Determine stack size * - Save preserved registers * - Unload parameters * - Determine if LMF needs saving and generate that sequence */ guint8 * mono_arch_emit_prolog (MonoCompile *cfg) { MonoMethod *method = cfg->method; MonoBasicBlock *bb; MonoMethodSignature *sig; MonoInst *inst; long alloc_size, pos, max_offset, i, cfa_offset = 0; guint8 *code; guint32 size; CallInfo *cinfo; int argsClobbered = 0, lmfOffset, fpOffset = 0; cfg->code_size = 512; if (method->save_lmf) cfg->code_size += 200; cfg->native_code = code = (guint8 *) g_malloc (cfg->code_size); /** * Create unwind information */ mono_emit_unwind_op_def_cfa (cfg, code, STK_BASE, S390_CFA_OFFSET); s390_stmg (code, s390_r6, s390_r15, STK_BASE, S390_REG_SAVE_OFFSET); emit_unwind_regs(cfg, code, s390_r6, s390_r15, S390_REG_SAVE_OFFSET - S390_CFA_OFFSET); if (cfg->arch.bkchain_reg != -1) s390_lgr (code, cfg->arch.bkchain_reg, STK_BASE); /* * If there are local allocations then R11 becomes the frame register */ if (cfg->flags & MONO_CFG_HAS_ALLOCA) { cfg->used_int_regs |= 1 << s390_r11; } /* * Check if FP registers need preserving */ if ((cfg->arch.used_fp_regs & S390_FP_SAVE_MASK) != 0) { for (int i = s390_f8; i <= s390_f15; i++) { if (cfg->arch.used_fp_regs & (1 << i)) fpOffset += sizeof(double); } fpOffset = S390_ALIGN(fpOffset, sizeof(double)); } cfg->arch.fpSize = fpOffset; /* * Calculate stack requirements */ alloc_size = cfg->stack_offset + fpOffset; cfg->stack_usage = cfa_offset = alloc_size; s390_lgr (code, s390_r11, STK_BASE); if (s390_is_imm16 (alloc_size)) { s390_aghi (code, STK_BASE, -alloc_size); } else if (s390_is_imm32 (alloc_size)) { s390_agfi (code, STK_BASE, -alloc_size); } else { int stackSize = alloc_size; while (stackSize > INT_MAX) { s390_agfi (code, STK_BASE, -INT_MAX); stackSize -= INT_MAX; } s390_agfi (code, STK_BASE, -stackSize); } mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size + S390_CFA_OFFSET); s390_stg (code, s390_r11, 0, STK_BASE, 0); if (fpOffset > 0) { int stkOffset = 0; s390_lgr (code, s390_r1, s390_r11); s390_aghi (code, s390_r1, -fpOffset); for (int
i = s390_f8; i <= s390_f15; i++) { if (cfg->arch.used_fp_regs & (1 << i)) { s390_std (code, i, 0, s390_r1, stkOffset); emit_unwind_regs(cfg, code, 16+i, 16+i, stkOffset+fpOffset - S390_CFA_OFFSET); stkOffset += sizeof(double); } } } if (cfg->frame_reg != STK_BASE) { s390_lgr (code, s390_r11, STK_BASE); mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg); } /* store runtime generic context */ if (cfg->rgctx_var) { g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET); s390_stg (code, MONO_ARCH_RGCTX_REG, 0, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset); } #if 0 char *methodName = getenv("MONO_TRACE_METHOD"); if (methodName != NULL) { printf("ns: %s k: %s m: %s\n",method->klass->name_space,method->klass->name,method->name);fflush(stdout); // Tests:set_ip //if ((strcmp(method->klass->name_space,"") == 0) && // (strcmp(method->klass->name,"Tests") == 0) && // (strcmp(method->name, "set_ip") == 0)) { // (strcmp("CancellationToken,TaskCreationOptions,TaskContinuationOptions,TaskScheduler",mono_signature_get_desc(method->signature, FALSE)) != 0)) { if ((strcmp(method->name, methodName) == 0)) { printf("SIGNATURE: %s\n",mono_signature_get_desc(method->signature, FALSE)); fflush(stdout); s390_j (code, 0); } } #endif /* compute max_offset in order to use short forward jumps * we always do it on s390 because the immediate displacement * for jumps is too small */ max_offset = 0; for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins; bb->max_offset = max_offset; MONO_BB_FOR_EACH_INS (bb, ins) max_offset += ins_get_size (ins->opcode); } /* load arguments allocated to register from the stack */ sig = mono_method_signature_internal (method); pos = 0; cinfo = cfg->arch.cinfo; if (cinfo->struct_ret) { ArgInfo *ainfo = &cinfo->ret; inst = cfg->vret_addr; inst->backend.size = ainfo->vtsize; if (inst->opcode == OP_REGVAR) s390_lgr (code, inst->dreg, ainfo->reg); else s390_stg (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); } /** * Process the arguments passed to the method */ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; inst = cfg->args [pos]; if (inst->opcode == OP_VTARG_ADDR) inst = inst->inst_left; if (inst->opcode == OP_REGVAR) { if (ainfo->regtype == RegTypeGeneral) s390_lgr (code, inst->dreg, ainfo->reg); else if (ainfo->regtype == RegTypeFP) { if (inst->dreg != ainfo->reg) { s390_ldr (code, inst->dreg, ainfo->reg); } } else if (ainfo->regtype == RegTypeFPR4) { if (!cfg->r4fp) s390_ledbr (code, inst->dreg, ainfo->reg); } else if (ainfo->regtype == RegTypeBase) { s390_lgr (code, s390_r13, STK_BASE); s390_aghi (code, s390_r13, alloc_size); s390_lg (code, inst->dreg, 0, s390_r13, ainfo->offset); } else g_assert_not_reached (); if (cfg->verbose_level > 2) g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg)); } else { if (ainfo->regtype == RegTypeGeneral) { if (!((ainfo->reg >= 2) && (ainfo->reg <= 6))) g_assert_not_reached(); switch (ainfo->size) { case 1: s390_stc (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); break; case 2: s390_sth (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); break; case 4: s390_st (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); break; case 8: s390_stg (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); break; } } else if (ainfo->regtype == RegTypeBase) { } else if (ainfo->regtype == RegTypeFP) { s390_std (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); } else if (ainfo->regtype == RegTypeFPR4) { 
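/* RegTypeFPR4: store the single-precision argument to its home slot */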
s390_ste (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); } else if (ainfo->regtype == RegTypeStructByVal) { int doffset = inst->inst_offset; size = (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE && sig->pinvoke && !sig->marshalling_disabled ? mono_class_native_size(mono_class_from_mono_type_internal (inst->inst_vtype), NULL) : ainfo->size); switch (size) { case 1: if (ainfo->reg != STK_BASE) s390_stc (code, ainfo->reg, 0, inst->inst_basereg, doffset); break; case 2: if (ainfo->reg != STK_BASE) s390_sth (code, ainfo->reg, 0, inst->inst_basereg, doffset); break; case 4: if (ainfo->reg != STK_BASE) s390_st (code, ainfo->reg, 0, inst->inst_basereg, doffset); break; case 8: if (ainfo->reg != STK_BASE) s390_stg (code, ainfo->reg, 0, inst->inst_basereg, doffset); break; default: if (ainfo->reg != STK_BASE) s390_stg (code, ainfo->reg, 0, STK_BASE, doffset); } } else if (ainfo->regtype == RegTypeStructByAddr) { s390_stg (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); } else g_assert_not_reached (); } pos++; } if (method->save_lmf) { /** * Build the MonoLMF structure on the stack - see mini-s390x.h */ lmfOffset = alloc_size - sizeof(MonoLMF); s390_lgr (code, s390_r13, cfg->frame_reg); s390_aghi (code, s390_r13, lmfOffset); /* * Preserve the parameter registers while we fix up the lmf */ s390_stmg (code, s390_r2, s390_r6, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, pregs)); for (i = 0; i < 5; i++) mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, pregs) + i * sizeof(gulong), SLOT_NOREF); /* * On return from this call r2 have the address of the &lmf */ code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern)); /* * Set lmf.lmf_addr = jit_tls->lmf */ s390_stg (code, s390_r2, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, lmf_addr)); mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr), SLOT_NOREF); /* * Get current lmf */ s390_lg (code, s390_r0, 0, s390_r2, 0); /* * Set our lmf as the current lmf */ s390_stg (code, s390_r13, 0, s390_r2, 0); /* * Have our lmf.previous_lmf point to the last lmf */ s390_stg (code, s390_r0, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, previous_lmf)); mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF); /* * Save method info */ S390_SET (code, s390_r1, method); s390_stg (code, s390_r1, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, method)); mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, method), SLOT_NOREF); /* * Save the current IP */ s390_stg (code, STK_BASE, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, ebp)); s390_basr (code, s390_r1, 0); s390_stg (code, s390_r1, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, eip)); mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, ebp), SLOT_NOREF); mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, eip), SLOT_NOREF); /* * Save general and floating point registers */ s390_stmg (code, s390_r2, s390_r12, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, gregs) + 2 * sizeof(gulong)); for (i = 0; i < 11; i++) mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, gregs) + i * sizeof(gulong), SLOT_NOREF); fpOffset = lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, fregs); for (i = 0; i < 16; i++) { s390_std (code, i, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, fregs) + i * sizeof(gulong)); mini_gc_set_slot_type_from_fp (cfg, fpOffset, SLOT_NOREF); fpOffset += sizeof(double); } /* 
     * Restore the parameter registers now that we've set up the lmf
     */
    s390_lmg (code, s390_r2, s390_r6, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, pregs));
    }

    if (cfg->method->save_lmf)
        argsClobbered = TRUE;

    /*
     * Optimize the common case of the first bblock making a call with the same
     * arguments as the method. This works because the arguments are still in their
     * original argument registers.
     */
    if (!argsClobbered) {
        MonoBasicBlock *first_bb = cfg->bb_entry;
        MonoInst *next;
        int filter = FILTER_IL_SEQ_POINT;

        next = mono_bb_first_inst (first_bb, filter);
        if (!next && first_bb->next_bb) {
            first_bb = first_bb->next_bb;
            next = mono_bb_first_inst (first_bb, filter);
        }

        if (first_bb->in_count > 1)
            next = NULL;

        for (i = 0; next && i < sig->param_count + sig->hasthis; ++i) {
            ArgInfo *ainfo = cinfo->args + i;
            gboolean match = FALSE;

            inst = cfg->args [i];
            if (inst->opcode != OP_REGVAR) {
                switch (ainfo->regtype) {
                case RegTypeGeneral: {
                    if (((next->opcode == OP_LOAD_MEMBASE) ||
                         (next->opcode == OP_LOADI4_MEMBASE)) &&
                        next->inst_basereg == inst->inst_basereg &&
                        next->inst_offset == inst->inst_offset) {
                        if (next->dreg == ainfo->reg) {
                            NULLIFY_INS (next);
                            match = TRUE;
                        } else {
                            next->opcode = OP_MOVE;
                            next->sreg1 = ainfo->reg;
                            /* Only continue if the instruction doesn't change argument regs */
                            if (next->dreg == ainfo->reg)
                                match = TRUE;
                        }
                    }
                    break;
                }
                default:
                    break;
                }
            } else {
                /* Argument allocated to (non-volatile) register */
                switch (ainfo->regtype) {
                case RegTypeGeneral:
                    if (next->opcode == OP_MOVE &&
                        next->sreg1 == inst->dreg && next->dreg == ainfo->reg) {
                        NULLIFY_INS (next);
                        match = TRUE;
                    }
                    break;
                default:
                    break;
                }
            }

            if (match) {
                next = mono_inst_next (next, filter);
                if (!next)
                    break;
            }
        }
    }

    if (cfg->gen_sdb_seq_points) {
        MonoInst *seq;

        /* Initialize ss_tramp_var */
        seq = cfg->arch.ss_tramp_var;
        g_assert (seq->opcode == OP_REGOFFSET);
        S390_SET (code, s390_r1, (guint64) &ss_trampoline);
        s390_stg (code, s390_r1, 0, seq->inst_basereg, seq->inst_offset);

        /* Initialize bp_tramp_var */
        seq = cfg->arch.bp_tramp_var;
        g_assert (seq->opcode == OP_REGOFFSET);
        S390_SET (code, s390_r1, (guint64) &bp_trampoline);
        s390_stg (code, s390_r1, 0, seq->inst_basereg, seq->inst_offset);
    }

    set_code_cursor (cfg, code);

    return code;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific epilog generation
 *
 * @param[in] @cfg - Compile control block
 *
 * Create the instruction sequence for exit from a method
 */

void
mono_arch_emit_epilog (MonoCompile *cfg)
{
    MonoMethod *method = cfg->method;
    guint8 *code;
    int max_epilog_size = 96, i;
    int fpOffset = 0;

    if (cfg->method->save_lmf)
        max_epilog_size += 128;

    code = realloc_code (cfg, max_epilog_size);

    cfg->has_unwind_info_for_epilog = TRUE;

    /* Mark the start of the epilog */
    mono_emit_unwind_op_mark_loc (cfg, code, 0);

    /* Save the unwind state which is needed by the out-of-line code */
    mono_emit_unwind_op_remember_state (cfg, code);

    if (method->save_lmf)
        restoreLMF(code, cfg->frame_reg, cfg->stack_usage);

    code = backUpStackPtr(cfg, code);
    mono_emit_unwind_op_def_cfa (cfg, code, STK_BASE, S390_CFA_OFFSET);
    mono_emit_unwind_op_same_value (cfg, code, STK_BASE);

    if (cfg->arch.fpSize != 0) {
        fpOffset = -cfg->arch.fpSize;
        for (int i=8; i<16; i++) {
            if (cfg->arch.used_fp_regs & (1 << i)) {
                s390_ldy (code, i, 0, STK_BASE, fpOffset);
                mono_emit_unwind_op_same_value (cfg, code, 16+i);
                fpOffset += sizeof(double);
            }
        }
    }

    s390_lmg (code, s390_r6, s390_r14, STK_BASE, S390_REG_SAVE_OFFSET);
    for (i = s390_r6; i < s390_r15; i++)
        mono_emit_unwind_op_same_value
(cfg, code, i); s390_br (code, s390_r14); /* Restore the unwind state to be the same as before the epilog */ mono_emit_unwind_op_restore_state (cfg, code); /* Round up for start of any thunk entries */ code = (guint8 *) ((((uintptr_t) code + 7) >> 3) << 3); set_code_cursor (cfg, code); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific exception emission * * @param[in] @cfg - Compile control block * * Create the instruction sequence for exception handling */ void mono_arch_emit_exceptions (MonoCompile *cfg) { MonoJumpInfo *patch_info; guint8 *code; int nThrows = 0, exc_count = 0, iExc; guint32 code_size; MonoClass *exc_classes [MAX_EXC]; guint8 *exc_throw_start [MAX_EXC]; for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { if (patch_info->type == MONO_PATCH_INFO_EXC) exc_count++; } code_size = exc_count * 48; code = realloc_code (cfg, code_size); /* * Add code to raise exceptions */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { switch (patch_info->type) { case MONO_PATCH_INFO_EXC: { guint8 *ip = patch_info->ip.i + cfg->native_code; MonoClass *exc_class; /* * Patch the branch in epilog to come here */ s390_patch_rel (ip + 2, (guint64) S390_RELATIVE(code,ip)); exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name); for (iExc = 0; iExc < nThrows; ++iExc) if (exc_classes [iExc] == exc_class) break; if (iExc < nThrows) { s390_jcl (code, S390_CC_UN, (guint64) exc_throw_start [iExc]); patch_info->type = MONO_PATCH_INFO_NONE; } else { if (nThrows < MAX_EXC) { exc_classes [nThrows] = exc_class; exc_throw_start [nThrows] = code; } /* * Patch the parameter passed to the handler */ S390_SET (code, s390_r2, m_class_get_type_token (exc_class)); /* * Load return address & parameter register */ s390_larl (code, s390_r14, (guint64)S390_RELATIVE((patch_info->ip.i + cfg->native_code + 8), code)); /* * Reuse the current patch to set the jump */ patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID; patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception; patch_info->ip.i = code - cfg->native_code; patch_info->relocation = MONO_R_S390_THUNKED; S390_BR_TEMPLATE (code, s390_r1); cfg->thunk_area += THUNK_SIZE; } break; } default: /* do nothing */ break; } } /* Round up for start of any thunk entries */ code = (guint8 *) ((((uintptr_t) code + 7) >> 3) << 3); set_code_cursor (cfg, code); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific finishing of initialization * * Perform any architectural-specific operations at the conclusion of * the initialization phase */ void mono_arch_finish_init (void) { } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific instruction emission for method * * @param[in] @cfg - Compile Control block * @param[in] @cmethod - Current method * @param[in] @fsig - Method signature * @param[in] @args - Arguments to method * @returns Instruction(s) required for architecture * * Provide any architectural shortcuts for specific methods. 
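 *
 * Illustrative example (it mirrors the code that follows, and adds no new
 * behavior): a call to System.Math.Sqrt(double) is replaced by a single
 * OP_SQRT instruction instead of a managed call:
 *
 *   MONO_INST_NEW (cfg, ins, OP_SQRT);
 *   ins->type  = STACK_R8;
 *   ins->dreg  = mono_alloc_freg (cfg);
 *   ins->sreg1 = args [0]->dreg;
 *   MONO_ADD_INS (cfg->cbb, ins);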
 */

MonoInst *
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
    MonoInst *ins = NULL;
    int opcode = 0;
    MonoStackType stack_type = STACK_R8;

    if (cmethod->klass == mono_class_try_get_math_class ()) {
        // unary double
        if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) {
            if (strcmp (cmethod->name, "Abs") == 0) {
                opcode = OP_ABS;
            } else if (strcmp (cmethod->name, "Ceiling") == 0) {
                opcode = OP_CEIL;
            } else if (strcmp (cmethod->name, "Floor") == 0) {
                opcode = OP_FLOOR;
            } else if (strcmp (cmethod->name, "Round") == 0) {
                opcode = OP_ROUND;
            } else if (strcmp (cmethod->name, "Sqrt") == 0) {
                opcode = OP_SQRT;
            } else if (strcmp (cmethod->name, "Truncate") == 0) {
                opcode = OP_TRUNC;
            }
        }
        // unary float (overloaded)
        else if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R4) {
            if (strcmp (cmethod->name, "Abs") == 0) {
                if (cfg->r4fp) {
                    opcode = OP_ABSF;
                    stack_type = STACK_R4;
                } else {
                    opcode = OP_ABS;
                }
            }
        }
        // binary double
        else if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R8 && fsig->params [1]->type == MONO_TYPE_R8) {
            if (strcmp (cmethod->name, "CopySign") == 0) {
                opcode = OP_FCOPYSIGN;
            }
        }
    } else if (cmethod->klass == mono_class_try_get_mathf_class ()) {
        if (fsig->param_count == 1) {
            stack_type = STACK_R4;
            if (strcmp (cmethod->name, "Abs") == 0) {
                if (cfg->r4fp) {
                    opcode = OP_ABSF;
                    stack_type = STACK_R4;
                } else {
                    opcode = OP_ABS;
                }
            } else if (strcmp (cmethod->name, "Ceiling") == 0) {
                if (cfg->r4fp) {
                    opcode = OP_CEILF;
                    stack_type = STACK_R4;
                } else {
                    opcode = OP_CEIL;
                }
            } else if (strcmp (cmethod->name, "Floor") == 0) {
                if (cfg->r4fp) {
                    opcode = OP_FLOORF;
                    stack_type = STACK_R4;
                } else {
                    opcode = OP_FLOOR;
                }
            } else if (strcmp (cmethod->name, "Sqrt") == 0) {
                if (cfg->r4fp) {
                    opcode = OP_SQRTF;
                    stack_type = STACK_R4;
                } else {
                    opcode = OP_SQRT;
                }
            } else if (strcmp (cmethod->name, "Truncate") == 0) {
                /* Pick the opcode matching the representation, as the
                   other MathF cases above do */
                if (cfg->r4fp) {
                    opcode = OP_TRUNCF;
                    stack_type = STACK_R4;
                } else {
                    opcode = OP_TRUNC;
                }
            }
        }
    }

    if (opcode) {
        MONO_INST_NEW (cfg, ins, opcode);
        ins->type = stack_type;
        ins->dreg = mono_alloc_freg (cfg);
        ins->sreg1 = args [0]->dreg;
        if (fsig->param_count > 1) {
            ins->sreg2 = args [1]->dreg;
        }
        g_assert (fsig->param_count <= 2);
        MONO_ADD_INS (cfg->cbb, ins);
    }

    return ins;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Decompose opcode into a System z operation
 *
 * @param[in] @cfg - Compile Control block
 * @param[in] @ins - Mono Instruction
 *
 * Substitute a System z instruction for a Mono operation.
 */

void
mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
{
    /*
     * Have to rename these to avoid being decomposed normally, since the normal
     * decomposition does not work on S390.
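     * For example, OP_IADD_OVF is renamed to OP_S390_IADD_OVF below so that
     * instruction selection can pick an s390x-specific pattern which tests
     * the condition code for overflow (assumption about the rationale; the
     * renames themselves are exactly those in the switch that follows).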
     */
    switch (ins->opcode) {
    case OP_ISUB_OVF:
        ins->opcode = OP_S390_ISUB_OVF;
        break;
    case OP_ISUB_OVF_UN:
        ins->opcode = OP_S390_ISUB_OVF_UN;
        break;
    case OP_IADD_OVF:
        ins->opcode = OP_S390_IADD_OVF;
        break;
    case OP_IADD_OVF_UN:
        ins->opcode = OP_S390_IADD_OVF_UN;
        break;
    case OP_LADD_OVF:
        ins->opcode = OP_S390_LADD_OVF;
        break;
    case OP_LADD_OVF_UN:
        ins->opcode = OP_S390_LADD_OVF_UN;
        break;
    case OP_LSUB_OVF:
        ins->opcode = OP_S390_LSUB_OVF;
        break;
    case OP_LSUB_OVF_UN:
        ins->opcode = OP_S390_LSUB_OVF_UN;
        break;
    default:
        break;
    }
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Determine the cost of allocating a variable
 *
 * @param[in] @cfg - Compile Control block
 * @param[in] @vmv - Mono Method Variable
 * @returns Cost (hardcoded on s390x to 2)
 *
 * Determine the cost, in the number of memory references, of the action
 * of allocating the variable VMV into a register during global register
 * allocation.
 *
 */

guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
    /* FIXME: */
    return 2;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific register window flushing
 *
 * Not applicable for s390x so we just do nothing
 *
 */

void
mono_arch_flush_register_windows (void)
{
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific check if value may be immediate
 *
 * @param[in] @opcode - Operation code
 * @param[in] @imm_opcode - Immediate operation code
 * @param[in] @imm - Value to be examined
 * @returns True if it is a valid immediate value
 *
 * Determine if operand qualifies as an immediate value. For s390x
 * this is a signed 32-bit value in the range -2**31 to 2**31-1.
 *
 */

gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
    return s390_is_imm32 (imm);
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific patch offset value for AOT
 *
 * @param[in] @code - Location of code to check
 * @returns Offset
 *
 * Dummy entry point if/when s390x supports AOT.
 */

guint32
mono_arch_get_patch_offset (guint8 *code)
{
    return 0;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific returning of register from context
 *
 * @param[in] @ctx - Mono context
 * @param[in] @reg - Register number to be returned
 * @returns Contents of the register from the context
 *
 * Return a register from the context.
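 *
 * Usage sketch (illustrative only; the caller and register choice are
 * examples, not taken from this file):
 *
 *   host_mgreg_t sp = mono_arch_context_get_int_reg (ctx, s390_r15);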
*/ host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg) { return ctx->uc_mcontext.gregs[reg]; } host_mgreg_t* mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg) { return &ctx->uc_mcontext.gregs[reg]; } /*========================= End of Function ========================*/ /** * * @brief Architectural specific setting of a register in the context * * @param[in] @ctx - Mono context * @param[in] @reg - Register number to be returned * @param[in] @val - Value to be set * * Set the specified register in the context with the value passed */ void mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val) { ctx->uc_mcontext.gregs[reg] = val; } /*========================= End of Function ========================*/ /** * * @brief Architectural specific returning of the "this" value from context * * @param[in] @ctx - Mono context * @param[in] @code - Current location * @returns Pointer to the "this" object * * Extract register 2 from the context as for s390x this is where the * this parameter is passed */ gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code) { return (gpointer) regs [s390_r2]; } /*========================= End of Function ========================*/ /** * * @brief Delegation trampoline processing * * @param[in] @info - Trampoline information * @param[in] @has_target - Use target from delegation * @param[in] @param_count - Count of parameters * @param[in] @aot - AOT indicator * @returns Next instruction location * * Process the delegation trampolines */ static guint8 * get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, MonoMethodSignature *sig, gboolean aot) { guint8 *code, *start; if (has_target) { int size = 32; start = code = (guint8 *) mono_global_codeman_reserve (size); /* Replace the this argument with the target */ s390_lg (code, s390_r1, 0, s390_r2, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); s390_lg (code, s390_r2, 0, s390_r2, MONO_STRUCT_OFFSET (MonoDelegate, target)); s390_br (code, s390_r1); g_assert ((code - start) <= size); mono_arch_flush_icache (start, size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); } else { int size, i, offset = S390_MINIMAL_STACK_SIZE, iReg = s390_r2; CallInfo *cinfo = get_call_info (NULL, sig); size = 32 + sig->param_count * 8; start = code = (guint8 *) mono_global_codeman_reserve (size); s390_lg (code, s390_r1, 0, s390_r2, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); /* slide down the arguments */ for (i = 0; i < sig->param_count; ++i) { switch(cinfo->args[i].regtype) { case RegTypeGeneral : if (iReg < S390_LAST_ARG_REG) { s390_lgr (code, iReg, (iReg + 1)); } else { s390_lg (code, iReg, 0, STK_BASE, offset); } iReg++; break; default : s390_mvc (code, sizeof(uintptr_t), STK_BASE, offset, STK_BASE, offset+sizeof(uintptr_t)); offset += sizeof(uintptr_t); } } s390_br (code, s390_r1); g_free (cinfo); g_assert ((code - start) <= size); mono_arch_flush_icache (start, size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); } if (has_target) { *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, NULL); } else { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count); *info = mono_tramp_info_create (name, start, code - start, NULL, NULL); g_free (name); } return start; } /*========================= End of Function ========================*/ /** * * @brief 
Architecture-specific delegation trampolines processing
 *
 * @returns List of trampolines
 *
 * Return a list of MonoTrampInfo structures for the delegate invoke impl trampolines.
 */

GSList*
mono_arch_get_delegate_invoke_impls (void)
{
    GSList *res = NULL;
    MonoTrampInfo *info;

    get_delegate_invoke_impl (&info, TRUE, 0, TRUE);
    res = g_slist_prepend (res, info);

#if 0
    for (int i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
        get_delegate_invoke_impl (&info, FALSE, NULL, TRUE);
        res = g_slist_prepend (res, info);
    }
#endif

    return res;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific delegation trampoline processing
 *
 * @param[in] @sig - Method signature
 * @param[in] @has_target - Whether delegation contains a target
 * @returns Trampoline
 *
 * Return a pointer to a delegation trampoline
 */

gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
    guint8 *code, *start;

    if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
        return NULL;

    /* FIXME: Support more cases */
    if (MONO_TYPE_ISSTRUCT (mini_get_underlying_type (sig->ret)))
        return NULL;

    if (has_target) {
        static guint8* cached = NULL;

        if (cached)
            return cached;

        if (mono_ee_features.use_aot_trampolines) {
            start = (guint8 *) mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
        } else {
            MonoTrampInfo *info;
            start = get_delegate_invoke_impl (&info, TRUE, sig, FALSE);
            mono_tramp_info_register (info, NULL);
        }
        mono_memory_barrier ();
        cached = start;
    } else {
        static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
        int i;

        if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
            return NULL;
        for (i = 0; i < sig->param_count; ++i)
            if (!mono_is_regsize_var (sig->params [i]))
                return NULL;

        code = cache [sig->param_count];
        if (code)
            return code;

        if (mono_ee_features.use_aot_trampolines) {
            char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
            start = (guint8 *) mono_aot_get_trampoline (name);
            g_free (name);
        } else {
            MonoTrampInfo *info;
            start = get_delegate_invoke_impl (&info, FALSE, sig, FALSE);
            mono_tramp_info_register (info, NULL);
        }
        mono_memory_barrier ();
        cache [sig->param_count] = start;
    }
    return start;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific delegation virtual trampoline processing
 *
 * @param[in] @sig - Method signature
 * @param[in] @method - Method
 * @param[in] @offset - Offset into vtable
 * @param[in] @load_imt_reg - Whether to load the IMT register
 * @returns Trampoline
 *
 * Return a pointer to a delegation virtual trampoline
 */

gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
    guint8 *code, *start;
    int size = 40;

    start = code = (guint8 *) mono_global_codeman_reserve (size);

    /*
     * Replace the "this" argument with the target
     */
    s390_lgr (code, s390_r1, s390_r2);
    s390_lg (code, s390_r2, 0, s390_r1, MONO_STRUCT_OFFSET(MonoDelegate, target));

    /*
     * Load the IMT register, if needed
     */
    if (load_imt_reg) {
        s390_lg (code, MONO_ARCH_IMT_REG, 0, s390_r1, MONO_STRUCT_OFFSET(MonoDelegate, method));
    }

    /*
     * Load the vTable
     */
    s390_lg (code, s390_r1, 0, s390_r2, MONO_STRUCT_OFFSET(MonoObject, vtable));
    if (offset != 0) {
        s390_agfi(code, s390_r1, offset);
    }
    s390_lg (code, s390_r1, 0, s390_r1, 0);
    s390_br (code, s390_r1);

    mono_arch_flush_icache (start, code - start);
    MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));

    return(start);
}
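
/*
 * Illustrative sketch (not part of the original source): the trampoline
 * built above corresponds roughly to
 *
 *     lgr   %r1,%r2                  # save the MonoDelegate *
 *     lg    %r2,target(%r1)          # this = delegate->target
 *     lg    %rI,method(%r1)          # optionally load the IMT register
 *     lg    %r1,vtable(%r2)          # fetch the object's vtable
 *     agfi  %r1,offset               # address the requested slot
 *     lg    %r1,0(%r1)               # load the method address
 *     br    %r1                      # branch to it
 *
 * where %rI stands for MONO_ARCH_IMT_REG.
 */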
/*========================= End of Function ========================*/ /** * * @brief Architecture-specific build of IMT trampoline * * @param[in] @vtable - Mono VTable * @param[in] @domain - Mono Domain * @param[in] @imt_entries - List of IMT check items * @param[in] @count - Count of items * @param[in] @fail_tramp - Pointer to a failure trampoline * @returns Trampoline * * Return a pointer to an IMT trampoline */ gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp) { int i; int size = 0; guchar *code, *start; MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->is_equals) { if (item->check_target_idx) { if (!item->compare_done) item->chunk_size += CMP_SIZE + JUMP_SIZE; if (item->has_target_code) item->chunk_size += BR_SIZE + JUMP_SIZE + LOADCON_SIZE; else item->chunk_size += BR_SIZE + JUMP_SIZE + LOADCON_SIZE + LOAD_SIZE; } else { if (fail_tramp) { item->chunk_size += CMP_SIZE + 2 * BR_SIZE + JUMP_SIZE + 2 * LOADCON_SIZE; if (!item->has_target_code) item->chunk_size += LOAD_SIZE; } else { item->chunk_size += LOADCON_SIZE + LOAD_SIZE + BR_SIZE; #if ENABLE_WRONG_METHOD_CHECK item->chunk_size += CMP_SIZE + JUMP_SIZE; #endif } } } else { item->chunk_size += CMP_SIZE + JUMP_SIZE; imt_entries [item->check_target_idx]->compare_done = TRUE; } size += item->chunk_size; } if (fail_tramp) { code = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, size); } else { code = mono_mem_manager_code_reserve (mem_manager, size); } start = code; for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; item->code_target = (guint8 *) code; if (item->is_equals) { if (item->check_target_idx) { if (!item->compare_done) { S390_SET (code, s390_r0, item->key); s390_cgr (code, s390_r0, MONO_ARCH_IMT_REG); } item->jmp_code = (guint8*) code; s390_jcl (code, S390_CC_NE, 0); if (item->has_target_code) { S390_SET (code, s390_r1, item->value.target_code); } else { S390_SET (code, s390_r1, (&(vtable->vtable [item->value.vtable_slot]))); s390_lg (code, s390_r1, 0, s390_r1, 0); } s390_br (code, s390_r1); } else { if (fail_tramp) { gint64 target; S390_SET (code, s390_r0, item->key); s390_cgr (code, s390_r0, MONO_ARCH_IMT_REG); item->jmp_code = (guint8*) code; s390_jcl (code, S390_CC_NE, 0); if (item->has_target_code) { S390_SET (code, s390_r1, item->value.target_code); } else { g_assert (vtable); S390_SET (code, s390_r1, (&(vtable->vtable [item->value.vtable_slot]))); s390_lg (code, s390_r1, 0, s390_r1, 0); } s390_br (code, s390_r1); target = (gint64) S390_RELATIVE(code, item->jmp_code); s390_patch_rel(item->jmp_code+2, target); S390_SET (code, s390_r1, fail_tramp); s390_br (code, s390_r1); item->jmp_code = NULL; } else { /* enable the commented code to assert on wrong method */ #if ENABLE_WRONG_METHOD_CHECK g_assert_not_reached (); #endif S390_SET (code, s390_r1, (&(vtable->vtable [item->value.vtable_slot]))); s390_lg (code, s390_r1, 0, s390_r1, 0); s390_br (code, s390_r1); } } } else { S390_SET (code, s390_r0, item->key); s390_cgr (code, MONO_ARCH_IMT_REG, s390_r0); item->jmp_code = (guint8 *) code; s390_jcl (code, S390_CC_GE, 0); } } /* * patch the branches to get to the target items */ for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->jmp_code) { if (item->check_target_idx) { gint64 offset; offset = (gint64) S390_RELATIVE(imt_entries [item->check_target_idx]->code_target, item->jmp_code); 
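/* item->jmp_code + 2 skips the opcode and condition mask of the 6-byte
   branch-relative-long emitted by s390_jcl, so only its 32-bit
   displacement field is rewritten */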
                s390_patch_rel ((guchar *) item->jmp_code + 2, (guint64) offset);
            }
        }
    }

    mono_arch_flush_icache ((guint8*)start, (code - start));
    MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL));

    if (!fail_tramp)
        UnlockedAdd (&mono_stats.imt_trampolines_size, code - start);

    g_assert (code - start <= size);

    mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), mem_manager);

    return (start);
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific return of pointer to IMT method
 *
 * @param[in] @regs - Context registers
 * @param[in] @code - Current location
 * @returns Pointer to IMT method
 *
 * Extract the value of the IMT register from the context
 */

MonoMethod*
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
    return ((MonoMethod *) regs [MONO_ARCH_IMT_REG]);
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific return of pointer to static call vtable
 *
 * @param[in] @regs - Context registers
 * @param[in] @code - Current location
 * @returns Pointer to static call vtable
 *
 * Extract the value of the RGCTX register from the context which
 * points to the static call vtable.
 */

MonoVTable*
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
    return (MonoVTable*)(gsize) regs [MONO_ARCH_RGCTX_REG];
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific return of unwind bytecode for DWARF CIE
 *
 * @returns Unwind byte code
 *
 * Returns the unwind bytecode for DWARF CIE
 */

GSList*
mono_arch_get_cie_program (void)
{
    GSList *l = NULL;

    mono_add_unwind_op_def_cfa (l, 0, 0, STK_BASE, S390_CFA_OFFSET);

    return(l);
}

/*========================= End of Function ========================*/

#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED

/**
 *
 * @brief Architecture-specific setting of a breakpoint
 *
 * @param[in] @ji - Mono JIT Information
 * @param[in] @ip - Instruction pointer
 *
 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 */

void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
    guint8 *bp = ip;

    /* IP should point to a LGHI R1,0 */
    g_assert (bp[0] == 0xa7);

    /* Replace it with a LGHI R1,1 */
    s390_lghi (bp, s390_r1, 1);
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific clearing of a breakpoint
 *
 * @param[in] @ji - Mono JIT Information
 * @param[in] @ip - Instruction pointer
 *
 * Replace the breakpoint with a no-operation.
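 *
 * Together with mono_arch_set_breakpoint above, the seq-point site toggles
 * between two encodings (illustrative sketch; the 0xa7 byte asserted above
 * is the LGHI opcode):
 *
 *   breakpoint set:      lghi %r1,1   - seq-point code takes the trampoline
 *   breakpoint cleared:  lghi %r1,0   - seq-point code falls through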
 */

void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
    guint8 *bp = ip;

    /* IP should point to a LGHI R1,1 */
    g_assert (bp[0] == 0xa7);

    /* Replace it with a LGHI R1,0 */
    s390_lghi (bp, s390_r1, 0);
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific check if this is a breakpoint event
 *
 * @param[in] @info - Signal information
 * @param[in] @sigctx - Signal context
 * @returns True if this is a breakpoint event
 *
 * We use soft breakpoints so always return FALSE
 */

gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
    /* We use soft breakpoints on s390x */
    return FALSE;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific skip of a breakpoint
 *
 * @param[in] @ctx - Mono Context
 * @param[in] @ji - Mono JIT information
 *
 * We use soft breakpoints so this is a no-op
 */

void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
    g_assert_not_reached ();
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific start of single stepping
 *
 * Install the single-step trampoline to enable single stepping
 */

void
mono_arch_start_single_stepping (void)
{
    ss_trampoline = mini_get_single_step_trampoline();
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific stop of single stepping
 *
 * Clear the single-step trampoline to disable single stepping
 */

void
mono_arch_stop_single_stepping (void)
{
    ss_trampoline = NULL;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific check if single stepping event
 *
 * @param[in] @info - Signal information
 * @param[in] @sigctx - Signal context
 * @returns True if this is a single stepping event
 *
 * Return whether the machine state in sigctx corresponds to a single step event.
 * On s390x we use soft breakpoints so return FALSE
 */

gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
    /* We use soft breakpoints on s390x */
    return FALSE;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific skip of a single stepping event
 *
 * @param[in] @ctx - Mono Context
 *
 * Modify the ctx so the IP is placed after the single step trigger
 * instruction, so that the instruction is not executed again.
 * On s390x we use soft breakpoints so we shouldn't get here
 */

void
mono_arch_skip_single_step (MonoContext *ctx)
{
    g_assert_not_reached();
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific creation of sequence point information
 *
 * @param[in] @code - Current location pointer
 * @returns Sequence Point Information
 *
 * Return a pointer to a data structure which is used by the sequence
 * point implementation in AOTed code. A no-op on s390x until AOT is
 * ever supported.
*/ SeqPointInfo * mono_arch_get_seq_point_info (guint8 *code) { SeqPointInfo *info; MonoJitInfo *ji; MonoJitMemoryManager *jit_mm; jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code); jit_mm_unlock (jit_mm); if (!info) { ji = mini_jit_info_table_find (code); g_assert (ji); // FIXME: Optimize the size info = (SeqPointInfo *)g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size * sizeof (gpointer))); info->ss_tramp_addr = &ss_trampoline; jit_mm_lock (jit_mm); g_hash_table_insert (jit_mm->arch_seq_points, code, info); jit_mm_unlock (jit_mm); } return info; } /*========================= End of Function ========================*/ #endif /** * * @brief Architecture-specific check of supported operation codes * * @param[in] @opcode - Operation code to be checked * @returns True if operation code is supported * * Check if a mono operation is supported in hardware. */ gboolean mono_arch_opcode_supported (int opcode) { switch (opcode) { case OP_ATOMIC_ADD_I4: case OP_ATOMIC_ADD_I8: case OP_ATOMIC_EXCHANGE_I4: case OP_ATOMIC_EXCHANGE_I8: return TRUE; default: return FALSE; } } /*========================= End of Function ========================*/ #ifndef DISABLE_JIT /** * * @brief Architecture-specific check of tailcall support * * @param[in] @cfg - Mono Compile control block * @param[in] @caller_sig - Signature of caller * @param[in] @callee_sig - Signature of callee * @param[in] @virtual_ - Whether this a virtual call * @returns True if the tailcall operation is supported * * Check if a tailcall may be made from caller to callee based on a * number of conditions including parameter types and stack sizes */ gboolean mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_) { g_assert (caller_sig); g_assert (callee_sig); CallInfo *caller_info = get_call_info (NULL, caller_sig); CallInfo *callee_info = get_call_info (NULL, callee_sig); gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage); // Any call that would result in parameters being placed on the stack cannot be "tailed" as it may // result in the callers parameter variables being overwritten. ArgInfo const * const ainfo = callee_info->args + callee_sig->hasthis; for (int i = 0; res && i < callee_sig->param_count; ++i) { switch(ainfo[i].regtype) { case RegTypeGeneral : // R6 is both used as argument register and call-saved // This means we cannot use a tail call if R6 is needed if (ainfo[i].reg == S390_LAST_ARG_REG) res = FALSE; else res = TRUE; break; case RegTypeFP : case RegTypeFPR4 : case RegTypeStructByValInFP : res = TRUE; break; case RegTypeBase : res = FALSE; break; case RegTypeStructByAddr : if (ainfo[i].reg == STK_BASE || ainfo[i].reg == S390_LAST_ARG_REG) res = FALSE; else res = TRUE; break; case RegTypeStructByVal : if (ainfo[i].reg == STK_BASE || ainfo[i].reg == S390_LAST_ARG_REG) res = FALSE; else { switch(ainfo[i].size) { case 0: case 1: case 2: case 4: case 8: res = TRUE; break; default: res = FALSE; } } break; } } g_free (caller_info); g_free (callee_info); return(res); } /*========================= End of Function ========================*/ #endif /** * * @brief Architecture-specific load function * * @param[in] @jit_call_id - JIT callee identifier * @returns Pointer to load function trampoline * * A no-operation on s390x until if/when it supports AOT. 
*/ gpointer mono_arch_load_function (MonoJitICallId jit_icall_id) { return NULL; } /*========================= End of Function ========================*/ /** * * @brief Emit call to thunked code * * @param[in] @cfg - configuration data * @param[inout] @code - where to emit call * @param[in] @call - call instruction * @returns Pointer to next code area * */ static __inline__ guint8* emit_call (MonoCompile *cfg, guint8 *code, MonoJumpInfoType type, gconstpointer target) { mono_add_patch_info_rel (cfg, code-cfg->native_code, type, target, MONO_R_S390_THUNKED); S390_CALL_TEMPLATE (code, s390_r14); cfg->thunk_area += THUNK_SIZE; return code; } /*========================= End of Function ========================*/ /** * * @brief Emit thunk for an indirect call * * @param[inout] @code - where to emit thunk * @param[in] @target - thunk target * @returns Pointer to next code area * */ static guint8* emit_thunk (guint8 *code, gconstpointer target) { *(guint64*)code = (guint64)target; code += sizeof (guint64); return code; } /*========================= End of Function ========================*/ /** * * @brief Create thunk * * @param[in] @cfg - Compiler configuration * @param[inout] @code - where to emit thunk * @param[in] @target - thunk target * * Create a new thunk * */ static void create_thunk (MonoCompile *cfg, guint8 *ip, guint8 *code, gpointer target) { guint8 *thunks; int thunks_size; /* * This can be called multiple times during JITting, * save the current position in cfg->arch to avoid * doing a O(n^2) search. */ if (!cfg->arch.thunks) { cfg->arch.thunks = cfg->thunks; cfg->arch.thunks_size = cfg->thunk_area; } thunks = (guint8 *) cfg->arch.thunks; thunks_size = cfg->arch.thunks_size; if (!thunks_size) { g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE)); g_assert_not_reached (); } g_assert (*(guint64 *)thunks == 0); emit_thunk (thunks, target); cfg->arch.thunks += THUNK_SIZE; cfg->arch.thunks_size -= THUNK_SIZE; S390_EMIT_CALL(ip, thunks); } /*========================= End of Function ========================*/ /** * * @brief Update thunk * * @param[in] @cfg - Compiler configuration * @param[inout] @code - where to emit thunk * @param[in] @target - thunk target * * Update an existing thunk * */ static void update_thunk (MonoCompile *cfg, guint8 *code, gpointer target) { MonoJitInfo *ji; MonoThunkJitInfo *info; guint8 *thunks; guint8 *orig_target; guint8 *target_thunk; int thunks_size; ji = mini_jit_info_table_find ((char*)code); g_assert (ji); info = mono_jit_info_get_thunk_info (ji); g_assert (info); thunks = (guint8*)ji->code_start + info->thunks_offset; thunks_size = info->thunks_size; /* * We're pointing at the start of jump to thunk, * but mono_arch_get_call_target expects we're pointing * after the branch so we adjust */ orig_target = mono_arch_get_call_target (code + 6); target_thunk = NULL; if (orig_target >= thunks && orig_target < thunks + thunks_size) { /* The call already points to a thunk, because of trampolines etc. */ target_thunk = orig_target; } else { g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE)); g_assert_not_reached (); } emit_thunk (target_thunk, target); } /*========================= End of Function ========================*/
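
/*
 * Layout sketch (illustrative assumption based on emit_call/emit_thunk
 * above, not an exact disassembly): a thunked call indirects through an
 * 8-byte thunk slot reserved in the method's thunk area, so the call
 * target can be repointed later by rewriting the slot:
 *
 *   call site:  load address of <thunk>, load its 8-byte target, basr %r14
 *   thunk:      .quad <target>     # written by emit_thunk, repointed
 *                                  # by create_thunk/update_thunk
 */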
/** * @file * @author - Neale Ferguson ([email protected]) * * @section description * Function - S/390 backend for the Mono code generator. * * Date - January, 2004 * * Derivation - From mini-x86 & mini-ppc by - * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * */ /*------------------------------------------------------------------*/ /* D e f i n e s */ /*------------------------------------------------------------------*/ #define MAX_ARCH_DELEGATE_PARAMS 10 #define EMIT_COND_BRANCH(ins,cond) \ { \ if (ins->inst_true_bb->native_offset) { \ int displace; \ displace = ((cfg->native_code + \ ins->inst_true_bb->native_offset) - code) / 2; \ if (s390_is_imm16(displace)) { \ s390_brc (code, cond, displace); \ } else { \ s390_jcl (code, cond, displace); \ } \ } else { \ mono_add_patch_info_rel (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_BB, ins->inst_true_bb, \ MONO_R_S390_RELINS); \ s390_jcl (code, cond, 0); \ } \ } #define EMIT_UNCOND_BRANCH(ins) \ { \ if (ins->inst_target_bb->native_offset) { \ int displace; \ displace = ((cfg->native_code + \ ins->inst_target_bb->native_offset) - code) / 2; \ if (s390_is_imm16(displace)) { \ s390_brc (code, S390_CC_UN, displace); \ } else { \ s390_jcl (code, S390_CC_UN, displace); \ } \ } else { \ mono_add_patch_info_rel (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_BB, ins->inst_target_bb, \ MONO_R_S390_RELINS); \ s390_jcl (code, S390_CC_UN, 0); \ } \ } #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) \ do { \ mono_add_patch_info (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_EXC, exc_name); \ s390_jcl (code, cond, 0); \ } while (0); #define EMIT_COMP_AND_BRANCH(ins, cab, cmp) \ { \ if (ins->inst_true_bb->native_offset) { \ int displace; \ displace = ((cfg->native_code + \ ins->inst_true_bb->native_offset) - code) / 2; \ if (s390_is_imm16(displace)) { \ s390_##cab (code, ins->sreg1, ins->sreg2, \ ins->sreg3, displace); \ } else { \ s390_##cmp (code, ins->sreg1, ins->sreg2); \ displace = ((cfg->native_code + \ ins->inst_true_bb->native_offset) - code) / 2; \ s390_jcl (code, ins->sreg3, displace); \ } \ } else { \ s390_##cmp (code, ins->sreg1, ins->sreg2); \ mono_add_patch_info_rel (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_BB, ins->inst_true_bb, \ MONO_R_S390_RELINS); \ s390_jcl (code, ins->sreg3, 0); \ } \ } #define EMIT_COMP_AND_BRANCH_IMM(ins, cab, cmp, lat, logical) \ { \ if (ins->inst_true_bb->native_offset) { \ int displace; \ if ((ins->backend.data == 0) && (!logical)) { \ s390_##lat (code, ins->sreg1, ins->sreg1); \ displace = ((cfg->native_code + \ ins->inst_true_bb->native_offset) - code) / 2; \ if (s390_is_imm16(displace)) { \ s390_brc (code, ins->sreg3, displace); \ } else { \ s390_jcl (code, ins->sreg3, displace); \ } \ } else { \ S390_SET (code, s390_r0, ins->backend.data); \ displace = ((cfg->native_code + \ ins->inst_true_bb->native_offset) - code) / 2; \ if (s390_is_imm16(displace)) { \ s390_##cab (code, ins->sreg1, s390_r0, \ ins->sreg3, displace); \ } else { \ s390_##cmp (code, ins->sreg1, s390_r0); \ displace = ((cfg->native_code + \ ins->inst_true_bb->native_offset) - code) / 2; \ s390_jcl (code, ins->sreg3, displace); \ } \ } \ } else { \ if ((ins->backend.data == 0) && (!logical)) { \ s390_##lat (code, ins->sreg1, ins->sreg1); \ } else { \ S390_SET (code, s390_r0, ins->backend.data); \ s390_##cmp (code, ins->sreg1, s390_r0); \ } \ mono_add_patch_info_rel (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_BB, ins->inst_true_bb, \ MONO_R_S390_RELINS); \ s390_jcl (code, ins->sreg3, 0); \ } \ } #define 
CHECK_SRCDST_COM \ if (ins->dreg == ins->sreg2) { \ src2 = ins->sreg1; \ } else { \ src2 = ins->sreg2; \ if (ins->dreg != ins->sreg1) { \ s390_lgr (code, ins->dreg, ins->sreg1); \ } \ } #define CHECK_SRCDST_NCOM \ if (ins->dreg == ins->sreg2) { \ src2 = s390_r13; \ s390_lgr (code, s390_r13, ins->sreg2); \ } else { \ src2 = ins->sreg2; \ } \ if (ins->dreg != ins->sreg1) { \ s390_lgr (code, ins->dreg, ins->sreg1); \ } #define CHECK_SRCDST_COM_I \ if (ins->dreg == ins->sreg2) { \ src2 = ins->sreg1; \ } else { \ src2 = ins->sreg2; \ if (ins->dreg != ins->sreg1) { \ s390_lgfr (code, ins->dreg, ins->sreg1); \ } \ } #define CHECK_SRCDST_NCOM_I \ if (ins->dreg == ins->sreg2) { \ src2 = s390_r13; \ s390_lgfr (code, s390_r13, ins->sreg2); \ } else { \ src2 = ins->sreg2; \ } \ if (ins->dreg != ins->sreg1) { \ s390_lgfr (code, ins->dreg, ins->sreg1); \ } #define CHECK_SRCDST_COM_F \ if (ins->dreg == ins->sreg2) { \ src2 = ins->sreg1; \ } else { \ src2 = ins->sreg2; \ if (ins->dreg != ins->sreg1) { \ s390_ldr (code, ins->dreg, ins->sreg1); \ } \ } #define CHECK_SRCDST_NCOM_F(op) \ if (ins->dreg == ins->sreg2) { \ s390_lgdr (code, s390_r0, s390_f15); \ s390_ldr (code, s390_f15, ins->sreg2); \ if (ins->dreg != ins->sreg1) { \ s390_ldr (code, ins->dreg, ins->sreg1); \ } \ s390_ ## op (code, ins->dreg, s390_f15); \ s390_ldgr (code, s390_f15, s390_r0); \ } else { \ if (ins->dreg != ins->sreg1) { \ s390_ldr (code, ins->dreg, ins->sreg1); \ } \ s390_ ## op (code, ins->dreg, ins->sreg2); \ } #define CHECK_SRCDST_NCOM_FR(op, m) \ s390_lgdr (code, s390_r1, s390_f14); \ if (ins->dreg == ins->sreg2) { \ s390_lgdr (code, s390_r0, s390_f15); \ s390_ldr (code, s390_f15, ins->sreg2); \ if (ins->dreg != ins->sreg1) { \ s390_ldr (code, ins->dreg, ins->sreg1); \ } \ s390_ ## op (code, ins->dreg, s390_f15, m, s390_f14); \ s390_ldgr (code, s390_f15, s390_r0); \ } else { \ if (ins->dreg != ins->sreg1) { \ s390_ldr (code, ins->dreg, ins->sreg1); \ } \ s390_ ## op (code, ins->dreg, ins->sreg2, m, s390_f14); \ } \ s390_ldgr (code, s390_f14, s390_r1); #undef DEBUG #define DEBUG(a) if (cfg->verbose_level > 1) a #define MAX_EXC 16 #define S390_TRACE_STACK_SIZE (5*sizeof(gpointer)+4*sizeof(gdouble)) #define MAX(a, b) ((a) > (b) ? 
(a) : (b)) /* * imt trampoline size values */ #define CMP_SIZE 24 #define LOADCON_SIZE 20 #define LOAD_SIZE 6 #define BR_SIZE 2 #define JUMP_SIZE 6 #define ENABLE_WRONG_METHOD_CHECK 0 /*========================= End of Defines =========================*/ /*------------------------------------------------------------------*/ /* I n c l u d e s */ /*------------------------------------------------------------------*/ #include "mini.h" #include <string.h> #include <sys/types.h> #include <unistd.h> #include <mono/metadata/abi-details.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/profiler-private.h> #include <mono/utils/mono-error.h> #include <mono/utils/mono-error-internals.h> #include <mono/utils/mono-math.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-hwcap.h> #include <mono/utils/mono-threads.h> #include <mono/utils/unlocked.h> #include "mini-s390x.h" #include "cpu-s390x.h" #include "jit-icalls.h" #include "ir-emit.h" #include "mini-gc.h" #include "aot-runtime.h" #include "mini-runtime.h" /*========================= End of Includes ========================*/ /*------------------------------------------------------------------*/ /* T y p e d e f s */ /*------------------------------------------------------------------*/ /** * Track stack use */ typedef struct { guint stack_size, code_size, parm_size, retStruct; } size_data; /** * ABI - register use in calls etc. */ typedef enum { RegTypeGeneral, RegTypeBase, RegTypeFP, RegTypeFPR4, RegTypeStructByVal, RegTypeStructByValInFP, RegTypeStructByAddr } ArgStorage; /** * Track method arguments */ typedef struct { gint32 offset; /* offset from caller's stack */ guint16 vtsize; /* in param area */ guint8 reg; ArgStorage regtype; guint32 size; /* Size of structure used by RegTypeStructByVal */ gint32 type; /* Data type of argument */ } ArgInfo; /** * Call information - parameters and stack use for s390x ABI */ struct CallInfo { int nargs; int lastgr; guint32 stack_usage; guint32 struct_ret; ArgInfo ret; ArgInfo sigCookie; size_data sz; int vret_arg_index; MonoMethodSignature *sig; ArgInfo args [1]; }; /** * Registers used in parameter passing */ typedef struct { gint64 gr[5]; /* R2-R6 */ gdouble fp[3]; /* F0-F2 */ } __attribute__ ((__packed__)) RegParm; /*========================= End of Typedefs ========================*/ /*------------------------------------------------------------------*/ /* P r o t o t y p e s */ /*------------------------------------------------------------------*/ static guint8 * backUpStackPtr(MonoCompile *, guint8 *); static void add_general (guint *, size_data *, ArgInfo *); static void add_stackParm (guint *, size_data *, ArgInfo *, gint, ArgStorage); static void add_float (guint *, size_data *, ArgInfo *, gboolean); static CallInfo * get_call_info (MonoMemPool *, MonoMethodSignature *); static guchar * emit_float_to_int (MonoCompile *, guchar *, int, int, int, gboolean); static __inline__ void emit_unwind_regs(MonoCompile *, guint8 *, int, int, long); static void compare_and_branch(MonoBasicBlock *, MonoInst *, int, gboolean); static __inline__ guint8 * emit_call(MonoCompile *, guint8 *, MonoJumpInfoType, gconstpointer); static guint8 * emit_thunk(guint8 *, gconstpointer); static void create_thunk(MonoCompile *, guint8 *, guint8 *, gpointer); static void update_thunk(MonoCompile *, guint8 *, gpointer); static void emit_patch_full (MonoCompile *, MonoJumpInfo *, guint8 *, gpointer, int); /*========================= End of Prototypes 
======================*/ /*------------------------------------------------------------------*/ /* G l o b a l V a r i a b l e s */ /*------------------------------------------------------------------*/ /** * The single-step trampoline */ static gpointer ss_trampoline; /** * The breakpoint trampoline */ static gpointer bp_trampoline; /** * Constants used in debugging - map general register names */ static const char * grNames[] = { "s390_r0", "s390_sp", "s390_r2", "s390_r3", "s390_r4", "s390_r5", "s390_r6", "s390_r7", "s390_r8", "s390_r9", "s390_r10", "s390_r11", "s390_r12", "s390_r13", "s390_r14", "s390_r15" }; /** * Constants used in debugging - map floating point register names */ static const char * fpNames[] = { "s390_f0", "s390_f1", "s390_f2", "s390_f3", "s390_f4", "s390_f5", "s390_f6", "s390_f7", "s390_f8", "s390_f9", "s390_f10", "s390_f11", "s390_f12", "s390_f13", "s390_f14", "s390_f15" }; /** * Constants used in debugging - map vector register names */ static const char * vrNames[] = { "vr0", "vr1", "vr2", "vr3", "vr4", "vr5", "vr6", "vr7", "vr8", "vr9", "vr10", "vr11", "vr12", "vr13", "vr14", "vr15", "vr16", "vr17", "vr18", "vr19", "vr20", "vr21", "vr22", "vr23", "vr24", "vr25", "vr26", "vr27", "vr28", "vr29", "vr30", "vr31" }; #if 0 /** * Constants used in debugging - ABI register types */ static const char *typeParm[] = { "General", "Base", "FPR8", "FPR4", "StructByVal", "StructByValInFP", "ByAddr"}; #endif /*====================== End of Global Variables ===================*/ static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math") static GENERATE_TRY_GET_CLASS_WITH_CACHE (mathf, "System", "MathF") /** * * @brief Return general register name * * @param[in] register number * @returns Name of register * * Returns the name of the general register specified by the input parameter. */ const char* mono_arch_regname (int reg) { if (reg >= 0 && reg < 16) return grNames [reg]; else return "unknown"; } /*========================= End of Function ========================*/ /** * * @brief Return floating point register name * * @param[in] register number * @returns Name of register * * Returns the name of the FP register specified by the input parameter. */ const char* mono_arch_fregname (int reg) { if (reg >= 0 && reg < 16) return fpNames [reg]; else return "unknown"; } /*========================= End of Function ========================*/ /** * * @brief Return vector register name * * @param[in] register number * @returns Name of register * * Returns the name of the vector register specified by the input parameter. */ const char * mono_arch_xregname (int reg) { if (reg < s390_VR_NREG) return vrNames [reg]; else return "unknown"; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific return argument information * * @param[in] @csig - Method signature * @param[in] @param_count - Number of parameters to consider * @param[out] @arg_info - An array in which to store results * @returns Size of the activation frame * * Gathers information on parameters such as size, alignment, and padding. * arg_info should be large * enough to hold param_count + 1 entries. 
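 *
 * Usage sketch (illustrative only):
 *
 *   MonoJitArgumentInfo *ai = g_newa (MonoJitArgumentInfo, csig->param_count + 1);
 *   int frame_size = mono_arch_get_argument_info (csig, csig->param_count, ai);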
*/ int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) { int k, frame_size = 0; int size, align, pad; int offset = 8; if (MONO_TYPE_ISSTRUCT (csig->ret)) { frame_size += sizeof (target_mgreg_t); offset += 8; } arg_info [0].offset = offset; if (csig->hasthis) { frame_size += sizeof (target_mgreg_t); offset += 8; } arg_info [0].size = frame_size; for (k = 0; k < param_count; k++) { if (csig->pinvoke && !csig->marshalling_disabled) size = mono_type_native_stack_size (csig->params [k], (guint32 *) &align); else size = mini_type_stack_size (csig->params [k], &align); frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; frame_size += size; arg_info [k + 1].pad = 0; arg_info [k + 1].size = size; offset += pad; arg_info [k + 1].offset = offset; offset += size; } align = MONO_ARCH_FRAME_ALIGNMENT; frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; return frame_size; } /*========================= End of Function ========================*/ /** * * @brief Emit an s390x move operation * * @param[in] @cfg - MonoCompile control block * @param[in] @dr - Destination register * @param[in] @ins - Current instruction * @param[in] @src - Instruction representing the source of move * * Emit a move instruction for VT parameters */ static void __inline__ emit_new_move(MonoCompile *cfg, int dr, MonoInst *ins, MonoInst *src) { MonoCallInst *call = (MonoCallInst *) ins->inst_p0; ArgInfo *ainfo = (ArgInfo *) ins->inst_p1; MonoInst *vtcopy = mono_compile_create_var (cfg, m_class_get_byval_arg (src->klass), OP_LOCAL); MonoInst *load; MonoInst *move; int size; if (call->signature->pinvoke && !call->signature->marshalling_disabled) { size = mono_type_native_stack_size (m_class_get_byval_arg (src->klass), NULL); vtcopy->backend.is_pinvoke = 1; } else { size = ins->backend.size; } EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype); MONO_INST_NEW (cfg, move, OP_S390_MOVE); move->sreg2 = load->dreg; move->inst_offset = 0; move->sreg1 = src->dreg; move->inst_imm = 0; move->backend.size = size; MONO_ADD_INS (cfg->cbb, move); if (dr != 0) MONO_EMIT_NEW_UNALU(cfg, OP_MOVE, dr, load->dreg); else MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, ainfo->reg, ainfo->offset, load->dreg); } /*========================= End of Function ========================*/ /** * * @brief Generate output sequence for VT register parameters * * @param[in] @cfg - MonoCompile control block * @param[in] @dr - Destination register * @param[in] @ins - Current instruction * @param[in] @src - Instruction representing the source * * Emit the output of structures for calls whose address is placed in a register. 
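 *
 * Structures of size 1, 2, 4 or 8 bytes are loaded directly into the
 * argument register; any other size is first copied to a local via
 * OP_S390_MOVE (see emit_new_move above) and passed by address.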
*/ static void __inline__ emit_outarg_vtr(MonoCompile *cfg, MonoInst *ins, MonoInst *src) { MonoCallInst *call = (MonoCallInst *) ins->inst_p0; ArgInfo *ainfo = (ArgInfo *) ins->inst_p1; int reg = mono_alloc_preg (cfg); switch (ins->backend.size) { case 0: MONO_EMIT_NEW_ICONST(cfg, reg, 0); break; case 1: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU1_MEMBASE, reg, src->dreg, 0); break; case 2: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU2_MEMBASE, reg, src->dreg, 0); break; case 4: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI4_MEMBASE, reg, src->dreg, 0); break; case 8: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI8_MEMBASE, reg, src->dreg, 0); break; default: emit_new_move (cfg, reg, ins, src); } mono_call_inst_add_outarg_reg(cfg, call, reg, ainfo->reg, FALSE); } /*========================= End of Function ========================*/ /** * * @brief Generate output sequence for VT stack parameters * * @param[in] @cfg - MonoCompile control block * @param[in] @dr - Destination register * @param[in] @ins - Current instruction * @param[in] @src - Instruction representing the source * * Emit the output of structures for calls whose address is placed on the stack */ static void __inline__ emit_outarg_vts(MonoCompile *cfg, MonoInst *ins, MonoInst *src) { ArgInfo *ainfo = (ArgInfo *) ins->inst_p1; int tmpr = mono_alloc_preg (cfg); switch (ins->backend.size) { case 0: MONO_EMIT_NEW_ICONST(cfg, tmpr, 0); MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, ainfo->reg, ainfo->offset, tmpr); break; case 1: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU1_MEMBASE, tmpr, src->dreg, 0); MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, ainfo->reg, ainfo->offset, tmpr); break; case 2: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU2_MEMBASE, tmpr, src->dreg, 0); MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, ainfo->reg, ainfo->offset, tmpr); break; case 4: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI4_MEMBASE, tmpr, src->dreg, 0); MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, ainfo->reg, ainfo->offset, tmpr); break; case 8: MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI8_MEMBASE, tmpr, src->dreg, 0); MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, ainfo->reg, ainfo->offset, tmpr); break; default: { emit_new_move (cfg, 0, ins, src); } } } /*========================= End of Function ========================*/ /** * * @brief Generate unwind information for range of registers * * @param[in] @cfg - MonoCompile control block * @param[in] @code - Location of code * @param[in] @start - Starting register * @param[in] @end - Ending register * @param[in] @offset - Offset in stack * * Emit unwind information for a range of registers. 
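 *
 * For example, the prolog records the stmg of %r6-%r15 like this:
 *
 *   s390_stmg (code, s390_r6, s390_r15, STK_BASE, S390_REG_SAVE_OFFSET);
 *   emit_unwind_regs (cfg, code, s390_r6, s390_r15,
 *                     S390_REG_SAVE_OFFSET - S390_CFA_OFFSET);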
 */

static void __inline__
emit_unwind_regs(MonoCompile *cfg, guint8 *code, int start, int end, long offset)
{
    int i;

    for (i = start; i <= end; i++) {
        mono_emit_unwind_op_offset (cfg, code, i, offset);
        mini_gc_set_slot_type_from_cfa (cfg, offset, SLOT_NOREF);
        offset += sizeof(gulong);
    }
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Get previous stack frame pointer
 *
 * @param[in] @cfg - MonoCompile control block
 * @param[in] @code - Location of code
 * @returns Previous stack pointer
 *
 * Retrieve the stack pointer of the previous frame
 */

static guint8 *
backUpStackPtr(MonoCompile *cfg, guint8 *code)
{
    int stackSize = cfg->stack_usage;

    if (cfg->flags & MONO_CFG_HAS_ALLOCA) {
        s390_lg (code, STK_BASE, 0, STK_BASE, 0);
    } else {
        if (cfg->frame_reg != STK_BASE)
            s390_lgr (code, STK_BASE, cfg->frame_reg);

        if (s390_is_imm16 (stackSize)) {
            s390_aghi (code, STK_BASE, stackSize);
        } else if (s390_is_imm32 (stackSize)) {
            s390_agfi (code, STK_BASE, stackSize);
        } else {
            while (stackSize > INT_MAX) {
                s390_aghi (code, STK_BASE, INT_MAX);
                stackSize -= INT_MAX;
            }
            s390_agfi (code, STK_BASE, stackSize);
        }
    }

    return (code);
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific CPU initialization
 *
 * Perform CPU specific initialization to execute managed code.
 */

void
mono_arch_cpu_init (void)
{
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific initialization
 *
 * Initialize architecture specific code:
 * - Define trigger pages for debugger
 * - Generate breakpoint code stub
 */

void
mono_arch_init (void)
{
    mono_set_partial_sharing_supported (FALSE);

    if (!mono_aot_only)
        bp_trampoline = mini_get_breakpoint_trampoline();
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific cleanup code
 *
 * Clean up before termination:
 * - Free the trigger pages
 */

void
mono_arch_cleanup (void)
{
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific check for fast TLS access
 *
 * @returns True
 *
 * Returns whether we use fast inlined thread local storage managed access,
 * instead of falling back to native code.
*/ gboolean mono_arch_have_fast_tls (void) { return TRUE; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific check of mono optimizations * * @param[out] @exclude_mask - Optimization exclusion mask * @returns Optimizations supported on this CPU * * Returns the optimizations supported on this CPU */ guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask) { guint32 opts = 0; /* * No s390-specific optimizations yet */ *exclude_mask = 0; return opts; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific allocation of integer variables * * @param[in] @cfg - MonoCompile control block * @returns A list of integer variables * * Returns a list of allocatable integer variables */ GList * mono_arch_get_allocatable_int_vars (MonoCompile *cfg) { GList *vars = NULL; int i; for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); /* unused vars */ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos) continue; if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG)) continue; /* we can only allocate 32 bit values */ if (mono_is_regsize_var(ins->inst_vtype)) { g_assert (MONO_VARINFO (cfg, i)->reg == -1); g_assert (i == vmv->idx); vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE); } } return vars; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific determination of usable integer registers * * @param[in] @cfg - MonoCompile control block * @returns A list of allocatable registers * * Returns a list of usable integer registers */ GList * mono_arch_get_global_int_regs (MonoCompile *cfg) { GList *regs = NULL; MonoMethodHeader *header; int i, top = 13; header = cfg->header; if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses) cfg->frame_reg = s390_r11; /* FIXME: s390_r12 is reserved for bkchain_reg. Only reserve it if needed */ top = 12; for (i = 8; i < top; ++i) { if ((cfg->frame_reg != i) && //!((cfg->uses_rgctx_reg) && (i == MONO_ARCH_IMT_REG))) (i != MONO_ARCH_IMT_REG)) regs = g_list_prepend (regs, GUINT_TO_POINTER (i)); } return regs; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific flush of instruction cache * * @param[in] @code - Start of code * @param[in] @size - Amount to be flushed * * Flush the CPU icache. 
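 *
 * (No explicit flush sequence is emitted here: the port relies on
 * z/Architecture keeping the instruction cache coherent with program
 * stores, unlike e.g. ARM.)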
*/ void mono_arch_flush_icache (guint8 *code, gint size) { } /*========================= End of Function ========================*/ /** * * @brief Add an integer register parameter * * @param[in] @gr - Address of current register number * @param[in] @sz - Stack size data * @param[in] @ainfo - Parameter information * * Assign a parameter to a general register or spill it onto the stack */ static void inline add_general (guint *gr, size_data *sz, ArgInfo *ainfo) { if (*gr > S390_LAST_ARG_REG) { sz->stack_size = S390_ALIGN(sz->stack_size, sizeof(long)); ainfo->offset = sz->stack_size; ainfo->reg = STK_BASE; ainfo->regtype = RegTypeBase; sz->stack_size += sizeof(long); sz->code_size += 12; } else { ainfo->reg = *gr; ainfo->regtype = RegTypeGeneral; sz->code_size += 8; } (*gr) ++; } /*========================= End of Function ========================*/ /** * * @brief Add a structure variable to parameter list * * @param[in] @gr - Address of current register number * @param[in] @sz - Stack size data * @param[in] @ainfo - Parameter information * @param[in] @size - Size of parameter * @param[in] @type - Type of stack parameter (reference or value) * * Assign a structure address to a register or spill it onto the stack */ static void inline add_stackParm (guint *gr, size_data *sz, ArgInfo *ainfo, gint size, ArgStorage type) { if (*gr > S390_LAST_ARG_REG) { sz->stack_size = S390_ALIGN(sz->stack_size, sizeof(long)); ainfo->reg = STK_BASE; ainfo->offset = sz->stack_size; sz->stack_size += sizeof (target_mgreg_t); sz->parm_size += sizeof(gpointer); } else { ainfo->reg = *gr; } (*gr) ++; ainfo->regtype = type; ainfo->size = size; ainfo->vtsize = size; sz->parm_size += size; } /*========================= End of Function ========================*/ /** * * @brief Add a floating point register parameter * * @param[in] @fr - Address of current register number * @param[in] @sz - Stack size data * @param[in] @ainfo - Parameter information * @param[in] @isDouble - Precision of parameter * * Assign a parameter to a FP register or spill it onto the stack */ static void inline add_float (guint *fr, size_data *sz, ArgInfo *ainfo, gboolean isDouble) { if ((*fr) <= S390_LAST_FPARG_REG) { if (isDouble) ainfo->regtype = RegTypeFP; else ainfo->regtype = RegTypeFPR4; ainfo->reg = *fr; sz->code_size += 4; (*fr) += 2; } else { ainfo->offset = sz->stack_size; ainfo->reg = STK_BASE; sz->code_size += 4; sz->stack_size += sizeof(double); ainfo->regtype = RegTypeBase; } } /*========================= End of Function ========================*/ /** * * @brief Extract information about call parameters and stack use * * @param[in] @mp - Mono Memory Pool * @param[in] @sig - Mono Method Signature * @returns Information about the parameters and stack usage for a call * * Determine the amount of space required for code and stack. In addition * determine starting points for stack-based parameters, and area for * structures being returned on the stack. 
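 *
 * For orientation, the s390x ABI modeled here passes the first five
 * integer/pointer arguments in %r2..%r6 and the first four FP arguments
 * in %f0,%f2,%f4,%f6. An illustrative signature such as
 *
 *   long f (long a, double x, long b, long c, long d, long e);
 *
 * is therefore laid out as a->%r2, x->%f0, b->%r3, c->%r4, d->%r5,
 * e->%r6, with any further arguments spilled to the stack.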
*/ static CallInfo * get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { guint i, fr, gr, size, pstart; int nParm = sig->hasthis + sig->param_count; MonoType *ret_type; guint32 simpleType, align; gboolean is_pinvoke = sig->pinvoke; CallInfo *cinfo; size_data *sz; if (mp) cinfo = (CallInfo *) mono_mempool_alloc0 (mp, sizeof (CallInfo) + sizeof (ArgInfo) * nParm); else cinfo = (CallInfo *) g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * nParm); fr = 0; gr = s390_r2; nParm = 0; cinfo->struct_ret = 0; cinfo->sig = sig; sz = &cinfo->sz; sz->retStruct = 0; sz->stack_size = S390_MINIMAL_STACK_SIZE; sz->code_size = 0; sz->parm_size = 0; align = 0; size = 0; /*----------------------------------------------------------*/ /* We determine the size of the return code/stack in case we*/ /* need to reserve a register to be used to address a stack */ /* area that the callee will use. */ /*----------------------------------------------------------*/ ret_type = mini_get_underlying_type (sig->ret); simpleType = ret_type->type; enum_retvalue: switch (simpleType) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_OBJECT: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_CLASS: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: case MONO_TYPE_STRING: cinfo->ret.reg = s390_r2; sz->code_size += 4; break; case MONO_TYPE_R4: case MONO_TYPE_R8: cinfo->ret.reg = s390_f0; sz->code_size += 4; break; case MONO_TYPE_I8: case MONO_TYPE_U8: cinfo->ret.reg = s390_r2; sz->code_size += 4; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (sig->ret)) { cinfo->ret.reg = s390_r2; sz->code_size += 4; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: { MonoClass *klass = mono_class_from_mono_type_internal (sig->ret); if (m_class_is_enumtype (klass)) { simpleType = mono_class_enum_basetype_internal (klass)->type; goto enum_retvalue; } size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, sig->pinvoke && !sig->marshalling_disabled); cinfo->struct_ret = 1; cinfo->ret.size = size; cinfo->ret.vtsize = size; break; } case MONO_TYPE_TYPEDBYREF: { MonoClass *klass = mono_class_from_mono_type_internal (sig->ret); size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, sig->pinvoke && !sig->marshalling_disabled); cinfo->struct_ret = 1; cinfo->ret.size = size; cinfo->ret.vtsize = size; } break; case MONO_TYPE_VOID: break; default: g_error ("Can't handle as return value 0x%x", sig->ret->type); } pstart = 0; /* * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after * the first argument, allowing 'this' to be always passed in the first arg reg. * Also do this if the first argument is a reference type, since virtual calls * are sometimes made using calli without sig->hasthis set, like in the delegate * invoke wrappers. 
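 *
 * Concretely: for an instance method returning a structure by hidden
 * pointer, 'this' stays in %r2 and the vret address follows in %r3
 * (cinfo->vret_arg_index == 1), with the remaining user arguments
 * starting at %r4.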
*/ if (cinfo->struct_ret && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) { if (sig->hasthis) { cinfo->args[nParm].size = sizeof (target_mgreg_t); add_general (&gr, sz, cinfo->args + nParm); } else { cinfo->args[nParm].size = sizeof (target_mgreg_t); add_general (&gr, sz, &cinfo->args [sig->hasthis + nParm]); pstart = 1; } nParm ++; cinfo->vret_arg_index = 1; cinfo->ret.reg = gr; gr ++; } else { /* this */ if (sig->hasthis) { cinfo->args[nParm].size = sizeof (target_mgreg_t); add_general (&gr, sz, cinfo->args + nParm); nParm ++; } if (cinfo->struct_ret) { cinfo->ret.reg = gr; gr++; } } if ((sig->call_convention == MONO_CALL_VARARG) && (sig->param_count == 0)) { gr = S390_LAST_ARG_REG + 1; fr = S390_LAST_FPARG_REG + 1; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, sz, &cinfo->sigCookie); } /*----------------------------------------------------------*/ /* We determine the size of the parameter code and stack */ /* requirements by checking the types and sizes of the */ /* parameters. */ /*----------------------------------------------------------*/ for (i = pstart; i < sig->param_count; ++i) { MonoType *ptype; /*--------------------------------------------------*/ /* Handle vararg type calls. All args are put on */ /* the stack. */ /*--------------------------------------------------*/ if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { gr = S390_LAST_ARG_REG + 1; fr = S390_LAST_FPARG_REG + 1; add_general (&gr, sz, &cinfo->sigCookie); } if (m_type_is_byref (sig->params [i])) { add_general (&gr, sz, cinfo->args+nParm); cinfo->args[nParm].size = sizeof(gpointer); nParm++; continue; } ptype = mini_get_underlying_type (sig->params [i]); simpleType = ptype->type; cinfo->args[nParm].type = simpleType; switch (simpleType) { case MONO_TYPE_I1: case MONO_TYPE_U1: cinfo->args[nParm].size = sizeof(char); add_general (&gr, sz, cinfo->args+nParm); nParm++; break; case MONO_TYPE_I2: case MONO_TYPE_U2: cinfo->args[nParm].size = sizeof(short); add_general (&gr, sz, cinfo->args+nParm); nParm++; break; case MONO_TYPE_I4: case MONO_TYPE_U4: cinfo->args[nParm].size = sizeof(int); add_general (&gr, sz, cinfo->args+nParm); nParm++; break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: cinfo->args[nParm].size = sizeof(gpointer); add_general (&gr, sz, cinfo->args+nParm); nParm++; break; case MONO_TYPE_I8: case MONO_TYPE_U8: cinfo->args[nParm].size = sizeof(long long); add_general (&gr, sz, cinfo->args+nParm); nParm++; break; case MONO_TYPE_R4: cinfo->args[nParm].size = sizeof(float); add_float (&fr, sz, cinfo->args+nParm, FALSE); nParm++; break; case MONO_TYPE_R8: cinfo->args[nParm].size = sizeof(double); add_float (&fr, sz, cinfo->args+nParm, TRUE); nParm++; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ptype)) { cinfo->args[nParm].size = sizeof(gpointer); add_general (&gr, sz, cinfo->args+nParm); nParm++; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: { MonoMarshalType *info; MonoClass *klass = mono_class_from_mono_type_internal (ptype); if (sig->pinvoke && !sig->marshalling_disabled) size = mono_class_native_size(klass, NULL); else size = mono_class_value_size(klass, NULL); if (simpleType != MONO_TYPE_GENERICINST) { info = mono_marshal_load_type_info(klass); if ((info->native_size == sizeof(float)) && 
(info->num_fields == 1) && (info->fields[0].field->type->type == MONO_TYPE_R4)) { cinfo->args[nParm].size = sizeof(float); add_float(&fr, sz, cinfo->args+nParm, FALSE); nParm ++; break; } if ((info->native_size == sizeof(double)) && (info->num_fields == 1) && (info->fields[0].field->type->type == MONO_TYPE_R8)) { cinfo->args[nParm].size = sizeof(double); add_float(&fr, sz, cinfo->args+nParm, TRUE); nParm ++; break; } } cinfo->args[nParm].vtsize = 0; cinfo->args[nParm].size = 0; switch (size) { /*----------------------------------*/ /* On S/390, structures of size 1, */ /* 2, 4, and 8 bytes are passed in */ /* (a) register(s). */ /*----------------------------------*/ case 0: case 1: case 2: case 4: case 8: add_general(&gr, sz, cinfo->args+nParm); cinfo->args[nParm].size = size; cinfo->args[nParm].regtype = RegTypeStructByVal; nParm++; break; default: add_stackParm(&gr, sz, cinfo->args+nParm, size, RegTypeStructByVal); nParm++; } } break; case MONO_TYPE_TYPEDBYREF: { add_stackParm(&gr, sz, cinfo->args+nParm, sizeof(uintptr_t), RegTypeStructByAddr); nParm++; } break; default: g_error ("Can't trampoline 0x%x", ptype); } } /*----------------------------------------------------------*/ /* Handle the case where there are no implicit arguments */ /*----------------------------------------------------------*/ if ((sig->call_convention == MONO_CALL_VARARG) && (nParm > 0) && (!sig->pinvoke) && (sig->param_count == sig->sentinelpos)) { gr = S390_LAST_ARG_REG + 1; fr = S390_LAST_FPARG_REG + 1; add_general (&gr, sz, &cinfo->sigCookie); } /* * If we are passing a structure back then we make room at * the end of the parameters that may have been placed on * the stack */ if (cinfo->struct_ret) { cinfo->ret.offset = sz->stack_size; sz->stack_size += S390_ALIGN(cinfo->ret.size, align); } cinfo->lastgr = gr; sz->stack_size = sz->stack_size + sz->parm_size; sz->stack_size = S390_ALIGN(sz->stack_size, sizeof(long)); return (cinfo); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific allocation of variables * * @param[in] @cfg - Compile control block * * Set var information according to the calling convention for s390x. * */ void mono_arch_allocate_vars (MonoCompile *cfg) { MonoMethodSignature *sig; MonoMethodHeader *header; MonoInst *inst; CallInfo *cinfo; int iParm, iVar, offset, align, size, curinst; int frame_reg = STK_BASE; int sArg, eArg; header = cfg->header; cfg->flags |= MONO_CFG_HAS_SPILLUP; /*---------------------------------------------------------*/ /* We use the frame register also for any method that has */ /* filter clauses. This way, when the handlers are called, */ /* the code will reference local variables using the frame */ /* reg instead of the stack pointer: if we had to restore */ /* the stack pointer, we'd corrupt the method frames that */ /* are already on the stack (since filters get called */ /* before stack unwinding happens) when the filter code */ /* would call any method. 
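*/
	/* Put differently: a filter can execute while this frame is    */
	/* still live further down the stack, so locals must remain     */
	/* addressable through the preserved frame register (r11)       */
	/* rather than through a stack pointer that may already have moved.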
*/ /*---------------------------------------------------------*/ if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses) frame_reg = s390_r11; cfg->frame_reg = frame_reg; cfg->arch.bkchain_reg = -1; if (frame_reg != STK_BASE) cfg->used_int_regs |= (1LL << frame_reg); sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; /*--------------------------------------------------------------*/ /* local vars are at a positive offset from the stack pointer */ /* also note that if the function uses alloca, we use s390_r11 */ /* to point at the local variables. */ /* add parameter area size for called functions */ /*--------------------------------------------------------------*/ if (cfg->param_area == 0) offset = S390_MINIMAL_STACK_SIZE; else offset = cfg->param_area; cfg->sig_cookie = 0; if (MONO_TYPE_ISSTRUCT(sig->ret)) { cfg->ret->opcode = OP_REGVAR; cfg->ret->inst_c0 = cfg->ret->dreg = cinfo->ret.reg; } else { switch (mini_get_underlying_type (sig->ret)->type) { case MONO_TYPE_VOID: break; default: cfg->ret->opcode = OP_REGVAR; cfg->ret->inst_c0 = cfg->ret->dreg = cinfo->ret.reg; } } if (sig->hasthis) { inst = cfg->args [0]; if (inst->opcode != OP_REGVAR) { inst->opcode = OP_REGOFFSET; inst->inst_basereg = frame_reg; offset = S390_ALIGN(offset, sizeof(gpointer)); inst->inst_offset = offset; offset += sizeof (target_mgreg_t); } curinst = sArg = 1; } else { curinst = sArg = 0; } eArg = sig->param_count + sArg; if (sig->call_convention == MONO_CALL_VARARG) cfg->sig_cookie += S390_MINIMAL_STACK_SIZE; for (iParm = sArg; iParm < eArg; ++iParm) { inst = cfg->args [curinst]; if (inst->opcode != OP_REGVAR) { switch (cinfo->args[iParm].regtype) { case RegTypeStructByAddr : { MonoInst *indir; size = sizeof (target_mgreg_t); if (cinfo->args [iParm].reg == STK_BASE) { /* Similar to the == STK_BASE case below */ cfg->arch.bkchain_reg = s390_r12; cfg->used_int_regs |= 1 << cfg->arch.bkchain_reg; inst->opcode = OP_REGOFFSET; inst->dreg = mono_alloc_preg (cfg); inst->inst_basereg = cfg->arch.bkchain_reg; inst->inst_offset = cinfo->args [iParm].offset; } else { inst->opcode = OP_REGOFFSET; inst->dreg = cinfo->args [iParm].reg; inst->opcode = OP_REGOFFSET; inst->dreg = mono_alloc_preg (cfg); inst->inst_basereg = cfg->frame_reg; // inst->inst_offset = cinfo->args [iParm].offset; inst->inst_offset = offset; } /* Add a level of indirection */ MONO_INST_NEW (cfg, indir, 0); *indir = *inst; inst->opcode = OP_VTARG_ADDR; inst->inst_left = indir; } break; case RegTypeStructByVal : { MonoInst *indir; cfg->arch.bkchain_reg = s390_r12; cfg->used_int_regs |= 1 << cfg->arch.bkchain_reg; size = cinfo->args[iParm].size; if (cinfo->args [iParm].reg == STK_BASE) { int offStruct = 0; switch(size) { case 0: case 1: case 2: case 4: case 8: offStruct = (size < 8 ? 
sizeof(uintptr_t) - size : 0); default: inst->opcode = OP_REGOFFSET; inst->dreg = mono_alloc_preg (cfg); inst->inst_basereg = cfg->arch.bkchain_reg; inst->inst_offset = cinfo->args [iParm].offset + offStruct; } } else { offset = S390_ALIGN(offset, sizeof(uintptr_t)); inst->opcode = OP_REGOFFSET; inst->inst_basereg = cfg->frame_reg; inst->inst_offset = offset; } switch (size) { case 0 : case 1 : case 2 : case 4 : case 8 : break; default : /* Add a level of indirection */ MONO_INST_NEW (cfg, indir, 0); *indir = *inst; inst->opcode = OP_VTARG_ADDR; inst->inst_left = indir; } } break; default : if (cinfo->args [iParm].reg == STK_BASE) { /* * These arguments are in the previous frame, so we can't * compute their offset from the current frame pointer right * now, since cfg->stack_offset is not yet known, so dedicate a * register holding the previous frame pointer. */ cfg->arch.bkchain_reg = s390_r12; cfg->used_int_regs |= 1 << cfg->arch.bkchain_reg; inst->opcode = OP_REGOFFSET; inst->inst_basereg = cfg->arch.bkchain_reg; size = (cinfo->args[iParm].size < 8 ? 8 - cinfo->args[iParm].size : 0); inst->inst_offset = cinfo->args [iParm].offset + size; size = sizeof (long); } else { inst->opcode = OP_REGOFFSET; inst->inst_basereg = frame_reg; size = (cinfo->args[iParm].size < 8 ? sizeof(int) : sizeof(long)); offset = S390_ALIGN(offset, size); if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) inst->inst_offset = offset; else inst->inst_offset = offset + (8 - size); } } offset += MAX(size, 8); } curinst++; } cfg->locals_min_stack_offset = offset; curinst = cfg->locals_start; for (iVar = curinst; iVar < cfg->num_varinfo; ++iVar) { inst = cfg->varinfo [iVar]; if ((inst->flags & MONO_INST_IS_DEAD) || (inst->opcode == OP_REGVAR)) continue; /*--------------------------------------------------*/ /* inst->backend.is_pinvoke indicates native sized */ /* value types this is used by the pinvoke wrappers */ /* when they call functions returning structure */ /*--------------------------------------------------*/ if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype)) size = mono_class_native_size (mono_class_from_mono_type_internal (inst->inst_vtype), (guint32 *) &align); else size = mono_type_size (inst->inst_vtype, &align); offset = S390_ALIGN(offset, align); inst->inst_offset = offset; inst->opcode = OP_REGOFFSET; inst->inst_basereg = frame_reg; offset += size; DEBUG (g_print("allocating local %d to %ld, size: %d\n", iVar, inst->inst_offset, size)); } offset = S390_ALIGN(offset, sizeof(uintptr_t)); cfg->locals_max_stack_offset = offset; /*------------------------------------------------------*/ /* Reserve space to save LMF and caller saved registers */ /*------------------------------------------------------*/ if (cfg->method->save_lmf) offset += sizeof (MonoLMF); /*------------------------------------------------------*/ /* align the offset */ /*------------------------------------------------------*/ cfg->stack_offset = S390_ALIGN(offset, S390_STACK_ALIGNMENT); /*------------------------------------------------------*/ /* Fix offsets for args whose value is in parent frame */ /*------------------------------------------------------*/ for (iParm = sArg; iParm < eArg; ++iParm) { inst = cfg->args [iParm]; if (inst->opcode == OP_S390_STKARG) { inst->opcode = OP_REGOFFSET; inst->inst_offset += cfg->stack_offset; } } } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific creation of variables * * @param[in] @cfg - Compile control 
block
 *
 * Create variables for the method.
 *
 */

void
mono_arch_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig = mono_method_signature_internal (cfg->method);
	CallInfo *cinfo;

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	if (cinfo->struct_ret) {
		cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			printf ("vret_addr = ");
			mono_print_ins (cfg->vret_addr);
		}
	}

	if (cfg->gen_sdb_seq_points) {
		MonoInst *ins;

		ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
		ins->flags |= MONO_INST_VOLATILE;
		cfg->arch.ss_tramp_var = ins;

		ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
		ins->flags |= MONO_INST_VOLATILE;
		cfg->arch.bp_tramp_var = ins;
	}
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Add a register to the call operation
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @call - Call Instruction
 * @param[in] @storage - Register use type
 * @param[in] @reg - Register number
 * @param[in] @tree - Call arguments
 *
 * Add register use information to the call sequence
 */

static void
add_outarg_reg2 (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
{
	MonoInst *ins;

	switch (storage) {
	case RegTypeGeneral:
		MONO_INST_NEW (cfg, ins, OP_MOVE);
		ins->dreg = mono_alloc_ireg (cfg);
		ins->sreg1 = tree->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
		break;
	case RegTypeFP:
		MONO_INST_NEW (cfg, ins, OP_FMOVE);
		ins->dreg = mono_alloc_freg (cfg);
		ins->sreg1 = tree->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
		break;
	case RegTypeFPR4:
		MONO_INST_NEW (cfg, ins, OP_S390_SETF4RET);
		ins->dreg = mono_alloc_freg (cfg);
		ins->sreg1 = tree->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
		break;
	default:
		g_assert_not_reached ();
	}
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Emit a signature cookie
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @call - Call Instruction
 * @param[in] @cinfo - Call Information
 *
 * Emit the signature cookie as a parameter
 */

static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
	MonoMethodSignature *tmpSig;
	MonoInst *sig_arg;

	cfg->disable_aot = TRUE;

	/*
	 * mono_ArgIterator_Setup assumes the signature cookie is
	 * passed first and all the arguments which were before it
	 * passed on the stack after the signature. So compensate
	 * by passing a different signature.
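	 *
	 * As an illustration (hypothetical signature): for
	 * foo (int a, __arglist (double d, char *s)) with sentinelpos == 1,
	 * tmpSig is reduced to just (double, char *), so the cookie the
	 * argument iterator sees describes exactly what follows it on the
	 * stack.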
*/ tmpSig = mono_metadata_signature_dup (call->signature); tmpSig->param_count -= call->signature->sentinelpos; tmpSig->sentinelpos = 0; if (tmpSig->param_count > 0) memcpy (tmpSig->params, call->signature->params + call->signature->sentinelpos, tmpSig->param_count * sizeof(MonoType *)); MONO_INST_NEW (cfg, sig_arg, OP_ICONST); sig_arg->dreg = mono_alloc_ireg (cfg); sig_arg->inst_p0 = tmpSig; MONO_ADD_INS (cfg->cbb, sig_arg); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, STK_BASE, cinfo->sigCookie.offset, sig_arg->dreg); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific emission of a call operation * * @param[in] @cfg - Compile control block * @param[in] @call - Call Instruction * * Process all parameters for a call and generate the sequence of * operations to perform the call according to the s390x ABI. */ void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call) { MonoInst *in; MonoMethodSignature *sig; MonoInst *ins; int i, n, lParamArea; CallInfo *cinfo; ArgInfo *ainfo = NULL; int stackSize; sig = call->signature; n = sig->param_count + sig->hasthis; DEBUG (g_print ("Call requires: %d parameters\n",n)); cinfo = get_call_info (cfg->mempool, sig); stackSize = cinfo->sz.stack_size + cinfo->sz.parm_size; call->stack_usage = MAX(stackSize, call->stack_usage); lParamArea = MAX((call->stack_usage-S390_MINIMAL_STACK_SIZE-cinfo->sz.parm_size), 0); cfg->param_area = MAX(((signed) cfg->param_area), lParamArea); /* FIXME */ cfg->flags |= MONO_CFG_HAS_CALLS; if (cinfo->struct_ret) { MONO_INST_NEW (cfg, ins, OP_MOVE); ins->sreg1 = call->vret_var->dreg; ins->dreg = mono_alloc_preg (cfg); MONO_ADD_INS (cfg->cbb, ins); mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, cinfo->ret.reg, FALSE); } for (i = 0; i < n; ++i) { MonoType *t; ainfo = cinfo->args + i; if (i >= sig->hasthis) t = sig->params [i - sig->hasthis]; else t = mono_get_int_type (); t = mini_get_underlying_type (t); in = call->args [i]; if ((sig->call_convention == MONO_CALL_VARARG) && (!sig->pinvoke) && (i == sig->sentinelpos)) { emit_sig_cookie (cfg, call, cinfo); } switch (ainfo->regtype) { case RegTypeGeneral : add_outarg_reg2 (cfg, call, ainfo->regtype, ainfo->reg, in); break; case RegTypeFP : case RegTypeFPR4 : if (MONO_TYPE_ISSTRUCT (t)) { /* Valuetype passed in one fp register */ ainfo->regtype = RegTypeStructByValInFP; /* Fall through */ } else { add_outarg_reg2 (cfg, call, ainfo->regtype, ainfo->reg, in); break; } case RegTypeStructByVal : case RegTypeStructByAddr : { g_assert (in->klass); MONO_INST_NEW (cfg, ins, OP_OUTARG_VT); ins->sreg1 = in->dreg; ins->klass = in->klass; ins->backend.size = ainfo->size; ins->inst_p0 = call; ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, ins); break; } case RegTypeBase : if (!m_type_is_byref (t) && t->type == MONO_TYPE_R4) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, STK_BASE, ainfo->offset + 4, in->dreg); } else if (!m_type_is_byref (t) && (t->type == MONO_TYPE_R8)) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, STK_BASE, ainfo->offset, in->dreg); } else { MONO_INST_NEW (cfg, ins, OP_STORE_MEMBASE_REG); ins->inst_destbasereg = STK_BASE; ins->inst_offset = ainfo->offset; ins->sreg1 = in->dreg; MONO_ADD_INS (cfg->cbb, ins); } break; default: g_assert_not_reached (); break; } } /* * Handle the case where there are no implicit arguments */ if ((sig->call_convention == MONO_CALL_VARARG) && 
(!sig->pinvoke) && (i == sig->sentinelpos)) { emit_sig_cookie (cfg, call, cinfo); } } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific Value Type parameter processing * * @param[in] @cfg - Compile control block * @param[in] @call - Call Instruction * @param[in] @src - Source parameter * * Process value type parameters for a call operation */ void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src) { MonoCallInst *call = (MonoCallInst*) ins->inst_p0; ArgInfo *ainfo = (ArgInfo *) ins->inst_p1; if (ainfo->regtype == RegTypeStructByVal) { if (ainfo->reg != STK_BASE) { emit_outarg_vtr (cfg, ins, src); } else { emit_outarg_vts (cfg, ins, src); } } else if (ainfo->regtype == RegTypeStructByValInFP) { int dreg = mono_alloc_freg (cfg); if (ainfo->size == 4) { MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, dreg, src->dreg, 0); MONO_EMIT_NEW_UNALU (cfg, OP_S390_SETF4RET, dreg, dreg); } else { g_assert (ainfo->size == 8); MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, dreg, src->dreg, 0); } mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE); } else { ERROR_DECL (error); MonoMethodHeader *header; MonoInst *vtcopy = mono_compile_create_var (cfg, m_class_get_byval_arg (src->klass), OP_LOCAL); MonoInst *load; int ovf_size = ainfo->vtsize, srcReg; guint32 size; /* FIXME: alignment? */ if (call->signature->pinvoke && !call->signature->marshalling_disabled) { size = mono_type_native_stack_size (m_class_get_byval_arg (src->klass), NULL); vtcopy->backend.is_pinvoke = 1; } else { size = mini_type_stack_size (m_class_get_byval_arg (src->klass), NULL); } if (size > 0) g_assert (ovf_size > 0); header = mono_method_get_header_checked (cfg->method, error); mono_error_assert_ok (error); /* FIXME don't swallow the error */ if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses) srcReg = s390_r11; else srcReg = STK_BASE; EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype); mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, TARGET_SIZEOF_VOID_P); if (ainfo->reg == STK_BASE) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, srcReg, ainfo->offset, load->dreg); if (cfg->compute_gc_maps) { MonoInst *def; EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, m_class_get_byval_arg (ins->klass)); } } else mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg, FALSE); } } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific call value return processing * * @param[in] @cfg - Compile control block * @param[in] @method - Method * @param[in] @val - Instruction representing the result returned to method * * Create the sequence to unload the value returned from a call */ void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val) { MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret); if (!m_type_is_byref (ret)) { if (ret->type == MONO_TYPE_R4) { MONO_EMIT_NEW_UNALU (cfg, OP_S390_SETF4RET, s390_f0, val->dreg); return; } else if (ret->type == MONO_TYPE_R8) { MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, s390_f0, val->dreg); return; } } MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg); } /*========================= End of Function ========================*/ /** * * @brief Replace compound compare/branch operations with single operation * * @param[in] @bb - Basic block * @param[in] @ins - Current instruction * @param[in] @cc - Condition code of branch * @param[in] 
@logical - Whether comparison is signed or logical
 *
 * Perform a peephole pass over the code, looking for simple optimizations
 * that combine compare/branch instructions into a single operation.
 */

static void
compare_and_branch(MonoBasicBlock *bb, MonoInst *ins, int cc, gboolean logical)
{
	MonoInst *last;

	if (mono_hwcap_s390x_has_gie) {
		last = mono_inst_prev (ins, FILTER_IL_SEQ_POINT);
		ins->sreg1 = last->sreg1;
		ins->sreg2 = last->sreg2;
		ins->sreg3 = cc;
		switch(last->opcode) {
		case OP_ICOMPARE:
			if (logical)
				ins->opcode = OP_S390_CLRJ;
			else
				ins->opcode = OP_S390_CRJ;
			MONO_DELETE_INS(bb, last);
			break;
		case OP_COMPARE:
		case OP_LCOMPARE:
			if (logical)
				ins->opcode = OP_S390_CLGRJ;
			else
				ins->opcode = OP_S390_CGRJ;
			MONO_DELETE_INS(bb, last);
			break;
		case OP_ICOMPARE_IMM:
			ins->backend.data = (gpointer) last->inst_imm;
			if (logical)
				ins->opcode = OP_S390_CLIJ;
			else
				ins->opcode = OP_S390_CIJ;
			MONO_DELETE_INS(bb, last);
			break;
		case OP_COMPARE_IMM:
		case OP_LCOMPARE_IMM:
			ins->backend.data = (gpointer) last->inst_imm;
			if (logical)
				ins->opcode = OP_S390_CLGIJ;
			else
				ins->opcode = OP_S390_CGIJ;
			MONO_DELETE_INS(bb, last);
			break;
		}
	}
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific peephole pass 1 processing
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @bb - Basic block
 *
 * Perform a peephole pass over the code, looking for compare-and-branch
 * optimizations.
 */

void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_IBEQ:
		case OP_LBEQ:
			compare_and_branch(bb, ins, S390_CC_EQ, FALSE);
			break;
		case OP_LBNE_UN:
		case OP_IBNE_UN:
			compare_and_branch(bb, ins, S390_CC_NE, TRUE);
			break;
		case OP_LBLT:
		case OP_IBLT:
			compare_and_branch(bb, ins, S390_CC_LT, FALSE);
			break;
		case OP_LBLT_UN:
		case OP_IBLT_UN:
			compare_and_branch(bb, ins, S390_CC_LT, TRUE);
			break;
		case OP_LBGT:
		case OP_IBGT:
			compare_and_branch(bb, ins, S390_CC_GT, FALSE);
			break;
		case OP_LBGT_UN:
		case OP_IBGT_UN:
			compare_and_branch(bb, ins, S390_CC_GT, TRUE);
			break;
		case OP_LBGE:
		case OP_IBGE:
			compare_and_branch(bb, ins, S390_CC_GE, FALSE);
			break;
		case OP_LBGE_UN:
		case OP_IBGE_UN:
			compare_and_branch(bb, ins, S390_CC_GE, TRUE);
			break;
		case OP_LBLE:
		case OP_IBLE:
			compare_and_branch(bb, ins, S390_CC_LE, FALSE);
			break;
		case OP_LBLE_UN:
		case OP_IBLE_UN:
			compare_and_branch(bb, ins, S390_CC_LE, TRUE);
			break;
//		default:
//			mono_peephole_ins (bb, ins);
		}
	}
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific peephole pass 2 processing
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @bb - Basic block
 *
 * Perform a peephole pass over the code, looking for simple optimizations.
 */

void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n, *last_ins = NULL;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_LOADU4_MEMBASE:
		case OP_LOADI4_MEMBASE:
			if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				ins->opcode = (ins->opcode == OP_LOADI4_MEMBASE) ?
				    OP_ICONV_TO_I4 : OP_ICONV_TO_U4;
				ins->sreg1 = last_ins->sreg1;
			}
			break;
		}
		mono_peephole_ins (bb, ins);
	}
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific lowering pass processing
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @bb - Basic block
 *
 * Perform a lowering pass over the code, looking for simple optimizations.
 */

void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *next;

	MONO_BB_FOR_EACH_INS_SAFE (bb, next, ins) {
		switch (ins->opcode) {
		case OP_DIV_IMM:
		case OP_REM_IMM:
		case OP_IDIV_IMM:
		case OP_IREM_IMM:
		case OP_IDIV_UN_IMM:
		case OP_IREM_UN_IMM:
		case OP_LAND_IMM:
		case OP_LOR_IMM:
		case OP_LREM_IMM:
		case OP_LXOR_IMM:
		case OP_LOCALLOC_IMM:
			mono_decompose_op_imm (cfg, bb, ins);
			break;
		case OP_LADD_IMM:
			if (!s390_is_imm16 (ins->inst_imm))
				/* This is created by the memcpy code which ignores is_inst_imm */
				mono_decompose_op_imm (cfg, bb, ins);
			break;
		default:
			break;
		}
	}

	bb->max_vreg = cfg->next_vreg;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Emit float-to-int sequence
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @code - Current instruction area
 * @param[in] @dreg - Destination general register
 * @param[in] @sreg - Source floating point register
 * @param[in] @size - Size of destination
 * @param[in] @is_signed - Destination is signed/unsigned
 * @returns Next instruction location
 *
 * Emit instructions to convert a single precision floating point value to an integer
 */

static guchar *
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
	/* sreg is a float, dreg is an integer reg. */
	if (is_signed) {
		s390_cgebr (code, dreg, 5, sreg);
		switch (size) {
		case 1:
			/* if the result is negative, force the narrow sign bit, then mask to 8 bits */
			s390_ltgr (code, dreg, dreg);
			s390_jnl  (code, 4);
			s390_oill (code, dreg, 0x80);
			s390_lghi (code, s390_r0, 0xff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		case 2:
			s390_ltgr (code, dreg, dreg);
			s390_jnl  (code, 4);
			s390_oill (code, dreg, 0x8000);
			s390_llill(code, s390_r0, 0xffff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		}
	} else {
		short *o[1];
		/* stash f14/f15 in r14/r13 so they can be used as scratch FP regs */
		s390_lgdr (code, s390_r14, s390_f14);
		s390_lgdr (code, s390_r13, s390_f15);
		S390_SET  (code, s390_r0, 0x4f000000u);		/* bit pattern of 2^31 as a float */
		s390_ldgr (code, s390_f14, s390_r0);
		s390_ler  (code, s390_f15, sreg);
		s390_cebr (code, s390_f15, s390_f14);
		s390_jl   (code, 0); CODEPTR (code, o[0]);
		/* value >= 2^31: bias by 2^32 so the signed convert yields the unsigned bits */
		S390_SET  (code, s390_r0, 0x4f800000u);		/* bit pattern of 2^32 as a float */
		s390_ldgr (code, s390_f14, s390_r0);
		s390_sebr (code, s390_f15, s390_f14);
		s390_cfebr(code, dreg, 7, s390_f15);
		s390_j    (code, 4); PTRSLOT (code, o[0]);
		s390_cfebr(code, dreg, 5, sreg);
		switch (size) {
		case 1:
			s390_lghi (code, s390_r0, 0xff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		case 2:
			s390_llill(code, s390_r0, 0xffff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		}
		s390_ldgr (code, s390_f14, s390_r14);
		s390_ldgr (code, s390_f15, s390_r13);
	}
	return code;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Emit double-to-int sequence
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @code - Current instruction area
 * @param[in] @dreg - Destination general register
 * @param[in] @sreg - Source floating point register
 * @param[in] @size - Size of destination
 * @param[in] @is_signed - Destination is signed/unsigned
 * @returns Next instruction location
 *
 * Emit instructions to convert a double precision floating point value to an integer
 */

static guchar*
emit_double_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
	/* sreg is a
	   double, dreg is an integer reg. */
	if (is_signed) {
		s390_cgdbr (code, dreg, 5, sreg);
		switch (size) {
		case 1:
			/* if the result is negative, force the narrow sign bit, then mask to 8 bits */
			s390_ltgr (code, dreg, dreg);
			s390_jnl  (code, 4);
			s390_oill (code, dreg, 0x80);
			s390_lghi (code, s390_r0, 0xff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		case 2:
			s390_ltgr (code, dreg, dreg);
			s390_jnl  (code, 4);
			s390_oill (code, dreg, 0x8000);
			s390_llill(code, s390_r0, 0xffff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		}
	} else {
		short *o[1];
		/* stash f14/f15 in r14/r13 so they can be used as scratch FP regs */
		s390_lgdr (code, s390_r14, s390_f14);
		s390_lgdr (code, s390_r13, s390_f15);
		S390_SET  (code, s390_r0, 0x41e0000000000000llu);	/* bit pattern of 2^31 as a double */
		s390_ldgr (code, s390_f14, s390_r0);
		s390_ldr  (code, s390_f15, sreg);
		s390_cdbr (code, s390_f15, s390_f14);
		s390_jl   (code, 0); CODEPTR (code, o[0]);
		/* value >= 2^31: bias by 2^32 so the signed convert yields the unsigned bits */
		S390_SET  (code, s390_r0, 0x41f0000000000000llu);	/* bit pattern of 2^32 as a double */
		s390_ldgr (code, s390_f14, s390_r0);
		s390_sdbr (code, s390_f15, s390_f14);
		s390_cfdbr(code, dreg, 7, s390_f15);
		s390_j    (code, 4); PTRSLOT (code, o[0]);
		s390_cfdbr(code, dreg, 5, sreg);
		switch (size) {
		case 1:
			s390_lghi (code, s390_r0, 0xff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		case 2:
			s390_llill(code, s390_r0, 0xffff);
			s390_ngr  (code, dreg, s390_r0);
			break;
		}
		s390_ldgr (code, s390_f14, s390_r14);
		s390_ldgr (code, s390_f15, s390_r13);
	}
	return code;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Check if branch is for unsigned comparison
 *
 * @param[in] @next - Next instruction
 * @returns True if the branch is for an unsigned comparison
 *
 * Determine if the next instruction is a branch for an unsigned comparison
 */

static gboolean
is_unsigned (MonoInst *next)
{
	if ((next) &&
	    (((next->opcode >= OP_IBNE_UN) && (next->opcode <= OP_IBLT_UN)) ||
	     ((next->opcode >= OP_LBNE_UN) && (next->opcode <= OP_LBLT_UN)) ||
	     ((next->opcode >= OP_COND_EXC_NE_UN) && (next->opcode <= OP_COND_EXC_LT_UN)) ||
	     ((next->opcode >= OP_COND_EXC_INE_UN) && (next->opcode <= OP_COND_EXC_ILT_UN)) ||
	     ((next->opcode == OP_CLT_UN) || (next->opcode == OP_CGT_UN) ||
	      (next->opcode == OP_ICGE_UN) || (next->opcode == OP_ICLE_UN)) ||
	     ((next->opcode == OP_ICLT_UN) || (next->opcode == OP_ICGT_UN) ||
	      (next->opcode == OP_LCLT_UN) || (next->opcode == OP_LCGT_UN))))
		return TRUE;
	else
		return FALSE;
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific processing of a basic block
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @bb - Basic block
 *
 * Process the instructions within a basic block, emitting s390x instructions
 * based on the VM operation codes
 */

void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins;
	MonoCallInst *call;
	guint8 *code = cfg->native_code + cfg->code_len;
	int src2;

	/* we don't align basic blocks of loops on s390 */

	if (cfg->verbose_level > 2)
		g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);

	MONO_BB_FOR_EACH_INS (bb, ins) {
		const guint offset = code - cfg->native_code;
		set_code_cursor (cfg, code);
		int max_len = ins_get_size (ins->opcode);
		code = realloc_code (cfg, max_len);

		mono_debug_record_line_number (cfg, ins, offset);

		switch (ins->opcode) {
		case OP_STOREI1_MEMBASE_IMM: {
			s390_lghi (code, s390_r0, ins->inst_imm);
			S390_LONG (code, stcy, stc, s390_r0, 0,
				   ins->inst_destbasereg, ins->inst_offset);
		}
			break;
		case OP_STOREI2_MEMBASE_IMM: {
			s390_lghi (code, s390_r0, ins->inst_imm);
			S390_LONG (code, sthy, sth, s390_r0, 0,
				   ins->inst_destbasereg, ins->inst_offset);
		}
			break;
		case OP_STOREI4_MEMBASE_IMM: {
			s390_lgfi (code, s390_r0, ins->inst_imm);
			S390_LONG (code, sty, st, s390_r0, 0,
ins->inst_destbasereg, ins->inst_offset); } break; case OP_STORE_MEMBASE_IMM: case OP_STOREI8_MEMBASE_IMM: { S390_SET (code, s390_r0, ins->inst_imm); S390_LONG (code, stg, stg, s390_r0, 0, ins->inst_destbasereg, ins->inst_offset); } break; case OP_STOREI1_MEMBASE_REG: { S390_LONG (code, stcy, stc, ins->sreg1, 0, ins->inst_destbasereg, ins->inst_offset); } break; case OP_STOREI2_MEMBASE_REG: { S390_LONG (code, sthy, sth, ins->sreg1, 0, ins->inst_destbasereg, ins->inst_offset); } break; case OP_STOREI4_MEMBASE_REG: { S390_LONG (code, sty, st, ins->sreg1, 0, ins->inst_destbasereg, ins->inst_offset); } break; case OP_STORE_MEMBASE_REG: case OP_STOREI8_MEMBASE_REG: { S390_LONG (code, stg, stg, ins->sreg1, 0, ins->inst_destbasereg, ins->inst_offset); } break; case OP_LOADU4_MEM: g_assert_not_reached (); break; case OP_LOAD_MEMBASE: case OP_LOADI8_MEMBASE: { S390_LONG (code, lg, lg, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_LOADI4_MEMBASE: { S390_LONG (code, lgf, lgf, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_LOADU4_MEMBASE: { S390_LONG (code, llgf, llgf, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_LOADU1_MEMBASE: { S390_LONG (code, llgc, llgc, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_LOADI1_MEMBASE: { S390_LONG (code, lgb, lgb, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_LOADU2_MEMBASE: { S390_LONG (code, llgh, llgh, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_LOADI2_MEMBASE: { S390_LONG (code, lgh, lgh, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_LCONV_TO_I1: { s390_lgbr (code, ins->dreg, ins->sreg1); } break; case OP_LCONV_TO_I2: { s390_lghr (code, ins->dreg, ins->sreg1); } break; case OP_LCONV_TO_U1: { s390_llgcr (code, ins->dreg, ins->sreg1); } break; case OP_LCONV_TO_U2: { s390_llghr (code, ins->dreg, ins->sreg1); } break; case OP_ICONV_TO_I1: { s390_lgbr (code, ins->dreg, ins->sreg1); } break; case OP_ICONV_TO_I2: { s390_lghr (code, ins->dreg, ins->sreg1); } break; case OP_ICONV_TO_U1: { s390_llgcr (code, ins->dreg, ins->sreg1); } break; case OP_ICONV_TO_U2: { s390_llghr (code, ins->dreg, ins->sreg1); } break; case OP_ICONV_TO_U4: { s390_llgfr (code, ins->dreg, ins->sreg1); } break; case OP_ICONV_TO_I4: { s390_lgfr (code, ins->dreg, ins->sreg1); } break; case OP_COMPARE: case OP_LCOMPARE: { if (is_unsigned (ins->next)) s390_clgr (code, ins->sreg1, ins->sreg2); else s390_cgr (code, ins->sreg1, ins->sreg2); } break; case OP_ICOMPARE: { if (is_unsigned (ins->next)) s390_clr (code, ins->sreg1, ins->sreg2); else s390_cr (code, ins->sreg1, ins->sreg2); } break; case OP_COMPARE_IMM: case OP_LCOMPARE_IMM: { gboolean branchUn = is_unsigned (ins->next); if ((ins->inst_imm == 0) && (!branchUn)) { s390_ltgr (code, ins->sreg1, ins->sreg1); } else { S390_SET (code, s390_r0, ins->inst_imm); if (branchUn) s390_clgr (code, ins->sreg1, s390_r0); else s390_cgr (code, ins->sreg1, s390_r0); } } break; case OP_ICOMPARE_IMM: { gboolean branchUn = is_unsigned (ins->next); if ((ins->inst_imm == 0) && (!branchUn)) { s390_ltr (code, ins->sreg1, ins->sreg1); } else { S390_SET (code, s390_r0, ins->inst_imm); if (branchUn) s390_clr (code, ins->sreg1, s390_r0); else s390_cr (code, ins->sreg1, s390_r0); } } break; case OP_BREAK: { code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break)); } break; case OP_ADDCC: { if (mono_hwcap_s390x_has_mlt) { s390_agrk (code, ins->dreg, ins->sreg1, 
ins->sreg2); } else { CHECK_SRCDST_COM; s390_agr (code, ins->dreg, src2); } } break; case OP_LADD: { if (mono_hwcap_s390x_has_mlt) { s390_agrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_COM; s390_agr (code, ins->dreg, src2); } } break; case OP_ADC: { CHECK_SRCDST_COM; s390_alcgr (code, ins->dreg, src2); } break; case OP_ADD_IMM: { if (mono_hwcap_s390x_has_mlt) { if (s390_is_imm16 (ins->inst_imm)) { s390_aghik(code, ins->dreg, ins->sreg1, ins->inst_imm); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_agrk (code, ins->dreg, ins->sreg1, s390_r0); } } else { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } if (s390_is_imm16 (ins->inst_imm)) { s390_aghi (code, ins->dreg, ins->inst_imm); } else if (s390_is_imm32 (ins->inst_imm)) { s390_agfi (code, ins->dreg, ins->inst_imm); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_agr (code, ins->dreg, s390_r0); } } } break; case OP_LADD_IMM: { if (mono_hwcap_s390x_has_mlt) { if (s390_is_imm16 (ins->inst_imm)) { s390_aghik(code, ins->dreg, ins->sreg1, ins->inst_imm); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_agrk (code, ins->dreg, ins->sreg1, s390_r0); } } else { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } if (s390_is_imm32 (ins->inst_imm)) { s390_agfi (code, ins->dreg, ins->inst_imm); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_agr (code, ins->dreg, s390_r0); } } } break; case OP_ADC_IMM: { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } if (s390_is_imm16 (ins->inst_imm)) { s390_lghi (code, s390_r0, ins->inst_imm); s390_alcgr (code, ins->dreg, s390_r0); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_alcgr (code, ins->dreg, s390_r0); } } break; case OP_IADD_OVF: case OP_S390_IADD_OVF: { CHECK_SRCDST_COM; s390_ar (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); s390_lgfr (code, ins->dreg, ins->dreg); } break; case OP_IADD_OVF_UN: case OP_S390_IADD_OVF_UN: { CHECK_SRCDST_COM; s390_alr (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException"); s390_llgfr (code, ins->dreg, ins->dreg); } break; case OP_ADD_OVF_CARRY: { CHECK_SRCDST_COM; s390_lghi (code, s390_r0, 0); s390_lgr (code, s390_r1, s390_r0); s390_alcgr (code, s390_r0, s390_r1); s390_agr (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); s390_agr (code, ins->dreg, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); } break; case OP_ADD_OVF_UN_CARRY: { CHECK_SRCDST_COM; s390_alcgr (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException"); } break; case OP_SUBCC: { if (mono_hwcap_s390x_has_mlt) { s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_NCOM; s390_sgr (code, ins->dreg, src2); } } break; case OP_LSUB: { if (mono_hwcap_s390x_has_mlt) { s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_NCOM; s390_sgr (code, ins->dreg, src2); } } break; case OP_SBB: { CHECK_SRCDST_NCOM; s390_slbgr(code, ins->dreg, src2); } break; case OP_SUB_IMM: { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } if (s390_is_imm16 (-ins->inst_imm)) { s390_aghi (code, ins->dreg, -ins->inst_imm); } else if (s390_is_imm32 (-ins->inst_imm)) { s390_slgfi (code, ins->dreg, ins->inst_imm); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_slgr (code, ins->dreg, s390_r0); } } break; case OP_LSUB_IMM: { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); 
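				/* the minuend is now in dreg; the immediate forms below subtract in place */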
} if (s390_is_imm16 (-ins->inst_imm)) { s390_aghi (code, ins->dreg, -ins->inst_imm); } else if (s390_is_imm32 (-ins->inst_imm)) { s390_slgfi (code, ins->dreg, ins->inst_imm); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_slgr (code, ins->dreg, s390_r0); } } break; case OP_SBB_IMM: { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } if (s390_is_imm16 (-ins->inst_imm)) { s390_lghi (code, s390_r0, ins->inst_imm); s390_slbgr (code, ins->dreg, s390_r0); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_slbgr(code, ins->dreg, s390_r0); } } break; case OP_SUB_OVF_CARRY: { CHECK_SRCDST_NCOM; s390_lghi (code, s390_r0, 0); s390_lgr (code, s390_r1, s390_r0); s390_slbgr (code, s390_r0, s390_r1); s390_sgr (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); s390_agr (code, ins->dreg, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); } break; case OP_SUB_OVF_UN_CARRY: { CHECK_SRCDST_NCOM; s390_slbgr (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, "OverflowException"); } break; case OP_LAND: { if (mono_hwcap_s390x_has_mlt) { s390_ngrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { if (ins->sreg1 == ins->dreg) { s390_ngr (code, ins->dreg, ins->sreg2); } else { if (ins->sreg2 == ins->dreg) { s390_ngr (code, ins->dreg, ins->sreg1); } else { s390_lgr (code, ins->dreg, ins->sreg1); s390_ngr (code, ins->dreg, ins->sreg2); } } } } break; case OP_AND_IMM: { S390_SET_MASK (code, s390_r0, ins->inst_imm); if (mono_hwcap_s390x_has_mlt) { s390_ngrk (code, ins->dreg, ins->sreg1, s390_r0); } else { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } s390_ngr (code, ins->dreg, s390_r0); } } break; case OP_LDIV: { s390_lgr (code, s390_r1, ins->sreg1); s390_dsgr (code, s390_r0, ins->sreg2); s390_lgr (code, ins->dreg, s390_r1); } break; case OP_LDIV_UN: { s390_lgr (code, s390_r1, ins->sreg1); s390_lghi (code, s390_r0, 0); s390_dlgr (code, s390_r0, ins->sreg2); s390_lgr (code, ins->dreg, s390_r1); } break; case OP_LREM: { s390_lgr (code, s390_r1, ins->sreg1); s390_dsgr (code, s390_r0, ins->sreg2); s390_lgr (code, ins->dreg, s390_r0); break; } case OP_LREM_IMM: { if (s390_is_imm16 (ins->inst_imm)) { s390_lghi (code, s390_r13, ins->inst_imm); } else { s390_lgfi (code, s390_r13, ins->inst_imm); } s390_lgr (code, s390_r0, ins->sreg1); s390_dsgr (code, s390_r0, s390_r13); s390_lgfr (code, ins->dreg, s390_r0); } break; case OP_LREM_UN: { s390_lgr (code, s390_r1, ins->sreg1); s390_lghi (code, s390_r0, 0); s390_dlgr (code, s390_r0, ins->sreg2); s390_lgr (code, ins->dreg, s390_r0); } break; case OP_LOR: { if (mono_hwcap_s390x_has_mlt) { s390_ogrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { if (ins->sreg1 == ins->dreg) { s390_ogr (code, ins->dreg, ins->sreg2); } else { if (ins->sreg2 == ins->dreg) { s390_ogr (code, ins->dreg, ins->sreg1); } else { s390_lgr (code, ins->dreg, ins->sreg1); s390_ogr (code, ins->dreg, ins->sreg2); } } } } break; case OP_OR_IMM: { S390_SET_MASK(code, s390_r0, ins->inst_imm); if (mono_hwcap_s390x_has_mlt) { s390_ogrk (code, ins->dreg, ins->sreg1, s390_r0); } else { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } s390_ogr (code, ins->dreg, s390_r0); } } break; case OP_LXOR: { if (mono_hwcap_s390x_has_mlt) { s390_xgrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { if (ins->sreg1 == ins->dreg) { s390_xgr (code, ins->dreg, ins->sreg2); } else { if (ins->sreg2 == ins->dreg) { s390_xgr (code, ins->dreg, ins->sreg1); } else { s390_lgr (code, 
ins->dreg, ins->sreg1); s390_xgr (code, ins->dreg, ins->sreg2); } } } } break; case OP_XOR_IMM: { S390_SET_MASK(code, s390_r0, ins->inst_imm); if (mono_hwcap_s390x_has_mlt) { s390_xgrk (code, ins->dreg, ins->sreg1, s390_r0); } else { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } s390_xgr (code, ins->dreg, s390_r0); } } break; case OP_LSHL: { CHECK_SRCDST_NCOM; s390_sllg (code, ins->dreg, ins->dreg, src2, 0); } break; case OP_SHL_IMM: case OP_LSHL_IMM: { if (ins->sreg1 != ins->dreg) { s390_lgr (code, ins->dreg, ins->sreg1); } s390_sllg (code, ins->dreg, ins->dreg, 0, (ins->inst_imm & 0x3f)); } break; case OP_LSHR: { CHECK_SRCDST_NCOM; s390_srag (code, ins->dreg, ins->dreg, src2, 0); } break; case OP_SHR_IMM: case OP_LSHR_IMM: { if (ins->sreg1 != ins->dreg) { s390_lgr (code, ins->dreg, ins->sreg1); } s390_srag (code, ins->dreg, ins->dreg, 0, (ins->inst_imm & 0x3f)); } break; case OP_SHR_UN_IMM: case OP_LSHR_UN_IMM: { if (ins->sreg1 != ins->dreg) { s390_lgr (code, ins->dreg, ins->sreg1); } s390_srlg (code, ins->dreg, ins->dreg, 0, (ins->inst_imm & 0x3f)); } break; case OP_LSHR_UN: { CHECK_SRCDST_NCOM; s390_srlg (code, ins->dreg, ins->dreg, src2, 0); } break; case OP_LNOT: { if (ins->sreg1 != ins->dreg) { s390_lgr (code, ins->dreg, ins->sreg1); } s390_lghi (code, s390_r0, -1); s390_xgr (code, ins->dreg, s390_r0); } break; case OP_LNEG: { s390_lcgr (code, ins->dreg, ins->sreg1); } break; case OP_LMUL: { CHECK_SRCDST_COM; s390_msgr (code, ins->dreg, src2); } break; case OP_MUL_IMM: case OP_LMUL_IMM: { if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } if ((mono_hwcap_s390x_has_gie) && (s390_is_imm32 (ins->inst_imm))) { s390_msgfi (code, ins->dreg, ins->inst_imm); } else { if (s390_is_imm16 (ins->inst_imm)) { s390_lghi (code, s390_r13, ins->inst_imm); } else if (s390_is_imm32 (ins->inst_imm)) { s390_lgfi (code, s390_r13, ins->inst_imm); } else { S390_SET (code, s390_r13, ins->inst_imm); } s390_msgr (code, ins->dreg, s390_r13); } } break; case OP_LMUL_OVF: { short int *o[2]; if (mono_hwcap_s390x_has_mie2) { s390_msgrkc (code, ins->dreg, ins->sreg1, ins->sreg2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); } else { s390_ltgr (code, s390_r1, ins->sreg1); s390_jz (code, 0); CODEPTR(code, o[0]); s390_ltgr (code, s390_r0, ins->sreg2); s390_jnz (code, 6); s390_lghi (code, s390_r1, 0); s390_j (code, 0); CODEPTR(code, o[1]); s390_xgr (code, s390_r0, s390_r1); s390_msgr (code, s390_r1, ins->sreg2); s390_xgr (code, s390_r0, s390_r1); s390_srlg (code, s390_r0, s390_r0, 0, 63); s390_ltgr (code, s390_r0, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException"); PTRSLOT (code, o[0]); PTRSLOT (code, o[1]); s390_lgr (code, ins->dreg, s390_r1); } } break; case OP_LMUL_OVF_UN: { s390_lghi (code, s390_r0, 0); s390_lgr (code, s390_r1, ins->sreg1); s390_mlgr (code, s390_r0, ins->sreg2); s390_ltgr (code, s390_r0, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException"); s390_lgr (code, ins->dreg, s390_r1); } break; case OP_IADDCC: { g_assert_not_reached (); CHECK_SRCDST_COM_I; s390_algr (code, ins->dreg, src2); } break; case OP_IADD: { CHECK_SRCDST_COM_I; s390_agr (code, ins->dreg, src2); } break; case OP_IADC: { g_assert_not_reached (); CHECK_SRCDST_COM_I; s390_alcgr (code, ins->dreg, src2); } break; case OP_IADD_IMM: { if (ins->dreg != ins->sreg1) { s390_lgfr (code, ins->dreg, ins->sreg1); } if (s390_is_imm16 (ins->inst_imm)) { s390_aghi (code, ins->dreg, ins->inst_imm); } else { s390_afi (code, ins->dreg, ins->inst_imm); } 
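			/* aghi takes a 16-bit signed immediate; afi covers the full 32-bit range */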
} break; case OP_IADC_IMM: { if (ins->dreg != ins->sreg1) { s390_lgfr (code, ins->dreg, ins->sreg1); } if (s390_is_imm16 (ins->inst_imm)) { s390_lghi (code, s390_r0, ins->inst_imm); s390_alcgr (code, ins->dreg, s390_r0); } else { S390_SET (code, s390_r0, ins->inst_imm); s390_alcgr (code, ins->dreg, s390_r0); } } break; case OP_LADD_OVF: case OP_S390_LADD_OVF: { if (mono_hwcap_s390x_has_mlt) { s390_agrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_COM; s390_agr (code, ins->dreg, src2); } EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); } break; case OP_LADD_OVF_UN: case OP_S390_LADD_OVF_UN: { if (mono_hwcap_s390x_has_mlt) { s390_algrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_COM; s390_algr (code, ins->dreg, src2); } EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException"); } break; case OP_ISUBCC: { if (mono_hwcap_s390x_has_mlt) { s390_slgrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_NCOM_I; s390_slgr (code, ins->dreg, src2); } } break; case OP_ISUB: { if (mono_hwcap_s390x_has_mlt) { s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_NCOM_I; s390_sgr (code, ins->dreg, src2); } } break; case OP_ISBB: { CHECK_SRCDST_NCOM_I; s390_slbgr (code, ins->dreg, src2); } break; case OP_ISUB_IMM: { if (ins->dreg != ins->sreg1) { s390_lgfr (code, ins->dreg, ins->sreg1); } if (s390_is_imm16 (-ins->inst_imm)) { s390_aghi (code, ins->dreg, -ins->inst_imm); } else { s390_agfi (code, ins->dreg, -ins->inst_imm); } } break; case OP_ISBB_IMM: { S390_SET (code, s390_r0, ins->inst_imm); s390_slgfr (code, ins->dreg, s390_r0); } break; case OP_ISUB_OVF: case OP_S390_ISUB_OVF: { if (mono_hwcap_s390x_has_mlt) { s390_srk (code, ins->dreg, ins->sreg1, ins->sreg2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); } else { CHECK_SRCDST_NCOM; s390_sr (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); s390_lgfr (code, ins->dreg, ins->dreg); } } break; case OP_ISUB_OVF_UN: case OP_S390_ISUB_OVF_UN: { if (mono_hwcap_s390x_has_mlt) { s390_slrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_NCOM; s390_slr (code, ins->dreg, src2); } EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, "OverflowException"); s390_llgfr(code, ins->dreg, ins->dreg); } break; case OP_LSUB_OVF: case OP_S390_LSUB_OVF: { if (mono_hwcap_s390x_has_mlt) { s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_NCOM; s390_sgr (code, ins->dreg, src2); } EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException"); } break; case OP_LSUB_OVF_UN: case OP_S390_LSUB_OVF_UN: { CHECK_SRCDST_NCOM; s390_slgr (code, ins->dreg, src2); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, "OverflowException"); } break; case OP_IAND: { if (mono_hwcap_s390x_has_mlt) { s390_ngrk (code, ins->dreg, ins->sreg1, ins->sreg2); } else { CHECK_SRCDST_NCOM_I; s390_ngr (code, ins->dreg, src2); } } break; case OP_IAND_IMM: { S390_SET_MASK (code, s390_r0, ins->inst_imm); if (mono_hwcap_s390x_has_mlt) { s390_ngrk (code, ins->dreg, ins->sreg1, s390_r0); } else { if (ins->dreg != ins->sreg1) { s390_lgfr (code, ins->dreg, ins->sreg1); } s390_ngr (code, ins->dreg, s390_r0); } } break; case OP_IDIV: { s390_lgfr (code, s390_r0, ins->sreg1); s390_srda (code, s390_r0, 0, 32); s390_dr (code, s390_r0, ins->sreg2); s390_lgfr (code, ins->dreg, s390_r1); } break; case OP_IDIV_UN: { s390_lgfr (code, s390_r0, ins->sreg1); s390_srdl (code, s390_r0, 0, 32); s390_dlr (code, s390_r0, ins->sreg2); s390_lgfr (code, ins->dreg, s390_r1); } 
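			/* dlr divides the 64-bit r0:r1 pair: quotient ends up in r1, remainder in r0 */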
break;
		case OP_IDIV_IMM: {
			if (s390_is_imm16 (ins->inst_imm)) {
				s390_lghi (code, s390_r13, ins->inst_imm);
			} else {
				s390_lgfi (code, s390_r13, ins->inst_imm);
			}
			/* dr divides the 64-bit r0:r1 pair: quotient ends up in r1, remainder in r0 */
			s390_lgfr (code, s390_r0, ins->sreg1);
			s390_srda (code, s390_r0, 0, 32);
			s390_dr   (code, s390_r0, s390_r13);
			s390_lgfr (code, ins->dreg, s390_r1);
		}
			break;
		case OP_IREM: {
			s390_lgfr (code, s390_r0, ins->sreg1);
			s390_srda (code, s390_r0, 0, 32);
			s390_dr   (code, s390_r0, ins->sreg2);
			s390_lgfr (code, ins->dreg, s390_r0);
		}
			break;
		case OP_IREM_UN: {
			s390_lgfr (code, s390_r0, ins->sreg1);
			s390_srdl (code, s390_r0, 0, 32);
			s390_dlr  (code, s390_r0, ins->sreg2);
			s390_lgfr (code, ins->dreg, s390_r0);
		}
			break;
		case OP_IREM_IMM: {
			if (s390_is_imm16 (ins->inst_imm)) {
				s390_lghi (code, s390_r13, ins->inst_imm);
			} else {
				s390_lgfi (code, s390_r13, ins->inst_imm);
			}
			s390_lgfr (code, s390_r0, ins->sreg1);
			s390_srda (code, s390_r0, 0, 32);
			s390_dr   (code, s390_r0, s390_r13);
			s390_lgfr (code, ins->dreg, s390_r0);
		}
			break;
		case OP_IOR: {
			if (mono_hwcap_s390x_has_mlt) {
				s390_ogrk (code, ins->dreg, ins->sreg1, ins->sreg2);
			} else {
				CHECK_SRCDST_COM_I;
				s390_ogr (code, ins->dreg, src2);
			}
		}
			break;
		case OP_IOR_IMM: {
			S390_SET_MASK (code, s390_r0, ins->inst_imm);
			if (mono_hwcap_s390x_has_mlt) {
				s390_ogrk (code, ins->dreg, ins->sreg1, s390_r0);
			} else {
				if (ins->dreg != ins->sreg1) {
					s390_lgfr (code, ins->dreg, ins->sreg1);
				}
				s390_ogr (code, ins->dreg, s390_r0);
			}
		}
			break;
		case OP_IXOR: {
			if (mono_hwcap_s390x_has_mlt) {
				s390_xgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
			} else {
				CHECK_SRCDST_COM_I;
				s390_xgr (code, ins->dreg, src2);
			}
		}
			break;
		case OP_IXOR_IMM: {
			S390_SET_MASK (code, s390_r0, ins->inst_imm);
			if (mono_hwcap_s390x_has_mlt) {
				s390_xgrk (code, ins->dreg, ins->sreg1, s390_r0);
			} else {
				if (ins->dreg != ins->sreg1) {
					s390_lgfr (code, ins->dreg, ins->sreg1);
				}
				s390_xgr (code, ins->dreg, s390_r0);
			}
		}
			break;
		case OP_ISHL: {
			CHECK_SRCDST_NCOM;
			s390_sll (code, ins->dreg, src2, 0);
		}
			break;
		case OP_ISHL_IMM: {
			if (ins->sreg1 != ins->dreg) {
				s390_lgfr (code, ins->dreg, ins->sreg1);
			}
			s390_sll (code, ins->dreg, 0, (ins->inst_imm & 0x1f));
		}
			break;
		case OP_ISHR: {
			CHECK_SRCDST_NCOM;
			s390_sra (code, ins->dreg, src2, 0);
		}
			break;
		case OP_ISHR_IMM: {
			if (ins->sreg1 != ins->dreg) {
				s390_lgfr (code, ins->dreg, ins->sreg1);
			}
			s390_sra (code, ins->dreg, 0, (ins->inst_imm & 0x1f));
		}
			break;
		case OP_ISHR_UN_IMM: {
			if (ins->sreg1 != ins->dreg) {
				s390_lgfr (code, ins->dreg, ins->sreg1);
			}
			s390_srl (code, ins->dreg, 0, (ins->inst_imm & 0x1f));
		}
			break;
		case OP_ISHR_UN: {
			CHECK_SRCDST_NCOM;
			s390_srl (code, ins->dreg, src2, 0);
		}
			break;
		case OP_INOT: {
			if (ins->sreg1 != ins->dreg) {
				s390_lgfr (code, ins->dreg, ins->sreg1);
			}
			s390_lghi (code, s390_r0, -1);
			s390_xgr  (code, ins->dreg, s390_r0);
		}
			break;
		case OP_INEG: {
			s390_lcgr (code, ins->dreg, ins->sreg1);
		}
			break;
		case OP_IMUL: {
			CHECK_SRCDST_COM_I;
			s390_msr (code, ins->dreg, src2);
		}
			break;
		case OP_IMUL_IMM: {
			if (ins->dreg != ins->sreg1) {
				s390_lgfr (code, ins->dreg, ins->sreg1);
			}
			if (s390_is_imm16 (ins->inst_imm)) {
				s390_lghi (code, s390_r0, ins->inst_imm);
			} else {
				s390_lgfi (code, s390_r0, ins->inst_imm);
			}
			s390_msr (code, ins->dreg, s390_r0);
		}
			break;
		case OP_IMUL_OVF: {
			short int *o[2];
			if (mono_hwcap_s390x_has_mie2) {
				s390_msrkc (code, ins->dreg, ins->sreg1, ins->sreg2);
				EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
				s390_lgfr (code, ins->dreg, ins->dreg);
			} else {
				s390_ltr (code, s390_r1, ins->sreg1);
				s390_jz (code, 0);
				CODEPTR(code, o[0]);
				s390_ltr (code, s390_r0, ins->sreg2);
				s390_jnz (code,
6); s390_lhi (code, s390_r1, 0); s390_j (code, 0); CODEPTR(code, o[1]); s390_xr (code, s390_r0, s390_r1); s390_msr (code, s390_r1, ins->sreg2); s390_xr (code, s390_r0, s390_r1); s390_srl (code, s390_r0, 0, 31); s390_ltr (code, s390_r0, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException"); PTRSLOT (code, o[0]); PTRSLOT (code, o[1]); s390_lgfr (code, ins->dreg, s390_r1); } } break; case OP_IMUL_OVF_UN: { s390_lhi (code, s390_r0, 0); s390_lr (code, s390_r1, ins->sreg1); s390_mlr (code, s390_r0, ins->sreg2); s390_ltr (code, s390_r0, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException"); s390_lgfr (code, ins->dreg, s390_r1); } break; case OP_ICONST: case OP_I8CONST: { S390_SET (code, ins->dreg, ins->inst_c0); } break; case OP_AOTCONST: { mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0); S390_LOAD_TEMPLATE (code, ins->dreg); } break; case OP_JUMP_TABLE: { mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0); S390_LOAD_TEMPLATE (code, ins->dreg); } break; case OP_MOVE: if (ins->dreg != ins->sreg1) { s390_lgr (code, ins->dreg, ins->sreg1); } break; case OP_LCONV_TO_I: case OP_LCONV_TO_I8: case OP_SEXT_I4: s390_lgfr (code, ins->dreg, ins->sreg1); break; case OP_LCONV_TO_I4: s390_lgfr (code, ins->dreg, ins->sreg1); break; case OP_LCONV_TO_U: case OP_LCONV_TO_U8: case OP_LCONV_TO_U4: case OP_ZEXT_I4: s390_llgfr (code, ins->dreg, ins->sreg1); break; case OP_LCONV_TO_OVF_U4: S390_SET (code, s390_r0, 4294967295); s390_clgr (code, ins->sreg1, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GT, "OverflowException"); s390_ltgr (code, ins->sreg1, ins->sreg1); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, "OverflowException"); s390_llgfr(code, ins->dreg, ins->sreg1); break; case OP_LCONV_TO_OVF_I4_UN: S390_SET (code, s390_r0, 2147483647); s390_cgr (code, ins->sreg1, s390_r0); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GT, "OverflowException"); s390_ltgr (code, ins->sreg1, ins->sreg1); EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, "OverflowException"); s390_lgfr (code, ins->dreg, ins->sreg1); break; case OP_RCONV_TO_R4: if (ins->dreg != ins->sreg1) s390_ler (code, ins->dreg, ins->sreg1); break; case OP_RCONV_TO_R8: s390_ldebr (code, ins->dreg, ins->sreg1); break; case OP_FMOVE: if (ins->dreg != ins->sreg1) s390_ldr (code, ins->dreg, ins->sreg1); break; case OP_RMOVE: if (ins->dreg != ins->sreg1) s390_ldr (code, ins->dreg, ins->sreg1); break; case OP_MOVE_F_TO_I8: s390_lgdr (code, ins->dreg, ins->sreg1); break; case OP_MOVE_I8_TO_F: s390_ldgr (code, ins->dreg, ins->sreg1); break; case OP_MOVE_F_TO_I4: if (!cfg->r4fp) { s390_ledbr (code, s390_f0, ins->sreg1); s390_lgdr (code, ins->dreg, s390_f0); } else { s390_lgdr (code, ins->dreg, ins->sreg1); } s390_srag (code, ins->dreg, ins->dreg, 0, 32); break; case OP_MOVE_I4_TO_F: s390_slag (code, s390_r0, ins->sreg1, 0, 32); s390_ldgr (code, ins->dreg, s390_r0); if (!cfg->r4fp) s390_ldebr (code, ins->dreg, ins->dreg); break; case OP_FCONV_TO_R4: s390_ledbr (code, ins->dreg, ins->sreg1); if (!cfg->r4fp) s390_ldebr (code, ins->dreg, ins->dreg); break; case OP_S390_SETF4RET: if (!cfg->r4fp) s390_ledbr (code, ins->dreg, ins->sreg1); else s390_ldr (code, ins->dreg, ins->sreg1); break; case OP_TLS_GET: { if (s390_is_imm16 (ins->inst_offset)) { s390_lghi (code, s390_r13, ins->inst_offset); } else if (s390_is_imm32 (ins->inst_offset)) { s390_lgfi (code, s390_r13, ins->inst_offset); } else { S390_SET (code, s390_r13, ins->inst_offset); } s390_ear (code, s390_r1, 0); 
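	/* ear reads an access register into the low word of a GPR: a0 holds the
	 * high half of the thread pointer and a1 the low half, so the first half
	 * is shifted up before a second ear fills in the low word. */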
s390_sllg(code, s390_r1, s390_r1, 0, 32); s390_ear (code, s390_r1, 1); s390_lg (code, ins->dreg, s390_r13, s390_r1, 0); } break; case OP_TLS_SET: { if (s390_is_imm16 (ins->inst_offset)) { s390_lghi (code, s390_r13, ins->inst_offset); } else if (s390_is_imm32 (ins->inst_offset)) { s390_lgfi (code, s390_r13, ins->inst_offset); } else { S390_SET (code, s390_r13, ins->inst_offset); } s390_ear (code, s390_r1, 0); s390_sllg(code, s390_r1, s390_r1, 0, 32); s390_ear (code, s390_r1, 1); s390_stg (code, ins->sreg1, s390_r13, s390_r1, 0); } break; case OP_TAILCALL_PARAMETER : // This opcode helps compute sizes, i.e. // of the subsequent OP_TAILCALL, but contributes no code. g_assert (ins->next); break; case OP_TAILCALL : case OP_TAILCALL_REG : case OP_TAILCALL_MEMBASE : { call = (MonoCallInst *) ins; /* * Restore SP to caller's SP */ code = backUpStackPtr(cfg, code); /* * If the destination is specified as a register or membase then * save destination so it doesn't get overwritten by the restores */ if (ins->opcode != OP_TAILCALL) s390_lgr (code, s390_r1, ins->sreg1); /* * We have to restore R6, so it cannot be used as argument register. * This is ensured by mono_arch_tailcall_supported, but verify here. */ g_assert (!(call->used_iregs & (1 << S390_LAST_ARG_REG))); /* * Likewise for the IMT/RGCTX register */ g_assert (!(call->used_iregs & (1 << MONO_ARCH_RGCTX_REG))); g_assert (!(call->rgctx_reg)); /* * Restore all general registers */ s390_lmg (code, s390_r6, s390_r14, STK_BASE, S390_REG_SAVE_OFFSET); /* * Restore any FP registers that have been altered */ if (cfg->arch.fpSize != 0) { int fpOffset = -cfg->arch.fpSize; for (int i = 8; i < 16; i++) { if (cfg->arch.used_fp_regs & (1 << i)) { s390_ldy (code, i, 0, STK_BASE, fpOffset); fpOffset += sizeof(double); } } } if (ins->opcode == OP_TAILCALL_REG) { s390_br (code, s390_r1); } else { if (ins->opcode == OP_TAILCALL_MEMBASE) { if (mono_hwcap_s390x_has_mie2) { s390_bi (code, 0, s390_r1, ins->inst_offset); } else { s390_lg (code, s390_r1, 0, s390_r1, ins->inst_offset); s390_br (code, s390_r1); } } else { mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method, MONO_R_S390_THUNKED); S390_BR_TEMPLATE (code, s390_r1); cfg->thunk_area += THUNK_SIZE; } } } break; case OP_CHECK_THIS: { /* ensure ins->sreg1 is not NULL */ s390_lg (code, s390_r0, 0, ins->sreg1, 0); s390_ltgr (code, s390_r0, s390_r0); } break; case OP_ARGLIST: { const int offset = cfg->sig_cookie + cfg->stack_usage; S390_SET (code, s390_r0, offset); s390_agr (code, s390_r0, cfg->frame_reg); s390_stg (code, s390_r0, 0, ins->sreg1, 0); } break; case OP_FCALL: { call = (MonoCallInst *) ins; const MonoJumpInfoTarget patch = mono_call_to_patch (call); code = emit_call (cfg, code, patch.type, patch.target); if (!cfg->r4fp && call->signature->ret->type == MONO_TYPE_R4) s390_ldebr (code, s390_f0, s390_f0); } break; case OP_RCALL: { call = (MonoCallInst *) ins; const MonoJumpInfoTarget patch = mono_call_to_patch (call); code = emit_call (cfg, code, patch.type, patch.target); if (ins->dreg != s390_f0) s390_ldr (code, ins->dreg, s390_f0); break; } case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: case OP_CALL: { call = (MonoCallInst *) ins; const MonoJumpInfoTarget patch = mono_call_to_patch (call); code = emit_call (cfg, code, patch.type, patch.target); } break; case OP_FCALL_REG: call = (MonoCallInst*)ins; s390_lgr (code, s390_r1, ins->sreg1); s390_basr (code, s390_r14, s390_r1); if (!cfg->r4fp && call->signature->ret->type == MONO_TYPE_R4) 
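		/* without r4fp, R4 values are kept as doubles internally,
		 * so widen the single-precision result returned in f0 */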
s390_ldebr (code, s390_f0, s390_f0); break; case OP_RCALL_REG: call = (MonoCallInst*)ins; s390_lgr (code, s390_r1, ins->sreg1); s390_basr (code, s390_r14, s390_r1); if (ins->dreg != s390_f0) s390_ldr (code, ins->dreg, s390_f0); break; case OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: { s390_lgr (code, s390_r1, ins->sreg1); s390_basr (code, s390_r14, s390_r1); } break; case OP_FCALL_MEMBASE: call = (MonoCallInst*)ins; s390_lg (code, s390_r1, 0, ins->sreg1, ins->inst_offset); s390_basr (code, s390_r14, s390_r1); if (!cfg->r4fp && call->signature->ret->type == MONO_TYPE_R4) s390_ldebr (code, s390_f0, s390_f0); break; case OP_RCALL_MEMBASE: call = (MonoCallInst*)ins; s390_lg (code, s390_r1, 0, ins->sreg1, ins->inst_offset); s390_basr (code, s390_r14, s390_r1); if (ins->dreg != s390_f0) s390_ldr (code, ins->dreg, s390_f0); break; case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: { s390_lg (code, s390_r1, 0, ins->sreg1, ins->inst_offset); s390_basr (code, s390_r14, s390_r1); } break; case OP_LOCALLOC: { int area_offset; if (cfg->param_area == 0) area_offset = S390_MINIMAL_STACK_SIZE; else area_offset = cfg->param_area; area_offset = S390_ALIGN(area_offset, S390_STACK_ALIGNMENT); /* Get current backchain pointer */ s390_lg (code, s390_r13, 0, STK_BASE, 0); /* * Round object size to doubleword */ s390_lgr (code, s390_r1, ins->sreg1); s390_aghi (code, s390_r1, 7); s390_srlg (code, s390_r1, s390_r1, 0, 3); s390_sllg (code, s390_r1, s390_r1, 0, 3); if (mono_hwcap_s390x_has_gie) { if (ins->flags & MONO_INST_INIT) s390_lgr (code, s390_r0, s390_r1); s390_risbg (code, ins->dreg, s390_r1, 0, 0xb3, 0); s390_sgrk (code, ins->dreg, STK_BASE, ins->dreg); s390_cgr (code, STK_BASE, ins->dreg); /* L0: */ s390_je (code, 9); /* je L1 */ s390_aghi (code, STK_BASE, -4096); s390_mvghi (code, s390_r15, 0, 0); s390_j (code, -9); /* j L0 */ s390_risbg (code, ins->dreg, s390_r1, 0x34, 0xbf, 0); /* L1: */ s390_ltgr (code, ins->dreg, ins->dreg); s390_jz (code, 13); /* jz L2: */ s390_sgr (code, STK_BASE, ins->dreg); s390_risbg (code, s390_r1, s390_r1, 0x34, 0xbf, 0); s390_lay (code, s390_r1, s390_r1, STK_BASE, -8); s390_mvghi (code, s390_r1, 0, 0); /* L2: */ } else { s390_lgr (code, ins->dreg, s390_r1); s390_nill (code, ins->dreg, 0xf000); s390_lgr (code, s390_r0, STK_BASE); s390_sgr (code, s390_r0, ins->dreg); s390_lgr (code, ins->dreg, s390_r0); s390_cgr (code, STK_BASE, ins->dreg); /* L0: */ s390_je (code, 11); /* je L1 */ s390_aghi (code, STK_BASE, -4096); s390_lghi (code, s390_r0, 0); s390_stg (code, s390_r0, 0, STK_BASE, 4088); s390_j (code, -11); /* j L0 */ s390_lghi (code, ins->dreg, 4095); /* L1: */ s390_ngr (code, ins->dreg, s390_r1); s390_ltgr (code, ins->dreg, ins->dreg); s390_jz (code, 7); /* jz L2 */ s390_sgr (code, STK_BASE, ins->dreg); s390_stg (code, ins->dreg, s390_r1, STK_BASE, -8); /* L2: */ if (ins->flags & MONO_INST_INIT) s390_lgr (code, s390_r0, s390_r1); } /* * Compute address of localloc'd object */ s390_lgr (code, s390_r1, STK_BASE); if (s390_is_imm16(area_offset)) s390_aghi (code, s390_r1, area_offset); else s390_agfi (code, s390_r1, area_offset); s390_aghi (code, s390_r1, 7); s390_srlg (code, s390_r1, s390_r1, 0, 3); s390_sllg (code, s390_r1, s390_r1, 0, 3); s390_lgr (code, ins->dreg, s390_r1); /* Save backchain pointer */ s390_stg (code, s390_r13, 0, STK_BASE, 0); /* * If we need to zero the area then clear from localloc start * using the length we saved earlier */ if (ins->flags & 
MONO_INST_INIT) { s390_lgr (code, s390_r1, s390_r0); s390_lgr (code, s390_r0, ins->dreg); s390_lgr (code, s390_r14, s390_r12); s390_lghi (code, s390_r13, 0); s390_mvcle(code, s390_r0, s390_r12, 0, 0); s390_jo (code, -2); s390_lgr (code, s390_r12, s390_r14); } /* * If we have an LMF then we have to adjust its BP */ if (cfg->method->save_lmf) { int lmfOffset = cfg->stack_usage - sizeof(MonoLMF); if (s390_is_imm16(lmfOffset)) { s390_lghi (code, s390_r13, lmfOffset); } else if (s390_is_imm32(lmfOffset)) { s390_lgfi (code, s390_r13, lmfOffset); } else { S390_SET (code, s390_r13, lmfOffset); } s390_stg (code, s390_r15, s390_r13, cfg->frame_reg, MONO_STRUCT_OFFSET(MonoLMF, ebp)); } } break; case OP_THROW: { s390_lgr (code, s390_r2, ins->sreg1); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception)); } break; case OP_RETHROW: { s390_lgr (code, s390_r2, ins->sreg1); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception)); } break; case OP_START_HANDLER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); S390_LONG (code, stg, stg, s390_r14, 0, spvar->inst_basereg, spvar->inst_offset); } break; case OP_ENDFILTER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); if (ins->sreg1 != s390_r2) s390_lgr(code, s390_r2, ins->sreg1); S390_LONG (code, lg, lg, s390_r14, 0, spvar->inst_basereg, spvar->inst_offset); s390_br (code, s390_r14); } break; case OP_ENDFINALLY: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); S390_LONG (code, lg, lg, s390_r14, 0, spvar->inst_basereg, spvar->inst_offset); s390_br (code, s390_r14); } break; case OP_CALL_HANDLER: { mono_add_patch_info_rel (cfg, code-cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb, MONO_R_S390_DIRECT); s390_brasl (code, s390_r14, 0); for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev) mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb); } break; case OP_LABEL: { ins->inst_c0 = code - cfg->native_code; } break; case OP_RELAXED_NOP: case OP_NOP: case OP_DUMMY_USE: case OP_DUMMY_ICONST: case OP_DUMMY_I8CONST: case OP_DUMMY_R8CONST: case OP_DUMMY_R4CONST: case OP_NOT_REACHED: case OP_NOT_NULL: { } break; case OP_IL_SEQ_POINT: mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); break; case OP_SEQ_POINT: { MonoInst *var; RI_Format *o[2]; guint16 displace; if (cfg->compile_aot) NOT_IMPLEMENTED; if (ins->flags & MONO_INST_SINGLE_STEP_LOC) { var = cfg->arch.ss_tramp_var; s390_lg (code, s390_r1, 0, var->inst_basereg, var->inst_offset); if (mono_hwcap_s390x_has_eif) { s390_ltg (code, s390_r14, 0, s390_r1, 0); } else { s390_lg (code, s390_r14, 0, s390_r1, 0); s390_ltgr (code, s390_r14, s390_r14); } o[0] = (RI_Format *) code; s390_jz (code, 4); s390_lgr (code, s390_r1, cfg->frame_reg); s390_basr (code, s390_r14, s390_r14); displace = ((uintptr_t) code - (uintptr_t) o[0]) / 2; o[0]->i2 = displace; } /* * This is the address which is saved in seq points, */ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); var = cfg->arch.bp_tramp_var; s390_lghi (code, s390_r1, 0); s390_ltgr (code, s390_r1, s390_r1); o[0] = (RI_Format *) code; s390_jz (code, 0); s390_lg (code, s390_r1, 0, var->inst_basereg, var->inst_offset); if (mono_hwcap_s390x_has_eif) { s390_ltg (code, s390_r14, 0, s390_r1, 0); } else { s390_lg (code, s390_r1, 0, s390_r1, 0); s390_ltgr (code, s390_r14, s390_r1); } o[1] = (RI_Format *) code; s390_jz (code, 4); 
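			/* ltg/ltgr set the CC from the trampoline address: it stays zero
			 * until a breakpoint is armed here, so the jz (displacement patched
			 * just below) branches around this call; otherwise the trampoline
			 * is invoked with the frame pointer as its argument. */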
s390_lgr (code, s390_r1, cfg->frame_reg); s390_basr (code, s390_r14, s390_r14); displace = ((uintptr_t) code - (uintptr_t) o[0]) / 2; o[0]->i2 = displace; displace = ((uintptr_t) code - (uintptr_t) o[1]) / 2; o[1]->i2 = displace; /* * Add an additional nop so skipping the bp doesn't cause the ip to point * to another IL offset. */ s390_nop (code); break; } case OP_GENERIC_CLASS_INIT: { static int byte_offset = -1; static guint8 bitmask; short int *jump; g_assert (ins->sreg1 == S390_FIRST_ARG_REG); if (byte_offset < 0) mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask); s390_tm (code, ins->sreg1, byte_offset, bitmask); s390_jo (code, 0); CODEPTR(code, jump); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init)); PTRSLOT (code, jump); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; break; } case OP_BR: EMIT_UNCOND_BRANCH(ins); break; case OP_BR_REG: { s390_br (code, ins->sreg1); } break; case OP_CEQ: case OP_ICEQ: case OP_LCEQ: { s390_lghi(code, ins->dreg, 1); s390_jz (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_CLT: case OP_ICLT: case OP_LCLT: { s390_lghi(code, ins->dreg, 1); s390_jl (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_CLT_UN: case OP_ICLT_UN: case OP_LCLT_UN: { s390_lghi(code, ins->dreg, 1); s390_jlo (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_CGT: case OP_ICGT: case OP_LCGT: { s390_lghi(code, ins->dreg, 1); s390_jh (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_CGT_UN: case OP_ICGT_UN: case OP_LCGT_UN: { s390_lghi(code, ins->dreg, 1); s390_jho (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_ICNEQ: { s390_lghi(code, ins->dreg, 1); s390_jne (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_ICGE: { s390_lghi(code, ins->dreg, 1); s390_jhe (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_ICLE: { s390_lghi(code, ins->dreg, 1); s390_jle (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_ICGE_UN: { s390_lghi(code, ins->dreg, 1); s390_jhe (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_ICLE_UN: { s390_lghi(code, ins->dreg, 1); s390_jle (code, 4); s390_lghi(code, ins->dreg, 0); } break; case OP_COND_EXC_EQ: case OP_COND_EXC_IEQ: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_EQ, ins->inst_p1); break; case OP_COND_EXC_NE_UN: case OP_COND_EXC_INE_UN: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NE, ins->inst_p1); break; case OP_COND_EXC_LT: case OP_COND_EXC_ILT: case OP_COND_EXC_LT_UN: case OP_COND_EXC_ILT_UN: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, ins->inst_p1); break; case OP_COND_EXC_GT: case OP_COND_EXC_IGT: case OP_COND_EXC_GT_UN: case OP_COND_EXC_IGT_UN: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GT, ins->inst_p1); break; case OP_COND_EXC_GE: case OP_COND_EXC_IGE: case OP_COND_EXC_GE_UN: case OP_COND_EXC_IGE_UN: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GE, ins->inst_p1); break; case OP_COND_EXC_LE: case OP_COND_EXC_ILE: case OP_COND_EXC_LE_UN: case OP_COND_EXC_ILE_UN: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LE, ins->inst_p1); break; case OP_COND_EXC_OV: case OP_COND_EXC_IOV: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, ins->inst_p1); break; case OP_COND_EXC_NO: case OP_COND_EXC_INO: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NO, ins->inst_p1); break; case OP_COND_EXC_C: case OP_COND_EXC_IC: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, ins->inst_p1); break; case OP_COND_EXC_NC: case OP_COND_EXC_INC: EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, ins->inst_p1); break; case OP_LBEQ: case OP_IBEQ: 
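		/* Integer and long conditional branches share one path: the compare
		 * emitted just before set the condition code, so each case only
		 * selects the CC mask for the branch. */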
EMIT_COND_BRANCH (ins, S390_CC_EQ); break; case OP_LBNE_UN: case OP_IBNE_UN: EMIT_COND_BRANCH (ins, S390_CC_NE); break; case OP_LBLT: case OP_LBLT_UN: case OP_IBLT: case OP_IBLT_UN: EMIT_COND_BRANCH (ins, S390_CC_LT); break; case OP_LBGT: case OP_LBGT_UN: case OP_IBGT: case OP_IBGT_UN: EMIT_COND_BRANCH (ins, S390_CC_GT); break; case OP_LBGE: case OP_LBGE_UN: case OP_IBGE: case OP_IBGE_UN: EMIT_COND_BRANCH (ins, S390_CC_GE); break; case OP_LBLE: case OP_LBLE_UN: case OP_IBLE: case OP_IBLE_UN: EMIT_COND_BRANCH (ins, S390_CC_LE); break; case OP_S390_CRJ: EMIT_COMP_AND_BRANCH(ins, crj, cr); break; case OP_S390_CLRJ: EMIT_COMP_AND_BRANCH(ins, clrj, clr); break; case OP_S390_CGRJ: EMIT_COMP_AND_BRANCH(ins, cgrj, cgr); break; case OP_S390_CLGRJ: EMIT_COMP_AND_BRANCH(ins, clgrj, clgr); break; case OP_S390_CIJ: EMIT_COMP_AND_BRANCH_IMM(ins, crj, cr, ltr, FALSE); break; case OP_S390_CLIJ: EMIT_COMP_AND_BRANCH_IMM(ins, clrj, clr, ltr, TRUE); break; case OP_S390_CGIJ: EMIT_COMP_AND_BRANCH_IMM(ins, cgrj, cgr, ltgr, FALSE); break; case OP_S390_CLGIJ: EMIT_COMP_AND_BRANCH_IMM(ins, clgrj, clgr, ltgr, TRUE); break; /* floating point opcodes */ case OP_R8CONST: { double d = *(double *) ins->inst_p0; if (d == 0) { s390_lzdr (code, ins->dreg); if (mono_signbit (d) != 0) s390_lndbr (code, ins->dreg, ins->dreg); } else { S390_SET (code, s390_r13, ins->inst_p0); s390_ld (code, ins->dreg, 0, s390_r13, 0); } } break; case OP_R4CONST: { float f = *(float *) ins->inst_p0; if (f == 0) { if (cfg->r4fp) { s390_lzer (code, ins->dreg); if (mono_signbit (f) != 0) s390_lnebr (code, ins->dreg, ins->dreg); } else { s390_lzdr (code, ins->dreg); if (mono_signbit (f) != 0) s390_lndbr (code, ins->dreg, ins->dreg); } } else { S390_SET (code, s390_r13, ins->inst_p0); s390_le (code, ins->dreg, 0, s390_r13, 0); if (!cfg->r4fp) s390_ldebr (code, ins->dreg, ins->dreg); else s390_le (code, ins->dreg, 0, s390_r13, 0); } } break; case OP_STORER8_MEMBASE_REG: { S390_LONG (code, stdy, std, ins->sreg1, 0, ins->inst_destbasereg, ins->inst_offset); } break; case OP_LOADR8_MEMBASE: { S390_LONG (code, ldy, ld, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } break; case OP_STORER4_MEMBASE_REG: { if (cfg->r4fp) { S390_LONG (code, stey, ste, ins->sreg1, 0, ins->inst_destbasereg, ins->inst_offset); } else { s390_ledbr (code, ins->sreg1, ins->sreg1); S390_LONG (code, stey, ste, ins->sreg1, 0, ins->inst_destbasereg, ins->inst_offset); s390_ldebr (code, ins->sreg1, ins->sreg1); } } break; case OP_LOADR4_MEMBASE: { if (cfg->r4fp) { S390_LONG (code, ley, le, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); } else { S390_LONG (code, ley, le, ins->dreg, 0, ins->inst_basereg, ins->inst_offset); s390_ldebr (code, ins->dreg, ins->dreg); } } break; case OP_ICONV_TO_R_UN: { if (mono_hwcap_s390x_has_fpe) { s390_cdlfbr (code, ins->dreg, 5, ins->sreg1, 0); } else { s390_llgfr (code, s390_r0, ins->sreg1); s390_cdgbr (code, ins->dreg, s390_r0); } } break; case OP_LCONV_TO_R_UN: { if (mono_hwcap_s390x_has_fpe) { s390_cdlgbr (code, ins->dreg, 6, ins->sreg1, 0); } else { short int *jump; s390_lgdr (code, s390_r0, s390_r15); s390_lgdr (code, s390_r1, s390_r13); s390_lgdr (code, s390_r14, s390_r12); s390_cxgbr (code, s390_f12, ins->sreg1); s390_ltgr (code, ins->sreg1, ins->sreg1); s390_jnl (code, 0); CODEPTR(code, jump); S390_SET (code, s390_r13, 0x403f000000000000llu); s390_lgdr (code, s390_f13, s390_r13); s390_lzdr (code, s390_f15); s390_axbr (code, s390_f12, s390_f13); PTRSLOT(code, jump); s390_ldxbr (code, s390_f13, s390_f12); s390_ldr (code, ins->dreg, 
s390_f13); s390_ldgr (code, s390_f12, s390_r14); s390_ldgr (code, s390_f13, s390_r1); s390_ldgr (code, s390_f15, s390_r0); } } break; case OP_ICONV_TO_R4: s390_cefbr (code, ins->dreg, ins->sreg1); if (!cfg->r4fp) s390_ldebr (code, ins->dreg, ins->dreg); break; case OP_LCONV_TO_R4: s390_cegbr (code, ins->dreg, ins->sreg1); if (!cfg->r4fp) s390_ldebr (code, ins->dreg, ins->dreg); break; case OP_ICONV_TO_R8: s390_cdfbr (code, ins->dreg, ins->sreg1); break; case OP_LCONV_TO_R8: s390_cdgbr (code, ins->dreg, ins->sreg1); break; case OP_FCONV_TO_I1: s390_cgdbr (code, ins->dreg, 5, ins->sreg1); s390_ltgr (code, ins->dreg, ins->dreg); s390_jnl (code, 4); s390_oill (code, ins->dreg, 0x80); s390_lghi (code, s390_r0, 0xff); s390_ngr (code, ins->dreg, s390_r0); break; case OP_FCONV_TO_U1: if (mono_hwcap_s390x_has_fpe) { s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0); s390_lghi (code, s390_r0, 0xff); s390_ngr (code, ins->dreg, s390_r0); } else { code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE); } break; case OP_FCONV_TO_I2: s390_cgdbr (code, ins->dreg, 5, ins->sreg1); s390_ltgr (code, ins->dreg, ins->dreg); s390_jnl (code, 4); s390_oill (code, ins->dreg, 0x8000); s390_llill (code, s390_r0, 0xffff); s390_ngr (code, ins->dreg, s390_r0); break; case OP_FCONV_TO_U2: if (mono_hwcap_s390x_has_fpe) { s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0); s390_llill (code, s390_r0, 0xffff); s390_ngr (code, ins->dreg, s390_r0); } else { code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE); } break; case OP_FCONV_TO_I4: s390_cfdbr (code, ins->dreg, 5, ins->sreg1); break; case OP_FCONV_TO_U4: if (mono_hwcap_s390x_has_fpe) { s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0); } else { code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE); } break; case OP_FCONV_TO_I8: s390_cgdbr (code, ins->dreg, 5, ins->sreg1); break; case OP_FCONV_TO_U8: if (mono_hwcap_s390x_has_fpe) { s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0); } else { code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 8, FALSE); } break; case OP_RCONV_TO_I1: s390_cgebr (code, ins->dreg, 5, ins->sreg1); s390_ltgr (code, ins->dreg, ins->dreg); s390_jnl (code, 4); s390_oill (code, ins->dreg, 0x80); s390_lghi (code, s390_r0, 0xff); s390_ngr (code, ins->dreg, s390_r0); break; case OP_RCONV_TO_U1: if (mono_hwcap_s390x_has_fpe) { s390_clgebr (code, ins->dreg, 5, ins->sreg1, 0); s390_lghi (code, s390_r0, 0xff); s390_ngr (code, ins->dreg, s390_r0); } else { code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE); } break; case OP_RCONV_TO_I2: s390_cgebr (code, ins->dreg, 5, ins->sreg1); s390_ltgr (code, ins->dreg, ins->dreg); s390_jnl (code, 4); s390_oill (code, ins->dreg, 0x8000); s390_llill (code, s390_r0, 0xffff); s390_ngr (code, ins->dreg, s390_r0); break; case OP_RCONV_TO_U2: if (mono_hwcap_s390x_has_fpe) { s390_clgebr (code, ins->dreg, 5, ins->sreg1, 0); s390_llill (code, s390_r0, 0xffff); s390_ngr (code, ins->dreg, s390_r0); } else { code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE); } break; case OP_RCONV_TO_I4: s390_cfebr (code, ins->dreg, 5, ins->sreg1); break; case OP_RCONV_TO_U4: if (mono_hwcap_s390x_has_fpe) { s390_clfebr (code, ins->dreg, 5, ins->sreg1, 0); } else { code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE); } break; case OP_RCONV_TO_I8: s390_cgebr (code, ins->dreg, 5, ins->sreg1); break; case OP_RCONV_TO_U8: if (mono_hwcap_s390x_has_fpe) { s390_clgebr (code, ins->dreg, 5, ins->sreg1, 0); } else { code = emit_float_to_int (cfg, 
code, ins->dreg, ins->sreg1, 8, FALSE); } break; case OP_LCONV_TO_OVF_I: { /* Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000 */ short int *o[5]; s390_ltgr (code, ins->sreg2, ins->sreg2); s390_jnl (code, 0); CODEPTR(code, o[0]); s390_ltgr (code, ins->sreg1, ins->sreg1); s390_jnl (code, 0); CODEPTR(code, o[1]); s390_lhi (code, s390_r13, -1); s390_cgr (code, ins->sreg1, s390_r13); s390_jnz (code, 0); CODEPTR(code, o[2]); if (ins->dreg != ins->sreg2) s390_lgr (code, ins->dreg, ins->sreg2); s390_j (code, 0); CODEPTR(code, o[3]); PTRSLOT(code, o[0]); s390_jz (code, 0); CODEPTR(code, o[4]); PTRSLOT(code, o[1]); PTRSLOT(code, o[2]); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException"); s390_brasl (code, s390_r14, 0); PTRSLOT(code, o[3]); PTRSLOT(code, o[4]); } break; case OP_ABS: s390_lpdbr (code, ins->dreg, ins->sreg1); break; case OP_ABSF: s390_lpebr (code, ins->dreg, ins->sreg1); break; case OP_CEIL: s390_fidbra (code, ins->dreg, 6, ins->sreg1, 4); break; case OP_CEILF: s390_fiebra (code, ins->dreg, 6, ins->sreg1, 4); break; case OP_FLOOR: s390_fidbra (code, ins->dreg, 7, ins->sreg1, 4); break; case OP_FLOORF: s390_fiebra (code, ins->dreg, 7, ins->sreg1, 4); break; case OP_FCOPYSIGN: s390_cpsdr (code, ins->dreg, ins->sreg2, ins->sreg1); break; case OP_ROUND: s390_fidbra (code, ins->dreg, 4, ins->sreg1, 4); break; case OP_SQRT: s390_sqdbr (code, ins->dreg, ins->sreg1); break; case OP_SQRTF: s390_sqebr (code, ins->dreg, ins->sreg1); break; case OP_TRUNC: s390_fidbra (code, ins->dreg, 5, ins->sreg1, 4); break; case OP_TRUNCF: s390_fiebra (code, ins->dreg, 5, ins->sreg1, 4); break; case OP_FADD: { CHECK_SRCDST_COM_F; s390_adbr (code, ins->dreg, src2); } break; case OP_RADD: { CHECK_SRCDST_COM_F; s390_aebr (code, ins->dreg, src2); } break; case OP_FSUB: { CHECK_SRCDST_NCOM_F(sdbr); } break; case OP_RSUB: { CHECK_SRCDST_NCOM_F(sebr); } break; case OP_FMUL: { CHECK_SRCDST_COM_F; s390_mdbr (code, ins->dreg, src2); } break; case OP_RMUL: { CHECK_SRCDST_COM_F; s390_meer (code, ins->dreg, src2); } break; case OP_FDIV: { CHECK_SRCDST_NCOM_F(ddbr); } break; case OP_RDIV: { CHECK_SRCDST_NCOM_F(debr); } break; case OP_FNEG: { s390_lcdbr (code, ins->dreg, ins->sreg1); } break; case OP_RNEG: { s390_lcebr (code, ins->dreg, ins->sreg1); } break; case OP_FREM: { CHECK_SRCDST_NCOM_FR(didbr, 5); } break; case OP_RREM: { CHECK_SRCDST_NCOM_FR(diebr, 5); } break; case OP_FCOMPARE: { s390_cdbr (code, ins->sreg1, ins->sreg2); } break; case OP_RCOMPARE: { s390_cebr (code, ins->sreg1, ins->sreg2); } break; case OP_FCEQ: { s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_je (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FCLT: { s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jl (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FCLT_UN: { s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jlo (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FCGT: { s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jh (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FCGT_UN: { s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jho (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FCNEQ: { s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jne (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FCGE: { s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, 
ins->dreg, 1); s390_jhe (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FCLE: { s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jle (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCEQ: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_je (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCLT: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jl (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCLT_UN: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jlo (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCGT: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jh (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCGT_UN: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jho (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCNEQ: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jne (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCGE: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jhe (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_RCLE: { if (cfg->r4fp) s390_cebr (code, ins->sreg1, ins->sreg2); else s390_cdbr (code, ins->sreg1, ins->sreg2); s390_lghi (code, ins->dreg, 1); s390_jle (code, 4); s390_lghi (code, ins->dreg, 0); } break; case OP_FBEQ: { short *o; s390_jo (code, 0); CODEPTR(code, o); EMIT_COND_BRANCH (ins, S390_CC_EQ); PTRSLOT (code, o); } break; case OP_FBNE_UN: EMIT_COND_BRANCH (ins, S390_CC_NE|S390_CC_OV); break; case OP_FBLT: { short *o; s390_jo (code, 0); CODEPTR(code, o); EMIT_COND_BRANCH (ins, S390_CC_LT); PTRSLOT (code, o); } break; case OP_FBLT_UN: EMIT_COND_BRANCH (ins, S390_CC_LT|S390_CC_OV); break; case OP_FBGT: { short *o; s390_jo (code, 0); CODEPTR(code, o); EMIT_COND_BRANCH (ins, S390_CC_GT); PTRSLOT (code, o); } break; case OP_FBGT_UN: EMIT_COND_BRANCH (ins, S390_CC_GT|S390_CC_OV); break; case OP_FBGE: { short *o; s390_jo (code, 0); CODEPTR(code, o); EMIT_COND_BRANCH (ins, S390_CC_GE); PTRSLOT (code, o); } break; case OP_FBGE_UN: EMIT_COND_BRANCH (ins, S390_CC_GE|S390_CC_OV); break; case OP_FBLE: { short *o; s390_jo (code, 0); CODEPTR(code, o); EMIT_COND_BRANCH (ins, S390_CC_LE); PTRSLOT (code, o); } break; case OP_FBLE_UN: EMIT_COND_BRANCH (ins, S390_CC_LE|S390_CC_OV); break; case OP_CKFINITE: { short *o; s390_lhi (code, s390_r13, 0x7f); s390_tcdb (code, ins->sreg1, 0, s390_r13, 0); s390_jz (code, 0); CODEPTR(code, o); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException"); s390_brasl (code, s390_r14,0); PTRSLOT(code, o); } break; case OP_S390_MOVE: { if (ins->backend.size > 0) { if (ins->backend.size <= 256) { s390_mvc (code, ins->backend.size, ins->sreg2, ins->inst_offset, ins->sreg1, ins->inst_imm); } else { s390_lgr (code, s390_r0, ins->sreg2); if (ins->inst_offset > 0) { if (s390_is_imm16 (ins->inst_offset)) { s390_aghi (code, s390_r0, 
ins->inst_offset);
					} else if (s390_is_imm32 (ins->inst_offset)) {
						s390_agfi (code, s390_r0, ins->inst_offset);
					} else {
						S390_SET (code, s390_r13, ins->inst_offset);
						s390_agr (code, s390_r0, s390_r13);
					}
				}
				s390_lgr (code, s390_r12, ins->sreg1);
				if (ins->inst_imm > 0) {
					if (s390_is_imm16 (ins->inst_imm)) {
						s390_aghi (code, s390_r12, ins->inst_imm);
					} else if (s390_is_imm32 (ins->inst_imm)) {
						s390_agfi (code, s390_r12, ins->inst_imm);
					} else {
						S390_SET (code, s390_r13, ins->inst_imm);
						s390_agr (code, s390_r12, s390_r13);
					}
				}
				/* Load the copy length into r1, using the shortest encoding that fits */
				if (s390_is_imm16 (ins->backend.size)) {
					s390_lghi (code, s390_r1, ins->backend.size);
				} else if (s390_is_imm32 (ins->backend.size)) {
					s390_lgfi (code, s390_r1, ins->backend.size);
				} else {
					S390_SET (code, s390_r1, ins->backend.size);
				}
				/* mvcle copies via the (r0,r1) destination and (r12,r13) source pairs */
				s390_lgr (code, s390_r13, s390_r1);
				s390_mvcle (code, s390_r0, s390_r12, 0, 0);
				s390_jo (code, -2);
			}
		}
	}
		break;
	case OP_ATOMIC_ADD_I8: {
		if (mono_hwcap_s390x_has_ia) {
			/* laag adds atomically and returns the old value in r0 */
			s390_laag (code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset);
			if (mono_hwcap_s390x_has_mlt) {
				s390_agrk (code, ins->dreg, s390_r0, ins->sreg2);
			} else {
				s390_agr (code, s390_r0, ins->sreg2);
				s390_lgr (code, ins->dreg, s390_r0);
			}
		} else {
			/* compare-and-swap loop: retry the whole sequence while csg fails */
			s390_lgr (code, s390_r1, ins->sreg2);
			s390_lg  (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
			s390_agr (code, s390_r1, s390_r0);
			s390_csg (code, s390_r0, s390_r1, ins->inst_basereg, ins->inst_offset);
			s390_jnz (code, -10);
			s390_lgr (code, ins->dreg, s390_r1);
		}
	}
		break;
	case OP_ATOMIC_EXCHANGE_I8: {
		s390_lg  (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
		s390_csg (code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset);
		s390_jnz (code, -6);
		s390_lgr (code, ins->dreg, s390_r0);
	}
		break;
	case OP_ATOMIC_ADD_I4: {
		if (mono_hwcap_s390x_has_ia) {
			s390_laa  (code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset);
			s390_ar   (code, s390_r0, ins->sreg2);
			s390_lgfr (code, ins->dreg, s390_r0);
		} else {
			s390_lgfr (code, s390_r1, ins->sreg2);
			s390_lgf  (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
			s390_agr  (code, s390_r1, s390_r0);
			s390_cs   (code, s390_r0, s390_r1, ins->inst_basereg, ins->inst_offset);
			s390_jnz  (code, -9);
			s390_lgfr (code, ins->dreg, s390_r1);
		}
	}
		break;
	case OP_ATOMIC_EXCHANGE_I4: {
		s390_l    (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
		s390_cs   (code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset);
		s390_jnz  (code, -4);
		s390_lgfr (code, ins->dreg, s390_r0);
	}
		break;
	case OP_S390_BKCHAIN: {
		s390_lgr (code, ins->dreg, ins->sreg1);
		if (s390_is_imm16 (cfg->stack_offset)) {
			s390_aghi (code, ins->dreg, cfg->stack_offset);
		} else if (s390_is_imm32 (cfg->stack_offset)) {
			s390_agfi (code, ins->dreg, cfg->stack_offset);
		} else {
			S390_SET (code, s390_r13, cfg->stack_offset);
			s390_agr (code, ins->dreg, s390_r13);
		}
	}
		break;
	case OP_MEMORY_BARRIER:
		s390_mem (code);
		break;
	case OP_POPCNT32:
		s390_llgfr (code, s390_r1, ins->sreg1);
		if (mono_hwcap_s390x_has_mie3) {
			s390_popcnt (code, ins->dreg, 0x80, s390_r1);
		} else {
			s390_popcnt (code, s390_r0, 0, s390_r1);
			s390_ahhlr  (code, s390_r0, s390_r0, s390_r0);
			s390_sllg   (code, s390_r1, s390_r0, 0, 16);
			s390_algr   (code, s390_r0, s390_r1);
			s390_sllg   (code, s390_r1, s390_r0, 0, 8);
			s390_algr   (code, s390_r0, s390_r1);
			s390_srlg   (code, ins->dreg, s390_r0, 0, 56);
		}
		break;
	case OP_POPCNT64:
		if (mono_hwcap_s390x_has_mie3) {
			s390_popcnt (code, ins->dreg, 0x80, ins->sreg1);
		} else {
			/* per-byte counts first, then fold the bytes together */
			s390_popcnt (code, s390_r0, 0, ins->sreg1);
			s390_ahhlr  (code, s390_r0, s390_r0, s390_r0);
			s390_sllg   (code, s390_r1, s390_r0, 0, 16);
			s390_algr   (code, s390_r0, s390_r1);
			s390_sllg
(code, s390_r1, s390_r0, 0, 8); s390_algr (code, s390_r0, s390_r1); s390_srlg (code, ins->dreg, s390_r0, 0, 56); } break; case OP_LIVERANGE_START: { if (cfg->verbose_level > 1) printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code; break; } case OP_LIVERANGE_END: { if (cfg->verbose_level > 1) printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code; break; } case OP_GC_SAFE_POINT: { short *br; s390_ltg (code, s390_r0, 0, ins->sreg1, 0); s390_jz (code, 0); CODEPTR(code, br); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll)); PTRSLOT (code, br); break; } case OP_GC_LIVENESS_DEF: case OP_GC_LIVENESS_USE: case OP_GC_PARAM_SLOT_LIVENESS_DEF: ins->backend.pc_offset = code - cfg->native_code; break; case OP_GC_SPILL_SLOT_LIVENESS_DEF: ins->backend.pc_offset = code - cfg->native_code; bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins); break; #ifdef MONO_ARCH_SIMD_INTRINSICS case OP_ADDPS: s390x_addps (code, ins->sreg1, ins->sreg2); break; case OP_DIVPS: s390x_divps (code, ins->sreg1, ins->sreg2); break; case OP_MULPS: s390x_mulps (code, ins->sreg1, ins->sreg2); break; case OP_SUBPS: s390x_subps (code, ins->sreg1, ins->sreg2); break; case OP_MAXPS: s390x_maxps (code, ins->sreg1, ins->sreg2); break; case OP_MINPS: s390x_minps (code, ins->sreg1, ins->sreg2); break; case OP_COMPPS: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7); s390x_cmpps_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ANDPS: s390x_andps (code, ins->sreg1, ins->sreg2); break; case OP_ANDNPS: s390x_andnps (code, ins->sreg1, ins->sreg2); break; case OP_ORPS: s390x_orps (code, ins->sreg1, ins->sreg2); break; case OP_XORPS: s390x_xorps (code, ins->sreg1, ins->sreg2); break; case OP_SQRTPS: s390x_sqrtps (code, ins->dreg, ins->sreg1); break; case OP_RSQRTPS: s390x_rsqrtps (code, ins->dreg, ins->sreg1); break; case OP_RCPPS: s390x_rcpps (code, ins->dreg, ins->sreg1); break; case OP_ADDSUBPS: s390x_addsubps (code, ins->sreg1, ins->sreg2); break; case OP_HADDPS: s390x_haddps (code, ins->sreg1, ins->sreg2); break; case OP_HSUBPS: s390x_hsubps (code, ins->sreg1, ins->sreg2); break; case OP_DUPPS_HIGH: s390x_movshdup (code, ins->dreg, ins->sreg1); break; case OP_DUPPS_LOW: s390x_movsldup (code, ins->dreg, ins->sreg1); break; case OP_PSHUFLEW_HIGH: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); s390x_pshufhw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_PSHUFLEW_LOW: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); s390x_pshuflw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_PSHUFLED: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); s390x_pshufd_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_SHUFPS: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); s390x_shufps_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_SHUFPD: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0x3); s390x_shufpd_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ADDPD: s390x_addpd (code, ins->sreg1, ins->sreg2); break; case OP_DIVPD: s390x_divpd (code, ins->sreg1, ins->sreg2); break; case OP_MULPD: s390x_mulpd (code, ins->sreg1, ins->sreg2); break; case OP_SUBPD: s390x_subpd (code, ins->sreg1, ins->sreg2); break; 
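	/* This SIMD block mirrors the amd64 lowering; several helpers further
	 * down still use amd64_* emitter names. */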
case OP_MAXPD: s390x_maxpd (code, ins->sreg1, ins->sreg2); break; case OP_MINPD: s390x_minpd (code, ins->sreg1, ins->sreg2); break; case OP_COMPPD: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7); s390x_cmppd_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ANDPD: s390x_andpd (code, ins->sreg1, ins->sreg2); break; case OP_ANDNPD: s390x_andnpd (code, ins->sreg1, ins->sreg2); break; case OP_ORPD: s390x_orpd (code, ins->sreg1, ins->sreg2); break; case OP_XORPD: s390x_xorpd (code, ins->sreg1, ins->sreg2); break; case OP_SQRTPD: s390x_sqrtpd (code, ins->dreg, ins->sreg1); break; case OP_ADDSUBPD: s390x_addsubpd (code, ins->sreg1, ins->sreg2); break; case OP_HADDPD: s390x_haddpd (code, ins->sreg1, ins->sreg2); break; case OP_HSUBPD: s390x_hsubpd (code, ins->sreg1, ins->sreg2); break; case OP_DUPPD: s390x_movddup (code, ins->dreg, ins->sreg1); break; case OP_EXTRACT_MASK: s390x_pmovmskb (code, ins->dreg, ins->sreg1); break; case OP_PAND: s390x_pand (code, ins->sreg1, ins->sreg2); break; case OP_POR: s390x_por (code, ins->sreg1, ins->sreg2); break; case OP_PXOR: s390x_pxor (code, ins->sreg1, ins->sreg2); break; case OP_PADDB: s390x_paddb (code, ins->sreg1, ins->sreg2); break; case OP_PADDW: s390x_paddw (code, ins->sreg1, ins->sreg2); break; case OP_PADDD: s390x_paddd (code, ins->sreg1, ins->sreg2); break; case OP_PADDQ: s390x_paddq (code, ins->sreg1, ins->sreg2); break; case OP_PSUBB: s390x_psubb (code, ins->sreg1, ins->sreg2); break; case OP_PSUBW: s390x_psubw (code, ins->sreg1, ins->sreg2); break; case OP_PSUBD: s390x_psubd (code, ins->sreg1, ins->sreg2); break; case OP_PSUBQ: s390x_psubq (code, ins->sreg1, ins->sreg2); break; case OP_PMAXB_UN: s390x_pmaxub (code, ins->sreg1, ins->sreg2); break; case OP_PMAXW_UN: s390x_pmaxuw (code, ins->sreg1, ins->sreg2); break; case OP_PMAXD_UN: s390x_pmaxud (code, ins->sreg1, ins->sreg2); break; case OP_PMAXB: s390x_pmaxsb (code, ins->sreg1, ins->sreg2); break; case OP_PMAXW: s390x_pmaxsw (code, ins->sreg1, ins->sreg2); break; case OP_PMAXD: s390x_pmaxsd (code, ins->sreg1, ins->sreg2); break; case OP_PAVGB_UN: s390x_pavgb (code, ins->sreg1, ins->sreg2); break; case OP_PAVGW_UN: s390x_pavgw (code, ins->sreg1, ins->sreg2); break; case OP_PMINB_UN: s390x_pminub (code, ins->sreg1, ins->sreg2); break; case OP_PMINW_UN: s390x_pminuw (code, ins->sreg1, ins->sreg2); break; case OP_PMIND_UN: s390x_pminud (code, ins->sreg1, ins->sreg2); break; case OP_PMINB: s390x_pminsb (code, ins->sreg1, ins->sreg2); break; case OP_PMINW: s390x_pminsw (code, ins->sreg1, ins->sreg2); break; case OP_PMIND: s390x_pminsd (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQB: s390x_pcmpeqb (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQW: s390x_pcmpeqw (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQD: s390x_pcmpeqd (code, ins->sreg1, ins->sreg2); break; case OP_PCMPEQQ: s390x_pcmpeqq (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTB: s390x_pcmpgtb (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTW: s390x_pcmpgtw (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTD: s390x_pcmpgtd (code, ins->sreg1, ins->sreg2); break; case OP_PCMPGTQ: s390x_pcmpgtq (code, ins->sreg1, ins->sreg2); break; case OP_PSUM_ABS_DIFF: s390x_psadbw (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWB: s390x_punpcklbw (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWW: s390x_punpcklwd (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWD: s390x_punpckldq (code, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWQ: s390x_punpcklqdq (code, ins->sreg1, 
ins->sreg2);
		break;
	case OP_UNPACK_LOWPS: s390x_unpcklps (code, ins->sreg1, ins->sreg2); break;
	case OP_UNPACK_LOWPD: s390x_unpcklpd (code, ins->sreg1, ins->sreg2); break;
	case OP_UNPACK_HIGHB: s390x_punpckhbw (code, ins->sreg1, ins->sreg2); break;
	case OP_UNPACK_HIGHW: s390x_punpckhwd (code, ins->sreg1, ins->sreg2); break;
	case OP_UNPACK_HIGHD: s390x_punpckhdq (code, ins->sreg1, ins->sreg2); break;
	case OP_UNPACK_HIGHQ: s390x_punpckhqdq (code, ins->sreg1, ins->sreg2); break;
	case OP_UNPACK_HIGHPS: s390x_unpckhps (code, ins->sreg1, ins->sreg2); break;
	case OP_UNPACK_HIGHPD: s390x_unpckhpd (code, ins->sreg1, ins->sreg2); break;
	case OP_PACKW: s390x_packsswb (code, ins->sreg1, ins->sreg2); break;
	case OP_PACKD: s390x_packssdw (code, ins->sreg1, ins->sreg2); break;
	case OP_PACKW_UN: s390x_packuswb (code, ins->sreg1, ins->sreg2); break;
	case OP_PACKD_UN: s390x_packusdw (code, ins->sreg1, ins->sreg2); break;
	case OP_PADDB_SAT_UN: s390x_paddusb (code, ins->sreg1, ins->sreg2); break;
	case OP_PSUBB_SAT_UN: s390x_psubusb (code, ins->sreg1, ins->sreg2); break;
	case OP_PADDW_SAT_UN: s390x_paddusw (code, ins->sreg1, ins->sreg2); break;
	case OP_PSUBW_SAT_UN: s390x_psubusw (code, ins->sreg1, ins->sreg2); break;
	case OP_PADDB_SAT: s390x_paddsb (code, ins->sreg1, ins->sreg2); break;
	case OP_PSUBB_SAT: s390x_psubsb (code, ins->sreg1, ins->sreg2); break;
	case OP_PADDW_SAT: s390x_paddsw (code, ins->sreg1, ins->sreg2); break;
	case OP_PSUBW_SAT: s390x_psubsw (code, ins->sreg1, ins->sreg2); break;
	case OP_PMULW: s390x_pmullw (code, ins->sreg1, ins->sreg2); break;
	case OP_PMULD: s390x_pmulld (code, ins->sreg1, ins->sreg2); break;
	case OP_PMULQ: s390x_pmuludq (code, ins->sreg1, ins->sreg2); break;
	case OP_PMULW_HIGH_UN: s390x_pmulhuw (code, ins->sreg1, ins->sreg2); break;
	case OP_PMULW_HIGH: s390x_pmulhw (code, ins->sreg1, ins->sreg2); break;
	case OP_PSHRW: s390x_psrlw_reg_imm (code, ins->dreg, ins->inst_imm); break;
	case OP_PSHRW_REG: s390x_psrlw (code, ins->dreg, ins->sreg2); break;
	case OP_PSARW: s390x_psraw_reg_imm (code, ins->dreg, ins->inst_imm); break;
	case OP_PSARW_REG: s390x_psraw (code, ins->dreg, ins->sreg2); break;
	case OP_PSHLW: s390x_psllw_reg_imm (code, ins->dreg, ins->inst_imm); break;
	case OP_PSHLW_REG: s390x_psllw (code, ins->dreg, ins->sreg2); break;
	case OP_PSHRD: s390x_psrld_reg_imm (code, ins->dreg, ins->inst_imm); break;
	case OP_PSHRD_REG: s390x_psrld (code, ins->dreg, ins->sreg2); break;
	case OP_PSARD: s390x_psrad_reg_imm (code, ins->dreg, ins->inst_imm); break;
	case OP_PSARD_REG: s390x_psrad (code, ins->dreg, ins->sreg2); break;
	case OP_PSHLD: s390x_pslld_reg_imm (code, ins->dreg, ins->inst_imm); break;
	case OP_PSHLD_REG: s390x_pslld (code, ins->dreg, ins->sreg2); break;
	case OP_PSHRQ: s390x_psrlq_reg_imm (code, ins->dreg, ins->inst_imm); break;
	case OP_PSHRQ_REG: s390x_psrlq (code, ins->dreg, ins->sreg2); break;
	/* TODO: these are part of the SSE spec but not implemented here
	case OP_PSARQ: s390x_psraq_reg_imm (code, ins->dreg, ins->inst_imm); break;
	case OP_PSARQ_REG: s390x_psraq (code, ins->dreg, ins->sreg2); break;
	*/
	case OP_PSHLQ: s390x_psllq_reg_imm (code, ins->dreg, ins->inst_imm); break;
	case OP_PSHLQ_REG: s390x_psllq (code, ins->dreg, ins->sreg2); break;
	case OP_CVTDQ2PD: s390x_cvtdq2pd (code, ins->dreg, ins->sreg1); break;
	case OP_CVTDQ2PS: s390x_cvtdq2ps (code, ins->dreg, ins->sreg1); break;
	case OP_CVTPD2DQ: s390x_cvtpd2dq (code, ins->dreg, ins->sreg1); break;
	case OP_CVTPD2PS: s390x_cvtpd2ps (code, ins->dreg, ins->sreg1); break;
	case OP_CVTPS2DQ: s390x_cvtps2dq (code, ins->dreg,
ins->sreg1); break; case OP_CVTPS2PD: s390x_cvtps2pd (code, ins->dreg, ins->sreg1); break; case OP_CVTTPD2DQ: s390x_cvttpd2dq (code, ins->dreg, ins->sreg1); break; case OP_CVTTPS2DQ: s390x_cvttps2dq (code, ins->dreg, ins->sreg1); break; case OP_ICONV_TO_X: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_EXTRACT_I4: amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_EXTRACT_I8: if (ins->inst_c0) { amd64_movhlps (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1); amd64_movd_reg_xreg_size (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG, 8); } else { amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8); } break; case OP_EXTRACT_I1: case OP_EXTRACT_U1: amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4); if (ins->inst_c0) amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_c0 * 8); amd64_widen_reg (code, ins->dreg, ins->dreg, ins->inst_c1 == OP_EXTRACT_I1, FALSE); break; case OP_EXTRACT_I2: case OP_EXTRACT_U2: /*amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4); if (ins->inst_c0) amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, 16, 4);*/ s390x_pextrw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); amd64_widen_reg_size (code, ins->dreg, ins->dreg, ins->inst_c1 == OP_EXTRACT_I2, TRUE, 4); break; case OP_EXTRACT_R8: if (ins->inst_c0) amd64_movhlps (code, ins->dreg, ins->sreg1); else s390x_movsd (code, ins->dreg, ins->sreg1); break; case OP_INSERT_I2: s390x_pinsrw_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_EXTRACTX_U2: s390x_pextrw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_INSERTX_U1_SLOW: /*sreg1 is the extracted ireg (scratch) /sreg2 is the to be inserted ireg (scratch) /dreg is the xreg to receive the value*/ /*clear the bits from the extracted word*/ amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_c0 & 1 ? 
0x00FF : 0xFF00); /*shift the value to insert if needed*/ if (ins->inst_c0 & 1) amd64_shift_reg_imm_size (code, X86_SHL, ins->sreg2, 8, 4); /*join them together*/ amd64_alu (code, X86_OR, ins->sreg1, ins->sreg2); s390x_pinsrw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0 / 2); break; case OP_INSERTX_I4_SLOW: s390x_pinsrw_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2); amd64_shift_reg_imm (code, X86_SHR, ins->sreg2, 16); s390x_pinsrw_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2 + 1); break; case OP_INSERTX_I8_SLOW: amd64_movd_xreg_reg_size(code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg2, 8); if (ins->inst_c0) amd64_movlhps (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG); else s390x_movsd (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG); break; case OP_INSERTX_R4_SLOW: switch (ins->inst_c0) { case 0: if (cfg->r4fp) s390x_movss (code, ins->dreg, ins->sreg2); else s390x_cvtsd2ss (code, ins->dreg, ins->sreg2); break; case 1: s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3)); if (cfg->r4fp) s390x_movss (code, ins->dreg, ins->sreg2); else s390x_cvtsd2ss (code, ins->dreg, ins->sreg2); s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3)); break; case 2: s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3)); if (cfg->r4fp) s390x_movss (code, ins->dreg, ins->sreg2); else s390x_cvtsd2ss (code, ins->dreg, ins->sreg2); s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3)); break; case 3: s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0)); if (cfg->r4fp) s390x_movss (code, ins->dreg, ins->sreg2); else s390x_cvtsd2ss (code, ins->dreg, ins->sreg2); s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0)); break; } break; case OP_INSERTX_R8_SLOW: if (ins->inst_c0) amd64_movlhps (code, ins->dreg, ins->sreg2); else s390x_movsd (code, ins->dreg, ins->sreg2); break; case OP_STOREX_MEMBASE_REG: case OP_STOREX_MEMBASE: s390x_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1); break; case OP_LOADX_MEMBASE: s390x_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_LOADX_ALIGNED_MEMBASE: s390x_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_STOREX_ALIGNED_MEMBASE_REG: s390x_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1); break; case OP_STOREX_NTA_MEMBASE_REG: s390x_movntps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_PREFETCH_MEMBASE: s390x_prefetch_reg_membase (code, ins->backend.arg_info, ins->sreg1, ins->inst_offset); break; case OP_XMOVE: /*FIXME the peephole pass should have killed this*/ if (ins->dreg != ins->sreg1) s390x_movaps (code, ins->dreg, ins->sreg1); break; case OP_XZERO: s390x_pxor (code, ins->dreg, ins->dreg); break; case OP_ICONV_TO_R4_RAW: amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4); break; case OP_FCONV_TO_R8_X: s390x_movsd (code, ins->dreg, ins->sreg1); break; case OP_XCONV_R8_TO_I4: s390x_cvttsd2si_reg_xreg_size (code, ins->dreg, ins->sreg1, 4); switch (ins->backend.source_opcode) { case OP_FCONV_TO_I1: amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE); break; case OP_FCONV_TO_U1: amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); break; case OP_FCONV_TO_I2: amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE); break; case OP_FCONV_TO_U2: amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE); break; } break; case OP_EXPAND_I2: s390x_pinsrw_imm (code, 
ins->dreg, ins->sreg1, 0);
		s390x_pinsrw_imm (code, ins->dreg, ins->sreg1, 1);
		s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0);
		break;
	case OP_EXPAND_I4:
		amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
		s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0);
		break;
	case OP_EXPAND_I8:
		amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8);
		s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0x44);
		break;
	case OP_EXPAND_R4:
		if (cfg->r4fp) {
			s390x_movsd (code, ins->dreg, ins->sreg1);
		} else {
			s390x_movsd (code, ins->dreg, ins->sreg1);
			s390x_cvtsd2ss (code, ins->dreg, ins->dreg);
		}
		s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0);
		break;
	case OP_EXPAND_R8:
		s390x_movsd (code, ins->dreg, ins->sreg1);
		s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0x44);
		break;
#endif
	default:
		g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
		g_assert_not_reached ();
	}

	if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
		g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
			   mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
		g_assert_not_reached ();
	}
	}
	set_code_cursor (cfg, code);
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific registration of lowlevel calls
 *
 * Register routines that provide optimized lowlevel operations
 */
void
mono_arch_register_lowlevel_calls (void)
{
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific patching
 *
 * @param[in] @cfg - Compilation control block
 * @param[in] @ji - Jump information
 * @param[in] @code - Start of code
 * @param[in] @target - Target of patch
 * @param[in] @relo - Relocation type
 *
 * Perform patching action
 */
static void
emit_patch_full (MonoCompile *cfg, MonoJumpInfo *ji, guint8 *code,
		 gpointer target, int relo)
{
	guint8 *ip = ji->ip.i + code;

	switch (relo) {
	case MONO_R_S390_RELINS :
		target = S390_RELATIVE(target, ip);
		ip += 2;
		s390_patch_rel (ip, (guint64) target);
		break;
	case MONO_R_S390_THUNKED :
		if (cfg)
			create_thunk(cfg, ip, code, target);
		else
			update_thunk(cfg, code, target);
		break;
	case MONO_R_S390_DIRECT :
		S390_EMIT_CALL (ip, target);
		break;
	case MONO_R_S390_ADDR :
		s390_patch_addr (ip, (guint64) target);
		break;
	case MONO_R_S390_SWITCH :
		S390_EMIT_LOAD (ip, target);
		break;
	case MONO_R_S390_REL :
		target = S390_RELATIVE(target, ip);
		s390_patch_rel (ip, (guint64) target);
		break;
	default :
		g_assert_not_reached();
	}
}

/*========================= End of Function ========================*/

/**
 *
 * @brief Architecture-specific patching of instructions and data
 *
 * @param[in] @cfg - Compile control block
 * @param[in] @code - Current code block
 * @param[in] @ji - Jump information
 * @param[in] @target - Target of patch
 *
 * Process the patch data created during the instruction build process.
 * This resolves jumps, calls, variables, etc.
*/ void mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target) { switch (ji->type) { case MONO_PATCH_INFO_IP: case MONO_PATCH_INFO_LDSTR: case MONO_PATCH_INFO_TYPE_FROM_HANDLE: case MONO_PATCH_INFO_LDTOKEN: case MONO_PATCH_INFO_EXC: emit_patch_full (cfg, ji, code, target, MONO_R_S390_ADDR); break; case MONO_PATCH_INFO_BB: case MONO_PATCH_INFO_JIT_ICALL_ADDR: case MONO_PATCH_INFO_JIT_ICALL_ID: case MONO_PATCH_INFO_METHOD: emit_patch_full (cfg, ji, code, target, ji->relocation); break; case MONO_PATCH_INFO_METHOD_JUMP: case MONO_PATCH_INFO_RGCTX_FETCH: case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR: case MONO_PATCH_INFO_ABS: emit_patch_full (cfg, ji, code, target, MONO_R_S390_THUNKED); break; case MONO_PATCH_INFO_SWITCH: emit_patch_full(cfg, ji, code, target, MONO_R_S390_SWITCH); break; case MONO_PATCH_INFO_METHODCONST: case MONO_PATCH_INFO_CLASS: case MONO_PATCH_INFO_IMAGE: case MONO_PATCH_INFO_FIELD: case MONO_PATCH_INFO_IID: case MONO_PATCH_INFO_EXC_NAME: emit_patch_full(cfg, ji, code, target, MONO_R_S390_REL); break; case MONO_PATCH_INFO_NONE: break; default: emit_patch_full (cfg, ji, code, target, MONO_R_S390_RELINS); } } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific prolog generation * * @param[in] @cfg - Compile control block * @returns Location of code generated * * Create the instruction sequence for entry into a method: * - Determine stack size * - Save preserved registers * - Unload parameters * - Determine if LMF needs saving and generate that sequence */ guint8 * mono_arch_emit_prolog (MonoCompile *cfg) { MonoMethod *method = cfg->method; MonoBasicBlock *bb; MonoMethodSignature *sig; MonoInst *inst; long alloc_size, pos, max_offset, i, cfa_offset = 0; guint8 *code; guint32 size; CallInfo *cinfo; int argsClobbered = 0, lmfOffset, fpOffset = 0; cfg->code_size = 512; if (method->save_lmf) cfg->code_size += 200; cfg->native_code = code = (guint8 *) g_malloc (cfg->code_size); /** * Create unwind information */ mono_emit_unwind_op_def_cfa (cfg, code, STK_BASE, S390_CFA_OFFSET); s390_stmg (code, s390_r6, s390_r15, STK_BASE, S390_REG_SAVE_OFFSET); emit_unwind_regs(cfg, code, s390_r6, s390_r15, S390_REG_SAVE_OFFSET - S390_CFA_OFFSET); if (cfg->arch.bkchain_reg != -1) s390_lgr (code, cfg->arch.bkchain_reg, STK_BASE); /* * If there are local allocations the R11 becomes the frame register */ if (cfg->flags & MONO_CFG_HAS_ALLOCA) { cfg->used_int_regs |= 1 << s390_r11; } /* * Check if FP registers need preserving */ if ((cfg->arch.used_fp_regs & S390_FP_SAVE_MASK) != 0) { for (int i = s390_f8; i <= s390_f15; i++) { if (cfg->arch.used_fp_regs & (1 << i)) fpOffset += sizeof(double); } fpOffset = S390_ALIGN(fpOffset, sizeof(double)); } cfg->arch.fpSize = fpOffset; /* * Calculate stack requirements */ alloc_size = cfg->stack_offset + fpOffset; cfg->stack_usage = cfa_offset = alloc_size; s390_lgr (code, s390_r11, STK_BASE); if (s390_is_imm16 (alloc_size)) { s390_aghi (code, STK_BASE, -alloc_size); } else if (s390_is_imm32 (alloc_size)) { s390_agfi (code, STK_BASE, -alloc_size); } else { int stackSize = alloc_size; while (stackSize > INT_MAX) { s390_agfi (code, STK_BASE, -INT_MAX); stackSize -= INT_MAX; } s390_agfi (code, STK_BASE, -stackSize); } mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size + S390_CFA_OFFSET); s390_stg (code, s390_r11, 0, STK_BASE, 0); if (fpOffset > 0) { int stkOffset = 0; s390_lgr (code, s390_r1, s390_r11); s390_aghi (code, s390_r1, -fpOffset); for (int
i = s390_f8; i <= s390_f15; i++) { if (cfg->arch.used_fp_regs & (1 << i)) { s390_std (code, i, 0, s390_r1, stkOffset); emit_unwind_regs(cfg, code, 16+i, 16+i, stkOffset+fpOffset - S390_CFA_OFFSET); stkOffset += sizeof(double); } } } if (cfg->frame_reg != STK_BASE) { s390_lgr (code, s390_r11, STK_BASE); mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg); } /* store runtime generic context */ if (cfg->rgctx_var) { g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET); s390_stg (code, MONO_ARCH_RGCTX_REG, 0, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset); } #if 0 char *methodName = getenv("MONO_TRACE_METHOD"); if (methodName != NULL) { printf("ns: %s k: %s m: %s\n",method->klass->name_space,method->klass->name,method->name);fflush(stdout); // Tests:set_ip //if ((strcmp(method->klass->name_space,"") == 0) && // (strcmp(method->klass->name,"Tests") == 0) && // (strcmp(method->name, "set_ip") == 0)) { // (strcmp("CancellationToken,TaskCreationOptions,TaskContinuationOptions,TaskScheduler",mono_signature_get_desc(method->signature, FALSE)) != 0)) { if ((strcmp(method->name, methodName) == 0)) { printf("SIGNATURE: %s\n",mono_signature_get_desc(method->signature, FALSE)); fflush(stdout); s390_j (code, 0); } } #endif /* compute max_offset in order to use short forward jumps * we always do it on s390 because the immediate displacement * for jumps is too small */ max_offset = 0; for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins; bb->max_offset = max_offset; MONO_BB_FOR_EACH_INS (bb, ins) max_offset += ins_get_size (ins->opcode); } /* load arguments allocated to register from the stack */ sig = mono_method_signature_internal (method); pos = 0; cinfo = cfg->arch.cinfo; if (cinfo->struct_ret) { ArgInfo *ainfo = &cinfo->ret; inst = cfg->vret_addr; inst->backend.size = ainfo->vtsize; if (inst->opcode == OP_REGVAR) s390_lgr (code, inst->dreg, ainfo->reg); else s390_stg (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); } /** * Process the arguments passed to the method */ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; inst = cfg->args [pos]; if (inst->opcode == OP_VTARG_ADDR) inst = inst->inst_left; if (inst->opcode == OP_REGVAR) { if (ainfo->regtype == RegTypeGeneral) s390_lgr (code, inst->dreg, ainfo->reg); else if (ainfo->regtype == RegTypeFP) { if (inst->dreg != ainfo->reg) { s390_ldr (code, inst->dreg, ainfo->reg); } } else if (ainfo->regtype == RegTypeFPR4) { if (!cfg->r4fp) s390_ledbr (code, inst->dreg, ainfo->reg); } else if (ainfo->regtype == RegTypeBase) { s390_lgr (code, s390_r13, STK_BASE); s390_aghi (code, s390_r13, alloc_size); s390_lg (code, inst->dreg, 0, s390_r13, ainfo->offset); } else g_assert_not_reached (); if (cfg->verbose_level > 2) g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg)); } else { if (ainfo->regtype == RegTypeGeneral) { if (!((ainfo->reg >= 2) && (ainfo->reg <= 6))) g_assert_not_reached(); switch (ainfo->size) { case 1: s390_stc (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); break; case 2: s390_sth (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); break; case 4: s390_st (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); break; case 8: s390_stg (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); break; } } else if (ainfo->regtype == RegTypeBase) { } else if (ainfo->regtype == RegTypeFP) { s390_std (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); } else if (ainfo->regtype == RegTypeFPR4) { 
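/* single-precision (R4) argument: stored with the 4-byte STE rather than the 8-byte STD used for doubles */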
s390_ste (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); } else if (ainfo->regtype == RegTypeStructByVal) { int doffset = inst->inst_offset; size = (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE && sig->pinvoke && !sig->marshalling_disabled ? mono_class_native_size(mono_class_from_mono_type_internal (inst->inst_vtype), NULL) : ainfo->size); switch (size) { case 1: if (ainfo->reg != STK_BASE) s390_stc (code, ainfo->reg, 0, inst->inst_basereg, doffset); break; case 2: if (ainfo->reg != STK_BASE) s390_sth (code, ainfo->reg, 0, inst->inst_basereg, doffset); break; case 4: if (ainfo->reg != STK_BASE) s390_st (code, ainfo->reg, 0, inst->inst_basereg, doffset); break; case 8: if (ainfo->reg != STK_BASE) s390_stg (code, ainfo->reg, 0, inst->inst_basereg, doffset); break; default: if (ainfo->reg != STK_BASE) s390_stg (code, ainfo->reg, 0, STK_BASE, doffset); } } else if (ainfo->regtype == RegTypeStructByAddr) { s390_stg (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset); } else g_assert_not_reached (); } pos++; } if (method->save_lmf) { /** * Build the MonoLMF structure on the stack - see mini-s390x.h */ lmfOffset = alloc_size - sizeof(MonoLMF); s390_lgr (code, s390_r13, cfg->frame_reg); s390_aghi (code, s390_r13, lmfOffset); /* * Preserve the parameter registers while we fix up the lmf */ s390_stmg (code, s390_r2, s390_r6, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, pregs)); for (i = 0; i < 5; i++) mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, pregs) + i * sizeof(gulong), SLOT_NOREF); /* * On return from this call r2 has the address of the &lmf */ code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern)); /* * Set lmf.lmf_addr = jit_tls->lmf */ s390_stg (code, s390_r2, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, lmf_addr)); mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr), SLOT_NOREF); /* * Get current lmf */ s390_lg (code, s390_r0, 0, s390_r2, 0); /* * Set our lmf as the current lmf */ s390_stg (code, s390_r13, 0, s390_r2, 0); /* * Have our lmf.previous_lmf point to the last lmf */ s390_stg (code, s390_r0, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, previous_lmf)); mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF); /* * Save method info */ S390_SET (code, s390_r1, method); s390_stg (code, s390_r1, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, method)); mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, method), SLOT_NOREF); /* * Save the current IP */ s390_stg (code, STK_BASE, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, ebp)); s390_basr (code, s390_r1, 0); s390_stg (code, s390_r1, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, eip)); mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, ebp), SLOT_NOREF); mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, eip), SLOT_NOREF); /* * Save general and floating point registers */ s390_stmg (code, s390_r2, s390_r12, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, gregs) + 2 * sizeof(gulong)); for (i = 0; i < 11; i++) mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, gregs) + i * sizeof(gulong), SLOT_NOREF); fpOffset = lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, fregs); for (i = 0; i < 16; i++) { s390_std (code, i, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, fregs) + i * sizeof(gulong)); mini_gc_set_slot_type_from_fp (cfg, fpOffset, SLOT_NOREF); fpOffset += sizeof(double); } /*
* Restore the parameter registers now that we've set up the lmf */ s390_lmg (code, s390_r2, s390_r6, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, pregs)); } if (cfg->method->save_lmf) argsClobbered = TRUE; /* * Optimize the common case of the first bblock making a call with the same * arguments as the method. This works because the arguments are still in their * original argument registers. */ if (!argsClobbered) { MonoBasicBlock *first_bb = cfg->bb_entry; MonoInst *next; int filter = FILTER_IL_SEQ_POINT; next = mono_bb_first_inst (first_bb, filter); if (!next && first_bb->next_bb) { first_bb = first_bb->next_bb; next = mono_bb_first_inst (first_bb, filter); } if (first_bb->in_count > 1) next = NULL; for (i = 0; next && i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; gboolean match = FALSE; inst = cfg->args [i]; if (inst->opcode != OP_REGVAR) { switch (ainfo->regtype) { case RegTypeGeneral: { if (((next->opcode == OP_LOAD_MEMBASE) || (next->opcode == OP_LOADI4_MEMBASE)) && next->inst_basereg == inst->inst_basereg && next->inst_offset == inst->inst_offset) { if (next->dreg == ainfo->reg) { NULLIFY_INS (next); match = TRUE; } else { next->opcode = OP_MOVE; next->sreg1 = ainfo->reg; /* Only continue if the instruction doesn't change argument regs */ if (next->dreg == ainfo->reg) match = TRUE; } } break; } default: break; } } else { /* Argument allocated to (non-volatile) register */ switch (ainfo->regtype) { case RegTypeGeneral: if (next->opcode == OP_MOVE && next->sreg1 == inst->dreg && next->dreg == ainfo->reg) { NULLIFY_INS (next); match = TRUE; } break; default: break; } } if (match) { next = mono_inst_next (next, filter); if (!next) break; } } } if (cfg->gen_sdb_seq_points) { MonoInst *seq; /* Initialize ss_tramp_var */ seq = cfg->arch.ss_tramp_var; g_assert (seq->opcode == OP_REGOFFSET); S390_SET (code, s390_r1, (guint64) &ss_trampoline); s390_stg (code, s390_r1, 0, seq->inst_basereg, seq->inst_offset); /* Initialize bp_tramp_var */ seq = cfg->arch.bp_tramp_var; g_assert (seq->opcode == OP_REGOFFSET); S390_SET (code, s390_r1, (guint64) &bp_trampoline); s390_stg (code, s390_r1, 0, seq->inst_basereg, seq->inst_offset); } set_code_cursor (cfg, code); return code; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific epilog generation * * @param[in] @cfg - Compile control block * * Create the instruction sequence for exit from a method */ void mono_arch_emit_epilog (MonoCompile *cfg) { MonoMethod *method = cfg->method; guint8 *code; int max_epilog_size = 96, i; int fpOffset = 0; if (cfg->method->save_lmf) max_epilog_size += 128; code = realloc_code (cfg, max_epilog_size); cfg->has_unwind_info_for_epilog = TRUE; /* Mark the start of the epilog */ mono_emit_unwind_op_mark_loc (cfg, code, 0); /* Save the unwind state which is needed by the out-of-line code */ mono_emit_unwind_op_remember_state (cfg, code); if (method->save_lmf) restoreLMF(code, cfg->frame_reg, cfg->stack_usage); code = backUpStackPtr(cfg, code); mono_emit_unwind_op_def_cfa (cfg, code, STK_BASE, S390_CFA_OFFSET); mono_emit_unwind_op_same_value (cfg, code, STK_BASE); if (cfg->arch.fpSize != 0) { fpOffset = -cfg->arch.fpSize; for (int i=8; i<16; i++) { if (cfg->arch.used_fp_regs & (1 << i)) { s390_ldy (code, i, 0, STK_BASE, fpOffset); mono_emit_unwind_op_same_value (cfg, code, 16+i); fpOffset += sizeof(double); } } } s390_lmg (code, s390_r6, s390_r14, STK_BASE, S390_REG_SAVE_OFFSET); for (i = s390_r6; i < s390_r15; i++) mono_emit_unwind_op_same_value
(cfg, code, i); s390_br (code, s390_r14); /* Restore the unwind state to be the same as before the epilog */ mono_emit_unwind_op_restore_state (cfg, code); /* Round up for start of any thunk entries */ code = (guint8 *) ((((uintptr_t) code + 7) >> 3) << 3); set_code_cursor (cfg, code); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific exception emission * * @param[in] @cfg - Compile control block * * Create the instruction sequence for exception handling */ void mono_arch_emit_exceptions (MonoCompile *cfg) { MonoJumpInfo *patch_info; guint8 *code; int nThrows = 0, exc_count = 0, iExc; guint32 code_size; MonoClass *exc_classes [MAX_EXC]; guint8 *exc_throw_start [MAX_EXC]; for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { if (patch_info->type == MONO_PATCH_INFO_EXC) exc_count++; } code_size = exc_count * 48; code = realloc_code (cfg, code_size); /* * Add code to raise exceptions */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { switch (patch_info->type) { case MONO_PATCH_INFO_EXC: { guint8 *ip = patch_info->ip.i + cfg->native_code; MonoClass *exc_class; /* * Patch the branch in epilog to come here */ s390_patch_rel (ip + 2, (guint64) S390_RELATIVE(code,ip)); exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name); for (iExc = 0; iExc < nThrows; ++iExc) if (exc_classes [iExc] == exc_class) break; if (iExc < nThrows) { s390_jcl (code, S390_CC_UN, (guint64) exc_throw_start [iExc]); patch_info->type = MONO_PATCH_INFO_NONE; } else { if (nThrows < MAX_EXC) { exc_classes [nThrows] = exc_class; exc_throw_start [nThrows] = code; } /* * Patch the parameter passed to the handler */ S390_SET (code, s390_r2, m_class_get_type_token (exc_class)); /* * Load return address & parameter register */ s390_larl (code, s390_r14, (guint64)S390_RELATIVE((patch_info->ip.i + cfg->native_code + 8), code)); /* * Reuse the current patch to set the jump */ patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID; patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception; patch_info->ip.i = code - cfg->native_code; patch_info->relocation = MONO_R_S390_THUNKED; S390_BR_TEMPLATE (code, s390_r1); cfg->thunk_area += THUNK_SIZE; } break; } default: /* do nothing */ break; } } /* Round up for start of any thunk entries */ code = (guint8 *) ((((uintptr_t) code + 7) >> 3) << 3); set_code_cursor (cfg, code); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific finishing of initialization * * Perform any architectural-specific operations at the conclusion of * the initialization phase */ void mono_arch_finish_init (void) { } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific instruction emission for method * * @param[in] @cfg - Compile Control block * @param[in] @cmethod - Current method * @param[in] @fsig - Method signature * @param[in] @args - Arguments to method * @returns Instruction(s) required for architecture * * Provide any architectural shortcuts for specific methods. 
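 * On s390x this maps recognized Math and MathF calls (Abs, Ceiling, Floor,
 * Round, Sqrt, Truncate, CopySign) onto dedicated mini opcodes that the
 * code generator expands inline instead of emitting library calls.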
*/ MonoInst * mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins = NULL; int opcode = 0; MonoStackType stack_type = STACK_R8; if (cmethod->klass == mono_class_try_get_math_class ()) { // unary double if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) { if (strcmp (cmethod->name, "Abs") == 0) { opcode = OP_ABS; } else if (strcmp (cmethod->name, "Ceiling") == 0) { opcode = OP_CEIL; } else if (strcmp (cmethod->name, "Floor") == 0) { opcode = OP_FLOOR; } else if (strcmp (cmethod->name, "Round") == 0) { opcode = OP_ROUND; } else if (strcmp (cmethod->name, "Sqrt") == 0) { opcode = OP_SQRT; } else if (strcmp (cmethod->name, "Truncate") == 0) { opcode = OP_TRUNC; } } // unary float (overloaded) else if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R4) { if (strcmp (cmethod->name, "Abs") == 0) { if (cfg->r4fp) { opcode = OP_ABSF; stack_type = STACK_R4; } else { opcode = OP_ABS; } } } // binary double else if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R8 && fsig->params [1]->type == MONO_TYPE_R8) { if (strcmp (cmethod->name, "CopySign") == 0) { opcode = OP_FCOPYSIGN; } } } else if (cmethod->klass == mono_class_try_get_mathf_class ()) { if (fsig->param_count == 1) { stack_type = STACK_R4; if (strcmp (cmethod->name, "Abs") == 0) { if (cfg->r4fp) { opcode = OP_ABSF; stack_type = STACK_R4; } else { opcode = OP_ABS; } } else if (strcmp (cmethod->name, "Ceiling") == 0) { if (cfg->r4fp) { opcode = OP_CEILF; stack_type = STACK_R4; } else { opcode = OP_CEIL; } } else if (strcmp (cmethod->name, "Floor") == 0) { if (cfg->r4fp) { opcode = OP_FLOORF; stack_type = STACK_R4; } else { opcode = OP_FLOOR; } } else if (strcmp (cmethod->name, "Sqrt") == 0) { if (cfg->r4fp) { opcode = OP_SQRTF; stack_type = STACK_R4; } else { opcode = OP_SQRT; } } else if (strcmp (cmethod->name, "Truncate") == 0) { if (cfg->r4fp) { opcode = OP_TRUNCF; stack_type = STACK_R4; } else { opcode = OP_TRUNC; } opcode = OP_TRUNCF; } } } if (opcode) { MONO_INST_NEW (cfg, ins, opcode); ins->type = stack_type; ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = args [0]->dreg; if (fsig->param_count > 1) { ins->sreg2 = args [1]->dreg; } g_assert (fsig->param_count <= 2); MONO_ADD_INS (cfg->cbb, ins); } return ins; } /*========================= End of Function ========================*/ /** * * @brief Decompose opcode into a System z operation * * @param[in] @cfg - Compile Control block * @param[in] @ins - Mono Instruction * * Substitute a System z instruction for a Mono operation. */ void mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins) { /* * Have to rename these to avoid being decomposed normally, since the normal * decomposition does not work on S390. 
*/ switch (ins->opcode) { case OP_ISUB_OVF: ins->opcode = OP_S390_ISUB_OVF; break; case OP_ISUB_OVF_UN: ins->opcode = OP_S390_ISUB_OVF_UN; break; case OP_IADD_OVF: ins->opcode = OP_S390_IADD_OVF; break; case OP_IADD_OVF_UN: ins->opcode = OP_S390_IADD_OVF_UN; break; case OP_LADD_OVF: ins->opcode = OP_S390_LADD_OVF; break; case OP_LADD_OVF_UN: ins->opcode = OP_S390_LADD_OVF_UN; break; case OP_LSUB_OVF: ins->opcode = OP_S390_LSUB_OVF; break; case OP_LSUB_OVF_UN: ins->opcode = OP_S390_LSUB_OVF_UN; break; default: break; } } /*========================= End of Function ========================*/ /** * * @brief Determine the cost of allocation a variable * * @param[in] @cfg - Compile Control block * @param[in] @vmv - Mono Method Variable * @returns Cost (hardcoded on s390x to 2) * * Determine the cost, in the number of memory references, of the action * of allocating the variable VMV into a register during global register * allocation. * */ guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv) { /* FIXME: */ return 2; } /*========================= End of Function ========================*/ /** * * @brief Architectural specific register window flushing * * Not applicable for s390x so we just do nothing * */ void mono_arch_flush_register_windows (void) { } /*========================= End of Function ========================*/ /** * * @brief Architectural specific check if value may be immediate * * @param[in] @opcode - Operation code * @param[in] @imm_opcode - Immediate operation code * @param[in] @imm - Value to be examined * @returns True if it is a valid immediate value * * Determine if operand qualifies as an immediate value. For s390x * this is a value in the range -2**32/2**32-1 * */ gboolean mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm) { return s390_is_imm32 (imm); } /*========================= End of Function ========================*/ /** * * @brief Architectural specific patch offset value for AOT * * @param[in] @code - Location of code to check * @returns Offset * * Dummy entry point if/when s390x supports AOT. */ guint32 mono_arch_get_patch_offset (guint8 *code) { return 0; } /*========================= End of Function ========================*/ /** * * @brief Architectural specific returning of register from context * * @param[in] @ctx - Mono context * @param[in] @reg - Register number to be returned * @returns Contents of the register from the context * * Return a register from the context. 
*/ host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg) { return ctx->uc_mcontext.gregs[reg]; } host_mgreg_t* mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg) { return &ctx->uc_mcontext.gregs[reg]; } /*========================= End of Function ========================*/ /** * * @brief Architectural specific setting of a register in the context * * @param[in] @ctx - Mono context * @param[in] @reg - Register number to be returned * @param[in] @val - Value to be set * * Set the specified register in the context with the value passed */ void mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val) { ctx->uc_mcontext.gregs[reg] = val; } /*========================= End of Function ========================*/ /** * * @brief Architectural specific returning of the "this" value from context * * @param[in] @ctx - Mono context * @param[in] @code - Current location * @returns Pointer to the "this" object * * Extract register 2 from the context as for s390x this is where the * this parameter is passed */ gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code) { return (gpointer) regs [s390_r2]; } /*========================= End of Function ========================*/ /** * * @brief Delegation trampoline processing * * @param[in] @info - Trampoline information * @param[in] @has_target - Use target from delegation * @param[in] @param_count - Count of parameters * @param[in] @aot - AOT indicator * @returns Next instruction location * * Process the delegation trampolines */ static guint8 * get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, MonoMethodSignature *sig, gboolean aot) { guint8 *code, *start; if (has_target) { int size = 32; start = code = (guint8 *) mono_global_codeman_reserve (size); /* Replace the this argument with the target */ s390_lg (code, s390_r1, 0, s390_r2, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); s390_lg (code, s390_r2, 0, s390_r2, MONO_STRUCT_OFFSET (MonoDelegate, target)); s390_br (code, s390_r1); g_assert ((code - start) <= size); mono_arch_flush_icache (start, size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); } else { int size, i, offset = S390_MINIMAL_STACK_SIZE, iReg = s390_r2; CallInfo *cinfo = get_call_info (NULL, sig); size = 32 + sig->param_count * 8; start = code = (guint8 *) mono_global_codeman_reserve (size); s390_lg (code, s390_r1, 0, s390_r2, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); /* slide down the arguments */ for (i = 0; i < sig->param_count; ++i) { switch(cinfo->args[i].regtype) { case RegTypeGeneral : if (iReg < S390_LAST_ARG_REG) { s390_lgr (code, iReg, (iReg + 1)); } else { s390_lg (code, iReg, 0, STK_BASE, offset); } iReg++; break; default : s390_mvc (code, sizeof(uintptr_t), STK_BASE, offset, STK_BASE, offset+sizeof(uintptr_t)); offset += sizeof(uintptr_t); } } s390_br (code, s390_r1); g_free (cinfo); g_assert ((code - start) <= size); mono_arch_flush_icache (start, size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); } if (has_target) { *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, NULL); } else { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count); *info = mono_tramp_info_create (name, start, code - start, NULL, NULL); g_free (name); } return start; } /*========================= End of Function ========================*/ /** * * @brief 
Architecture-specific delegation trampolines processing * * @returns List of trampolines * * Return a list of MonoTrampInfo structures for the delegate invoke impl trampolines. */ GSList* mono_arch_get_delegate_invoke_impls (void) { GSList *res = NULL; MonoTrampInfo *info; get_delegate_invoke_impl (&info, TRUE, 0, TRUE); res = g_slist_prepend (res, info); #if 0 for (int i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) { get_delegate_invoke_impl (&info, FALSE, NULL, TRUE); res = g_slist_prepend (res, info); } #endif return res; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific delegation trampoline processing * * @param[in] @sig - Method signature * @param[in] @has_target - Whether delegation contains a target * @returns Trampoline * * Return a pointer to a delegation trampoline */ gpointer mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target) { guint8 *code, *start; if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS) return NULL; /* FIXME: Support more cases */ if (MONO_TYPE_ISSTRUCT (mini_get_underlying_type (sig->ret))) return NULL; if (has_target) { static guint8* cached = NULL; if (cached) return cached; if (mono_ee_features.use_aot_trampolines) { start = (guint8 *) mono_aot_get_trampoline ("delegate_invoke_impl_has_target"); } else { MonoTrampInfo *info; start = get_delegate_invoke_impl (&info, TRUE, sig, FALSE); mono_tramp_info_register (info, NULL); } mono_memory_barrier (); cached = start; } else { static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL}; int i; if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS) return NULL; for (i = 0; i < sig->param_count; ++i) if (!mono_is_regsize_var (sig->params [i])) return NULL; code = cache [sig->param_count]; if (code) return code; if (mono_ee_features.use_aot_trampolines) { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count); start = (guint8 *) mono_aot_get_trampoline (name); g_free (name); } else { MonoTrampInfo *info; start = get_delegate_invoke_impl (&info, FALSE, sig, FALSE); mono_tramp_info_register (info, NULL); } mono_memory_barrier (); cache [sig->param_count] = start; } return start; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific delegation virtual trampoline processing * * @param[in] @sig - Method signature * @param[in] @method - Method * @param[in] @offset - Offset into vtable * @param[in] @load_imt_reg - Whether to load the IMT register * @returns Trampoline * * Return a pointer to a delegation virtual trampoline */ gpointer mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg) { guint8 *code, *start; int size = 40; start = code = (guint8 *) mono_global_codeman_reserve (size); /* * Replace the "this" argument with the target */ s390_lgr (code, s390_r1, s390_r2); s390_lg (code, s390_r2, 0, s390_r1, MONO_STRUCT_OFFSET(MonoDelegate, target)); /* * Load the IMT register, if needed */ if (load_imt_reg) { s390_lg (code, MONO_ARCH_IMT_REG, 0, s390_r1, MONO_STRUCT_OFFSET(MonoDelegate, method)); } /* * Load the vTable */ s390_lg (code, s390_r1, 0, s390_r2, MONO_STRUCT_OFFSET(MonoObject, vtable)); if (offset != 0) { s390_agfi(code, s390_r1, offset); } s390_lg (code, s390_r1, 0, s390_r1, 0); s390_br (code, s390_r1); mono_arch_flush_icache (start, code - start); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); return(start); }
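/*
 * Rough C model of the stub emitted above (illustrative only, not part of
 * the original file):
 *
 *   this = del->target;                                  // retarget "this"
 *   if (load_imt_reg) imt_reg = del->method;             // optional IMT load
 *   fn = *(gpointer *)((char *) this->vtable + offset);  // vtable slot
 *   goto fn;                                             // tail-branch
 */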
/*========================= End of Function ========================*/ /** * * @brief Architecture-specific build of IMT trampoline * * @param[in] @vtable - Mono VTable * @param[in] @domain - Mono Domain * @param[in] @imt_entries - List of IMT check items * @param[in] @count - Count of items * @param[in] @fail_tramp - Pointer to a failure trampoline * @returns Trampoline * * Return a pointer to an IMT trampoline */ gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp) { int i; int size = 0; guchar *code, *start; MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->is_equals) { if (item->check_target_idx) { if (!item->compare_done) item->chunk_size += CMP_SIZE + JUMP_SIZE; if (item->has_target_code) item->chunk_size += BR_SIZE + JUMP_SIZE + LOADCON_SIZE; else item->chunk_size += BR_SIZE + JUMP_SIZE + LOADCON_SIZE + LOAD_SIZE; } else { if (fail_tramp) { item->chunk_size += CMP_SIZE + 2 * BR_SIZE + JUMP_SIZE + 2 * LOADCON_SIZE; if (!item->has_target_code) item->chunk_size += LOAD_SIZE; } else { item->chunk_size += LOADCON_SIZE + LOAD_SIZE + BR_SIZE; #if ENABLE_WRONG_METHOD_CHECK item->chunk_size += CMP_SIZE + JUMP_SIZE; #endif } } } else { item->chunk_size += CMP_SIZE + JUMP_SIZE; imt_entries [item->check_target_idx]->compare_done = TRUE; } size += item->chunk_size; } if (fail_tramp) { code = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, size); } else { code = mono_mem_manager_code_reserve (mem_manager, size); } start = code; for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; item->code_target = (guint8 *) code; if (item->is_equals) { if (item->check_target_idx) { if (!item->compare_done) { S390_SET (code, s390_r0, item->key); s390_cgr (code, s390_r0, MONO_ARCH_IMT_REG); } item->jmp_code = (guint8*) code; s390_jcl (code, S390_CC_NE, 0); if (item->has_target_code) { S390_SET (code, s390_r1, item->value.target_code); } else { S390_SET (code, s390_r1, (&(vtable->vtable [item->value.vtable_slot]))); s390_lg (code, s390_r1, 0, s390_r1, 0); } s390_br (code, s390_r1); } else { if (fail_tramp) { gint64 target; S390_SET (code, s390_r0, item->key); s390_cgr (code, s390_r0, MONO_ARCH_IMT_REG); item->jmp_code = (guint8*) code; s390_jcl (code, S390_CC_NE, 0); if (item->has_target_code) { S390_SET (code, s390_r1, item->value.target_code); } else { g_assert (vtable); S390_SET (code, s390_r1, (&(vtable->vtable [item->value.vtable_slot]))); s390_lg (code, s390_r1, 0, s390_r1, 0); } s390_br (code, s390_r1); target = (gint64) S390_RELATIVE(code, item->jmp_code); s390_patch_rel(item->jmp_code+2, target); S390_SET (code, s390_r1, fail_tramp); s390_br (code, s390_r1); item->jmp_code = NULL; } else { /* enable the commented code to assert on wrong method */ #if ENABLE_WRONG_METHOD_CHECK g_assert_not_reached (); #endif S390_SET (code, s390_r1, (&(vtable->vtable [item->value.vtable_slot]))); s390_lg (code, s390_r1, 0, s390_r1, 0); s390_br (code, s390_r1); } } } else { S390_SET (code, s390_r0, item->key); s390_cgr (code, MONO_ARCH_IMT_REG, s390_r0); item->jmp_code = (guint8 *) code; s390_jcl (code, S390_CC_GE, 0); } } /* * patch the branches to get to the target items */ for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->jmp_code) { if (item->check_target_idx) { gint64 offset; offset = (gint64) S390_RELATIVE(imt_entries [item->check_target_idx]->code_target, item->jmp_code); 
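/* rewrite the displacement field (2 bytes into the instruction) of the conditional jump that was emitted earlier with a zero target */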
s390_patch_rel ((guchar *) item->jmp_code + 2, (guint64) offset); } } } mono_arch_flush_icache ((guint8*)start, (code - start)); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL)); if (!fail_tramp) UnlockedAdd (&mono_stats.imt_trampolines_size, code - start); g_assert (code - start <= size); mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), mem_manager); return (start); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific return of pointer to IMT method * * @param[in] @regs - Context registers * @param[in] @code - Current location * @returns Pointer to IMT method * * Extract the value of the IMT register from the context */ MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code) { return ((MonoMethod *) regs [MONO_ARCH_IMT_REG]); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific return of pointer static call vtable. * * @param[in] @regs - Context registers * @param[in] @code - Current location * @returns Pointer to static call vtable * * Extract the value of the RGCTX register from the context which * points to the static call vtable. */ MonoVTable* mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code) { return (MonoVTable*)(gsize) regs [MONO_ARCH_RGCTX_REG]; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific return of unwind bytecode for DWARF CIE * * @returns Unwind byte code * * Returns the unwind bytecode for DWARF CIE */ GSList* mono_arch_get_cie_program (void) { GSList *l = NULL; mono_add_unwind_op_def_cfa (l, 0, 0, STK_BASE, S390_CFA_OFFSET); return(l); } /*========================= End of Function ========================*/ #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED /** * * @brief Architecture-specific setting of a breakpoint * * @param[in] @ji - Mono JIT Information * @param[in] @ip - Instruction pointer * * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET. * The location should contain code emitted by OP_SEQ_POINT. */ void mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *bp = ip; /* IP should point to a LGHI R1,0 */ g_assert (bp[0] == 0xa7); /* Replace it with a LGHI R1,1 */ s390_lghi (bp, s390_r1, 1); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific clearing of a breakpoint * * @param[in] @ji - Mono JIT Information * @param[in] @ip - Instruction pointer * * Replace the breakpoint with a no-operation.
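 * Soft breakpoints here are a LGHI r1,imm whose immediate is 1 while the
 * breakpoint is armed and 0 once cleared (see mono_arch_set_breakpoint above).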
*/ void mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *bp = ip; /* IP should point to a LGHI R1,1 */ g_assert (bp[0] == 0xa7); /* Replace it with a LGHI R1,0 */ s390_lghi (bp, s390_r1, 0); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific check if this is a breakpoint event * * @param[in] @info - Signal information * @param[in] @sigctx - Signal context * @returns True if this is a breakpoint event * * We use soft breakpoints so always return FALSE */ gboolean mono_arch_is_breakpoint_event (void *info, void *sigctx) { /* We use soft breakpoints on s390x */ return FALSE; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific skip of a breakpoint * * @param[in] @ctx - Mono Context * @param[in] @ji - Mono JIT information * * We use soft breakpoints so this is a no-op */ void mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji) { g_assert_not_reached (); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific start of single stepping * * Unprotect the trigger page to enable single stepping */ void mono_arch_start_single_stepping (void) { ss_trampoline = mini_get_single_step_trampoline(); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific stop of single stepping * * Write-protect the trigger page to disable single stepping */ void mono_arch_stop_single_stepping (void) { ss_trampoline = NULL; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific check if single stepping event * * @param[in] @info - Signal information * @param[in] @sigctx - Signal context * @returns True if this is a single stepping event * * Return whether the machine state in sigctx corresponds to a single step event. * On s390x we use soft breakpoints so return FALSE */ gboolean mono_arch_is_single_step_event (void *info, void *sigctx) { /* We use soft breakpoints on s390x */ return FALSE; } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific skip of a single stepping event * * @param[in] @ctx - Mono Context * * Modify the ctx so the IP is placed after the single step trigger * instruction, so that the instruction is not executed again. * On s390x we use soft breakpoints so we shouldn't get here */ void mono_arch_skip_single_step (MonoContext *ctx) { g_assert_not_reached(); } /*========================= End of Function ========================*/ /** * * @brief Architecture-specific creation of sequence point information * * @param[in] @domain - Mono Domain * @param[in] @code - Current location pointer * @returns Sequence Point Information * * Return a pointer to a data structure which is used by the sequence * point implementation in AOTed code. A no-op on s390x until AOT is * ever supported.
SeqPointInfo * mono_arch_get_seq_point_info (guint8 *code) { SeqPointInfo *info; MonoJitInfo *ji; MonoJitMemoryManager *jit_mm; jit_mm = get_default_jit_mm (); jit_mm_lock (jit_mm); info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code); jit_mm_unlock (jit_mm); if (!info) { ji = mini_jit_info_table_find (code); g_assert (ji); // FIXME: Optimize the size info = (SeqPointInfo *)g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size * sizeof (gpointer))); info->ss_tramp_addr = &ss_trampoline; jit_mm_lock (jit_mm); g_hash_table_insert (jit_mm->arch_seq_points, code, info); jit_mm_unlock (jit_mm); } return info; } /*========================= End of Function ========================*/ #endif /** * * @brief Architecture-specific check of supported operation codes * * @param[in] @opcode - Operation code to be checked * @returns True if operation code is supported * * Check if a mono operation is supported in hardware. */ gboolean mono_arch_opcode_supported (int opcode) { switch (opcode) { case OP_ATOMIC_ADD_I4: case OP_ATOMIC_ADD_I8: case OP_ATOMIC_EXCHANGE_I4: case OP_ATOMIC_EXCHANGE_I8: return TRUE; default: return FALSE; } } /*========================= End of Function ========================*/ #ifndef DISABLE_JIT /** * * @brief Architecture-specific check of tailcall support * * @param[in] @cfg - Mono Compile control block * @param[in] @caller_sig - Signature of caller * @param[in] @callee_sig - Signature of callee * @param[in] @virtual_ - Whether this is a virtual call * @returns True if the tailcall operation is supported * * Check if a tailcall may be made from caller to callee based on a * number of conditions including parameter types and stack sizes */ gboolean mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_) { g_assert (caller_sig); g_assert (callee_sig); CallInfo *caller_info = get_call_info (NULL, caller_sig); CallInfo *callee_info = get_call_info (NULL, callee_sig); gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage); // Any call that would result in parameters being placed on the stack cannot be "tailed" as it may // result in the caller's parameter variables being overwritten. ArgInfo const * const ainfo = callee_info->args + callee_sig->hasthis; for (int i = 0; res && i < callee_sig->param_count; ++i) { switch(ainfo[i].regtype) { case RegTypeGeneral : // R6 is both used as argument register and call-saved // This means we cannot use a tail call if R6 is needed if (ainfo[i].reg == S390_LAST_ARG_REG) res = FALSE; else res = TRUE; break; case RegTypeFP : case RegTypeFPR4 : case RegTypeStructByValInFP : res = TRUE; break; case RegTypeBase : res = FALSE; break; case RegTypeStructByAddr : if (ainfo[i].reg == STK_BASE || ainfo[i].reg == S390_LAST_ARG_REG) res = FALSE; else res = TRUE; break; case RegTypeStructByVal : if (ainfo[i].reg == STK_BASE || ainfo[i].reg == S390_LAST_ARG_REG) res = FALSE; else { switch(ainfo[i].size) { case 0: case 1: case 2: case 4: case 8: res = TRUE; break; default: res = FALSE; } } break; } } g_free (caller_info); g_free (callee_info); return(res); } /*========================= End of Function ========================*/ #endif /** * * @brief Architecture-specific load function * * @param[in] @jit_call_id - JIT callee identifier * @returns Pointer to load function trampoline * * A no-operation on s390x until if/when it supports AOT.
*/ gpointer mono_arch_load_function (MonoJitICallId jit_icall_id) { return NULL; } /*========================= End of Function ========================*/ /** * * @brief Emit call to thunked code * * @param[in] @cfg - configuration data * @param[inout] @code - where to emit call * @param[in] @call - call instruction * @returns Pointer to next code area * */ static __inline__ guint8* emit_call (MonoCompile *cfg, guint8 *code, MonoJumpInfoType type, gconstpointer target) { mono_add_patch_info_rel (cfg, code-cfg->native_code, type, target, MONO_R_S390_THUNKED); S390_CALL_TEMPLATE (code, s390_r14); cfg->thunk_area += THUNK_SIZE; return code; } /*========================= End of Function ========================*/ /** * * @brief Emit thunk for an indirect call * * @param[inout] @code - where to emit thunk * @param[in] @target - thunk target * @returns Pointer to next code area * */ static guint8* emit_thunk (guint8 *code, gconstpointer target) { *(guint64*)code = (guint64)target; code += sizeof (guint64); return code; } /*========================= End of Function ========================*/ /** * * @brief Create thunk * * @param[in] @cfg - Compiler configuration * @param[inout] @code - where to emit thunk * @param[in] @target - thunk target * * Create a new thunk * */ static void create_thunk (MonoCompile *cfg, guint8 *ip, guint8 *code, gpointer target) { guint8 *thunks; int thunks_size; /* * This can be called multiple times during JITting, * save the current position in cfg->arch to avoid * doing a O(n^2) search. */ if (!cfg->arch.thunks) { cfg->arch.thunks = cfg->thunks; cfg->arch.thunks_size = cfg->thunk_area; } thunks = (guint8 *) cfg->arch.thunks; thunks_size = cfg->arch.thunks_size; if (!thunks_size) { g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE)); g_assert_not_reached (); } g_assert (*(guint64 *)thunks == 0); emit_thunk (thunks, target); cfg->arch.thunks += THUNK_SIZE; cfg->arch.thunks_size -= THUNK_SIZE; S390_EMIT_CALL(ip, thunks); } /*========================= End of Function ========================*/ /** * * @brief Update thunk * * @param[in] @cfg - Compiler configuration * @param[inout] @code - where to emit thunk * @param[in] @target - thunk target * * Update an existing thunk * */ static void update_thunk (MonoCompile *cfg, guint8 *code, gpointer target) { MonoJitInfo *ji; MonoThunkJitInfo *info; guint8 *thunks; guint8 *orig_target; guint8 *target_thunk; int thunks_size; ji = mini_jit_info_table_find ((char*)code); g_assert (ji); info = mono_jit_info_get_thunk_info (ji); g_assert (info); thunks = (guint8*)ji->code_start + info->thunks_offset; thunks_size = info->thunks_size; /* * We're pointing at the start of jump to thunk, * but mono_arch_get_call_target expects we're pointing * after the branch so we adjust */ orig_target = mono_arch_get_call_target (code + 6); target_thunk = NULL; if (orig_target >= thunks && orig_target < thunks + thunks_size) { /* The call already points to a thunk, because of trampolines etc. */ target_thunk = orig_target; } else { g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE)); g_assert_not_reached (); } emit_thunk (target_thunk, target); } /*========================= End of Function ========================*/
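/*
 * Illustrative sketch, not part of the original file: a minimal stand-alone
 * model of the thunk scheme above. Each out-of-range call target gets an
 * 8-byte slot written by emit_thunk (); the call site branches through the
 * slot, so retargeting (update_thunk) only rewrites the slot. The names
 * thunk_slot, thunk_write and thunk_dispatch are hypothetical.
 */
#include <stdint.h>

static uint64_t thunk_slot;                /* one THUNK_SIZE cell           */

static void
thunk_write (uint64_t target)              /* analogue of emit_thunk ()     */
{
	thunk_slot = target;               /* store the 64-bit target       */
}

static uint64_t
thunk_dispatch (void)                      /* call site: load slot, branch  */
{
	return thunk_slot;                 /* the JIT branches via register */
}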
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/mini-sparc.c
/** * \file * Sparc backend for the Mono code generator * * Authors: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * * Modified for SPARC: * Christopher Taylor ([email protected]) * Mark Crichton ([email protected]) * Zoltan Varga ([email protected]) * * (C) 2003 Ximian, Inc. */ #include "mini.h" #include <string.h> #include <pthread.h> #include <unistd.h> #ifndef __linux__ #include <thread.h> #endif #include <unistd.h> #include <sys/mman.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/tokentype.h> #include <mono/utils/mono-math.h> #include <mono/utils/mono-hwcap.h> #include <mono/utils/unlocked.h> #include "mini-sparc.h" #include "trace.h" #include "cpu-sparc.h" #include "jit-icalls.h" #include "ir-emit.h" #include "mono/utils/mono-tls-inline.h" /* * Sparc V9 means two things: * - the instruction set * - the ABI * * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc * processors in use are 64 bit processors. The V9 ABI is only usable if the * mono executable is a 64 bit executable. So it would make sense to use the 64 bit * instructions without using the 64 bit ABI. */ /* * Register usage: * - %i0..%i<n> hold the incoming arguments, these are never written by JITted * code. Unused input registers are used for global register allocation. * - %o0..%o5 and %l7 are used for local register allocation and passing arguments * - %l0..%l6 are used for global register allocation * - %o7 and %g1 are used as scratch registers in opcodes * - all floating point registers are used for local register allocation except %f0. * Only double precision registers are used. * In 64 bit mode: * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are * used for local allocation. */ /* * Alignment: * - doubles and longs must be stored in dword aligned locations */ /* * The following things are not implemented or do not work: * - some fp arithmetic corner cases * The following tests in mono/mini are expected to fail: * - test_0_simple_double_casts * This test casts (guint64)-1 to double and then back to guint64 again. * Under x86, it returns 0, while under sparc it returns -1. * * In addition to this, the runtime requires the trunc function, or its * solaris counterpart, aintl, to do some double->int conversions. If this * function is not available, it is emulated somewhat, but the results can be * strange. */ /* * SPARCV9 FIXME: * - optimize sparc_set according to the memory model * - when non-AOT compiling, compute patch targets immediately so we don't * have to emit the 6 byte template. * - varargs * - struct arguments/returns */ /* * SPARCV9 ISSUES: * - sparc_call_simple can't be used in a lot of places since the displacement * might not fit into an imm30. * - g1 can't be used in a lot of places since it is used as a scratch reg in * sparc_set. * - sparc_f0 can't be used as a scratch register on V9 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), ie. * %d36 = %f5. * - ldind.i4/u4 needs to sign extend/clear out upper word -> slows things down * - ins->dreg can't be used as a scratch register in r4 opcodes since it might * be a double precision register which has no single precision part. * - passing/returning structs is hard to implement, because: * - the spec is very hard to understand * - it requires knowledge about the fields of the structure, needs to handle * nested structures etc.
*/ /* * Possible optimizations: * - delay slot scheduling * - allocate large constants to registers * - add more mul/div/rem optimizations */ #ifndef __linux__ #define MONO_SPARC_THR_TLS 1 #endif /* * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32, * causing infinite loops in dominator computation. So glib-2.4 is required. */ #ifdef SPARCV9 #if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4 #error "glib 2.4 or later is required for 64 bit mode." #endif #endif #define SIGNAL_STACK_SIZE (64 * 1024) #define STACK_BIAS MONO_SPARC_STACK_BIAS #ifdef SPARCV9 /* %g1 is used by sparc_set */ #define GP_SCRATCH_REG sparc_g4 /* %f0 is used for parameter passing */ #define FP_SCRATCH_REG sparc_f30 #define ARGS_OFFSET (STACK_BIAS + 128) #else #define FP_SCRATCH_REG sparc_f0 #define ARGS_OFFSET 68 #define GP_SCRATCH_REG sparc_g1 #endif /* Whether this is a 64 bit executable */ #if SPARCV9 static gboolean v64 = TRUE; #else static gboolean v64 = FALSE; #endif static gpointer mono_arch_get_lmf_addr (void); const char* mono_arch_regname (int reg) { static const char * rnames[] = { "sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4", "sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1", "sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp", "sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3", "sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0", "sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5", "sparc_fp", "sparc_retadr" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } const char* mono_arch_fregname (int reg) { static const char *rnames [] = { "sparc_f0", "sparc_f1", "sparc_f2", "sparc_f3", "sparc_f4", "sparc_f5", "sparc_f6", "sparc_f7", "sparc_f8", "sparc_f9", "sparc_f10", "sparc_f11", "sparc_f12", "sparc_f13", "sparc_f14", "sparc_f15", "sparc_f16", "sparc_f17", "sparc_f18", "sparc_f19", "sparc_f20", "sparc_f21", "sparc_f22", "sparc_f23", "sparc_f24", "sparc_f25", "sparc_f26", "sparc_f27", "sparc_f28", "sparc_f29", "sparc_f30", "sparc_f31" }; if (reg >= 0 && reg < 32) return rnames [reg]; else return "unknown"; } /* * Initialize the cpu to execute managed code. */ void mono_arch_cpu_init (void) { } /* * Initialize architecture specific code. */ void mono_arch_init (void) { } /* * Cleanup architecture specific code. */ void mono_arch_cleanup (void) { } gboolean mono_arch_have_fast_tls (void) { return FALSE; } /* * This function returns the optimizations supported on this cpu. */ guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask) { guint32 opts = 0; *exclude_mask = 0; /* * On some processors, the cmov instructions are even slower than the * normal ones... */ if (mono_hwcap_sparc_is_v9) opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV; else *exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV; return opts; } #ifdef __GNUC__ #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory") #else /* assume Sun's compiler */ static void flushi(void *addr) { asm("flush %i0"); } #endif #ifndef __linux__ void sync_instruction_memory(caddr_t addr, int len); #endif void mono_arch_flush_icache (guint8 *code, gint size) { #ifndef __linux__ /* Hopefully this is optimized based on the actual CPU */ sync_instruction_memory (code, size); #else gulong start = (gulong) code; gulong end = start + size; gulong align; /* Sparcv9 chips only need flushes on 32 byte * cacheline boundaries. * * Sparcv8 needs a flush every 8 bytes. */ align = (mono_hwcap_sparc_is_v9 ?
32 : 8); start &= ~(align - 1); end = (end + (align - 1)) & ~(align - 1); while (start < end) { #ifdef __GNUC__ __asm__ __volatile__ ("iflush %0"::"r"(start)); #else flushi (start); #endif start += align; } #endif } /* * mono_sparc_flushw: * * Flush all register windows to memory. Every register window is saved to * a 16 word area on the stack pointed to by its %sp register. */ void mono_sparc_flushw (void) { static guint32 start [64]; static int inited = 0; guint32 *code; static void (*flushw) (void); if (!inited) { code = start; sparc_save_imm (code, sparc_sp, -160, sparc_sp); sparc_flushw (code); sparc_ret (code); sparc_restore_simple (code); g_assert ((code - start) < 64); mono_arch_flush_icache ((guint8*)start, (guint8*)code - (guint8*)start); flushw = (gpointer)start; inited = 1; } flushw (); } void mono_arch_flush_register_windows (void) { mono_sparc_flushw (); } gboolean mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm) { return sparc_is_imm13 (imm); } gboolean mono_sparc_is_v9 (void) { return mono_hwcap_sparc_is_v9; } gboolean mono_sparc_is_sparc64 (void) { return v64; } typedef enum { ArgInIReg, ArgInIRegPair, ArgInSplitRegStack, ArgInFReg, ArgInFRegPair, ArgOnStack, ArgOnStackPair, ArgInFloatReg, /* V9 only */ ArgInDoubleReg /* V9 only */ } ArgStorage; typedef struct { gint16 offset; /* This needs to be offset by %i0 or %o0 depending on caller/callee */ gint8 reg; ArgStorage storage; guint32 vt_offset; /* for valuetypes */ } ArgInfo; struct CallInfo { int nargs; guint32 stack_usage; guint32 reg_usage; ArgInfo ret; ArgInfo sig_cookie; ArgInfo args [1]; }; #define DEBUG(a) /* %o0..%o5 */ #define PARAM_REGS 6 static void inline add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair) { ainfo->offset = *stack_size; if (!pair) { if (*gr >= PARAM_REGS) { ainfo->storage = ArgOnStack; } else { ainfo->storage = ArgInIReg; ainfo->reg = *gr; (*gr) ++; } /* Always reserve stack space for parameters passed in registers */ (*stack_size) += sizeof (target_mgreg_t); } else { if (*gr < PARAM_REGS - 1) { /* A pair of registers */ ainfo->storage = ArgInIRegPair; ainfo->reg = *gr; (*gr) += 2; } else if (*gr >= PARAM_REGS) { /* A pair of stack locations */ ainfo->storage = ArgOnStackPair; } else { ainfo->storage = ArgInSplitRegStack; ainfo->reg = *gr; (*gr) ++; } (*stack_size) += 2 * sizeof (target_mgreg_t); } } #ifdef SPARCV9 #define FLOAT_PARAM_REGS 32 static void inline add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single) { ainfo->offset = *stack_size; if (single) { if (*gr >= FLOAT_PARAM_REGS) { ainfo->storage = ArgOnStack; } else { /* A single is passed in an even numbered fp register */ ainfo->storage = ArgInFloatReg; ainfo->reg = *gr + 1; (*gr) += 2; } } else { if (*gr < FLOAT_PARAM_REGS) { /* A double register */ ainfo->storage = ArgInDoubleReg; ainfo->reg = *gr; (*gr) += 2; } else { ainfo->storage = ArgOnStack; } } (*stack_size) += sizeof (target_mgreg_t); } #endif /* * get_call_info: * * Obtain information about a call according to the calling convention. * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version * document for more information. * For V9, see the "Low Level System Information (64-bit psABI)" chapter in * the 'Sparc Compliance Definition 2.4' document.
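 * For example (caller-side view), under the V8 convention a (double, int)
 * signature consumes the register pair %o0/%o1 for the double and %o2 for
 * the int, with stack slots reserved for all three words (see add_general
 * above).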
*/ static CallInfo* get_call_info (MonoCompile *cfg, MonoMethodSignature *sig, gboolean is_pinvoke) { guint32 i, gr, fr; int n = sig->hasthis + sig->param_count; guint32 stack_size = 0; CallInfo *cinfo; MonoType *ret_type; cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n)); gr = 0; fr = 0; #ifdef SPARCV9 if (MONO_TYPE_ISSTRUCT ((sig->ret))) { /* The address of the return value is passed in %o0 */ add_general (&gr, &stack_size, &cinfo->ret, FALSE); cinfo->ret.reg += sparc_i0; /* FIXME: Pass this after this as on other platforms */ NOT_IMPLEMENTED; } #endif /* this */ if (sig->hasthis) add_general (&gr, &stack_size, cinfo->args + 0, FALSE); if ((sig->call_convention == MONO_CALL_VARARG) && (n == 0)) { gr = PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE); } for (i = 0; i < sig->param_count; ++i) { ArgInfo *ainfo = &cinfo->args [sig->hasthis + i]; MonoType *ptype; if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { gr = PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE); } DEBUG(printf("param %d: ", i)); if (m_type_is_byref (sig->params [i])) { DEBUG(printf("byref\n")); add_general (&gr, &stack_size, ainfo, FALSE); continue; } ptype = mini_get_underlying_type (sig->params [i]); switch (ptype->type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: add_general (&gr, &stack_size, ainfo, FALSE); /* the value is in the ls byte */ ainfo->offset += sizeof (target_mgreg_t) - 1; break; case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: add_general (&gr, &stack_size, ainfo, FALSE); /* the value is in the ls word */ ainfo->offset += sizeof (target_mgreg_t) - 2; break; case MONO_TYPE_I4: case MONO_TYPE_U4: add_general (&gr, &stack_size, ainfo, FALSE); /* the value is in the ls dword */ ainfo->offset += sizeof (target_mgreg_t) - 4; break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: add_general (&gr, &stack_size, ainfo, FALSE); break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ptype)) { add_general (&gr, &stack_size, ainfo, FALSE); break; } /* Fall through */ case MONO_TYPE_VALUETYPE: #ifdef SPARCV9 if (sig->pinvoke) NOT_IMPLEMENTED; #endif add_general (&gr, &stack_size, ainfo, FALSE); break; case MONO_TYPE_TYPEDBYREF: add_general (&gr, &stack_size, ainfo, FALSE); break; case MONO_TYPE_U8: case MONO_TYPE_I8: #ifdef SPARCV9 add_general (&gr, &stack_size, ainfo, FALSE); #else add_general (&gr, &stack_size, ainfo, TRUE); #endif break; case MONO_TYPE_R4: #ifdef SPARCV9 add_float (&fr, &stack_size, ainfo, TRUE); gr ++; #else /* single precision values are passed in integer registers */ add_general (&gr, &stack_size, ainfo, FALSE); #endif break; case MONO_TYPE_R8: #ifdef SPARCV9 add_float (&fr, &stack_size, ainfo, FALSE); gr ++; #else /* double precision values are passed in a pair of registers */ add_general (&gr, &stack_size, ainfo, TRUE); #endif break; default: g_assert_not_reached (); } } if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) { gr = PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE); } /* return value */ ret_type = mini_get_underlying_type 
(sig->ret); switch (ret_type->type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: case MONO_TYPE_STRING: cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = sparc_i0; if (gr < 1) gr = 1; break; case MONO_TYPE_U8: case MONO_TYPE_I8: #ifdef SPARCV9 cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = sparc_i0; if (gr < 1) gr = 1; #else cinfo->ret.storage = ArgInIRegPair; cinfo->ret.reg = sparc_i0; if (gr < 2) gr = 2; #endif break; case MONO_TYPE_R4: case MONO_TYPE_R8: cinfo->ret.storage = ArgInFReg; cinfo->ret.reg = sparc_f0; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ret_type)) { cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = sparc_i0; if (gr < 1) gr = 1; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: if (v64) { if (sig->pinvoke) NOT_IMPLEMENTED; else /* Already done */ ; } else cinfo->ret.storage = ArgOnStack; break; case MONO_TYPE_TYPEDBYREF: if (v64) { if (sig->pinvoke) /* Same as a valuetype with size 24 */ NOT_IMPLEMENTED; else /* Already done */ ; } else cinfo->ret.storage = ArgOnStack; break; case MONO_TYPE_VOID: break; default: g_error ("Can't handle as return value 0x%x", sig->ret->type); } cinfo->stack_usage = stack_size; cinfo->reg_usage = gr; return cinfo; } GList * mono_arch_get_allocatable_int_vars (MonoCompile *cfg) { GList *vars = NULL; int i; /* * FIXME: If an argument is allocated to a register, then load it from the * stack in the prolog. */ for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); /* unused vars */ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos) continue; /* FIXME: Make arguments on stack allocatable to registers */ if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG)) continue; if (mono_is_regsize_var (ins->inst_vtype)) { g_assert (MONO_VARINFO (cfg, i)->reg == -1); g_assert (i == vmv->idx); vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE); } } return vars; } GList * mono_arch_get_global_int_regs (MonoCompile *cfg) { GList *regs = NULL; int i; MonoMethodSignature *sig; CallInfo *cinfo; sig = mono_method_signature_internal (cfg->method); cinfo = get_call_info (cfg, sig, FALSE); /* Use unused input registers */ for (i = cinfo->reg_usage; i < 6; ++i) regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i)); /* Use %l0..%l6 as global registers */ for (i = sparc_l0; i < sparc_l7; ++i) regs = g_list_prepend (regs, GUINT_TO_POINTER (i)); g_free (cinfo); return regs; } /* * mono_arch_regalloc_cost: * * Return the cost, in number of memory references, of the action of * allocating the variable VMV into a register during global register * allocation. */ guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv) { return 0; } /* * Set var information according to the calling convention. sparc version. * The locals var stuff should most likely be split into another method.
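 *
 * Locals end up at negative offsets from %fp (inst->inst_offset is set
 * to STACK_BIAS + -offset below), each slot aligned to its type's
 * natural alignment.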
*/ void mono_arch_allocate_vars (MonoCompile *cfg) { MonoMethodSignature *sig; MonoMethodHeader *header; MonoInst *inst; int i, offset, size, align, curinst; CallInfo *cinfo; header = cfg->header; sig = mono_method_signature_internal (cfg->method); cinfo = get_call_info (cfg, sig, FALSE); if (sig->ret->type != MONO_TYPE_VOID) { switch (cinfo->ret.storage) { case ArgInIReg: case ArgInFReg: cfg->ret->opcode = OP_REGVAR; cfg->ret->inst_c0 = cinfo->ret.reg; break; case ArgInIRegPair: { MonoType *t = mini_get_underlying_type (sig->ret); if (((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) { MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->ret->dreg)); MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->ret->dreg)); low->opcode = OP_REGVAR; low->dreg = cinfo->ret.reg + 1; high->opcode = OP_REGVAR; high->dreg = cinfo->ret.reg; } cfg->ret->opcode = OP_REGVAR; cfg->ret->inst_c0 = cinfo->ret.reg; break; } case ArgOnStack: #ifdef SPARCV9 g_assert_not_reached (); #else /* valuetypes */ cfg->vret_addr->opcode = OP_REGOFFSET; cfg->vret_addr->inst_basereg = sparc_fp; cfg->vret_addr->inst_offset = 64; #endif break; default: NOT_IMPLEMENTED; } cfg->ret->dreg = cfg->ret->inst_c0; } /* * We use the ABI calling conventions for managed code as well. * Exception: valuetypes are never returned in registers on V9. * FIXME: Use something more optimized. */ /* Locals are allocated backwards from %fp */ cfg->frame_reg = sparc_fp; offset = 0; /* * Reserve a stack slot for holding information used during exception * handling. */ if (header->num_clauses) offset += sizeof (target_mgreg_t) * 2; if (cfg->method->save_lmf) { offset += sizeof (MonoLMF); cfg->arch.lmf_offset = offset; } curinst = cfg->locals_start; for (i = curinst; i < cfg->num_varinfo; ++i) { inst = cfg->varinfo [i]; if ((inst->opcode == OP_REGVAR) || (inst->opcode == OP_REGOFFSET)) { //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg)); continue; } if (inst->flags & MONO_INST_IS_DEAD) continue; /* inst->backend.is_pinvoke indicates native sized value types, this is used by the * pinvoke wrappers when they call functions returning structure */ if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) size = mono_class_native_size (mono_class_from_mono_type_internal (inst->inst_vtype), &align); else size = mini_type_stack_size (inst->inst_vtype, &align); /* * This is needed since structures containing doubles must be doubleword * aligned. * FIXME: Do this only if needed. 
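 * (SPARC ldd/std only work on 8-byte aligned addresses, so a misaligned
 * double would otherwise trap at run time.)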
*/ if (MONO_TYPE_ISSTRUCT (inst->inst_vtype)) align = 8; /* * variables are accessed as negative offsets from %fp, so increase * the offset before assigning it to a variable */ offset += size; offset += align - 1; offset &= ~(align - 1); inst->opcode = OP_REGOFFSET; inst->inst_basereg = sparc_fp; inst->inst_offset = STACK_BIAS + -offset; //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset); } if (sig->call_convention == MONO_CALL_VARARG) { cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET; } for (i = 0; i < sig->param_count + sig->hasthis; ++i) { inst = cfg->args [i]; if (inst->opcode != OP_REGVAR) { ArgInfo *ainfo = &cinfo->args [i]; gboolean inreg = TRUE; MonoType *arg_type; ArgStorage storage; if (sig->hasthis && (i == 0)) arg_type = mono_get_object_type (); else arg_type = sig->params [i - sig->hasthis]; #ifndef SPARCV9 if (!m_type_is_byref (arg_type) && ((arg_type->type == MONO_TYPE_R4) || (arg_type->type == MONO_TYPE_R8))) /* * Since float arguments are passed in integer registers, we need to * save them to the stack in the prolog. */ inreg = FALSE; #endif /* FIXME: Allocate volatile arguments to registers */ /* FIXME: This makes the argument holding a vtype address into volatile */ if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) inreg = FALSE; if (MONO_TYPE_ISSTRUCT (arg_type)) /* FIXME: this isn't needed */ inreg = FALSE; inst->opcode = OP_REGOFFSET; if (!inreg) storage = ArgOnStack; else storage = ainfo->storage; switch (storage) { case ArgInIReg: inst->opcode = OP_REGVAR; inst->dreg = sparc_i0 + ainfo->reg; break; case ArgInIRegPair: if (inst->type == STACK_I8) { MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (inst->dreg)); MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (inst->dreg)); low->opcode = OP_REGVAR; low->dreg = sparc_i0 + ainfo->reg + 1; high->opcode = OP_REGVAR; high->dreg = sparc_i0 + ainfo->reg; } inst->opcode = OP_REGVAR; inst->dreg = sparc_i0 + ainfo->reg; break; case ArgInFloatReg: case ArgInDoubleReg: /* * Since float regs are volatile, we save the arguments to * the stack in the prolog. * FIXME: Avoid this if the method contains no calls. */ case ArgOnStack: case ArgOnStackPair: case ArgInSplitRegStack: /* Split arguments are saved to the stack in the prolog */ inst->opcode = OP_REGOFFSET; /* in parent frame */ inst->inst_basereg = sparc_fp; inst->inst_offset = ainfo->offset + ARGS_OFFSET; if (!m_type_is_byref (arg_type) && (arg_type->type == MONO_TYPE_R8)) { /* * It is very hard to load doubles from non-doubleword aligned * memory locations. So if the offset is misaligned, we copy the * argument to a stack location in the prolog. */ if ((inst->inst_offset - STACK_BIAS) % 8) { inst->inst_basereg = sparc_fp; offset += 8; align = 8; offset += align - 1; offset &= ~(align - 1); inst->inst_offset = STACK_BIAS + -offset; } } break; default: NOT_IMPLEMENTED; } if (MONO_TYPE_ISSTRUCT (arg_type)) { /* Add a level of indirection */ /* * It would be easier to add OP_LDIND_I here, but ldind_i instructions * are destructively modified in a lot of places in inssel.brg. */ MonoInst *indir; MONO_INST_NEW (cfg, indir, 0); *indir = *inst; inst->opcode = OP_VTARG_ADDR; inst->inst_left = indir; } } } /* * spillvars are stored between the normal locals and the storage reserved * by the ABI. 
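 * In other words, working down from %fp: locals (cfg->stack_offset
 * bytes), then spill slots, with the fixed ABI register save area at the
 * bottom of the frame next to %sp.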
*/ cfg->stack_offset = offset; g_free (cinfo); } void mono_arch_create_vars (MonoCompile *cfg) { MonoMethodSignature *sig; sig = mono_method_signature_internal (cfg->method); if (MONO_TYPE_ISSTRUCT ((sig->ret))) { cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG); if (G_UNLIKELY (cfg->verbose_level > 1)) { printf ("vret_addr = "); mono_print_ins (cfg->vret_addr); } } if (!m_type_is_byref (sig->ret) && (sig->ret->type == MONO_TYPE_I8 || sig->ret->type == MONO_TYPE_U8)) { MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->ret->dreg)); MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->ret->dreg)); low->flags |= MONO_INST_VOLATILE; high->flags |= MONO_INST_VOLATILE; } /* Add a properly aligned dword for use by int<->float conversion opcodes */ cfg->arch.float_spill_slot = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_ARG); ((MonoInst*)cfg->arch.float_spill_slot)->flags |= MONO_INST_VOLATILE; } static void add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, guint32 sreg) { MonoInst *arg; MONO_INST_NEW (cfg, arg, 0); arg->sreg1 = sreg; switch (storage) { case ArgInIReg: arg->opcode = OP_MOVE; arg->dreg = mono_alloc_ireg (cfg); mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE); break; case ArgInFloatReg: arg->opcode = OP_FMOVE; arg->dreg = mono_alloc_freg (cfg); mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE); break; default: g_assert_not_reached (); } MONO_ADD_INS (cfg->cbb, arg); } static void add_outarg_load (MonoCompile *cfg, MonoCallInst *call, int opcode, int basereg, int offset, int reg) { int dreg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, sparc_sp, offset); mono_call_inst_add_outarg_reg (cfg, call, dreg, reg, FALSE); } static void emit_pass_long (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in) { int offset = ARGS_OFFSET + ainfo->offset; switch (ainfo->storage) { case ArgInIRegPair: add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg + 1, MONO_LVREG_LS (in->dreg)); add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, MONO_LVREG_MS (in->dreg)); break; case ArgOnStackPair: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset, MONO_LVREG_MS (in->dreg)); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, MONO_LVREG_LS (in->dreg)); break; case ArgInSplitRegStack: add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, MONO_LVREG_MS (in->dreg)); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, MONO_LVREG_LS (in->dreg)); break; default: g_assert_not_reached (); } } static void emit_pass_double (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in) { int offset = ARGS_OFFSET + ainfo->offset; switch (ainfo->storage) { case ArgInIRegPair: /* floating-point <-> integer transfer must go through memory */ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg); /* Load into a register pair */ add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg); add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset + 4, sparc_o0 + ainfo->reg + 1); break; case ArgOnStackPair: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg); break; case ArgInSplitRegStack: /* floating-point <-> integer transfer must go through memory */ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg); /* 
Load most significant word into register */ add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg); break; default: g_assert_not_reached (); } } static void emit_pass_float (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in) { int offset = ARGS_OFFSET + ainfo->offset; switch (ainfo->storage) { case ArgInIReg: /* floating-point <-> integer transfer must go through memory */ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg); add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg); break; case ArgOnStack: MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg); break; default: g_assert_not_reached (); } } static void emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in); static void emit_pass_vtype (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in, gboolean pinvoke) { MonoInst *arg; guint32 align, offset, pad, size; if (arg_type->type == MONO_TYPE_TYPEDBYREF) { size = MONO_ABI_SIZEOF (MonoTypedRef); align = sizeof (target_mgreg_t); } else if (pinvoke) size = mono_type_native_stack_size (m_class_get_byval_arg (in->klass), &align); else { /* * Other backends use mono_type_stack_size (), but that * aligns the size to 8, which is larger than the size of * the source, leading to reads of invalid memory if the * source is at the end of address space. */ size = mono_class_value_size (in->klass, &align); } /* The first 6 argument locations are reserved */ if (cinfo->stack_usage < 6 * sizeof (target_mgreg_t)) cinfo->stack_usage = 6 * sizeof (target_mgreg_t); offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align); pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage); cinfo->stack_usage += size; cinfo->stack_usage += pad; /* * We use OP_OUTARG_VT to copy the valuetype to a stack location, then * use the normal OUTARG opcodes to pass the address of the location to * the callee. */ if (size > 0) { MONO_INST_NEW (cfg, arg, OP_OUTARG_VT); arg->sreg1 = in->dreg; arg->klass = in->klass; arg->backend.size = size; arg->inst_p0 = call; arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo)); ((ArgInfo*)(arg->inst_p1))->offset = STACK_BIAS + offset; MONO_ADD_INS (cfg->cbb, arg); MONO_INST_NEW (cfg, arg, OP_ADD_IMM); arg->dreg = mono_alloc_preg (cfg); arg->sreg1 = sparc_sp; arg->inst_imm = STACK_BIAS + offset; MONO_ADD_INS (cfg->cbb, arg); emit_pass_other (cfg, call, ainfo, NULL, arg); } } static void emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in) { int offset = ARGS_OFFSET + ainfo->offset; int opcode; switch (ainfo->storage) { case ArgInIReg: add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg); break; case ArgOnStack: #ifdef SPARCV9 NOT_IMPLEMENTED; #else if (offset & 0x1) opcode = OP_STOREI1_MEMBASE_REG; else if (offset & 0x2) opcode = OP_STOREI2_MEMBASE_REG; else opcode = OP_STOREI4_MEMBASE_REG; MONO_EMIT_NEW_STORE_MEMBASE (cfg, opcode, sparc_sp, offset, in->dreg); #endif break; default: g_assert_not_reached (); } } static void emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo) { MonoMethodSignature *tmp_sig; /* * mono_ArgIterator_Setup assumes the signature cookie is * passed first and all the arguments which were before it are * passed on the stack after the signature. 
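 * (The cookie itself is simply the MonoMethodSignature* describing the
 * variable portion of the call.)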
So compensate by * passing a different signature. */ tmp_sig = mono_metadata_signature_dup (call->signature); tmp_sig->param_count -= call->signature->sentinelpos; tmp_sig->sentinelpos = 0; memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*)); /* FIXME: Add support for signature tokens to AOT */ cfg->disable_aot = TRUE; /* We always pass the signature on the stack for simplicity */ MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset, tmp_sig); } void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call) { MonoInst *in; MonoMethodSignature *sig; int i, n; CallInfo *cinfo; ArgInfo *ainfo; guint32 extra_space = 0; sig = call->signature; n = sig->param_count + sig->hasthis; cinfo = get_call_info (cfg, sig, sig->pinvoke); if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) { /* Set the 'struct/union return pointer' location on the stack */ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, call->vret_var->dreg); } for (i = 0; i < n; ++i) { MonoType *arg_type; ainfo = cinfo->args + i; if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* Emit the signature cookie just before the first implicit argument */ emit_sig_cookie (cfg, call, cinfo); } in = call->args [i]; if (sig->hasthis && (i == 0)) arg_type = mono_get_object_type (); else arg_type = sig->params [i - sig->hasthis]; arg_type = mini_get_underlying_type (arg_type); if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) emit_pass_vtype (cfg, call, cinfo, ainfo, arg_type, in, sig->pinvoke && !sig->marshalling_disabled); else if (!m_type_is_byref (arg_type) && ((arg_type->type == MONO_TYPE_I8) || (arg_type->type == MONO_TYPE_U8))) emit_pass_long (cfg, call, ainfo, in); else if (!m_type_is_byref (arg_type) && (arg_type->type == MONO_TYPE_R8)) emit_pass_double (cfg, call, ainfo, in); else if (!m_type_is_byref (arg_type) && (arg_type->type == MONO_TYPE_R4)) emit_pass_float (cfg, call, ainfo, in); else emit_pass_other (cfg, call, ainfo, arg_type, in); } /* Handle the case where there are no implicit arguments */ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) { emit_sig_cookie (cfg, call, cinfo); } call->stack_usage = cinfo->stack_usage + extra_space; g_free (cinfo); } void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src) { ArgInfo *ainfo = (ArgInfo*)ins->inst_p1; int size = ins->backend.size; mini_emit_memcpy (cfg, sparc_sp, ainfo->offset, src->dreg, 0, size, TARGET_SIZEOF_VOID_P); } void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val) { CallInfo *cinfo = get_call_info (cfg, mono_method_signature_internal (method), FALSE); MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret); switch (cinfo->ret.storage) { case ArgInIReg: MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg); break; case ArgInIRegPair: if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) { MONO_EMIT_NEW_UNALU (cfg, OP_LMOVE, cfg->ret->dreg, val->dreg); } else { MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_MS (cfg->ret->dreg), MONO_LVREG_MS (val->dreg)); MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (cfg->ret->dreg), MONO_LVREG_LS (val->dreg)); } break; case ArgInFReg: if (ret->type == MONO_TYPE_R4) MONO_EMIT_NEW_UNALU (cfg, OP_SETFRET, cfg->ret->dreg, val->dreg); else MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
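/* the ABI returns floats in %f0 (plus %f1 for the low word of a double on V8) */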
break; default: g_assert_not_reached (); } g_assert (cinfo); } int cond_to_sparc_cond [][3] = { {sparc_be, sparc_be, sparc_fbe}, {sparc_bne, sparc_bne, 0}, {sparc_ble, sparc_ble, sparc_fble}, {sparc_bge, sparc_bge, sparc_fbge}, {sparc_bl, sparc_bl, sparc_fbl}, {sparc_bg, sparc_bg, sparc_fbg}, {sparc_bleu, sparc_bleu, 0}, {sparc_beu, sparc_beu, 0}, {sparc_blu, sparc_blu, sparc_fbl}, {sparc_bgu, sparc_bgu, sparc_fbg} }; /* Map opcode to the sparc condition codes */ static SparcCond opcode_to_sparc_cond (int opcode) { CompRelation rel; CompType t; switch (opcode) { case OP_COND_EXC_OV: case OP_COND_EXC_IOV: return sparc_bvs; case OP_COND_EXC_C: case OP_COND_EXC_IC: return sparc_bcs; case OP_COND_EXC_NO: case OP_COND_EXC_NC: NOT_IMPLEMENTED; default: rel = mono_opcode_to_cond (opcode); t = mono_opcode_to_type (opcode, -1); return cond_to_sparc_cond [rel][t]; break; } return -1; } #define COMPUTE_DISP(ins) \ if (ins->inst_true_bb->native_offset) \ disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \ else { \ disp = 0; \ mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \ } #ifdef SPARCV9 #define DEFAULT_ICC sparc_xcc_short #else #define DEFAULT_ICC sparc_icc_short #endif #ifdef SPARCV9 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \ do { \ gint32 disp; \ guint32 predict; \ COMPUTE_DISP(ins); \ predict = (disp != 0) ? 1 : 0; \ g_assert (sparc_is_imm19 (disp)); \ sparc_branchp (code, (annul), cond, icc, (predict), disp); \ if (filldelay) sparc_nop (code); \ } while (0) #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short)) #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \ do { \ gint32 disp; \ guint32 predict; \ COMPUTE_DISP(ins); \ predict = (disp != 0) ? 1 : 0; \ g_assert (sparc_is_imm19 (disp)); \ sparc_fbranch (code, (annul), cond, disp); \ if (filldelay) sparc_nop (code); \ } while (0) #else #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached () #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \ do { \ gint32 disp; \ COMPUTE_DISP(ins); \ g_assert (sparc_is_imm22 (disp)); \ sparc_ ## bop (code, (annul), cond, disp); \ if (filldelay) sparc_nop (code); \ } while (0) #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay) #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay) #endif #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \ do { \ gint32 disp; \ guint32 predict; \ COMPUTE_DISP(ins); \ predict = (disp != 0) ? 
1 : 0; \ g_assert (sparc_is_imm19 (disp)); \ sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \ if (filldelay) sparc_nop (code); \ } while (0) #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \ do { \ gint32 disp; \ COMPUTE_DISP(ins); \ g_assert (sparc_is_imm22 (disp)); \ sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \ if (filldelay) sparc_nop (code); \ } while (0) /* emit an exception if condition is fail */ /* * We put the exception throwing code out-of-line, at the end of the method */ #define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \ mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \ MONO_PATCH_INFO_EXC, sexc_name); \ if (mono_hwcap_sparc_is_v9 && ((icc) != sparc_icc_short)) { \ sparc_branchp (code, 0, (cond), (icc), 0, 0); \ } \ else { \ sparc_branch (code, 0, cond, 0); \ } \ if (filldelay) sparc_nop (code); \ } while (0); #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC) #define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \ mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \ MONO_PATCH_INFO_EXC, sexc_name); \ sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \ sparc_nop (code); \ } while (0); #define EMIT_ALU_IMM(ins,op,setcc) do { \ if (sparc_is_imm13 ((ins)->inst_imm)) \ sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \ else { \ sparc_set (code, ins->inst_imm, sparc_o7); \ sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \ } \ } while (0); #define EMIT_LOAD_MEMBASE(ins,op) do { \ if (sparc_is_imm13 (ins->inst_offset)) \ sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \ else { \ sparc_set (code, ins->inst_offset, sparc_o7); \ sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \ } \ } while (0); /* max len = 5 */ #define EMIT_STORE_MEMBASE_IMM(ins,op) do { \ guint32 sreg; \ if (ins->inst_imm == 0) \ sreg = sparc_g0; \ else { \ sparc_set (code, ins->inst_imm, sparc_o7); \ sreg = sparc_o7; \ } \ if (!sparc_is_imm13 (ins->inst_offset)) { \ sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \ sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \ } \ else \ sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \ } while (0); #define EMIT_STORE_MEMBASE_REG(ins,op) do { \ if (!sparc_is_imm13 (ins->inst_offset)) { \ sparc_set (code, ins->inst_offset, sparc_o7); \ sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \ } \ else \ sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \ } while (0); #define EMIT_CALL() do { \ if (v64) { \ sparc_set_template (code, sparc_o7); \ sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \ } \ else { \ sparc_call_simple (code, 0); \ } \ sparc_nop (code); \ } while (0); /* * A call template is 7 instructions long, so we want to avoid it if possible. 
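 * (On v64 the template is a full 64-bit sparc_set of the address -
 * roughly sethi/or/sllx pairs - followed by jmpl and a delay-slot nop,
 * versus just call+nop for the pc-relative form.)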
*/ static guint32* emit_call (MonoCompile *cfg, guint32 *code, guint32 patch_type, gconstpointer data) { ERROR_DECL (error); gpointer target; /* FIXME: This only works if the target method is already compiled */ if (0 && v64 && !cfg->compile_aot) { MonoJumpInfo patch_info; patch_info.type = patch_type; patch_info.data.target = data; target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, &patch_info, FALSE, error); mono_error_raise_exception_deprecated (error); /* FIXME: don't raise here */ /* FIXME: Add optimizations if the target is close enough */ sparc_set (code, target, sparc_o7); sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); sparc_nop (code); } else { mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, patch_type, data); EMIT_CALL (); } return code; } void mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb) { } void mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *n, *last_ins = NULL; ins = bb->code; MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) { switch (ins->opcode) { case OP_MUL_IMM: /* remove unnecessary multiplication with 1 */ if (ins->inst_imm == 1) { if (ins->dreg != ins->sreg1) { ins->opcode = OP_MOVE; } else { MONO_DELETE_INS (bb, ins); continue; } } break; #ifndef SPARCV9 case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: /* * OP_STORE_MEMBASE_REG reg, offset(basereg) * OP_LOAD_MEMBASE offset(basereg), reg */ if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG || last_ins->opcode == OP_STORE_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { if (ins->dreg == last_ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } else { //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++); ins->opcode = OP_MOVE; ins->sreg1 = last_ins->sreg1; } /* * Note: reg1 must be different from the basereg in the second load * OP_LOAD_MEMBASE offset(basereg), reg1 * OP_LOAD_MEMBASE offset(basereg), reg2 * --> * OP_LOAD_MEMBASE offset(basereg), reg1 * OP_MOVE reg1, reg2 */ } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE || last_ins->opcode == OP_LOAD_MEMBASE) && ins->inst_basereg != last_ins->dreg && ins->inst_basereg == last_ins->inst_basereg && ins->inst_offset == last_ins->inst_offset) { if (ins->dreg == last_ins->dreg) { MONO_DELETE_INS (bb, ins); continue; } else { ins->opcode = OP_MOVE; ins->sreg1 = last_ins->dreg; } //g_assert_not_reached (); #if 0 /* * OP_STORE_MEMBASE_IMM imm, offset(basereg) * OP_LOAD_MEMBASE offset(basereg), reg * --> * OP_STORE_MEMBASE_IMM imm, offset(basereg) * OP_ICONST reg, imm */ } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM || last_ins->opcode == OP_STORE_MEMBASE_IMM) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++); ins->opcode = OP_ICONST; ins->inst_c0 = last_ins->inst_imm; g_assert_not_reached (); // check this rule #endif } break; #endif case OP_LOADI1_MEMBASE: if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { if (ins->dreg == last_ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } else { //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++); ins->opcode = OP_MOVE; ins->sreg1 = last_ins->sreg1; } } break; case OP_LOADI2_MEMBASE: if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && 
ins->inst_offset == last_ins->inst_offset) { if (ins->dreg == last_ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } else { //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++); ins->opcode = OP_MOVE; ins->sreg1 = last_ins->sreg1; } } break; case OP_STOREI4_MEMBASE_IMM: /* Convert pairs of 0 stores to a dword 0 store */ /* Used when initializing temporaries */ /* We know sparc_fp is dword aligned */ if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) && (ins->inst_destbasereg == last_ins->inst_destbasereg) && (ins->inst_destbasereg == sparc_fp) && (ins->inst_offset < 0) && ((ins->inst_offset % 8) == 0) && ((ins->inst_offset == last_ins->inst_offset - 4)) && (ins->inst_imm == 0) && (last_ins->inst_imm == 0)) { if (mono_hwcap_sparc_is_v9) { last_ins->opcode = OP_STOREI8_MEMBASE_IMM; last_ins->inst_offset = ins->inst_offset; MONO_DELETE_INS (bb, ins); continue; } } break; case OP_IBEQ: case OP_IBNE_UN: case OP_IBLT: case OP_IBGT: case OP_IBGE: case OP_IBLE: case OP_COND_EXC_EQ: case OP_COND_EXC_GE: case OP_COND_EXC_GT: case OP_COND_EXC_LE: case OP_COND_EXC_LT: case OP_COND_EXC_NE_UN: /* * Convert compare with zero+branch to BRcc */ /* * This only works in 64 bit mode, since it examines all 64 * bits of the register. * Only do this if the method is small since BPr only has a 16bit * displacement. */ if (v64 && (cfg->header->code_size < 10000) && last_ins && (last_ins->opcode == OP_COMPARE_IMM) && (last_ins->inst_imm == 0)) { switch (ins->opcode) { case OP_IBEQ: ins->opcode = OP_SPARC_BRZ; break; case OP_IBNE_UN: ins->opcode = OP_SPARC_BRNZ; break; case OP_IBLT: ins->opcode = OP_SPARC_BRLZ; break; case OP_IBGT: ins->opcode = OP_SPARC_BRGZ; break; case OP_IBGE: ins->opcode = OP_SPARC_BRGEZ; break; case OP_IBLE: ins->opcode = OP_SPARC_BRLEZ; break; case OP_COND_EXC_EQ: ins->opcode = OP_SPARC_COND_EXC_EQZ; break; case OP_COND_EXC_GE: ins->opcode = OP_SPARC_COND_EXC_GEZ; break; case OP_COND_EXC_GT: ins->opcode = OP_SPARC_COND_EXC_GTZ; break; case OP_COND_EXC_LE: ins->opcode = OP_SPARC_COND_EXC_LEZ; break; case OP_COND_EXC_LT: ins->opcode = OP_SPARC_COND_EXC_LTZ; break; case OP_COND_EXC_NE_UN: ins->opcode = OP_SPARC_COND_EXC_NEZ; break; default: g_assert_not_reached (); } ins->sreg1 = last_ins->sreg1; *last_ins = *ins; MONO_DELETE_INS (bb, ins); continue; } break; case OP_MOVE: /* * OP_MOVE reg, reg */ if (ins->dreg == ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } /* * OP_MOVE sreg, dreg * OP_MOVE dreg, sreg */ if (last_ins && last_ins->opcode == OP_MOVE && ins->sreg1 == last_ins->dreg && ins->dreg == last_ins->sreg1) { MONO_DELETE_INS (bb, ins); continue; } break; } last_ins = ins; ins = ins->next; } bb->last_ins = last_ins; } void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins) { switch (ins->opcode) { case OP_LNEG: MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, MONO_LVREG_LS (ins->dreg), 0, MONO_LVREG_LS (ins->sreg1)); MONO_EMIT_NEW_BIALU (cfg, OP_SBB, MONO_LVREG_MS (ins->dreg), 0, MONO_LVREG_MS (ins->sreg1)); NULLIFY_INS (ins); break; default: break; } } void mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb) { } /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */ static void sparc_patch (guint32 *code, const gpointer target) { guint32 *c = code; guint32 ins = *code; guint32 op = ins >> 30; guint32 op2 = (ins >> 22) & 0x7; guint32 rd = (ins >> 25) & 0x1f; guint8* target8 = (guint8*)target; gint64 disp = (target8 - (guint8*)code) >> 2; int reg; // g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target); if 
((op == 0) && (op2 == 2)) { if (!sparc_is_imm22 (disp)) NOT_IMPLEMENTED; /* Bicc */ *code = ((ins >> 22) << 22) | (disp & 0x3fffff); } else if ((op == 0) && (op2 == 1)) { if (!sparc_is_imm19 (disp)) NOT_IMPLEMENTED; /* BPcc */ *code = ((ins >> 19) << 19) | (disp & 0x7ffff); } else if ((op == 0) && (op2 == 3)) { if (!sparc_is_imm16 (disp)) NOT_IMPLEMENTED; /* BPr */ *code &= ~(0x180000 | 0x3fff); *code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff); } else if ((op == 0) && (op2 == 6)) { if (!sparc_is_imm22 (disp)) NOT_IMPLEMENTED; /* FBicc */ *code = ((ins >> 22) << 22) | (disp & 0x3fffff); } else if ((op == 0) && (op2 == 4)) { guint32 ins2 = code [1]; if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) { /* sethi followed by or */ guint32 *p = code; sparc_set (p, target8, rd); while (p <= (code + 1)) sparc_nop (p); } else if (ins2 == 0x01000000) { /* sethi followed by nop */ guint32 *p = code; sparc_set (p, target8, rd); while (p <= (code + 1)) sparc_nop (p); } else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) { /* sethi followed by load/store */ #ifndef SPARCV9 guint32 t = (guint32)target8; *code &= ~(0x3fffff); *code |= (t >> 10); *(code + 1) &= ~(0x3ff); *(code + 1) |= (t & 0x3ff); #endif } else if (v64 && (sparc_inst_rd (ins) == sparc_g1) && (sparc_inst_op (c [1]) == 0) && (sparc_inst_op2 (c [1]) == 4) && (sparc_inst_op (c [2]) == 2) && (sparc_inst_op3 (c [2]) == 2) && (sparc_inst_op (c [3]) == 2) && (sparc_inst_op3 (c [3]) == 2)) { /* sparc_set */ guint32 *p = c; reg = sparc_inst_rd (c [1]); sparc_set (p, target8, reg); while (p < (c + 6)) sparc_nop (p); } else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) && (sparc_inst_imm (ins2))) { /* sethi followed by jmpl */ #ifndef SPARCV9 guint32 t = (guint32)target8; *code &= ~(0x3fffff); *code |= (t >> 10); *(code + 1) &= ~(0x3ff); *(code + 1) |= (t & 0x3ff); #endif } else NOT_IMPLEMENTED; } else if (op == 01) { gint64 disp = (target8 - (guint8*)code) >> 2; if (!sparc_is_imm30 (disp)) NOT_IMPLEMENTED; sparc_call_simple (code, target8 - (guint8*)code); } else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) { /* mov imm, reg */ g_assert (sparc_is_imm13 (target8)); *code &= ~(0x1fff); *code |= (guint32)target8; } else if ((sparc_inst_op (ins) == 2) && (sparc_inst_op3 (ins) == 0x7)) { /* sparc_set case 5. */ guint32 *p = c; g_assert (v64); reg = sparc_inst_rd (c [3]); sparc_set (p, target, reg); while (p < (c + 6)) sparc_nop (p); } else NOT_IMPLEMENTED; // g_print ("patched with 0x%08x\n", ins); } /* * mono_sparc_emit_save_lmf: * * Emit the code necessary to push a new entry onto the lmf stack. Used by * trampolines as well.
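 *
 * On entry %o0 holds lmf_addr and the LMF itself lives in the frame at
 * lmf_offset(%fp). In pseudo-C the emitted sequence is a linked-list
 * push:
 *
 *   lmf->lmf_addr = lmf_addr;
 *   lmf->previous_lmf = *lmf_addr;
 *   *lmf_addr = lmf;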
*/ guint32* mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset) { /* Save lmf_addr */ sparc_sti_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr)); /* Save previous_lmf */ sparc_ldi (code, sparc_o0, sparc_g0, sparc_o7); sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf)); /* Set new lmf */ sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7); sparc_sti (code, sparc_o7, sparc_o0, sparc_g0); return code; } guint32* mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset) { /* Load previous_lmf */ sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0); /* Load lmf_addr */ sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1); /* *(lmf) = previous_lmf */ sparc_sti (code, sparc_l0, sparc_l1, sparc_g0); return code; } static guint32* emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code) { /* * Since register windows are saved to the current value of %sp, we need to * set the sp field in the lmf before the call, not in the prolog. */ if (cfg->method->save_lmf) { gint32 lmf_offset = MONO_SPARC_STACK_BIAS - cfg->arch.lmf_offset; /* Save sp */ sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp)); } return code; } static guint32* emit_vret_token (MonoInst *ins, guint32 *code) { MonoCallInst *call = (MonoCallInst*)ins; guint32 size; /* * The sparc ABI requires that calls to functions which return a structure * contain an additional unimpl instruction which is checked by the callee. */ if (call->signature->pinvoke && !call->signature->marshalling_disabled && MONO_TYPE_ISSTRUCT(call->signature->ret)) { if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF) size = mini_type_stack_size (call->signature->ret, NULL); else size = mono_class_native_size (call->signature->ret->data.klass, NULL); sparc_unimp (code, size & 0xfff); } return code; } static guint32* emit_move_return_value (MonoInst *ins, guint32 *code) { /* Move return value to the target register */ /* FIXME: do more things in the local reg allocator */ switch (ins->opcode) { case OP_VOIDCALL: case OP_VOIDCALL_REG: case OP_VOIDCALL_MEMBASE: break; case OP_CALL: case OP_CALL_REG: case OP_CALL_MEMBASE: g_assert (ins->dreg == sparc_o0); break; case OP_LCALL: case OP_LCALL_REG: case OP_LCALL_MEMBASE: /* * ins->dreg is the least significant reg due to the lreg: LCALL rule * in inssel-long32.brg. */ #ifdef SPARCV9 sparc_mov_reg_reg (code, sparc_o0, ins->dreg); #else g_assert (ins->dreg == sparc_o1); #endif break; case OP_FCALL: case OP_FCALL_REG: case OP_FCALL_MEMBASE: #ifdef SPARCV9 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) { sparc_fmovs (code, sparc_f0, ins->dreg); sparc_fstod (code, ins->dreg, ins->dreg); } else sparc_fmovd (code, sparc_f0, ins->dreg); #else sparc_fmovs (code, sparc_f0, ins->dreg); if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) sparc_fstod (code, ins->dreg, ins->dreg); else sparc_fmovs (code, sparc_f1, ins->dreg + 1); #endif break; case OP_VCALL: case OP_VCALL_REG: case OP_VCALL_MEMBASE: case OP_VCALL2: case OP_VCALL2_REG: case OP_VCALL2_MEMBASE: break; default: NOT_IMPLEMENTED; } return code; } /* * emit_load_volatile_arguments: * * Load volatile arguments from the stack to the original input registers. * Required before a tailcall. 
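 * A tailcall reuses the caller's register window, so %i0..%i5 must look
 * exactly as they did on entry; this undoes the spilling performed by
 * the prolog.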
*/ static guint32* emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code) { MonoMethod *method = cfg->method; MonoMethodSignature *sig; MonoInst *inst; CallInfo *cinfo; guint32 i, ireg; /* FIXME: Generate intermediate code instead */ sig = mono_method_signature_internal (method); cinfo = get_call_info (cfg, sig, FALSE); /* This is the opposite of the code in emit_prolog */ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; gint32 stack_offset; MonoType *arg_type; inst = cfg->args [i]; if (sig->hasthis && (i == 0)) arg_type = mono_get_object_type (); else arg_type = sig->params [i - sig->hasthis]; stack_offset = ainfo->offset + ARGS_OFFSET; ireg = sparc_i0 + ainfo->reg; if (ainfo->storage == ArgInSplitRegStack) { g_assert (inst->opcode == OP_REGOFFSET); if (!sparc_is_imm13 (stack_offset)) NOT_IMPLEMENTED; sparc_ld_imm (code, inst->inst_basereg, stack_offset, sparc_i5); } if (!v64 && !m_type_is_byref (arg_type) && (arg_type->type == MONO_TYPE_R8)) { if (ainfo->storage == ArgInIRegPair) { if (!sparc_is_imm13 (inst->inst_offset + 4)) NOT_IMPLEMENTED; sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg); sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1); } else if (ainfo->storage == ArgInSplitRegStack) { if (stack_offset != inst->inst_offset) { sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5); sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7); sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4); } } else if (ainfo->storage == ArgOnStackPair) { if (stack_offset != inst->inst_offset) { /* stack_offset is not dword aligned, so we need to make a copy */ sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7); sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset); sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7); sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4); } } else g_assert_not_reached (); } else if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) { /* Argument in register, but needs to be reloaded from the stack */ if (!sparc_is_imm13 (stack_offset)) NOT_IMPLEMENTED; if ((stack_offset - ARGS_OFFSET) & 0x1) /* FIXME: Is this ldsb or ldub ? */ sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg); else if ((stack_offset - ARGS_OFFSET) & 0x2) sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg); else if ((stack_offset - ARGS_OFFSET) & 0x4) sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg); else { if (v64) sparc_ldx_imm (code, inst->inst_basereg, stack_offset, ireg); else sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg); } } else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) { /* Argument in regpair, but needs to be reloaded from the stack */ if (!sparc_is_imm13 (inst->inst_offset + 4)) NOT_IMPLEMENTED; sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg); sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1); } else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) { NOT_IMPLEMENTED; } else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) { NOT_IMPLEMENTED; } if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack)) if (inst->opcode == OP_REGVAR) /* FIXME: Load the argument into memory */ NOT_IMPLEMENTED; } g_free (cinfo); return code; } /* * mono_sparc_is_virtual_call: * * Determine whether the instruction at CODE is a virtual call.
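 * Virtual call sites are recognizable because the delay slot of the
 * jmpl holds the marker instruction 'or %g0, 0xca, %g0' emitted by
 * mono_arch_output_basic_block () for virtual calls.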
*/ gboolean mono_sparc_is_virtual_call (guint32 *code) { guint32 buf[1]; guint32 *p; p = buf; if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) { /* * Register indirect call. If it is a virtual call, then the * instruction in the delay slot is a special kind of nop. */ /* Construct special nop */ sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0); p --; if (code [1] == p [0]) return TRUE; } return FALSE; } #define CMP_SIZE 3 #define BR_SMALL_SIZE 2 #define BR_LARGE_SIZE 2 #define JUMP_IMM_SIZE 5 #define ENABLE_WRONG_METHOD_CHECK 0 gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp) { int i; int size = 0; guint32 *code, *start; for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->is_equals) { if (item->check_target_idx) { if (!item->compare_done) item->chunk_size += CMP_SIZE; item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE; } else { if (fail_tramp) item->chunk_size += 16; item->chunk_size += JUMP_IMM_SIZE; #if ENABLE_WRONG_METHOD_CHECK item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1; #endif } } else { item->chunk_size += CMP_SIZE + BR_LARGE_SIZE; imt_entries [item->check_target_idx]->compare_done = TRUE; } size += item->chunk_size; } if (fail_tramp) { code = (guint32 *)mini_alloc_generic_virtual_trampoline (vtable, size * 4); } else { MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass); code = mono_mem_manager_code_reserve (mem_manager, size * 4); } start = code; for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; item->code_target = (guint8*)code; if (item->is_equals) { gboolean fail_case = !item->check_target_idx && fail_tramp; if (item->check_target_idx || fail_case) { if (!item->compare_done || fail_case) { sparc_set (code, (guint32)item->key, sparc_g5); sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5); } item->jmp_code = (guint8*)code; sparc_branch (code, 0, sparc_bne, 0); sparc_nop (code); if (item->has_target_code) { /* the target must go into the register used by the jmpl below */ sparc_set (code, item->value.target_code, sparc_g5); } else { sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5); sparc_ld (code, sparc_g5, 0, sparc_g5); } sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0); sparc_nop (code); if (fail_case) { sparc_patch (item->jmp_code, code); sparc_set (code, fail_tramp, sparc_g5); sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0); sparc_nop (code); item->jmp_code = NULL; } } else { /* enable the commented code to assert on wrong method */ #if ENABLE_WRONG_METHOD_CHECK g_assert_not_reached (); #endif sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5); sparc_ld (code, sparc_g5, 0, sparc_g5); sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0); sparc_nop (code); #if ENABLE_WRONG_METHOD_CHECK g_assert_not_reached (); #endif } } else { sparc_set (code, (guint32)item->key, sparc_g5); sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5); item->jmp_code = (guint8*)code; sparc_branch (code, 0, sparc_beu, 0); sparc_nop (code); } } /* patch the branches to get to the target items */ for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->jmp_code) { if (item->check_target_idx) { sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target); } } } mono_arch_flush_icache ((guint8*)start, (code - start)); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL)); UnlockedAdd (&mono_stats.imt_trampolines_size, (code - start)); g_assert 
(code - start <= size); mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), NULL); return start; } MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code) { #ifdef SPARCV9 g_assert_not_reached (); #endif return (MonoMethod*)regs [sparc_g1]; } gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code) { mono_sparc_flushw (); return (gpointer)regs [sparc_o0]; } /* * Some conventions used in the following code. * 2) The only scratch registers we have are o7 and g1. We try to * stick to o7 when we can, and use g1 when necessary. */ void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins; MonoCallInst *call; guint offset; guint32 *code = (guint32*)(cfg->native_code + cfg->code_len); MonoInst *last_ins = NULL; int max_len, cpos; const char *spec; if (cfg->verbose_level > 2) g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset); cpos = bb->max_offset; MONO_BB_FOR_EACH_INS (bb, ins) { guint8* code_start; offset = (guint8*)code - cfg->native_code; spec = ins_get_spec (ins->opcode); max_len = ins_get_size (ins->opcode); code = realloc_code (cfg, max_len); code_start = (guint8*)code; // if (ins->cil_code) // g_print ("cil code\n"); mono_debug_record_line_number (cfg, ins, offset); switch (ins->opcode) { case OP_STOREI1_MEMBASE_IMM: EMIT_STORE_MEMBASE_IMM (ins, stb); break; case OP_STOREI2_MEMBASE_IMM: EMIT_STORE_MEMBASE_IMM (ins, sth); break; case OP_STORE_MEMBASE_IMM: EMIT_STORE_MEMBASE_IMM (ins, sti); break; case OP_STOREI4_MEMBASE_IMM: EMIT_STORE_MEMBASE_IMM (ins, st); break; case OP_STOREI8_MEMBASE_IMM: #ifdef SPARCV9 EMIT_STORE_MEMBASE_IMM (ins, stx); #else /* Only generated by peephole opts */ g_assert ((ins->inst_offset % 8) == 0); g_assert (ins->inst_imm == 0); EMIT_STORE_MEMBASE_IMM (ins, stx); #endif break; case OP_STOREI1_MEMBASE_REG: EMIT_STORE_MEMBASE_REG (ins, stb); break; case OP_STOREI2_MEMBASE_REG: EMIT_STORE_MEMBASE_REG (ins, sth); break; case OP_STOREI4_MEMBASE_REG: EMIT_STORE_MEMBASE_REG (ins, st); break; case OP_STOREI8_MEMBASE_REG: #ifdef SPARCV9 EMIT_STORE_MEMBASE_REG (ins, stx); #else /* Only used by OP_MEMSET */ EMIT_STORE_MEMBASE_REG (ins, std); #endif break; case OP_STORE_MEMBASE_REG: EMIT_STORE_MEMBASE_REG (ins, sti); break; case OP_LOADU4_MEM: sparc_set (code, ins->inst_c0, ins->dreg); sparc_ld (code, ins->dreg, sparc_g0, ins->dreg); break; case OP_LOADI4_MEMBASE: #ifdef SPARCV9 EMIT_LOAD_MEMBASE (ins, ldsw); #else EMIT_LOAD_MEMBASE (ins, ld); #endif break; case OP_LOADU4_MEMBASE: EMIT_LOAD_MEMBASE (ins, ld); break; case OP_LOADU1_MEMBASE: EMIT_LOAD_MEMBASE (ins, ldub); break; case OP_LOADI1_MEMBASE: EMIT_LOAD_MEMBASE (ins, ldsb); break; case OP_LOADU2_MEMBASE: EMIT_LOAD_MEMBASE (ins, lduh); break; case OP_LOADI2_MEMBASE: EMIT_LOAD_MEMBASE (ins, ldsh); break; case OP_LOAD_MEMBASE: #ifdef SPARCV9 EMIT_LOAD_MEMBASE (ins, ldx); #else EMIT_LOAD_MEMBASE (ins, ld); #endif break; #ifdef SPARCV9 case OP_LOADI8_MEMBASE: EMIT_LOAD_MEMBASE (ins, ldx); break; #endif case OP_ICONV_TO_I1: sparc_sll_imm (code, ins->sreg1, 24, sparc_o7); sparc_sra_imm (code, sparc_o7, 24, ins->dreg); break; case OP_ICONV_TO_I2: sparc_sll_imm (code, ins->sreg1, 16, sparc_o7); sparc_sra_imm (code, sparc_o7, 16, ins->dreg); break; case OP_ICONV_TO_U1: sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg); break; case OP_ICONV_TO_U2: sparc_sll_imm (code, ins->sreg1, 16, sparc_o7); sparc_srl_imm (code, sparc_o7, 16, ins->dreg); break; case OP_LCONV_TO_OVF_U4: case 
OP_ICONV_TO_OVF_U4: /* Only used on V9 */ sparc_cmp_imm (code, ins->sreg1, 0); mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, MONO_PATCH_INFO_EXC, "OverflowException"); sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0); /* Delay slot */ sparc_set (code, 1, sparc_o7); sparc_sllx_imm (code, sparc_o7, 32, sparc_o7); sparc_cmp (code, ins->sreg1, sparc_o7); mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, MONO_PATCH_INFO_EXC, "OverflowException"); sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0); sparc_nop (code); sparc_mov_reg_reg (code, ins->sreg1, ins->dreg); break; case OP_LCONV_TO_OVF_I4_UN: case OP_ICONV_TO_OVF_I4_UN: /* Only used on V9 */ NOT_IMPLEMENTED; break; case OP_COMPARE: case OP_LCOMPARE: case OP_ICOMPARE: sparc_cmp (code, ins->sreg1, ins->sreg2); break; case OP_COMPARE_IMM: case OP_ICOMPARE_IMM: if (sparc_is_imm13 (ins->inst_imm)) sparc_cmp_imm (code, ins->sreg1, ins->inst_imm); else { sparc_set (code, ins->inst_imm, sparc_o7); sparc_cmp (code, ins->sreg1, sparc_o7); } break; case OP_BREAK: /* * gdb does not like encountering 'ta 1' in the debugged code. So * instead of emitting a trap, we emit a call to a C function and place a * breakpoint there. */ //sparc_ta (code, 1); mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break)); EMIT_CALL(); break; case OP_ADDCC: case OP_IADDCC: sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_IADD: sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_ADDCC_IMM: case OP_ADD_IMM: case OP_IADD_IMM: /* according to inssel-long32.brg, this should set cc */ EMIT_ALU_IMM (ins, add, TRUE); break; case OP_ADC: case OP_IADC: /* according to inssel-long32.brg, this should set cc */ sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_ADC_IMM: case OP_IADC_IMM: EMIT_ALU_IMM (ins, addx, TRUE); break; case OP_SUBCC: case OP_ISUBCC: sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_ISUB: sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_SUBCC_IMM: case OP_SUB_IMM: case OP_ISUB_IMM: /* according to inssel-long32.brg, this should set cc */ EMIT_ALU_IMM (ins, sub, TRUE); break; case OP_SBB: case OP_ISBB: /* according to inssel-long32.brg, this should set cc */ sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_SBB_IMM: case OP_ISBB_IMM: EMIT_ALU_IMM (ins, subx, TRUE); break; case OP_IAND: sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_AND_IMM: case OP_IAND_IMM: EMIT_ALU_IMM (ins, and, FALSE); break; case OP_IDIV: /* Sign extend sreg1 into %y */ sparc_sra_imm (code, ins->sreg1, 31, sparc_o7); sparc_wry (code, sparc_o7, sparc_g0); sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg); EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short); break; case OP_IDIV_UN: sparc_wry (code, sparc_g0, sparc_g0); sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_DIV_IMM: case OP_IDIV_IMM: { int i, imm; /* Transform division into a shift */ for (i = 1; i < 30; ++i) { imm = (1 << i); if (ins->inst_imm == imm) break; } if (i < 30) { if (i == 1) { /* gcc 2.95.3 */ sparc_srl_imm (code, ins->sreg1, 31, sparc_o7); sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg); sparc_sra_imm (code, ins->dreg, 1, ins->dreg); } else { /* http://compilers.iecc.com/comparch/article/93-04-079 */ sparc_sra_imm (code, ins->sreg1, 31, sparc_o7); 
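/* %o7 is now all ones for a negative dividend and 0 otherwise; the next shift turns it into the (2^i)-1 rounding bias */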
sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7); sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg); sparc_sra_imm (code, ins->dreg, i, ins->dreg); } } else { /* Sign extend sreg1 into %y */ sparc_sra_imm (code, ins->sreg1, 31, sparc_o7); sparc_wry (code, sparc_o7, sparc_g0); EMIT_ALU_IMM (ins, sdiv, TRUE); EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short); } break; } case OP_IDIV_UN_IMM: sparc_wry (code, sparc_g0, sparc_g0); EMIT_ALU_IMM (ins, udiv, FALSE); break; case OP_IREM: /* Sign extend sreg1 into %y */ sparc_sra_imm (code, ins->sreg1, 31, sparc_o7); sparc_wry (code, sparc_o7, sparc_g0); sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7); EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short); sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7); sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg); break; case OP_IREM_UN: sparc_wry (code, sparc_g0, sparc_g0); sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7); sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7); sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg); break; case OP_REM_IMM: case OP_IREM_IMM: /* Sign extend sreg1 into %y */ sparc_sra_imm (code, ins->sreg1, 31, sparc_o7); sparc_wry (code, sparc_o7, sparc_g0); if (!sparc_is_imm13 (ins->inst_imm)) { sparc_set (code, ins->inst_imm, GP_SCRATCH_REG); sparc_sdiv (code, TRUE, ins->sreg1, GP_SCRATCH_REG, sparc_o7); EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short); sparc_smul (code, FALSE, sparc_o7, GP_SCRATCH_REG, sparc_o7); } else { sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7); EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short); sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7); } sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg); break; case OP_IREM_UN_IMM: sparc_set (code, ins->inst_imm, GP_SCRATCH_REG); sparc_wry (code, sparc_g0, sparc_g0); sparc_udiv (code, FALSE, ins->sreg1, GP_SCRATCH_REG, sparc_o7); sparc_umul (code, FALSE, GP_SCRATCH_REG, sparc_o7, sparc_o7); sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg); break; case OP_IOR: sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_OR_IMM: case OP_IOR_IMM: EMIT_ALU_IMM (ins, or, FALSE); break; case OP_IXOR: sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_XOR_IMM: case OP_IXOR_IMM: EMIT_ALU_IMM (ins, xor, FALSE); break; case OP_ISHL: sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_SHL_IMM: case OP_ISHL_IMM: if (ins->inst_imm < (1 << 5)) sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg); else { sparc_set (code, ins->inst_imm, sparc_o7); sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg); } break; case OP_ISHR: sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_ISHR_IMM: case OP_SHR_IMM: if (ins->inst_imm < (1 << 5)) sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg); else { sparc_set (code, ins->inst_imm, sparc_o7); sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg); } break; case OP_SHR_UN_IMM: case OP_ISHR_UN_IMM: if (ins->inst_imm < (1 << 5)) sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg); else { sparc_set (code, ins->inst_imm, sparc_o7); sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg); } break; case OP_ISHR_UN: sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_LSHL: sparc_sllx (code, 
ins->sreg1, ins->sreg2, ins->dreg); break; case OP_LSHL_IMM: if (ins->inst_imm < (1 << 6)) sparc_sllx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg); else { sparc_set (code, ins->inst_imm, sparc_o7); sparc_sllx (code, ins->sreg1, sparc_o7, ins->dreg); } break; case OP_LSHR: sparc_srax (code, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_LSHR_IMM: if (ins->inst_imm < (1 << 6)) sparc_srax_imm (code, ins->sreg1, ins->inst_imm, ins->dreg); else { sparc_set (code, ins->inst_imm, sparc_o7); sparc_srax (code, ins->sreg1, sparc_o7, ins->dreg); } break; case OP_LSHR_UN: sparc_srlx (code, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_LSHR_UN_IMM: if (ins->inst_imm < (1 << 6)) sparc_srlx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg); else { sparc_set (code, ins->inst_imm, sparc_o7); sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg); } break; case OP_INOT: /* can't use sparc_not */ sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg); break; case OP_INEG: /* can't use sparc_neg */ sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg); break; case OP_IMUL: sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_IMUL_IMM: case OP_MUL_IMM: { int i, imm; if ((ins->inst_imm == 1) && (ins->sreg1 == ins->dreg)) break; /* Transform multiplication into a shift */ for (i = 0; i < 30; ++i) { imm = (1 << i); if (ins->inst_imm == imm) break; } if (i < 30) sparc_sll_imm (code, ins->sreg1, i, ins->dreg); else EMIT_ALU_IMM (ins, smul, FALSE); break; } case OP_IMUL_OVF: sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg); sparc_rdy (code, sparc_g1); sparc_sra_imm (code, ins->dreg, 31, sparc_o7); sparc_cmp (code, sparc_g1, sparc_o7); EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short); break; case OP_IMUL_OVF_UN: sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg); sparc_rdy (code, sparc_o7); sparc_cmp (code, sparc_o7, sparc_g0); EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short); break; case OP_ICONST: sparc_set (code, ins->inst_c0, ins->dreg); break; case OP_I8CONST: sparc_set (code, ins->inst_l, ins->dreg); break; case OP_AOTCONST: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0); sparc_set_template (code, ins->dreg); break; case OP_JUMP_TABLE: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0); sparc_set_template (code, ins->dreg); break; case OP_ICONV_TO_I4: case OP_ICONV_TO_U4: case OP_MOVE: if (ins->sreg1 != ins->dreg) sparc_mov_reg_reg (code, ins->sreg1, ins->dreg); break; case OP_FMOVE: #ifdef SPARCV9 if (ins->sreg1 != ins->dreg) sparc_fmovd (code, ins->sreg1, ins->dreg); #else sparc_fmovs (code, ins->sreg1, ins->dreg); sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1); #endif break; case OP_CHECK_THIS: /* ensure ins->sreg1 is not NULL */ /* Might be misaligned in case of vtypes so use a byte load */ sparc_ldsb_imm (code, ins->sreg1, 0, sparc_g0); break; case OP_ARGLIST: sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7); sparc_sti_imm (code, sparc_o7, ins->sreg1, 0); break; case OP_FCALL: case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: case OP_CALL: { call = (MonoCallInst*)ins; g_assert (!call->virtual); code = emit_save_sp_to_lmf (cfg, code); const MonoJumpInfoTarget patch = mono_call_to_patch (call); code = emit_call (cfg, code, patch.type, patch.target); code = emit_vret_token (ins, code); code = emit_move_return_value (ins, code); break; } case OP_FCALL_REG: case 
OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: call = (MonoCallInst*)ins; code = emit_save_sp_to_lmf (cfg, code); sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite); /* * We emit a special kind of nop in the delay slot to tell the * trampoline code that this is a virtual call, thus an unbox * trampoline might need to be called. */ if (call->virtual) sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0); else sparc_nop (code); code = emit_vret_token (ins, code); code = emit_move_return_value (ins, code); break; case OP_FCALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: call = (MonoCallInst*)ins; code = emit_save_sp_to_lmf (cfg, code); if (sparc_is_imm13 (ins->inst_offset)) { sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7); } else { sparc_set (code, ins->inst_offset, sparc_o7); sparc_ldi (code, ins->inst_basereg, sparc_o7, sparc_o7); } sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite); if (call->virtual) sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0); else sparc_nop (code); code = emit_vret_token (ins, code); code = emit_move_return_value (ins, code); break; case OP_SETFRET: if (mono_method_signature_internal (cfg->method)->ret->type == MONO_TYPE_R4) sparc_fdtos (code, ins->sreg1, sparc_f0); else { #ifdef SPARCV9 sparc_fmovd (code, ins->sreg1, ins->dreg); #else /* FIXME: Why not use fmovd ? */ sparc_fmovs (code, ins->sreg1, ins->dreg); sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1); #endif } break; case OP_LOCALLOC: { guint32 size_reg; gint32 offset2; #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK /* Perform stack touching */ NOT_IMPLEMENTED; #endif /* Keep alignment */ /* Add 4 to compensate for the rounding of localloc_offset */ sparc_add_imm (code, FALSE, ins->sreg1, 4 + MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg); sparc_set (code, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1), sparc_o7); sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg); if ((ins->flags & MONO_INST_INIT) && (ins->sreg1 == ins->dreg)) { #ifdef SPARCV9 size_reg = sparc_g4; #else size_reg = sparc_g1; #endif sparc_mov_reg_reg (code, ins->dreg, size_reg); } else size_reg = ins->sreg1; sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg); /* Keep %sp valid at all times */ sparc_mov_reg_reg (code, ins->dreg, sparc_sp); /* Round localloc_offset too so the result is at least 8 aligned */ offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8); g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2)); sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + offset2, ins->dreg); if (ins->flags & MONO_INST_INIT) { guint32 *br [3]; /* Initialize memory region */ sparc_cmp_imm (code, size_reg, 0); br [0] = code; sparc_branch (code, 0, sparc_be, 0); /* delay slot */ sparc_set (code, 0, sparc_o7); sparc_sub_imm (code, 0, size_reg, mono_hwcap_sparc_is_v9 ? 8 : 4, size_reg); /* start of loop */ br [1] = code; if (mono_hwcap_sparc_is_v9) sparc_stx (code, sparc_g0, ins->dreg, sparc_o7); else sparc_st (code, sparc_g0, ins->dreg, sparc_o7); sparc_cmp (code, sparc_o7, size_reg); br [2] = code; sparc_branch (code, 0, sparc_bl, 0); sparc_patch (br [2], br [1]); /* delay slot */ sparc_add_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 
8 : 4, sparc_o7); sparc_patch (br [0], code); } break; } case OP_LOCALLOC_IMM: { gint32 offset = ins->inst_imm; gint32 offset2; #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK /* Perform stack touching */ NOT_IMPLEMENTED; #endif /* To compensate for the rounding of localloc_offset */ offset += sizeof (target_mgreg_t); offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT); if (sparc_is_imm13 (offset)) sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp); else { sparc_set (code, offset, sparc_o7); sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp); } /* Round localloc_offset too so the result is at least 8 aligned */ offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8); g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2)); sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + offset2, ins->dreg); if ((ins->flags & MONO_INST_INIT) && (offset > 0)) { guint32 *br [2]; int i; if (offset <= 16) { i = 0; while (i < offset) { if (mono_hwcap_sparc_is_v9) { sparc_stx_imm (code, sparc_g0, ins->dreg, i); i += 8; } else { sparc_st_imm (code, sparc_g0, ins->dreg, i); i += 4; } } } else { sparc_set (code, offset, sparc_o7); sparc_sub_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7); /* beginning of loop */ br [0] = code; if (mono_hwcap_sparc_is_v9) sparc_stx (code, sparc_g0, ins->dreg, sparc_o7); else sparc_st (code, sparc_g0, ins->dreg, sparc_o7); sparc_cmp_imm (code, sparc_o7, 0); br [1] = code; sparc_branch (code, 0, sparc_bne, 0); /* delay slot */ sparc_sub_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7); sparc_patch (br [1], br [0]); } } break; } case OP_THROW: sparc_mov_reg_reg (code, ins->sreg1, sparc_o0); mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception)); EMIT_CALL (); break; case OP_RETHROW: sparc_mov_reg_reg (code, ins->sreg1, sparc_o0); mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception)); EMIT_CALL (); break; case OP_START_HANDLER: { /* * The START_HANDLER instruction marks the beginning of a handler * block. It is called using a call instruction, so %o7 contains * the return address. Since the handler executes in the same stack * frame as the method itself, we can't use save/restore to save * the return address. Instead, we save it into a dedicated * variable. 
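* As an illustration (derived from OP_CALL_HANDLER and OP_ENDFINALLY below):
* the caller emits "call <handler>; nop", OP_START_HANDLER stores %o7 into
* spvar, and OP_ENDFINALLY reloads spvar into %o7 and returns with
* "jmpl %o7 + 8, %g0", skipping the call instruction and its delay slot.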
*/ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); if (!sparc_is_imm13 (spvar->inst_offset)) { sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG); sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG); } else sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset); break; } case OP_ENDFILTER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); if (!sparc_is_imm13 (spvar->inst_offset)) { sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG); sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7); } else sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7); sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0); /* Delay slot */ sparc_mov_reg_reg (code, ins->sreg1, sparc_o0); break; } case OP_ENDFINALLY: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); if (!sparc_is_imm13 (spvar->inst_offset)) { sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG); sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7); } else sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7); sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0); sparc_nop (code); break; } case OP_CALL_HANDLER: mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb); /* This is a jump inside the method, so call_simple works even on V9 */ sparc_call_simple (code, 0); sparc_nop (code); for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev) mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb); break; case OP_LABEL: ins->inst_c0 = (guint8*)code - cfg->native_code; break; case OP_RELAXED_NOP: case OP_NOP: case OP_DUMMY_USE: case OP_DUMMY_ICONST: case OP_DUMMY_I8CONST: case OP_DUMMY_R8CONST: case OP_DUMMY_R4CONST: case OP_NOT_REACHED: case OP_NOT_NULL: break; case OP_BR: //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins); if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins) break; if (ins->inst_target_bb->native_offset) { gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; g_assert (sparc_is_imm22 (disp)); sparc_branch (code, 1, sparc_ba, disp); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb); sparc_branch (code, 1, sparc_ba, 0); } sparc_nop (code); break; case OP_BR_REG: sparc_jmp (code, ins->sreg1, sparc_g0); sparc_nop (code); break; case OP_CEQ: case OP_CLT: case OP_CLT_UN: case OP_CGT: case OP_CGT_UN: if (v64 && (cfg->opt & MONO_OPT_CMOV)) { sparc_clr_reg (code, ins->dreg); sparc_movcc_imm (code, sparc_xcc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg); } else { sparc_clr_reg (code, ins->dreg); #ifdef SPARCV9 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), DEFAULT_ICC, 0, 2); #else sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2); #endif /* delay slot */ sparc_set (code, 1, ins->dreg); } break; case OP_ICEQ: case OP_ICLT: case OP_ICLT_UN: case OP_ICGT: case OP_ICGT_UN: if (v64 && (cfg->opt & MONO_OPT_CMOV)) { sparc_clr_reg (code, ins->dreg); sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg); } else { sparc_clr_reg (code, ins->dreg); sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), sparc_icc_short, 0, 2); /* delay slot */ sparc_set (code, 1, ins->dreg); } break; case OP_COND_EXC_EQ: case OP_COND_EXC_NE_UN: case OP_COND_EXC_LT: case OP_COND_EXC_LT_UN: case OP_COND_EXC_GT: case OP_COND_EXC_GT_UN: case OP_COND_EXC_GE: case OP_COND_EXC_GE_UN: case 
OP_COND_EXC_LE: case OP_COND_EXC_LE_UN: case OP_COND_EXC_OV: case OP_COND_EXC_NO: case OP_COND_EXC_C: case OP_COND_EXC_NC: case OP_COND_EXC_IEQ: case OP_COND_EXC_INE_UN: case OP_COND_EXC_ILT: case OP_COND_EXC_ILT_UN: case OP_COND_EXC_IGT: case OP_COND_EXC_IGT_UN: case OP_COND_EXC_IGE: case OP_COND_EXC_IGE_UN: case OP_COND_EXC_ILE: case OP_COND_EXC_ILE_UN: case OP_COND_EXC_IOV: case OP_COND_EXC_INO: case OP_COND_EXC_IC: case OP_COND_EXC_INC: #ifdef SPARCV9 NOT_IMPLEMENTED; #else EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1); #endif break; case OP_SPARC_COND_EXC_EQZ: EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1); break; case OP_SPARC_COND_EXC_GEZ: EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgez, ins->inst_p1); break; case OP_SPARC_COND_EXC_GTZ: EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgz, ins->inst_p1); break; case OP_SPARC_COND_EXC_LEZ: EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlez, ins->inst_p1); break; case OP_SPARC_COND_EXC_LTZ: EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlz, ins->inst_p1); break; case OP_SPARC_COND_EXC_NEZ: EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1); break; case OP_IBEQ: case OP_IBNE_UN: case OP_IBLT: case OP_IBLT_UN: case OP_IBGT: case OP_IBGT_UN: case OP_IBGE: case OP_IBGE_UN: case OP_IBLE: case OP_IBLE_UN: { if (mono_hwcap_sparc_is_v9) EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1); else EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1); break; } case OP_SPARC_BRZ: EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1); break; case OP_SPARC_BRLEZ: EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1); break; case OP_SPARC_BRLZ: EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1); break; case OP_SPARC_BRNZ: EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1); break; case OP_SPARC_BRGZ: EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1); break; case OP_SPARC_BRGEZ: EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1); break; /* floating point opcodes */ case OP_R8CONST: mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0); #ifdef SPARCV9 sparc_set_template (code, sparc_o7); #else sparc_sethi (code, 0, sparc_o7); #endif sparc_lddf_imm (code, sparc_o7, 0, ins->dreg); break; case OP_R4CONST: mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0); #ifdef SPARCV9 sparc_set_template (code, sparc_o7); #else sparc_sethi (code, 0, sparc_o7); #endif sparc_ldf_imm (code, sparc_o7, 0, FP_SCRATCH_REG); /* Extend to double */ sparc_fstod (code, FP_SCRATCH_REG, ins->dreg); break; case OP_STORER8_MEMBASE_REG: if (!sparc_is_imm13 (ins->inst_offset + 4)) { sparc_set (code, ins->inst_offset, sparc_o7); /* SPARCV9 handles misaligned fp loads/stores */ if (!v64 && (ins->inst_offset % 8)) { /* Misaligned */ sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7); sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0); sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4); } else sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); } else { if (!v64 && (ins->inst_offset % 8)) { /* Misaligned */ sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4); } else sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); } break; case OP_LOADR8_MEMBASE: EMIT_LOAD_MEMBASE (ins, lddf); break; case OP_STORER4_MEMBASE_REG: /* This requires a double->single conversion */ sparc_fdtos (code, ins->sreg1, FP_SCRATCH_REG); if (!sparc_is_imm13 (ins->inst_offset)) { sparc_set (code, ins->inst_offset, sparc_o7); sparc_stf (code, 
FP_SCRATCH_REG, ins->inst_destbasereg, sparc_o7); } else sparc_stf_imm (code, FP_SCRATCH_REG, ins->inst_destbasereg, ins->inst_offset); break; case OP_LOADR4_MEMBASE: { /* ldf needs a single precision register */ int dreg = ins->dreg; ins->dreg = FP_SCRATCH_REG; EMIT_LOAD_MEMBASE (ins, ldf); ins->dreg = dreg; /* Extend to double */ sparc_fstod (code, FP_SCRATCH_REG, ins->dreg); break; } case OP_ICONV_TO_R4: { MonoInst *spill = cfg->arch.float_spill_slot; gint32 reg = spill->inst_basereg; gint32 offset = spill->inst_offset; g_assert (spill->opcode == OP_REGOFFSET); #ifdef SPARCV9 if (!sparc_is_imm13 (offset)) { sparc_set (code, offset, sparc_o7); sparc_stx (code, ins->sreg1, reg, sparc_o7); sparc_lddf (code, reg, sparc_o7, FP_SCRATCH_REG); } else { sparc_stx_imm (code, ins->sreg1, reg, offset); sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG); } sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG); #else if (!sparc_is_imm13 (offset)) { sparc_set (code, offset, sparc_o7); sparc_st (code, ins->sreg1, reg, sparc_o7); sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG); } else { sparc_st_imm (code, ins->sreg1, reg, offset); sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG); } sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG); #endif sparc_fstod (code, FP_SCRATCH_REG, ins->dreg); break; } case OP_ICONV_TO_R8: { MonoInst *spill = cfg->arch.float_spill_slot; gint32 reg = spill->inst_basereg; gint32 offset = spill->inst_offset; g_assert (spill->opcode == OP_REGOFFSET); #ifdef SPARCV9 if (!sparc_is_imm13 (offset)) { sparc_set (code, offset, sparc_o7); sparc_stx (code, ins->sreg1, reg, sparc_o7); sparc_lddf (code, reg, sparc_o7, FP_SCRATCH_REG); } else { sparc_stx_imm (code, ins->sreg1, reg, offset); sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG); } sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg); #else if (!sparc_is_imm13 (offset)) { sparc_set (code, offset, sparc_o7); sparc_st (code, ins->sreg1, reg, sparc_o7); sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG); } else { sparc_st_imm (code, ins->sreg1, reg, offset); sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG); } sparc_fitod (code, FP_SCRATCH_REG, ins->dreg); #endif break; } case OP_FCONV_TO_I1: case OP_FCONV_TO_U1: case OP_FCONV_TO_I2: case OP_FCONV_TO_U2: #ifndef SPARCV9 case OP_FCONV_TO_I: #endif case OP_FCONV_TO_I4: case OP_FCONV_TO_U4: { MonoInst *spill = cfg->arch.float_spill_slot; gint32 reg = spill->inst_basereg; gint32 offset = spill->inst_offset; g_assert (spill->opcode == OP_REGOFFSET); sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG); if (!sparc_is_imm13 (offset)) { sparc_set (code, offset, sparc_o7); sparc_stdf (code, FP_SCRATCH_REG, reg, sparc_o7); sparc_ld (code, reg, sparc_o7, ins->dreg); } else { sparc_stdf_imm (code, FP_SCRATCH_REG, reg, offset); sparc_ld_imm (code, reg, offset, ins->dreg); } switch (ins->opcode) { case OP_FCONV_TO_I1: case OP_FCONV_TO_U1: sparc_and_imm (code, 0, ins->dreg, 0xff, ins->dreg); break; case OP_FCONV_TO_I2: case OP_FCONV_TO_U2: sparc_set (code, 0xffff, sparc_o7); sparc_and (code, 0, ins->dreg, sparc_o7, ins->dreg); break; default: break; } break; } case OP_FCONV_TO_I8: case OP_FCONV_TO_U8: /* Emulated */ g_assert_not_reached (); break; case OP_FCONV_TO_R4: /* FIXME: Change precision ? 
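* (All fp values are kept in double precision registers in this backend, so
* the conversion is currently just a register-to-register move and no actual
* rounding to single precision happens here.)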
*/ #ifdef SPARCV9 sparc_fmovd (code, ins->sreg1, ins->dreg); #else sparc_fmovs (code, ins->sreg1, ins->dreg); sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1); #endif break; case OP_LCONV_TO_R_UN: { /* Emulated */ g_assert_not_reached (); break; } case OP_LCONV_TO_OVF_I: case OP_LCONV_TO_OVF_I4_2: { guint32 *br [3], *label [1]; /* * Valid ints: 0xffffffff:80000000 to 0x00000000:7fffffff */ sparc_cmp_imm (code, ins->sreg1, 0); br [0] = code; sparc_branch (code, 1, sparc_bneg, 0); sparc_nop (code); /* positive */ /* ms word must be 0 */ sparc_cmp_imm (code, ins->sreg2, 0); br [1] = code; sparc_branch (code, 1, sparc_be, 0); sparc_nop (code); label [0] = code; EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException"); /* negative */ sparc_patch (br [0], code); /* ms word must be 0xffffffff */ sparc_cmp_imm (code, ins->sreg2, -1); br [2] = code; sparc_branch (code, 1, sparc_bne, 0); sparc_nop (code); sparc_patch (br [2], label [0]); /* Ok */ sparc_patch (br [1], code); if (ins->sreg1 != ins->dreg) sparc_mov_reg_reg (code, ins->sreg1, ins->dreg); break; } case OP_FADD: sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_FSUB: sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_FMUL: sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_FDIV: sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_FNEG: #ifdef SPARCV9 sparc_fnegd (code, ins->sreg1, ins->dreg); #else /* FIXME: why not use fnegd? */ sparc_fnegs (code, ins->sreg1, ins->dreg); #endif break; case OP_FREM: sparc_fdivd (code, ins->sreg1, ins->sreg2, FP_SCRATCH_REG); sparc_fmuld (code, ins->sreg2, FP_SCRATCH_REG, FP_SCRATCH_REG); sparc_fsubd (code, ins->sreg1, FP_SCRATCH_REG, ins->dreg); break; case OP_FCOMPARE: sparc_fcmpd (code, ins->sreg1, ins->sreg2); break; case OP_FCEQ: case OP_FCLT: case OP_FCLT_UN: case OP_FCGT: case OP_FCGT_UN: sparc_fcmpd (code, ins->sreg1, ins->sreg2); sparc_clr_reg (code, ins->dreg); switch (ins->opcode) { case OP_FCLT_UN: case OP_FCGT_UN: sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4); /* delay slot */ sparc_set (code, 1, ins->dreg); sparc_fbranch (code, 1, sparc_fbu, 2); /* delay slot */ sparc_set (code, 1, ins->dreg); break; default: sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2); /* delay slot */ sparc_set (code, 1, ins->dreg); } break; case OP_FBEQ: case OP_FBLT: case OP_FBGT: EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1); break; case OP_FBGE: { /* clt.un + brfalse */ guint32 *p = code; sparc_fbranch (code, 1, sparc_fbul, 0); /* delay slot */ sparc_nop (code); EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1); sparc_patch (p, (guint8*)code); break; } case OP_FBLE: { /* cgt.un + brfalse */ guint32 *p = code; sparc_fbranch (code, 1, sparc_fbug, 0); /* delay slot */ sparc_nop (code); EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1); sparc_patch (p, (guint8*)code); break; } case OP_FBNE_UN: EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1); EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1); break; case OP_FBLT_UN: EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1); EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1); break; case OP_FBGT_UN: EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1); EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1); break; case OP_FBGE_UN: EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1); EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1); break; case OP_FBLE_UN: EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1); EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1); break; case OP_CKFINITE: { MonoInst 
*spill = cfg->arch.float_spill_slot; gint32 reg = spill->inst_basereg; gint32 offset = spill->inst_offset; g_assert (spill->opcode == OP_REGOFFSET); if (!sparc_is_imm13 (offset)) { sparc_set (code, offset, sparc_o7); sparc_stdf (code, ins->sreg1, reg, sparc_o7); sparc_lduh (code, reg, sparc_o7, sparc_o7); } else { sparc_stdf_imm (code, ins->sreg1, reg, offset); sparc_lduh_imm (code, reg, offset, sparc_o7); } sparc_srl_imm (code, sparc_o7, 4, sparc_o7); sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7); sparc_cmp_imm (code, sparc_o7, 2047); EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "OverflowException"); #ifdef SPARCV9 sparc_fmovd (code, ins->sreg1, ins->dreg); #else sparc_fmovs (code, ins->sreg1, ins->dreg); sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1); #endif break; } case OP_MEMORY_BARRIER: sparc_membar (code, sparc_membar_all); break; case OP_LIVERANGE_START: { if (cfg->verbose_level > 1) printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code; break; } case OP_LIVERANGE_END: { if (cfg->verbose_level > 1) printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code; break; } case OP_GC_SAFE_POINT: break; default: #ifdef __GNUC__ g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__); #else g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode)); #endif g_assert_not_reached (); } if ((((guint8*)code) - code_start) > max_len) { g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)", mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start); g_assert_not_reached (); } cpos += max_len; last_ins = ins; } set_code_cursor (cfg, code); } void mono_arch_register_lowlevel_calls (void) { mono_register_jit_icall (mono_arch_get_lmf_addr, NULL, TRUE); } void mono_arch_patch_code (MonoCompile *cfg, MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors, MonoError *error) { MonoJumpInfo *patch_info; error_init (error); /* FIXME: Move part of this to arch independent code */ for (patch_info = ji; patch_info; patch_info = patch_info->next) { unsigned char *ip = patch_info->ip.i + code; gpointer target; target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors, error); return_if_nok (error); switch (patch_info->type) { case MONO_PATCH_INFO_NONE: continue; case MONO_PATCH_INFO_METHOD_JUMP: { guint32 *ip2 = (guint32*)ip; /* Might already been patched */ sparc_set_template (ip2, sparc_o7); break; } default: break; } sparc_patch ((guint32*)ip, target); } } #error obsolete tracing? 
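/*
 * Note on OP_CKFINITE above: lduh fetches the top 16 bits of the IEEE 754
 * double, laid out as [sign:1][exponent:11][mantissa:4]. The srl by 4 and
 * the AND with 2047 (0x7ff) isolate the 11 exponent bits; an exponent of
 * all ones (2047) marks an infinity or a NaN, which is what the compare
 * and the conditional exception reject.
 */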
void* mono_arch_instrument_prolog (MonoCompile *cfg, MonoJitICallId func, void *p, gboolean enable_arguments) { int i; guint32 *code = (guint32*)p; MonoMethodSignature *sig = mono_method_signature_internal (cfg->method); CallInfo *cinfo; /* Save registers to stack */ for (i = 0; i < 6; ++i) sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (target_mgreg_t))); cinfo = get_call_info (cfg, sig, FALSE); /* Save float regs on V9, since they are caller saved */ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; gint32 stack_offset; stack_offset = ainfo->offset + ARGS_OFFSET; if (ainfo->storage == ArgInFloatReg) { if (!sparc_is_imm13 (stack_offset)) NOT_IMPLEMENTED; sparc_stf_imm (code, ainfo->reg, sparc_fp, stack_offset); } else if (ainfo->storage == ArgInDoubleReg) { /* The offset is guaranteed to be aligned by the ABI rules */ sparc_stdf_imm (code, ainfo->reg, sparc_fp, stack_offset); } } sparc_set (code, cfg->method, sparc_o0); sparc_add_imm (code, FALSE, sparc_fp, MONO_SPARC_STACK_BIAS, sparc_o1); mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (func)); EMIT_CALL (); /* Restore float regs on V9 */ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; gint32 stack_offset; stack_offset = ainfo->offset + ARGS_OFFSET; if (ainfo->storage == ArgInFloatReg) { if (!sparc_is_imm13 (stack_offset)) NOT_IMPLEMENTED; sparc_ldf_imm (code, sparc_fp, stack_offset, ainfo->reg); } else if (ainfo->storage == ArgInDoubleReg) { /* The offset is guaranteed to be aligned by the ABI rules */ sparc_lddf_imm (code, sparc_fp, stack_offset, ainfo->reg); } } g_free (cinfo); return code; } enum { SAVE_NONE, SAVE_STRUCT, SAVE_ONE, SAVE_TWO, SAVE_FP }; #error obsolete tracing? 
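/*
 * A recurring pattern in this file: SPARC ALU and load/store instructions
 * only encode a 13 bit sign extended immediate, so constants outside
 * [-4096, 4095] must first be materialized into a scratch register with
 * sparc_set (). A minimal sketch of the pattern (hypothetical helper, not
 * used by the backend, kept under #if 0):
 */
#if 0
static guint32*
emit_add_constant_sketch (guint32 *code, int sreg, gint32 imm, int dreg)
{
	if (sparc_is_imm13 (imm)) {
		/* The constant fits in the 13 bit immediate field */
		sparc_add_imm (code, FALSE, sreg, imm, dreg);
	} else {
		/* Materialize the constant, then use the register form */
		sparc_set (code, imm, GP_SCRATCH_REG);
		sparc_add (code, FALSE, sreg, GP_SCRATCH_REG, dreg);
	}
	return code;
}
#endif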
void* mono_arch_instrument_epilog (MonoCompile *cfg, MonoJitICallId func, void *p, gboolean enable_arguments) { guint32 *code = (guint32*)p; int save_mode = SAVE_NONE; MonoMethod *method = cfg->method; switch (mini_get_underlying_type (mono_method_signature_internal (method)->ret)->type) { case MONO_TYPE_VOID: /* special case string .ctor icall */ if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class) save_mode = SAVE_ONE; else save_mode = SAVE_NONE; break; case MONO_TYPE_I8: case MONO_TYPE_U8: #ifdef SPARCV9 save_mode = SAVE_ONE; #else save_mode = SAVE_TWO; #endif break; case MONO_TYPE_R4: case MONO_TYPE_R8: save_mode = SAVE_FP; break; case MONO_TYPE_VALUETYPE: save_mode = SAVE_STRUCT; break; default: save_mode = SAVE_ONE; break; } /* Save the result to the stack and also put it into the output registers */ switch (save_mode) { case SAVE_TWO: /* V8 only */ sparc_st_imm (code, sparc_i0, sparc_fp, 68); sparc_st_imm (code, sparc_i1, sparc_fp, 72); sparc_mov_reg_reg (code, sparc_i0, sparc_o1); sparc_mov_reg_reg (code, sparc_i1, sparc_o2); break; case SAVE_ONE: sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET); sparc_mov_reg_reg (code, sparc_i0, sparc_o1); break; case SAVE_FP: #ifdef SPARCV9 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET); #else sparc_stdf_imm (code, sparc_f0, sparc_fp, 72); sparc_ld_imm (code, sparc_fp, 72, sparc_o1); sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2); #endif break; case SAVE_STRUCT: #ifdef SPARCV9 sparc_mov_reg_reg (code, sparc_i0, sparc_o1); #else sparc_ld_imm (code, sparc_fp, 64, sparc_o1); #endif break; case SAVE_NONE: default: break; } sparc_set (code, cfg->method, sparc_o0); mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (func)); EMIT_CALL (); /* Restore result */ switch (save_mode) { case SAVE_TWO: sparc_ld_imm (code, sparc_fp, 68, sparc_i0); sparc_ld_imm (code, sparc_fp, 72, sparc_i1); break; case SAVE_ONE: sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0); break; case SAVE_FP: sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0); break; case SAVE_NONE: default: break; } return code; } guint8 * mono_arch_emit_prolog (MonoCompile *cfg) { MonoMethod *method = cfg->method; MonoMethodSignature *sig; MonoInst *inst; guint32 *code; CallInfo *cinfo; guint32 i, offset; cfg->code_size = 256; cfg->native_code = g_malloc (cfg->code_size); code = (guint32*)cfg->native_code; /* FIXME: Generate intermediate code instead */ offset = cfg->stack_offset; offset += (16 * sizeof (target_mgreg_t)); /* register save area */ #ifndef SPARCV9 offset += 4; /* struct/union return pointer */ #endif /* add parameter area size for called functions */ if (cfg->param_area < (6 * sizeof (target_mgreg_t))) /* Reserve space for the first 6 arguments even if it is unused */ offset += 6 * sizeof (target_mgreg_t); else offset += cfg->param_area; /* align the stack size */ offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT); /* * localloc'd memory is stored between the local variables (whose * size is given by cfg->stack_offset) and the space reserved * by the ABI. 
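* (OP_LOCALLOC above implements this by moving %sp down by the rounded
* request and handing back %sp + MONO_SPARC_STACK_BIAS + the rounded
* localloc_offset, i.e. the first address above the freshly extended
* ABI-reserved region.)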
*/ cfg->arch.localloc_offset = offset - cfg->stack_offset; cfg->stack_offset = offset; #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK /* Perform stack touching */ NOT_IMPLEMENTED; #endif if (!sparc_is_imm13 (- cfg->stack_offset)) { /* Can't use sparc_o7 here, since we're still in the caller's frame */ sparc_set (code, (- cfg->stack_offset), GP_SCRATCH_REG); sparc_save (code, sparc_sp, GP_SCRATCH_REG, sparc_sp); } else sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp); /* if (strstr (cfg->method->name, "foo")) { mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_sparc_break)); sparc_call_simple (code, 0); sparc_nop (code); } */ sig = mono_method_signature_internal (method); cinfo = get_call_info (cfg, sig, FALSE); /* Keep in sync with emit_load_volatile_arguments */ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; gint32 stack_offset; MonoType *arg_type; inst = cfg->args [i]; if (sig->hasthis && (i == 0)) arg_type = mono_get_object_type (); else arg_type = sig->params [i - sig->hasthis]; stack_offset = ainfo->offset + ARGS_OFFSET; /* Save the split arguments so they will reside entirely on the stack */ if (ainfo->storage == ArgInSplitRegStack) { /* Save the register to the stack */ g_assert (inst->opcode == OP_REGOFFSET); if (!sparc_is_imm13 (stack_offset)) NOT_IMPLEMENTED; sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset); } if (!v64 && !m_type_is_byref (arg_type) && (arg_type->type == MONO_TYPE_R8)) { /* Save the argument to a dword aligned stack location */ /* * stack_offset contains the offset of the argument on the stack. * inst->inst_offset contains the dword aligned offset where the value * should be stored. */ if (ainfo->storage == ArgInIRegPair) { if (!sparc_is_imm13 (inst->inst_offset + 4)) NOT_IMPLEMENTED; sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset); sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4); } else if (ainfo->storage == ArgInSplitRegStack) { #ifdef SPARCV9 g_assert_not_reached (); #endif if (stack_offset != inst->inst_offset) { /* stack_offset is not dword aligned, so we need to make a copy */ sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset); sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7); sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4); } } else if (ainfo->storage == ArgOnStackPair) { #ifdef SPARCV9 g_assert_not_reached (); #endif if (stack_offset != inst->inst_offset) { /* stack_offset is not dword aligned, so we need to make a copy */ sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7); sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset); sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7); sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4); } } else g_assert_not_reached (); } else if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) { /* Argument in register, but need to be saved to stack */ if (!sparc_is_imm13 (stack_offset)) NOT_IMPLEMENTED; if ((stack_offset - ARGS_OFFSET) & 0x1) sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset); else if ((stack_offset - ARGS_OFFSET) & 0x2) sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset); else if ((stack_offset - ARGS_OFFSET) & 0x4) sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset); else { if (v64) sparc_stx_imm (code, 
sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset); else sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset); } } else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) { #ifdef SPARCV9 NOT_IMPLEMENTED; #endif /* Argument in regpair, but need to be saved to stack */ if (!sparc_is_imm13 (inst->inst_offset + 4)) NOT_IMPLEMENTED; sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset); sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4); } else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) { if (!sparc_is_imm13 (stack_offset)) NOT_IMPLEMENTED; sparc_stf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); } else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) { /* The offset is guaranteed to be aligned by the ABI rules */ sparc_stdf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); } if ((ainfo->storage == ArgInFloatReg) && (inst->opcode == OP_REGVAR)) { /* Need to move into the a double precision register */ sparc_fstod (code, ainfo->reg, ainfo->reg - 1); } if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack)) if (inst->opcode == OP_REGVAR) /* FIXME: Load the argument into memory */ NOT_IMPLEMENTED; } g_free (cinfo); if (cfg->method->save_lmf) { gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset; /* Save ip */ mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL); sparc_set_template (code, sparc_o7); sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip)); /* Save sp */ sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp)); /* Save fp */ sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp)); /* Save method */ /* FIXME: add a relocation for this */ sparc_set (code, cfg->method, sparc_o7); sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method)); mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_get_lmf_addr)); EMIT_CALL (); code = (guint32*)mono_sparc_emit_save_lmf (code, lmf_offset); } #error obsolete tracing? if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) code = (guint32*)mono_arch_instrument_prolog (cfg, MONO_JIT_ICALL_mono_trace_enter_method, code, TRUE); set_code_cursor (cfg, code); return (guint8*)code; } void mono_arch_emit_epilog (MonoCompile *cfg) { MonoMethod *method = cfg->method; guint32 *code; int can_fold = 0; int max_epilog_size = 16 + 20 * 4; if (cfg->method->save_lmf) max_epilog_size += 128; if (mono_jit_trace_calls != NULL) max_epilog_size += 50; code = (guint32 *)realloc_code (cfg, max_epilog_size); #error obsolete tracing? 
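/*
 * The folding below exploits the fact that "restore" can perform an add
 * while switching register windows: a trailing "or %l0, imm, %i0" followed
 * by a plain restore can become a single "restore %l0, imm, %o0", since
 * the callee's %i0 is the caller's %o0 after the window switch.
 */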
if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) code = (guint32*)mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE); if (cfg->method->save_lmf) { gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset; code = mono_sparc_emit_restore_lmf (code, lmf_offset); } /* * The V8 ABI requires that calls to functions which return a structure * return to %i7+12 */ if (!v64 && mono_method_signature_internal (cfg->method)->pinvoke && MONO_TYPE_ISSTRUCT(mono_method_signature_internal (cfg->method)->ret)) sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0); else sparc_ret (code); /* Only fold last instruction into the restore if the exit block has an in count of 1 and the previous block hasn't been optimized away since it may have an in count > 1 */ if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset) can_fold = 1; /* * FIXME: The last instruction might have a branch pointing into it like in * int_ceq sparc_i0 <- */ can_fold = 0; /* Try folding last instruction into the restore */ if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) { /* or reg, imm, %i0 */ int reg = sparc_inst_rs1 (code [-2]); int imm = (((gint32)(sparc_inst_imm13 (code [-2]))) << 19) >> 19; code [-2] = code [-1]; code --; sparc_restore_imm (code, reg, imm, sparc_o0); } else if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) { /* or reg, reg, %i0 */ int reg1 = sparc_inst_rs1 (code [-2]); int reg2 = sparc_inst_rs2 (code [-2]); code [-2] = code [-1]; code --; sparc_restore (code, reg1, reg2, sparc_o0); } else sparc_restore_imm (code, sparc_g0, 0, sparc_g0); set_code_cursor (cfg, code); } void mono_arch_emit_exceptions (MonoCompile *cfg) { MonoJumpInfo *patch_info; guint32 *code; int nthrows = 0, i; int exc_count = 0; guint32 code_size; MonoClass *exc_classes [16]; guint8 *exc_throw_start [16], *exc_throw_end [16]; /* Compute needed space */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { if (patch_info->type == MONO_PATCH_INFO_EXC) exc_count++; } /* * make sure we have enough space for exceptions */ #ifdef SPARCV9 code_size = exc_count * (20 * 4); #else code_size = exc_count * 24; #endif code = (guint32*)realloc_code (cfg, code_size); for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { switch (patch_info->type) { case MONO_PATCH_INFO_EXC: { MonoClass *exc_class; guint32 *buf, *buf2; guint32 throw_ip, type_idx; gint32 disp; sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code); exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name); type_idx = m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF; throw_ip = patch_info->ip.i; /* Find a throw sequence for the same exception class */ for (i = 0; i < nthrows; ++i) if (exc_classes [i] == exc_class) break; if (i < nthrows) { guint32 throw_offset = (((guint8*)exc_throw_end [i] - cfg->native_code) - throw_ip) >> 2; if (!sparc_is_imm13 (throw_offset)) sparc_set32 (code, throw_offset, sparc_o1); disp = (exc_throw_start [i] - (guint8*)code) >> 2; g_assert (sparc_is_imm22 (disp)); sparc_branch (code, 0, sparc_ba, disp); if (sparc_is_imm13 (throw_offset)) sparc_set32 (code, throw_offset, sparc_o1); else sparc_nop (code); patch_info->type = MONO_PATCH_INFO_NONE; } else { /* Emit the 
template for setting o1 */ buf = code; if (sparc_is_imm13 (((((guint8*)code - cfg->native_code) - throw_ip) >> 2) - 8)) /* Can use a short form */ sparc_nop (code); else sparc_set_template (code, sparc_o1); buf2 = code; if (nthrows < 16) { exc_classes [nthrows] = exc_class; exc_throw_start [nthrows] = (guint8*)code; } /* mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_sparc_break)); EMIT_CALL(); */ /* first arg = type token */ /* Pass the type index to reduce the size of the sparc_set */ if (!sparc_is_imm13 (type_idx)) sparc_set32 (code, type_idx, sparc_o0); /* second arg = offset between the throw ip and the current ip */ /* On sparc, the saved ip points to the call instruction */ disp = (((guint8*)code - cfg->native_code) - throw_ip) >> 2; sparc_set32 (buf, disp, sparc_o1); while (buf < buf2) sparc_nop (buf); if (nthrows < 16) { exc_throw_end [nthrows] = (guint8*)code; nthrows ++; } patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception; patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID; patch_info->ip.i = (guint8*)code - cfg->native_code; EMIT_CALL (); if (sparc_is_imm13 (type_idx)) { /* Put it into the delay slot */ code --; buf = code; sparc_set32 (code, type_idx, sparc_o0); g_assert (code - buf == 1); } } break; } default: /* do nothing */ break; } set_code_cursor (cfg, code); } set_code_cursor (cfg, code); } gboolean lmf_addr_key_inited = FALSE; #ifdef MONO_SPARC_THR_TLS thread_key_t lmf_addr_key; #else pthread_key_t lmf_addr_key; #endif gpointer mono_arch_get_lmf_addr (void) { /* This is perf critical so we bypass the IO layer */ /* The thr_... functions seem to be somewhat faster */ #ifdef MONO_SPARC_THR_TLS gpointer res; thr_getspecific (lmf_addr_key, &res); return res; #else return pthread_getspecific (lmf_addr_key); #endif } #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK /* * There seems to be no way to determine stack boundaries under solaris, * so it's not possible to determine whether a SIGSEGV is caused by stack * overflow or not. */ #error "--with-sigaltstack=yes not supported on solaris" #endif void mono_arch_tls_init (void) { MonoJitTlsData *jit_tls; if (!lmf_addr_key_inited) { int res; lmf_addr_key_inited = TRUE; #ifdef MONO_SPARC_THR_TLS res = thr_keycreate (&lmf_addr_key, NULL); #else res = pthread_key_create (&lmf_addr_key, NULL); #endif g_assert (res == 0); } jit_tls = mono_get_jit_tls (); #ifdef MONO_SPARC_THR_TLS thr_setspecific (lmf_addr_key, &jit_tls->lmf); #else pthread_setspecific (lmf_addr_key, &jit_tls->lmf); #endif } void mono_arch_finish_init (void) { } MonoInst* mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins = NULL; return ins; } /* * mono_arch_get_argument_info: * @csig: a method signature * @param_count: the number of parameters to consider * @arg_info: an array to store the result infos * * Gathers information on parameters such as size, alignment and * padding. arg_info should be large enough to hold param_count + 1 entries. * * Returns the size of the activation frame. 
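* (Note that this sparc implementation only fills in the offsets, relative
* to ARGS_OFFSET minus the stack bias, and currently returns 0 rather than
* the frame size.)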
*/ int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) { int k, align; CallInfo *cinfo; ArgInfo *ainfo; cinfo = get_call_info (NULL, csig, FALSE); if (csig->hasthis) { ainfo = &cinfo->args [0]; arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset; } for (k = 0; k < param_count; k++) { ainfo = &cinfo->args [k + csig->hasthis]; arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset; arg_info [k + 1].size = mono_type_size (csig->params [k], &align); } g_free (cinfo); return 0; } host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg) { /* FIXME: implement */ g_assert_not_reached (); } host_mgreg_t* mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg) { /* FIXME: implement */ g_assert_not_reached (); } gboolean mono_arch_opcode_supported (int opcode) { return FALSE; } gboolean mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_) { return FALSE; } gpointer mono_arch_load_function (MonoJitICallId jit_icall_id) { return NULL; }
/** * \file * Sparc backend for the Mono code generator * * Authors: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * * Modified for SPARC: * Christopher Taylor ([email protected]) * Mark Crichton ([email protected]) * Zoltan Varga ([email protected]) * * (C) 2003 Ximian, Inc. */ #include "mini.h" #include <string.h> #include <pthread.h> #include <unistd.h> #ifndef __linux__ #include <thread.h> #endif #include <sys/mman.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/tokentype.h> #include <mono/utils/mono-math.h> #include <mono/utils/mono-hwcap.h> #include <mono/utils/unlocked.h> #include "mini-sparc.h" #include "trace.h" #include "cpu-sparc.h" #include "jit-icalls.h" #include "ir-emit.h" #include "mono/utils/mono-tls-inline.h" /* * Sparc V9 means two things: * - the instruction set * - the ABI * * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc * processors in use are 64 bit processors. The V9 ABI is only usable if the * mono executable is a 64 bit executable. So it would make sense to use the 64 bit * instructions without using the 64 bit ABI. */ /* * Register usage: * - %i0..%i<n> hold the incoming arguments, these are never written by JITted * code. Unused input registers are used for global register allocation. * - %o0..%o5 and %l7 are used for local register allocation and passing arguments * - %l0..%l6 are used for global register allocation * - %o7 and %g1 are used as scratch registers in opcodes * - all floating point registers are used for local register allocation except %f0. * Only double precision registers are used. * In 64 bit mode: * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are * used for local allocation. */ /* * Alignment: * - doubles and longs must be stored in dword aligned locations */ /* * The following things are not implemented or do not work: * - some fp arithmetic corner cases * The following tests in mono/mini are expected to fail: * - test_0_simple_double_casts * This test casts (guint64)-1 to double and then back to guint64 again. * Under x86, it returns 0, while under sparc it returns -1. * * In addition to this, the runtime requires the trunc function, or its * solaris counterpart, aintl, to do some double->int conversions. If this * function is not available, it is emulated somewhat, but the results can be * strange. */ /* * SPARCV9 FIXME: * - optimize sparc_set according to the memory model * - when non-AOT compiling, compute patch targets immediately so we don't * have to emit the 6 byte template. * - varargs * - struct arguments/returns */ /* * SPARCV9 ISSUES: * - sparc_call_simple can't be used in a lot of places since the displacement * might not fit into an imm30. * - g1 can't be used in a lot of places since it is used as a scratch reg in * sparc_set. * - sparc_f0 can't be used as a scratch register on V9 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), i.e. * %d36 = %f5. * - ldind.i4/u4 needs to sign extend/clear out upper word -> slows things down * - ins->dreg can't be used as a scratch register in r4 opcodes since it might * be a double precision register which has no single precision part. * - passing/returning structs is hard to implement, because: * - the spec is very hard to understand * - it requires knowledge about the fields of the structure, needs to handle * nested structures etc. 
*/ /* * Possible optimizations: * - delay slot scheduling * - allocate large constants to registers * - add more mul/div/rem optimizations */ #ifndef __linux__ #define MONO_SPARC_THR_TLS 1 #endif /* * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32, * causing infinite loops in dominator computation. So glib-2.4 is required. */ #ifdef SPARCV9 #if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4 #error "glib 2.4 or later is required for 64 bit mode." #endif #endif #define SIGNAL_STACK_SIZE (64 * 1024) #define STACK_BIAS MONO_SPARC_STACK_BIAS #ifdef SPARCV9 /* %g1 is used by sparc_set */ #define GP_SCRATCH_REG sparc_g4 /* %f0 is used for parameter passing */ #define FP_SCRATCH_REG sparc_f30 #define ARGS_OFFSET (STACK_BIAS + 128) #else #define FP_SCRATCH_REG sparc_f0 #define ARGS_OFFSET 68 #define GP_SCRATCH_REG sparc_g1 #endif /* Whenever this is a 64bit executable */ #if SPARCV9 static gboolean v64 = TRUE; #else static gboolean v64 = FALSE; #endif static gpointer mono_arch_get_lmf_addr (void); const char* mono_arch_regname (int reg) { static const char * rnames[] = { "sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4", "sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1", "sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp", "sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3", "sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0", "sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5", "sparc_fp", "sparc_retadr" }; if (reg >= 0 && reg < 32) return rnames [reg]; return "unknown"; } const char* mono_arch_fregname (int reg) { static const char *rnames [] = { "sparc_f0", "sparc_f1", "sparc_f2", "sparc_f3", "sparc_f4", "sparc_f5", "sparc_f6", "sparc_f7", "sparc_f8", "sparc_f9", "sparc_f10", "sparc_f11", "sparc_f12", "sparc_f13", "sparc_f14", "sparc_f15", "sparc_f16", "sparc_f17", "sparc_f18", "sparc_f19", "sparc_f20", "sparc_f21", "sparc_f22", "sparc_f23", "sparc_f24", "sparc_f25", "sparc_f26", "sparc_f27", "sparc_f28", "sparc_f29", "sparc_f30", "sparc_f31" }; if (reg >= 0 && reg < 32) return rnames [reg]; else return "unknown"; } /* * Initialize the cpu to execute managed code. */ void mono_arch_cpu_init (void) { } /* * Initialize architecture specific code. */ void mono_arch_init (void) { } /* * Cleanup architecture specific code. */ void mono_arch_cleanup (void) { } gboolean mono_arch_have_fast_tls (void) { return FALSE; } /* * This function returns the optimizations supported on this cpu. */ guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask) { guint32 opts = 0; *exclude_mask = 0; /* * On some processors, the cmov instructions are even slower than the * normal ones... */ if (mono_hwcap_sparc_is_v9) opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV; else *exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV; return opts; } #ifdef __GNUC__ #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory") #else /* assume Sun's compiler */ static void flushi(void *addr) { asm("flush %i0"); } #endif #ifndef __linux__ void sync_instruction_memory(caddr_t addr, int len); #endif void mono_arch_flush_icache (guint8 *code, gint size) { #ifndef __linux__ /* Hopefully this is optimized based on the actual CPU */ sync_instruction_memory (code, size); #else gulong start = (gulong) code; gulong end = start + size; gulong align; /* Sparcv9 chips only need flushes on 32 byte * cacheline boundaries. * * Sparcv8 needs a flush every 8 bytes. */ align = (mono_hwcap_sparc_is_v9 ? 
32 : 8); start &= ~(align - 1); end = (end + (align - 1)) & ~(align - 1); while (start < end) { #ifdef __GNUC__ __asm__ __volatile__ ("iflush %0"::"r"(start)); #else flushi (start); #endif start += align; } #endif } /* * mono_sparc_flushw: * * Flush all register windows to memory. Every register window is saved to * a 16 word area on the stack pointed to by its %sp register. */ void mono_sparc_flushw (void) { static guint32 start [64]; static int inited = 0; guint32 *code; static void (*flushw) (void); if (!inited) { code = start; sparc_save_imm (code, sparc_sp, -160, sparc_sp); sparc_flushw (code); sparc_ret (code); sparc_restore_simple (code); g_assert ((code - start) < 64); mono_arch_flush_icache ((guint8*)start, (guint8*)code - (guint8*)start); flushw = (gpointer)start; inited = 1; } flushw (); } void mono_arch_flush_register_windows (void) { mono_sparc_flushw (); } gboolean mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm) { return sparc_is_imm13 (imm); } gboolean mono_sparc_is_v9 (void) { return mono_hwcap_sparc_is_v9; } gboolean mono_sparc_is_sparc64 (void) { return v64; } typedef enum { ArgInIReg, ArgInIRegPair, ArgInSplitRegStack, ArgInFReg, ArgInFRegPair, ArgOnStack, ArgOnStackPair, ArgInFloatReg, /* V9 only */ ArgInDoubleReg /* V9 only */ } ArgStorage; typedef struct { gint16 offset; /* This needs to be offset by %i0 or %o0 depending on caller/callee */ gint8 reg; ArgStorage storage; guint32 vt_offset; /* for valuetypes */ } ArgInfo; struct CallInfo { int nargs; guint32 stack_usage; guint32 reg_usage; ArgInfo ret; ArgInfo sig_cookie; ArgInfo args [1]; }; #define DEBUG(a) /* %o0..%o5 */ #define PARAM_REGS 6 static void inline add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair) { ainfo->offset = *stack_size; if (!pair) { if (*gr >= PARAM_REGS) { ainfo->storage = ArgOnStack; } else { ainfo->storage = ArgInIReg; ainfo->reg = *gr; (*gr) ++; } /* Always reserve stack space for parameters passed in registers */ (*stack_size) += sizeof (target_mgreg_t); } else { if (*gr < PARAM_REGS - 1) { /* A pair of registers */ ainfo->storage = ArgInIRegPair; ainfo->reg = *gr; (*gr) += 2; } else if (*gr >= PARAM_REGS) { /* A pair of stack locations */ ainfo->storage = ArgOnStackPair; } else { ainfo->storage = ArgInSplitRegStack; ainfo->reg = *gr; (*gr) ++; } (*stack_size) += 2 * sizeof (target_mgreg_t); } } #ifdef SPARCV9 #define FLOAT_PARAM_REGS 32 static void inline add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single) { ainfo->offset = *stack_size; if (single) { if (*gr >= FLOAT_PARAM_REGS) { ainfo->storage = ArgOnStack; } else { /* A single is passed in an odd numbered fp register */ ainfo->storage = ArgInFloatReg; ainfo->reg = *gr + 1; (*gr) += 2; } } else { if (*gr < FLOAT_PARAM_REGS) { /* A double register */ ainfo->storage = ArgInDoubleReg; ainfo->reg = *gr; (*gr) += 2; } else { ainfo->storage = ArgOnStack; } } (*stack_size) += sizeof (target_mgreg_t); } #endif /* * get_call_info: * * Obtain information about a call according to the calling convention. * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version * document for more information. * For V9, see the "Low Level System Information (64-bit psABI)" chapter in * the 'Sparc Compliance Definition 2.4' document. 
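* In short, for V8: up to six machine words are passed in %o0..%o5 (with
* stack space always reserved for them), and longs and doubles take a
* register pair which may be split between the last register and the
* stack -- the ArgInSplitRegStack case that add_general () above handles.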
*/ static CallInfo* get_call_info (MonoCompile *cfg, MonoMethodSignature *sig, gboolean is_pinvoke) { guint32 i, gr, fr; int n = sig->hasthis + sig->param_count; guint32 stack_size = 0; CallInfo *cinfo; MonoType *ret_type; cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n)); gr = 0; fr = 0; #ifdef SPARCV9 if (MONO_TYPE_ISSTRUCT ((sig->ret))) { /* The address of the return value is passed in %o0 */ add_general (&gr, &stack_size, &cinfo->ret, FALSE); cinfo->ret.reg += sparc_i0; /* FIXME: Pass this after this as on other platforms */ NOT_IMPLEMENTED; } #endif /* this */ if (sig->hasthis) add_general (&gr, &stack_size, cinfo->args + 0, FALSE); if ((sig->call_convention == MONO_CALL_VARARG) && (n == 0)) { gr = PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE); } for (i = 0; i < sig->param_count; ++i) { ArgInfo *ainfo = &cinfo->args [sig->hasthis + i]; MonoType *ptype; if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { gr = PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE); } DEBUG(printf("param %d: ", i)); if (m_type_is_byref (sig->params [i])) { DEBUG(printf("byref\n")); add_general (&gr, &stack_size, ainfo, FALSE); continue; } ptype = mini_get_underlying_type (sig->params [i]); switch (ptype->type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: add_general (&gr, &stack_size, ainfo, FALSE); /* the value is in the ls byte */ ainfo->offset += sizeof (target_mgreg_t) - 1; break; case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: add_general (&gr, &stack_size, ainfo, FALSE); /* the value is in the ls word */ ainfo->offset += sizeof (target_mgreg_t) - 2; break; case MONO_TYPE_I4: case MONO_TYPE_U4: add_general (&gr, &stack_size, ainfo, FALSE); /* the value is in the ls dword */ ainfo->offset += sizeof (target_mgreg_t) - 4; break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: add_general (&gr, &stack_size, ainfo, FALSE); break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ptype)) { add_general (&gr, &stack_size, ainfo, FALSE); break; } /* Fall through */ case MONO_TYPE_VALUETYPE: #ifdef SPARCV9 if (sig->pinvoke) NOT_IMPLEMENTED; #endif add_general (&gr, &stack_size, ainfo, FALSE); break; case MONO_TYPE_TYPEDBYREF: add_general (&gr, &stack_size, ainfo, FALSE); break; case MONO_TYPE_U8: case MONO_TYPE_I8: #ifdef SPARCV9 add_general (&gr, &stack_size, ainfo, FALSE); #else add_general (&gr, &stack_size, ainfo, TRUE); #endif break; case MONO_TYPE_R4: #ifdef SPARCV9 add_float (&fr, &stack_size, ainfo, TRUE); gr ++; #else /* single precision values are passed in integer registers */ add_general (&gr, &stack_size, ainfo, FALSE); #endif break; case MONO_TYPE_R8: #ifdef SPARCV9 add_float (&fr, &stack_size, ainfo, FALSE); gr ++; #else /* double precision values are passed in a pair of registers */ add_general (&gr, &stack_size, ainfo, TRUE); #endif break; default: g_assert_not_reached (); } } if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) { gr = PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE); } /* return value */ ret_type = mini_get_underlying_type 
(sig->ret); switch (ret_type->type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: case MONO_TYPE_STRING: cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = sparc_i0; if (gr < 1) gr = 1; break; case MONO_TYPE_U8: case MONO_TYPE_I8: #ifdef SPARCV9 cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = sparc_i0; if (gr < 1) gr = 1; #else cinfo->ret.storage = ArgInIRegPair; cinfo->ret.reg = sparc_i0; if (gr < 2) gr = 2; #endif break; case MONO_TYPE_R4: case MONO_TYPE_R8: cinfo->ret.storage = ArgInFReg; cinfo->ret.reg = sparc_f0; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ret_type)) { cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = sparc_i0; if (gr < 1) gr = 1; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: if (v64) { if (sig->pinvoke) NOT_IMPLEMENTED; else /* Already done */ ; } else cinfo->ret.storage = ArgOnStack; break; case MONO_TYPE_TYPEDBYREF: if (v64) { if (sig->pinvoke) /* Same as a valuetype with size 24 */ NOT_IMPLEMENTED; else /* Already done */ ; } else cinfo->ret.storage = ArgOnStack; break; case MONO_TYPE_VOID: break; default: g_error ("Can't handle as return value 0x%x", sig->ret->type); } cinfo->stack_usage = stack_size; cinfo->reg_usage = gr; return cinfo; } GList * mono_arch_get_allocatable_int_vars (MonoCompile *cfg) { GList *vars = NULL; int i; /* * FIXME: If an argument is allocated to a register, then load it from the * stack in the prolog. */ for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); /* unused vars */ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos) continue; /* FIXME: Make arguments on stack allocateable to registers */ if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG)) continue; if (mono_is_regsize_var (ins->inst_vtype)) { g_assert (MONO_VARINFO (cfg, i)->reg == -1); g_assert (i == vmv->idx); vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE); } } return vars; } GList * mono_arch_get_global_int_regs (MonoCompile *cfg) { GList *regs = NULL; int i; MonoMethodSignature *sig; CallInfo *cinfo; sig = mono_method_signature_internal (cfg->method); cinfo = get_call_info (cfg, sig, FALSE); /* Use unused input registers */ for (i = cinfo->reg_usage; i < 6; ++i) regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i)); /* Use %l0..%l6 as global registers */ for (i = sparc_l0; i < sparc_l7; ++i) regs = g_list_prepend (regs, GUINT_TO_POINTER (i)); g_free (cinfo); return regs; } /* * mono_arch_regalloc_cost: * * Return the cost, in number of memory references, of the action of * allocating the variable VMV into a register during global register * allocation. */ guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv) { return 0; } /* * Set var information according to the calling convention. sparc version. * The locals var stuff should most likely be split in another method. 
 */
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	MonoInst *inst;
	int i, offset, size, align, curinst;
	CallInfo *cinfo;

	header = cfg->header;

	sig = mono_method_signature_internal (cfg->method);

	cinfo = get_call_info (cfg, sig, FALSE);

	if (sig->ret->type != MONO_TYPE_VOID) {
		switch (cinfo->ret.storage) {
		case ArgInIReg:
		case ArgInFReg:
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = cinfo->ret.reg;
			break;
		case ArgInIRegPair: {
			MonoType *t = mini_get_underlying_type (sig->ret);
			if (((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
				MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->ret->dreg));
				MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->ret->dreg));

				low->opcode = OP_REGVAR;
				low->dreg = cinfo->ret.reg + 1;
				high->opcode = OP_REGVAR;
				high->dreg = cinfo->ret.reg;
			}
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = cinfo->ret.reg;
			break;
		}
		case ArgOnStack:
#ifdef SPARCV9
			g_assert_not_reached ();
#else
			/* valuetypes */
			cfg->vret_addr->opcode = OP_REGOFFSET;
			cfg->vret_addr->inst_basereg = sparc_fp;
			cfg->vret_addr->inst_offset = 64;
#endif
			break;
		default:
			NOT_IMPLEMENTED;
		}
		cfg->ret->dreg = cfg->ret->inst_c0;
	}

	/*
	 * We use the ABI calling conventions for managed code as well.
	 * Exception: valuetypes are never returned in registers on V9.
	 * FIXME: Use something more optimized.
	 */

	/* Locals are allocated backwards from %fp */
	cfg->frame_reg = sparc_fp;
	offset = 0;

	/*
	 * Reserve a stack slot for holding information used during exception
	 * handling.
	 */
	if (header->num_clauses)
		offset += sizeof (target_mgreg_t) * 2;

	if (cfg->method->save_lmf) {
		offset += sizeof (MonoLMF);
		cfg->arch.lmf_offset = offset;
	}

	curinst = cfg->locals_start;
	for (i = curinst; i < cfg->num_varinfo; ++i) {
		inst = cfg->varinfo [i];

		if ((inst->opcode == OP_REGVAR) || (inst->opcode == OP_REGOFFSET)) {
			//g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
			continue;
		}

		if (inst->flags & MONO_INST_IS_DEAD)
			continue;

		/* inst->backend.is_pinvoke indicates native sized value types; this is used by the
		 * pinvoke wrappers when they call functions returning structure */
		if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
			size = mono_class_native_size (mono_class_from_mono_type_internal (inst->inst_vtype), &align);
		else
			size = mini_type_stack_size (inst->inst_vtype, &align);

		/*
		 * This is needed since structures containing doubles must be doubleword
		 * aligned.
		 * FIXME: Do this only if needed.
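		 * (A doubleword FP load/store to a non-8-byte-aligned address traps on
		 * SPARC, so the whole slot is conservatively over-aligned here.)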
		 */
		if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
			align = 8;

		/*
		 * variables are accessed as negative offsets from %fp, so increase
		 * the offset before assigning it to a variable
		 */
		offset += size;

		offset += align - 1;
		offset &= ~(align - 1);
		inst->opcode = OP_REGOFFSET;
		inst->inst_basereg = sparc_fp;
		inst->inst_offset = STACK_BIAS + -offset;

		//g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
	}

	if (sig->call_convention == MONO_CALL_VARARG) {
		cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
	}

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		inst = cfg->args [i];
		if (inst->opcode != OP_REGVAR) {
			ArgInfo *ainfo = &cinfo->args [i];
			gboolean inreg = TRUE;
			MonoType *arg_type;
			ArgStorage storage;

			if (sig->hasthis && (i == 0))
				arg_type = mono_get_object_type ();
			else
				arg_type = sig->params [i - sig->hasthis];

#ifndef SPARCV9
			if (!m_type_is_byref (arg_type) && ((arg_type->type == MONO_TYPE_R4)
											|| (arg_type->type == MONO_TYPE_R8)))
				/*
				 * Since float arguments are passed in integer registers, we need to
				 * save them to the stack in the prolog.
				 */
				inreg = FALSE;
#endif

			/* FIXME: Allocate volatile arguments to registers */
			/* FIXME: This makes the argument holding a vtype address into volatile */
			if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
				inreg = FALSE;

			if (MONO_TYPE_ISSTRUCT (arg_type))
				/* FIXME: this isn't needed */
				inreg = FALSE;

			inst->opcode = OP_REGOFFSET;

			if (!inreg)
				storage = ArgOnStack;
			else
				storage = ainfo->storage;

			switch (storage) {
			case ArgInIReg:
				inst->opcode = OP_REGVAR;
				inst->dreg = sparc_i0 + ainfo->reg;
				break;
			case ArgInIRegPair:
				if (inst->type == STACK_I8) {
					MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (inst->dreg));
					MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (inst->dreg));

					low->opcode = OP_REGVAR;
					low->dreg = sparc_i0 + ainfo->reg + 1;
					high->opcode = OP_REGVAR;
					high->dreg = sparc_i0 + ainfo->reg;
				}
				inst->opcode = OP_REGVAR;
				inst->dreg = sparc_i0 + ainfo->reg;
				break;
			case ArgInFloatReg:
			case ArgInDoubleReg:
				/*
				 * Since float regs are volatile, we save the arguments to
				 * the stack in the prolog.
				 * FIXME: Avoid this if the method contains no calls.
				 */
			case ArgOnStack:
			case ArgOnStackPair:
			case ArgInSplitRegStack:
				/* Split arguments are saved to the stack in the prolog */
				inst->opcode = OP_REGOFFSET;
				/* in parent frame */
				inst->inst_basereg = sparc_fp;
				inst->inst_offset = ainfo->offset + ARGS_OFFSET;

				if (!m_type_is_byref (arg_type) && (arg_type->type == MONO_TYPE_R8)) {
					/*
					 * It is very hard to load doubles from non-doubleword aligned
					 * memory locations. So if the offset is misaligned, we copy the
					 * argument to a stack location in the prolog.
					 */
					if ((inst->inst_offset - STACK_BIAS) % 8) {
						inst->inst_basereg = sparc_fp;
						offset += 8;
						align = 8;
						offset += align - 1;
						offset &= ~(align - 1);
						inst->inst_offset = STACK_BIAS + -offset;
					}
				}
				break;
			default:
				NOT_IMPLEMENTED;
			}

			if (MONO_TYPE_ISSTRUCT (arg_type)) {
				/* Add a level of indirection */
				/*
				 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
				 * are destructively modified in a lot of places in inssel.brg.
				 */
				MonoInst *indir;
				MONO_INST_NEW (cfg, indir, 0);
				*indir = *inst;
				inst->opcode = OP_VTARG_ADDR;
				inst->inst_left = indir;
			}
		}
	}

	/*
	 * spillvars are stored between the normal locals and the storage reserved
	 * by the ABI.
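	 *
	 * Rough frame sketch (the frame grows downward; locals live at negative
	 * offsets from %fp, outgoing arguments at positive offsets from %sp):
	 *
	 *   %fp          -> register window save area / ABI reserved words
	 *   %fp - offset -> locals and spill slots
	 *   %sp          -> outgoing call arguments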
	 */
	cfg->stack_offset = offset;

	g_free (cinfo);
}

void
mono_arch_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;

	sig = mono_method_signature_internal (cfg->method);

	if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
		cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			printf ("vret_addr = ");
			mono_print_ins (cfg->vret_addr);
		}
	}

	if (!m_type_is_byref (sig->ret) && (sig->ret->type == MONO_TYPE_I8 || sig->ret->type == MONO_TYPE_U8)) {
		MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->ret->dreg));
		MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->ret->dreg));

		low->flags |= MONO_INST_VOLATILE;
		high->flags |= MONO_INST_VOLATILE;
	}

	/* Add a properly aligned dword for use by int<->float conversion opcodes */
	cfg->arch.float_spill_slot = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_ARG);
	((MonoInst*)cfg->arch.float_spill_slot)->flags |= MONO_INST_VOLATILE;
}

static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, guint32 sreg)
{
	MonoInst *arg;

	MONO_INST_NEW (cfg, arg, 0);
	arg->sreg1 = sreg;

	switch (storage) {
	case ArgInIReg:
		arg->opcode = OP_MOVE;
		arg->dreg = mono_alloc_ireg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
		break;
	case ArgInFloatReg:
		arg->opcode = OP_FMOVE;
		arg->dreg = mono_alloc_freg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
		break;
	default:
		g_assert_not_reached ();
	}

	MONO_ADD_INS (cfg->cbb, arg);
}

static void
add_outarg_load (MonoCompile *cfg, MonoCallInst *call, int opcode, int basereg, int offset, int reg)
{
	int dreg = mono_alloc_ireg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, sparc_sp, offset);

	mono_call_inst_add_outarg_reg (cfg, call, dreg, reg, FALSE);
}

static void
emit_pass_long (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
{
	int offset = ARGS_OFFSET + ainfo->offset;

	switch (ainfo->storage) {
	case ArgInIRegPair:
		add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg + 1, MONO_LVREG_LS (in->dreg));
		add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, MONO_LVREG_MS (in->dreg));
		break;
	case ArgOnStackPair:
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset, MONO_LVREG_MS (in->dreg));
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, MONO_LVREG_LS (in->dreg));
		break;
	case ArgInSplitRegStack:
		add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, MONO_LVREG_MS (in->dreg));
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, MONO_LVREG_LS (in->dreg));
		break;
	default:
		g_assert_not_reached ();
	}
}

static void
emit_pass_double (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
{
	int offset = ARGS_OFFSET + ainfo->offset;

	switch (ainfo->storage) {
	case ArgInIRegPair:
		/* floating-point <-> integer transfer must go through memory */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);

		/* Load into a register pair */
		add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
		add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset + 4, sparc_o0 + ainfo->reg + 1);
		break;
	case ArgOnStackPair:
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
		break;
	case ArgInSplitRegStack:
		/* floating-point <-> integer transfer must go through memory */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);

		/* Load most significant word into register */
		add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
		break;
	default:
		g_assert_not_reached ();
	}
}

static void
emit_pass_float (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
{
	int offset = ARGS_OFFSET + ainfo->offset;

	switch (ainfo->storage) {
	case ArgInIReg:
		/* floating-point <-> integer transfer must go through memory */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);

		add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
		break;
	case ArgOnStack:
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
		break;
	default:
		g_assert_not_reached ();
	}
}

static void
emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in);

static void
emit_pass_vtype (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in, gboolean pinvoke)
{
	MonoInst *arg;
	guint32 align, offset, pad, size;

	if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
		size = MONO_ABI_SIZEOF (MonoTypedRef);
		align = sizeof (target_mgreg_t);
	}
	else if (pinvoke)
		size = mono_type_native_stack_size (m_class_get_byval_arg (in->klass), &align);
	else {
		/*
		 * Other backends use mono_type_stack_size (), but that
		 * aligns the size to 8, which is larger than the size of
		 * the source, leading to reads of invalid memory if the
		 * source is at the end of address space.
		 */
		size = mono_class_value_size (in->klass, &align);
	}

	/* The first 6 argument locations are reserved */
	if (cinfo->stack_usage < 6 * sizeof (target_mgreg_t))
		cinfo->stack_usage = 6 * sizeof (target_mgreg_t);

	offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
	pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);

	cinfo->stack_usage += size;
	cinfo->stack_usage += pad;

	/*
	 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
	 * use the normal OUTARG opcodes to pass the address of the location to
	 * the callee.
	 */
	if (size > 0) {
		MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
		arg->sreg1 = in->dreg;
		arg->klass = in->klass;
		arg->backend.size = size;
		arg->inst_p0 = call;
		arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
		memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
		((ArgInfo*)(arg->inst_p1))->offset = STACK_BIAS + offset;
		MONO_ADD_INS (cfg->cbb, arg);

		MONO_INST_NEW (cfg, arg, OP_ADD_IMM);
		arg->dreg = mono_alloc_preg (cfg);
		arg->sreg1 = sparc_sp;
		arg->inst_imm = STACK_BIAS + offset;
		MONO_ADD_INS (cfg->cbb, arg);

		emit_pass_other (cfg, call, ainfo, NULL, arg);
	}
}

static void
emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in)
{
	int offset = ARGS_OFFSET + ainfo->offset;
	int opcode;

	switch (ainfo->storage) {
	case ArgInIReg:
		add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg);
		break;
	case ArgOnStack:
#ifdef SPARCV9
		NOT_IMPLEMENTED;
#else
		if (offset & 0x1)
			opcode = OP_STOREI1_MEMBASE_REG;
		else if (offset & 0x2)
			opcode = OP_STOREI2_MEMBASE_REG;
		else
			opcode = OP_STOREI4_MEMBASE_REG;
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, opcode, sparc_sp, offset, in->dreg);
#endif
		break;
	default:
		g_assert_not_reached ();
	}
}

static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
	MonoMethodSignature *tmp_sig;

	/*
	 * mono_ArgIterator_Setup assumes the signature cookie is
	 * passed first and all the arguments which were before it are
	 * passed on the stack after the signature.
	 * So compensate by passing a different signature.
	 */
	tmp_sig = mono_metadata_signature_dup (call->signature);
	tmp_sig->param_count -= call->signature->sentinelpos;
	tmp_sig->sentinelpos = 0;
	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

	/* FIXME: Add support for signature tokens to AOT */
	cfg->disable_aot = TRUE;
	/* We always pass the signature on the stack for simplicity */
	MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset, tmp_sig);
}

void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
	MonoInst *in;
	MonoMethodSignature *sig;
	int i, n;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	guint32 extra_space = 0;

	sig = call->signature;
	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg, sig, sig->pinvoke);

	if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
		/* Set the 'struct/union return pointer' location on the stack */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, call->vret_var->dreg);
	}

	for (i = 0; i < n; ++i) {
		MonoType *arg_type;

		ainfo = cinfo->args + i;

		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Emit the signature cookie just before the first implicit argument */
			emit_sig_cookie (cfg, call, cinfo);
		}

		in = call->args [i];

		if (sig->hasthis && (i == 0))
			arg_type = mono_get_object_type ();
		else
			arg_type = sig->params [i - sig->hasthis];

		arg_type = mini_get_underlying_type (arg_type);
		if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis])))
			emit_pass_vtype (cfg, call, cinfo, ainfo, arg_type, in, sig->pinvoke && !sig->marshalling_disabled);
		else if (!m_type_is_byref (arg_type) && ((arg_type->type == MONO_TYPE_I8) || (arg_type->type == MONO_TYPE_U8)))
			emit_pass_long (cfg, call, ainfo, in);
		else if (!m_type_is_byref (arg_type) && (arg_type->type == MONO_TYPE_R8))
			emit_pass_double (cfg, call, ainfo, in);
		else if (!m_type_is_byref (arg_type) && (arg_type->type == MONO_TYPE_R4))
			emit_pass_float (cfg, call, ainfo, in);
		else
			emit_pass_other (cfg, call, ainfo, arg_type, in);
	}

	/* Handle the case where there are no implicit arguments */
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
		emit_sig_cookie (cfg, call, cinfo);
	}

	call->stack_usage = cinfo->stack_usage + extra_space;

	g_free (cinfo);
}

void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
	int size = ins->backend.size;

	mini_emit_memcpy (cfg, sparc_sp, ainfo->offset, src->dreg, 0, size, TARGET_SIZEOF_VOID_P);
}

void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
	CallInfo *cinfo = get_call_info (cfg, mono_method_signature_internal (method), FALSE);
	MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);

	switch (cinfo->ret.storage) {
	case ArgInIReg:
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
		break;
	case ArgInIRegPair:
		if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
			MONO_EMIT_NEW_UNALU (cfg, OP_LMOVE, cfg->ret->dreg, val->dreg);
		} else {
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_MS (cfg->ret->dreg), MONO_LVREG_MS (val->dreg));
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (cfg->ret->dreg), MONO_LVREG_LS (val->dreg));
		}
		break;
	case ArgInFReg:
		if (ret->type == MONO_TYPE_R4)
			MONO_EMIT_NEW_UNALU (cfg, OP_SETFRET, cfg->ret->dreg, val->dreg);
		else
			MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
		break;
	default:
		g_assert_not_reached ();
	}

	g_free (cinfo);
}

int cond_to_sparc_cond [][3] = {
	{sparc_be, sparc_be, sparc_fbe},
	{sparc_bne, sparc_bne, 0},
	{sparc_ble, sparc_ble, sparc_fble},
	{sparc_bge, sparc_bge, sparc_fbge},
	{sparc_bl, sparc_bl, sparc_fbl},
	{sparc_bg, sparc_bg, sparc_fbg},
	{sparc_bleu, sparc_bleu, 0},
	{sparc_beu, sparc_beu, 0},
	{sparc_blu, sparc_blu, sparc_fbl},
	{sparc_bgu, sparc_bgu, sparc_fbg}
};

/* Map opcode to the sparc condition codes */
static SparcCond
opcode_to_sparc_cond (int opcode)
{
	CompRelation rel;
	CompType t;

	switch (opcode) {
	case OP_COND_EXC_OV:
	case OP_COND_EXC_IOV:
		return sparc_bvs;
	case OP_COND_EXC_C:
	case OP_COND_EXC_IC:
		return sparc_bcs;
	case OP_COND_EXC_NO:
	case OP_COND_EXC_NC:
		NOT_IMPLEMENTED;
	default:
		rel = mono_opcode_to_cond (opcode);
		t = mono_opcode_to_type (opcode, -1);
		return cond_to_sparc_cond [rel][t];
		break;
	}

	return -1;
}

#define COMPUTE_DISP(ins) \
	if (ins->inst_true_bb->native_offset) \
		disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
	else { \
		disp = 0; \
		mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
	}

#ifdef SPARCV9
#define DEFAULT_ICC sparc_xcc_short
#else
#define DEFAULT_ICC sparc_icc_short
#endif

#ifdef SPARCV9
#define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
	do { \
		gint32 disp; \
		guint32 predict; \
		COMPUTE_DISP(ins); \
		predict = (disp != 0) ? 1 : 0; \
		g_assert (sparc_is_imm19 (disp)); \
		sparc_branchp (code, (annul), cond, icc, (predict), disp); \
		if (filldelay) sparc_nop (code); \
	} while (0)
#define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
#define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
	do { \
		gint32 disp; \
		guint32 predict; \
		COMPUTE_DISP(ins); \
		predict = (disp != 0) ? 1 : 0; \
		g_assert (sparc_is_imm19 (disp)); \
		sparc_fbranch (code, (annul), cond, disp); \
		if (filldelay) sparc_nop (code); \
	} while (0)
#else
#define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
#define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
	do { \
		gint32 disp; \
		COMPUTE_DISP(ins); \
		g_assert (sparc_is_imm22 (disp)); \
		sparc_ ## bop (code, (annul), cond, disp); \
		if (filldelay) sparc_nop (code); \
	} while (0)
#define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
#define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
#endif

#define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
	do { \
		gint32 disp; \
		guint32 predict; \
		COMPUTE_DISP(ins); \
		predict = (disp != 0) ? 1 : 0; \
		g_assert (sparc_is_imm19 (disp)); \
		sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
		if (filldelay) sparc_nop (code); \
	} while (0)

#define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
	do { \
		gint32 disp; \
		COMPUTE_DISP(ins); \
		g_assert (sparc_is_imm22 (disp)); \
		sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
		if (filldelay) sparc_nop (code); \
	} while (0)

/* emit an exception if condition is fail */
/*
 * We put the exception throwing code out-of-line, at the end of the method
 */
#define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
		mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
				MONO_PATCH_INFO_EXC, sexc_name); \
		if (mono_hwcap_sparc_is_v9 && ((icc) != sparc_icc_short)) { \
			sparc_branchp (code, 0, (cond), (icc), 0, 0); \
		} \
		else { \
			sparc_branch (code, 0, cond, 0); \
		} \
		if (filldelay) sparc_nop (code); \
	} while (0);

#define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)

#define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
		mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
				MONO_PATCH_INFO_EXC, sexc_name); \
		sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
		sparc_nop (code); \
	} while (0);

#define EMIT_ALU_IMM(ins,op,setcc) do { \
		if (sparc_is_imm13 ((ins)->inst_imm)) \
			sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
		else { \
			sparc_set (code, ins->inst_imm, sparc_o7); \
			sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
		} \
	} while (0);

#define EMIT_LOAD_MEMBASE(ins,op) do { \
		if (sparc_is_imm13 (ins->inst_offset)) \
			sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
		else { \
			sparc_set (code, ins->inst_offset, sparc_o7); \
			sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
		} \
	} while (0);

/* max len = 5 */
#define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
		guint32 sreg; \
		if (ins->inst_imm == 0) \
			sreg = sparc_g0; \
		else { \
			sparc_set (code, ins->inst_imm, sparc_o7); \
			sreg = sparc_o7; \
		} \
		if (!sparc_is_imm13 (ins->inst_offset)) { \
			sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
			sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
		} \
		else \
			sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
	} while (0);

#define EMIT_STORE_MEMBASE_REG(ins,op) do { \
		if (!sparc_is_imm13 (ins->inst_offset)) { \
			sparc_set (code, ins->inst_offset, sparc_o7); \
			sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
		} \
		else \
			sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
	} while (0);

#define EMIT_CALL() do { \
	if (v64) { \
		sparc_set_template (code, sparc_o7); \
		sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
	} \
	else { \
		sparc_call_simple (code, 0); \
	} \
	sparc_nop (code); \
} while (0);

/*
 * A call template is 7 instructions long, so we want to avoid it if possible.
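 * (On v64, EMIT_CALL above expands to the sparc_set_template/jmpl pair plus a
 * delay-slot nop; on 32-bit SPARC it is a single call instruction plus a nop.)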
 */
static guint32*
emit_call (MonoCompile *cfg, guint32 *code, guint32 patch_type, gconstpointer data)
{
	ERROR_DECL (error);
	gpointer target;

	/* FIXME: This only works if the target method is already compiled */
	if (0 && v64 && !cfg->compile_aot) {
		MonoJumpInfo patch_info;

		patch_info.type = patch_type;
		patch_info.data.target = data;

		target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, &patch_info, FALSE, error);
		mono_error_raise_exception_deprecated (error); /* FIXME: don't raise here */

		/* FIXME: Add optimizations if the target is close enough */
		sparc_set (code, target, sparc_o7);
		sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
		sparc_nop (code);
	}
	else {
		mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, patch_type, data);
		EMIT_CALL ();
	}

	return code;
}

void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}

void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n, *last_ins = NULL;
	ins = bb->code;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_MUL_IMM:
			/* remove unnecessary multiplication with 1 */
			if (ins->inst_imm == 1) {
				if (ins->dreg != ins->sreg1) {
					ins->opcode = OP_MOVE;
				} else {
					MONO_DELETE_INS (bb, ins);
					continue;
				}
			}
			break;
#ifndef SPARCV9
		case OP_LOAD_MEMBASE:
		case OP_LOADI4_MEMBASE:
			/*
			 * OP_STORE_MEMBASE_REG reg, offset(basereg)
			 * OP_LOAD_MEMBASE offset(basereg), reg
			 */
			if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
					|| last_ins->opcode == OP_STORE_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				if (ins->dreg == last_ins->sreg1) {
					MONO_DELETE_INS (bb, ins);
					continue;
				} else {
					//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
					ins->opcode = OP_MOVE;
					ins->sreg1 = last_ins->sreg1;
				}

			/*
			 * Note: reg1 must be different from the basereg in the second load
			 * OP_LOAD_MEMBASE offset(basereg), reg1
			 * OP_LOAD_MEMBASE offset(basereg), reg2
			 * -->
			 * OP_LOAD_MEMBASE offset(basereg), reg1
			 * OP_MOVE reg1, reg2
			 */
			} if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
					|| last_ins->opcode == OP_LOAD_MEMBASE) &&
			      ins->inst_basereg != last_ins->dreg &&
			      ins->inst_basereg == last_ins->inst_basereg &&
			      ins->inst_offset == last_ins->inst_offset) {
				if (ins->dreg == last_ins->dreg) {
					MONO_DELETE_INS (bb, ins);
					continue;
				} else {
					ins->opcode = OP_MOVE;
					ins->sreg1 = last_ins->dreg;
				}

				//g_assert_not_reached ();

#if 0
			/*
			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
			 * OP_LOAD_MEMBASE offset(basereg), reg
			 * -->
			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
			 * OP_ICONST reg, imm
			 */
			} else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
					|| last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
				   ins->inst_basereg == last_ins->inst_destbasereg &&
				   ins->inst_offset == last_ins->inst_offset) {
				//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
				ins->opcode = OP_ICONST;
				ins->inst_c0 = last_ins->inst_imm;
				g_assert_not_reached (); // check this rule
#endif
			}
			break;
#endif
		case OP_LOADI1_MEMBASE:
			if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				if (ins->dreg == last_ins->sreg1) {
					MONO_DELETE_INS (bb, ins);
					continue;
				} else {
					//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
					ins->opcode = OP_MOVE;
					ins->sreg1 = last_ins->sreg1;
				}
			}
			break;
		case OP_LOADI2_MEMBASE:
			if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				if (ins->dreg == last_ins->sreg1) {
					MONO_DELETE_INS (bb, ins);
					continue;
				} else {
					//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
					ins->opcode = OP_MOVE;
					ins->sreg1 = last_ins->sreg1;
				}
			}
			break;
		case OP_STOREI4_MEMBASE_IMM:
			/* Convert pairs of 0 stores to a dword 0 store */
			/* Used when initializing temporaries */
			/* We know sparc_fp is dword aligned */
			if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) &&
				(ins->inst_destbasereg == last_ins->inst_destbasereg) &&
				(ins->inst_destbasereg == sparc_fp) &&
				(ins->inst_offset < 0) &&
				((ins->inst_offset % 8) == 0) &&
				((ins->inst_offset == last_ins->inst_offset - 4)) &&
				(ins->inst_imm == 0) &&
				(last_ins->inst_imm == 0)) {
				if (mono_hwcap_sparc_is_v9) {
					last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
					last_ins->inst_offset = ins->inst_offset;
					MONO_DELETE_INS (bb, ins);
					continue;
				}
			}
			break;
		case OP_IBEQ:
		case OP_IBNE_UN:
		case OP_IBLT:
		case OP_IBGT:
		case OP_IBGE:
		case OP_IBLE:
		case OP_COND_EXC_EQ:
		case OP_COND_EXC_GE:
		case OP_COND_EXC_GT:
		case OP_COND_EXC_LE:
		case OP_COND_EXC_LT:
		case OP_COND_EXC_NE_UN:
			/*
			 * Convert compare with zero+branch to BRcc
			 */
			/*
			 * This only works in 64 bit mode, since it examines all 64
			 * bits of the register.
			 * Only do this if the method is small since BPr only has a 16bit
			 * displacement.
			 */
			if (v64 && (cfg->header->code_size < 10000) && last_ins &&
				(last_ins->opcode == OP_COMPARE_IMM) &&
				(last_ins->inst_imm == 0)) {
				switch (ins->opcode) {
				case OP_IBEQ:
					ins->opcode = OP_SPARC_BRZ;
					break;
				case OP_IBNE_UN:
					ins->opcode = OP_SPARC_BRNZ;
					break;
				case OP_IBLT:
					ins->opcode = OP_SPARC_BRLZ;
					break;
				case OP_IBGT:
					ins->opcode = OP_SPARC_BRGZ;
					break;
				case OP_IBGE:
					ins->opcode = OP_SPARC_BRGEZ;
					break;
				case OP_IBLE:
					ins->opcode = OP_SPARC_BRLEZ;
					break;
				case OP_COND_EXC_EQ:
					ins->opcode = OP_SPARC_COND_EXC_EQZ;
					break;
				case OP_COND_EXC_GE:
					ins->opcode = OP_SPARC_COND_EXC_GEZ;
					break;
				case OP_COND_EXC_GT:
					ins->opcode = OP_SPARC_COND_EXC_GTZ;
					break;
				case OP_COND_EXC_LE:
					ins->opcode = OP_SPARC_COND_EXC_LEZ;
					break;
				case OP_COND_EXC_LT:
					ins->opcode = OP_SPARC_COND_EXC_LTZ;
					break;
				case OP_COND_EXC_NE_UN:
					ins->opcode = OP_SPARC_COND_EXC_NEZ;
					break;
				default:
					g_assert_not_reached ();
				}
				ins->sreg1 = last_ins->sreg1;
				*last_ins = *ins;
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			break;
		case OP_MOVE:
			/*
			 * OP_MOVE reg, reg
			 */
			if (ins->dreg == ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			/*
			 * OP_MOVE sreg, dreg
			 * OP_MOVE dreg, sreg
			 */
			if (last_ins && last_ins->opcode == OP_MOVE &&
			    ins->sreg1 == last_ins->dreg &&
			    ins->dreg == last_ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			break;
		}
		last_ins = ins;
		ins = ins->next;
	}
	bb->last_ins = last_ins;
}

void
mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins)
{
	switch (ins->opcode) {
	case OP_LNEG:
		MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, MONO_LVREG_LS (ins->dreg), 0, MONO_LVREG_LS (ins->sreg1));
		MONO_EMIT_NEW_BIALU (cfg, OP_SBB, MONO_LVREG_MS (ins->dreg), 0, MONO_LVREG_MS (ins->sreg1));
		NULLIFY_INS (ins);
		break;
	default:
		break;
	}
}

void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
}

/* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */

static void
sparc_patch (guint32 *code, const gpointer target)
{
	guint32 *c = code;
	guint32 ins = *code;
	guint32 op = ins >> 30;
	guint32 op2 = (ins >> 22) & 0x7;
	guint32 rd = (ins >> 25) & 0x1f;
	guint8* target8 = (guint8*)target;
	gint64 disp = (target8 - (guint8*)code) >> 2;
	int reg;

//	g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);

	if ((op == 0) && (op2 == 2)) {
		if (!sparc_is_imm22 (disp))
			NOT_IMPLEMENTED;

		/* Bicc */
		*code = ((ins >> 22) << 22) | (disp & 0x3fffff);
	}
	else if ((op == 0) && (op2 == 1)) {
		if (!sparc_is_imm19 (disp))
			NOT_IMPLEMENTED;

		/* BPcc */
		*code = ((ins >> 19) << 19) | (disp & 0x7ffff);
	}
	else if ((op == 0) && (op2 == 3)) {
		if (!sparc_is_imm16 (disp))
			NOT_IMPLEMENTED;

		/* BPr */
		*code &= ~(0x180000 | 0x3fff);
		*code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff);
	}
	else if ((op == 0) && (op2 == 6)) {
		if (!sparc_is_imm22 (disp))
			NOT_IMPLEMENTED;

		/* FBicc */
		*code = ((ins >> 22) << 22) | (disp & 0x3fffff);
	}
	else if ((op == 0) && (op2 == 4)) {
		guint32 ins2 = code [1];

		if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) {
			/* sethi followed by or */
			guint32 *p = code;
			sparc_set (p, target8, rd);
			while (p <= (code + 1))
				sparc_nop (p);
		}
		else if (ins2 == 0x01000000) {
			/* sethi followed by nop */
			guint32 *p = code;
			sparc_set (p, target8, rd);
			while (p <= (code + 1))
				sparc_nop (p);
		}
		else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) {
			/* sethi followed by load/store */
#ifndef SPARCV9
			guint32 t = (guint32)target8;
			*code &= ~(0x3fffff);
			*code |= (t >> 10);
			*(code + 1) &= ~(0x3ff);
			*(code + 1) |= (t & 0x3ff);
#endif
		}
		else if (v64 &&
				(sparc_inst_rd (ins) == sparc_g1) &&
				(sparc_inst_op (c [1]) == 0) && (sparc_inst_op2 (c [1]) == 4) &&
				(sparc_inst_op (c [2]) == 2) && (sparc_inst_op3 (c [2]) == 2) &&
				(sparc_inst_op (c [3]) == 2) && (sparc_inst_op3 (c [3]) == 2)) {
			/* sparc_set */
			guint32 *p = c;
			reg = sparc_inst_rd (c [1]);
			sparc_set (p, target8, reg);
			while (p < (c + 6))
				sparc_nop (p);
		}
		else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) &&
				(sparc_inst_imm (ins2))) {
			/* sethi followed by jmpl */
#ifndef SPARCV9
			guint32 t = (guint32)target8;
			*code &= ~(0x3fffff);
			*code |= (t >> 10);
			*(code + 1) &= ~(0x3ff);
			*(code + 1) |= (t & 0x3ff);
#endif
		}
		else
			NOT_IMPLEMENTED;
	}
	else if (op == 01) {
		gint64 disp = (target8 - (guint8*)code) >> 2;

		if (!sparc_is_imm30 (disp))
			NOT_IMPLEMENTED;
		sparc_call_simple (code, target8 - (guint8*)code);
	}
	else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) {
		/* mov imm, reg */
		g_assert (sparc_is_imm13 (target8));
		*code &= ~(0x1fff);
		*code |= (guint32)target8;
	}
	else if ((sparc_inst_op (ins) == 2) && (sparc_inst_op3 (ins) == 0x7)) {
		/* sparc_set case 5. */
		guint32 *p = c;

		g_assert (v64);
		reg = sparc_inst_rd (c [3]);
		sparc_set (p, target, reg);
		while (p < (c + 6))
			sparc_nop (p);
	}
	else
		NOT_IMPLEMENTED;

//	g_print ("patched with 0x%08x\n", ins);
}

/*
 * mono_sparc_emit_save_lmf:
 *
 * Emit the code necessary to push a new entry onto the lmf stack. Used by
 * trampolines as well.
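 * (On entry %o0 must hold the lmf_addr pointer; %o7 is used as a scratch
 * register.)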
 */
guint32*
mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset)
{
	/* Save lmf_addr */
	sparc_sti_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
	/* Save previous_lmf */
	sparc_ldi (code, sparc_o0, sparc_g0, sparc_o7);
	sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
	/* Set new lmf */
	sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7);
	sparc_sti (code, sparc_o7, sparc_o0, sparc_g0);

	return code;
}

guint32*
mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset)
{
	/* Load previous_lmf */
	sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0);
	/* Load lmf_addr */
	sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1);
	/* *(lmf) = previous_lmf */
	sparc_sti (code, sparc_l0, sparc_l1, sparc_g0);
	return code;
}

static guint32*
emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code)
{
	/*
	 * Since register windows are saved to the current value of %sp, we need to
	 * set the sp field in the lmf before the call, not in the prolog.
	 */
	if (cfg->method->save_lmf) {
		gint32 lmf_offset = MONO_SPARC_STACK_BIAS - cfg->arch.lmf_offset;

		/* Save sp */
		sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
	}

	return code;
}

static guint32*
emit_vret_token (MonoInst *ins, guint32 *code)
{
	MonoCallInst *call = (MonoCallInst*)ins;
	guint32 size;

	/*
	 * The sparc ABI requires that calls to functions which return a structure
	 * contain an additional unimpl instruction which is checked by the callee.
	 */
	if (call->signature->pinvoke && !call->signature->marshalling_disabled && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
		if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
			size = mini_type_stack_size (call->signature->ret, NULL);
		else
			size = mono_class_native_size (call->signature->ret->data.klass, NULL);
		sparc_unimp (code, size & 0xfff);
	}

	return code;
}

static guint32*
emit_move_return_value (MonoInst *ins, guint32 *code)
{
	/* Move return value to the target register */
	/* FIXME: do more things in the local reg allocator */
	switch (ins->opcode) {
	case OP_VOIDCALL:
	case OP_VOIDCALL_REG:
	case OP_VOIDCALL_MEMBASE:
		break;
	case OP_CALL:
	case OP_CALL_REG:
	case OP_CALL_MEMBASE:
		g_assert (ins->dreg == sparc_o0);
		break;
	case OP_LCALL:
	case OP_LCALL_REG:
	case OP_LCALL_MEMBASE:
		/*
		 * ins->dreg is the least significant reg due to the lreg: LCALL rule
		 * in inssel-long32.brg.
		 */
#ifdef SPARCV9
		sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
#else
		g_assert (ins->dreg == sparc_o1);
#endif
		break;
	case OP_FCALL:
	case OP_FCALL_REG:
	case OP_FCALL_MEMBASE:
#ifdef SPARCV9
		if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
			sparc_fmovs (code, sparc_f0, ins->dreg);
			sparc_fstod (code, ins->dreg, ins->dreg);
		}
		else
			sparc_fmovd (code, sparc_f0, ins->dreg);
#else
		sparc_fmovs (code, sparc_f0, ins->dreg);
		if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
			sparc_fstod (code, ins->dreg, ins->dreg);
		else
			sparc_fmovs (code, sparc_f1, ins->dreg + 1);
#endif
		break;
	case OP_VCALL:
	case OP_VCALL_REG:
	case OP_VCALL_MEMBASE:
	case OP_VCALL2:
	case OP_VCALL2_REG:
	case OP_VCALL2_MEMBASE:
		break;
	default:
		NOT_IMPLEMENTED;
	}

	return code;
}

/*
 * emit_load_volatile_arguments:
 *
 * Load volatile arguments from the stack to the original input registers.
 * Required before a tailcall.
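 * (This mirrors, in reverse, the argument-saving code emitted in the prolog;
 * see the note in the loop below.)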
 */
static guint32*
emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code)
{
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig;
	MonoInst *inst;
	CallInfo *cinfo;
	guint32 i, ireg;

	/* FIXME: Generate intermediate code instead */

	sig = mono_method_signature_internal (method);

	cinfo = get_call_info (cfg, sig, FALSE);

	/* This is the opposite of the code in emit_prolog */

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		gint32 stack_offset;
		MonoType *arg_type;

		inst = cfg->args [i];

		if (sig->hasthis && (i == 0))
			arg_type = mono_get_object_type ();
		else
			arg_type = sig->params [i - sig->hasthis];

		stack_offset = ainfo->offset + ARGS_OFFSET;
		ireg = sparc_i0 + ainfo->reg;

		if (ainfo->storage == ArgInSplitRegStack) {
			g_assert (inst->opcode == OP_REGOFFSET);

			if (!sparc_is_imm13 (stack_offset))
				NOT_IMPLEMENTED;
			sparc_st_imm (code, inst->inst_basereg, stack_offset, sparc_i5);
		}

		if (!v64 && !m_type_is_byref (arg_type) && (arg_type->type == MONO_TYPE_R8)) {
			if (ainfo->storage == ArgInIRegPair) {
				if (!sparc_is_imm13 (inst->inst_offset + 4))
					NOT_IMPLEMENTED;
				sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
				sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
			}
			else if (ainfo->storage == ArgInSplitRegStack) {
				if (stack_offset != inst->inst_offset) {
					sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5);
					sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
					sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
				}
			}
			else if (ainfo->storage == ArgOnStackPair) {
				if (stack_offset != inst->inst_offset) {
					/* stack_offset is not dword aligned, so we need to make a copy */
					sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7);
					sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset);

					sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
					sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
				}
			}
			else
				g_assert_not_reached ();
		}
		else if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
			/* Argument in register, but need to be saved to stack */
			if (!sparc_is_imm13 (stack_offset))
				NOT_IMPLEMENTED;
			if ((stack_offset - ARGS_OFFSET) & 0x1)
				/* FIXME: Is this ldsb or ldub ? */
				sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg);
			else if ((stack_offset - ARGS_OFFSET) & 0x2)
				sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg);
			else if ((stack_offset - ARGS_OFFSET) & 0x4)
				sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
			else {
				if (v64)
					sparc_ldx_imm (code, inst->inst_basereg, stack_offset, ireg);
				else
					sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
			}
		}
		else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
			/* Argument in regpair, but need to be saved to stack */
			if (!sparc_is_imm13 (inst->inst_offset + 4))
				NOT_IMPLEMENTED;
			sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
			sparc_st_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
		}
		else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
			NOT_IMPLEMENTED;
		}
		else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
			NOT_IMPLEMENTED;
		}

		if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
			if (inst->opcode == OP_REGVAR)
				/* FIXME: Load the argument into memory */
				NOT_IMPLEMENTED;
	}

	g_free (cinfo);

	return code;
}

/*
 * mono_sparc_is_virtual_call:
 *
 * Determine whether the instruction at CODE is a virtual call.
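 * (Virtual call sites are marked by the special 'or %g0, 0xca, %g0' nop placed
 * in the delay slot; see mono_arch_output_basic_block.)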
 */
gboolean
mono_sparc_is_virtual_call (guint32 *code)
{
	guint32 buf[1];
	guint32 *p;

	p = buf;

	if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) {
		/*
		 * Register indirect call. If it is a virtual call, then the
		 * instruction in the delay slot is a special kind of nop.
		 */

		/* Construct special nop */
		sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0);
		p --;

		if (code [1] == p [0])
			return TRUE;
	}

	return FALSE;
}

#define CMP_SIZE 3
#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 2
#define JUMP_IMM_SIZE 5
#define ENABLE_WRONG_METHOD_CHECK 0

gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp)
{
	int i;
	int size = 0;
	guint32 *code, *start;

	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
			} else {
				if (fail_tramp)
					item->chunk_size += 16;
				item->chunk_size += JUMP_IMM_SIZE;
#if ENABLE_WRONG_METHOD_CHECK
				item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
#endif
			}
		} else {
			item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		}
		size += item->chunk_size;
	}
	if (fail_tramp) {
		code = (guint32*)mini_alloc_generic_virtual_trampoline (vtable, size * 4);
	} else {
		MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass);
		code = mono_mem_manager_code_reserve (mem_manager, size * 4);
	}
	start = code;
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		item->code_target = (guint8*)code;
		if (item->is_equals) {
			gboolean fail_case = !item->check_target_idx && fail_tramp;

			if (item->check_target_idx || fail_case) {
				if (!item->compare_done || fail_case) {
					sparc_set (code, (guint32)item->key, sparc_g5);
					sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
				}
				item->jmp_code = (guint8*)code;
				sparc_branch (code, 0, sparc_bne, 0);
				sparc_nop (code);
				if (item->has_target_code) {
					sparc_set (code, item->value.target_code, sparc_g5);
				} else {
					sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
					sparc_ld (code, sparc_g5, 0, sparc_g5);
				}
				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
				sparc_nop (code);

				if (fail_case) {
					sparc_patch (item->jmp_code, code);
					sparc_set (code, fail_tramp, sparc_g5);
					sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
					sparc_nop (code);
					item->jmp_code = NULL;
				}
			} else {
				/* enable the commented code to assert on wrong method */
#if ENABLE_WRONG_METHOD_CHECK
				g_assert_not_reached ();
#endif
				sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
				sparc_ld (code, sparc_g5, 0, sparc_g5);
				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
				sparc_nop (code);
#if ENABLE_WRONG_METHOD_CHECK
				g_assert_not_reached ();
#endif
			}
		} else {
			sparc_set (code, (guint32)item->key, sparc_g5);
			sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
			item->jmp_code = (guint8*)code;
			sparc_branch (code, 0, sparc_beu, 0);
			sparc_nop (code);
		}
	}
	/* patch the branches to get to the target items */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx) {
				sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);
			}
		}
	}

	mono_arch_flush_icache ((guint8*)start, (code - start));
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL));
	UnlockedAdd (&mono_stats.imt_trampolines_size, (code - start));

	g_assert (code - start <= size);

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), NULL);

	return start;
}

MonoMethod*
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
#ifdef SPARCV9
	g_assert_not_reached ();
#endif

	return (MonoMethod*)regs [sparc_g1];
}

gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
	mono_sparc_flushw ();

	return (gpointer)regs [sparc_o0];
}

/*
 * Some conventions used in the following code.
 * The only scratch registers we have are o7 and g1.  We try to
 * stick to o7 when we can, and use g1 when necessary.
 */

void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins;
	MonoCallInst *call;
	guint offset;
	guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
	MonoInst *last_ins = NULL;
	int max_len, cpos;
	const char *spec;

	if (cfg->verbose_level > 2)
		g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);

	cpos = bb->max_offset;

	MONO_BB_FOR_EACH_INS (bb, ins) {
		guint8* code_start;

		offset = (guint8*)code - cfg->native_code;

		spec = ins_get_spec (ins->opcode);

		max_len = ins_get_size (ins->opcode);

		code = realloc_code (cfg, max_len);
		code_start = (guint8*)code;
//	if (ins->cil_code)
//		g_print ("cil code\n");

		mono_debug_record_line_number (cfg, ins, offset);

		switch (ins->opcode) {
		case OP_STOREI1_MEMBASE_IMM:
			EMIT_STORE_MEMBASE_IMM (ins, stb);
			break;
		case OP_STOREI2_MEMBASE_IMM:
			EMIT_STORE_MEMBASE_IMM (ins, sth);
			break;
		case OP_STORE_MEMBASE_IMM:
			EMIT_STORE_MEMBASE_IMM (ins, sti);
			break;
		case OP_STOREI4_MEMBASE_IMM:
			EMIT_STORE_MEMBASE_IMM (ins, st);
			break;
		case OP_STOREI8_MEMBASE_IMM:
#ifdef SPARCV9
			EMIT_STORE_MEMBASE_IMM (ins, stx);
#else
			/* Only generated by peephole opts */
			g_assert ((ins->inst_offset % 8) == 0);
			g_assert (ins->inst_imm == 0);
			EMIT_STORE_MEMBASE_IMM (ins, stx);
#endif
			break;
		case OP_STOREI1_MEMBASE_REG:
			EMIT_STORE_MEMBASE_REG (ins, stb);
			break;
		case OP_STOREI2_MEMBASE_REG:
			EMIT_STORE_MEMBASE_REG (ins, sth);
			break;
		case OP_STOREI4_MEMBASE_REG:
			EMIT_STORE_MEMBASE_REG (ins, st);
			break;
		case OP_STOREI8_MEMBASE_REG:
#ifdef SPARCV9
			EMIT_STORE_MEMBASE_REG (ins, stx);
#else
			/* Only used by OP_MEMSET */
			EMIT_STORE_MEMBASE_REG (ins, std);
#endif
			break;
		case OP_STORE_MEMBASE_REG:
			EMIT_STORE_MEMBASE_REG (ins, sti);
			break;
		case OP_LOADU4_MEM:
			sparc_set (code, ins->inst_c0, ins->dreg);
			sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
			break;
		case OP_LOADI4_MEMBASE:
#ifdef SPARCV9
			EMIT_LOAD_MEMBASE (ins, ldsw);
#else
			EMIT_LOAD_MEMBASE (ins, ld);
#endif
			break;
		case OP_LOADU4_MEMBASE:
			EMIT_LOAD_MEMBASE (ins, ld);
			break;
		case OP_LOADU1_MEMBASE:
			EMIT_LOAD_MEMBASE (ins, ldub);
			break;
		case OP_LOADI1_MEMBASE:
			EMIT_LOAD_MEMBASE (ins, ldsb);
			break;
		case OP_LOADU2_MEMBASE:
			EMIT_LOAD_MEMBASE (ins, lduh);
			break;
		case OP_LOADI2_MEMBASE:
			EMIT_LOAD_MEMBASE (ins, ldsh);
			break;
		case OP_LOAD_MEMBASE:
#ifdef SPARCV9
			EMIT_LOAD_MEMBASE (ins, ldx);
#else
			EMIT_LOAD_MEMBASE (ins, ld);
#endif
			break;
#ifdef SPARCV9
		case OP_LOADI8_MEMBASE:
			EMIT_LOAD_MEMBASE (ins, ldx);
			break;
#endif
		case OP_ICONV_TO_I1:
			sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
			sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
			break;
		case OP_ICONV_TO_I2:
			sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
			sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
			break;
		case OP_ICONV_TO_U1:
			sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
			break;
		case OP_ICONV_TO_U2:
			sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
			sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
			break;
		case OP_LCONV_TO_OVF_U4:
		case OP_ICONV_TO_OVF_U4:
			/* Only used on V9 */
			sparc_cmp_imm (code, ins->sreg1, 0);
			mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
					MONO_PATCH_INFO_EXC, "OverflowException");
			sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0);
			/* Delay slot */
			sparc_set (code, 1, sparc_o7);
			sparc_sllx_imm (code, sparc_o7, 32, sparc_o7);
			sparc_cmp (code, ins->sreg1, sparc_o7);
			mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
					MONO_PATCH_INFO_EXC, "OverflowException");
			sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0);
			sparc_nop (code);
			sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
			break;
		case OP_LCONV_TO_OVF_I4_UN:
		case OP_ICONV_TO_OVF_I4_UN:
			/* Only used on V9 */
			NOT_IMPLEMENTED;
			break;
		case OP_COMPARE:
		case OP_LCOMPARE:
		case OP_ICOMPARE:
			sparc_cmp (code, ins->sreg1, ins->sreg2);
			break;
		case OP_COMPARE_IMM:
		case OP_ICOMPARE_IMM:
			if (sparc_is_imm13 (ins->inst_imm))
				sparc_cmp_imm (code, ins->sreg1, ins->inst_imm);
			else {
				sparc_set (code, ins->inst_imm, sparc_o7);
				sparc_cmp (code, ins->sreg1, sparc_o7);
			}
			break;
		case OP_BREAK:
			/*
			 * gdb does not like encountering 'ta 1' in the debugged code. So
			 * instead of emitting a trap, we emit a call to a C function and place a
			 * breakpoint there.
			 */
			//sparc_ta (code, 1);
			mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
			EMIT_CALL();
			break;
		case OP_ADDCC:
		case OP_IADDCC:
			sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_IADD:
			sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_ADDCC_IMM:
		case OP_ADD_IMM:
		case OP_IADD_IMM:
			/* according to inssel-long32.brg, this should set cc */
			EMIT_ALU_IMM (ins, add, TRUE);
			break;
		case OP_ADC:
		case OP_IADC:
			/* according to inssel-long32.brg, this should set cc */
			sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_ADC_IMM:
		case OP_IADC_IMM:
			EMIT_ALU_IMM (ins, addx, TRUE);
			break;
		case OP_SUBCC:
		case OP_ISUBCC:
			sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_ISUB:
			sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_SUBCC_IMM:
		case OP_SUB_IMM:
		case OP_ISUB_IMM:
			/* according to inssel-long32.brg, this should set cc */
			EMIT_ALU_IMM (ins, sub, TRUE);
			break;
		case OP_SBB:
		case OP_ISBB:
			/* according to inssel-long32.brg, this should set cc */
			sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_SBB_IMM:
		case OP_ISBB_IMM:
			EMIT_ALU_IMM (ins, subx, TRUE);
			break;
		case OP_IAND:
			sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_AND_IMM:
		case OP_IAND_IMM:
			EMIT_ALU_IMM (ins, and, FALSE);
			break;
		case OP_IDIV:
			/* Sign extend sreg1 into %y */
			sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
			sparc_wry (code, sparc_o7, sparc_g0);
			sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
			EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
			break;
		case OP_IDIV_UN:
			sparc_wry (code, sparc_g0, sparc_g0);
			sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_DIV_IMM:
		case OP_IDIV_IMM: {
			int i, imm;

			/* Transform division into a shift */
			for (i = 1; i < 30; ++i) {
				imm = (1 << i);
				if (ins->inst_imm == imm)
					break;
			}
			if (i < 30) {
				if (i == 1) {
					/* gcc 2.95.3 */
					sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
					sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
					sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
				}
				else {
					/* http://compilers.iecc.com/comparch/article/93-04-079 */
					sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
					sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
					sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
					sparc_sra_imm (code, ins->dreg, i, ins->dreg);
				}
			}
			else {
				/* Sign extend sreg1 into %y */
				sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
				sparc_wry (code, sparc_o7, sparc_g0);
				EMIT_ALU_IMM (ins, sdiv, TRUE);
				EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
			}
			break;
		}
		case OP_IDIV_UN_IMM:
			sparc_wry (code, sparc_g0, sparc_g0);
			EMIT_ALU_IMM (ins, udiv, FALSE);
			break;
		case OP_IREM:
			/* Sign extend sreg1 into %y */
			sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
			sparc_wry (code, sparc_o7, sparc_g0);
			sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7);
			EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
			sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
			sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
			break;
		case OP_IREM_UN:
			sparc_wry (code, sparc_g0, sparc_g0);
			sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
			sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
			sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
			break;
		case OP_REM_IMM:
		case OP_IREM_IMM:
			/* Sign extend sreg1 into %y */
			sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
			sparc_wry (code, sparc_o7, sparc_g0);
			if (!sparc_is_imm13 (ins->inst_imm)) {
				sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
				sparc_sdiv (code, TRUE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
				EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
				sparc_smul (code, FALSE, sparc_o7, GP_SCRATCH_REG, sparc_o7);
			}
			else {
				sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7);
				EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
				sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7);
			}
			sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
			break;
		case OP_IREM_UN_IMM:
			sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
			sparc_wry (code, sparc_g0, sparc_g0);
			sparc_udiv (code, FALSE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
			sparc_umul (code, FALSE, GP_SCRATCH_REG, sparc_o7, sparc_o7);
			sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
			break;
		case OP_IOR:
			sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_OR_IMM:
		case OP_IOR_IMM:
			EMIT_ALU_IMM (ins, or, FALSE);
			break;
		case OP_IXOR:
			sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_XOR_IMM:
		case OP_IXOR_IMM:
			EMIT_ALU_IMM (ins, xor, FALSE);
			break;
		case OP_ISHL:
			sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_SHL_IMM:
		case OP_ISHL_IMM:
			if (ins->inst_imm < (1 << 5))
				sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
			else {
				sparc_set (code, ins->inst_imm, sparc_o7);
				sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
			}
			break;
		case OP_ISHR:
			sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_ISHR_IMM:
		case OP_SHR_IMM:
			if (ins->inst_imm < (1 << 5))
				sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
			else {
				sparc_set (code, ins->inst_imm, sparc_o7);
				sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg);
			}
			break;
		case OP_SHR_UN_IMM:
		case OP_ISHR_UN_IMM:
			if (ins->inst_imm < (1 << 5))
				sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
			else {
				sparc_set (code, ins->inst_imm, sparc_o7);
				sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
			}
			break;
		case OP_ISHR_UN:
			sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_LSHL:
			sparc_sllx (code, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_LSHL_IMM:
			if (ins->inst_imm < (1 << 6))
				sparc_sllx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
			else {
				sparc_set (code, ins->inst_imm, sparc_o7);
				sparc_sllx (code, ins->sreg1, sparc_o7, ins->dreg);
			}
			break;
		case OP_LSHR:
			sparc_srax (code, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_LSHR_IMM:
			if (ins->inst_imm < (1 << 6))
				sparc_srax_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
			else {
				sparc_set (code, ins->inst_imm, sparc_o7);
				sparc_srax (code, ins->sreg1, sparc_o7, ins->dreg);
			}
			break;
		case OP_LSHR_UN:
			sparc_srlx (code, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_LSHR_UN_IMM:
			if (ins->inst_imm < (1 << 6))
				sparc_srlx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
			else {
				sparc_set (code, ins->inst_imm, sparc_o7);
				sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg);
			}
			break;
		case OP_INOT:
			/* can't use sparc_not */
			sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
			break;
		case OP_INEG:
			/* can't use sparc_neg */
			sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
			break;
		case OP_IMUL:
			sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
			break;
		case OP_IMUL_IMM:
		case OP_MUL_IMM: {
			int i, imm;

			if ((ins->inst_imm == 1) && (ins->sreg1 == ins->dreg))
				break;

			/* Transform multiplication into a shift */
			for (i = 0; i < 30; ++i) {
				imm = (1 << i);
				if (ins->inst_imm == imm)
					break;
			}
			if (i < 30)
				sparc_sll_imm (code, ins->sreg1, i, ins->dreg);
			else
				EMIT_ALU_IMM (ins, smul, FALSE);
			break;
		}
		case OP_IMUL_OVF:
			sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
			sparc_rdy (code, sparc_g1);
			sparc_sra_imm (code, ins->dreg, 31, sparc_o7);
			sparc_cmp (code, sparc_g1, sparc_o7);
			EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
			break;
		case OP_IMUL_OVF_UN:
			sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
			sparc_rdy (code, sparc_o7);
			sparc_cmp (code, sparc_o7, sparc_g0);
			EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
			break;
		case OP_ICONST:
			sparc_set (code, ins->inst_c0, ins->dreg);
			break;
		case OP_I8CONST:
			sparc_set (code, ins->inst_l, ins->dreg);
			break;
		case OP_AOTCONST:
			mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
			sparc_set_template (code, ins->dreg);
			break;
		case OP_JUMP_TABLE:
			mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
			sparc_set_template (code, ins->dreg);
			break;
		case OP_ICONV_TO_I4:
		case OP_ICONV_TO_U4:
		case OP_MOVE:
			if (ins->sreg1 != ins->dreg)
				sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
			break;
		case OP_FMOVE:
#ifdef SPARCV9
			if (ins->sreg1 != ins->dreg)
				sparc_fmovd (code, ins->sreg1, ins->dreg);
#else
			sparc_fmovs (code, ins->sreg1, ins->dreg);
			sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
#endif
			break;
		case OP_CHECK_THIS:
			/* ensure ins->sreg1 is not NULL */
			/* Might be misaligned in case of vtypes so use a byte load */
			sparc_ldsb_imm (code, ins->sreg1, 0, sparc_g0);
			break;
		case OP_ARGLIST:
			sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
			sparc_sti_imm (code, sparc_o7, ins->sreg1, 0);
			break;
		case OP_FCALL:
		case OP_LCALL:
		case OP_VCALL:
		case OP_VCALL2:
		case OP_VOIDCALL:
		case OP_CALL: {
			call = (MonoCallInst*)ins;
			g_assert (!call->virtual);
			code = emit_save_sp_to_lmf (cfg, code);
			const MonoJumpInfoTarget patch = mono_call_to_patch (call);
			code = emit_call (cfg, code, patch.type, patch.target);

			code = emit_vret_token (ins, code);
			code = emit_move_return_value (ins, code);
			break;
		}
		case OP_FCALL_REG:
		case OP_LCALL_REG:
		case OP_VCALL_REG:
		case OP_VCALL2_REG:
		case OP_VOIDCALL_REG:
		case OP_CALL_REG:
			call = (MonoCallInst*)ins;
			code = emit_save_sp_to_lmf (cfg, code);
			sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
			/*
			 * We emit a special kind of nop in the delay slot to tell the
			 * trampoline code that this is a virtual call, thus an unbox
			 * trampoline might need to be called.
			 */
			if (call->virtual)
				sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
			else
				sparc_nop (code);

			code = emit_vret_token (ins, code);
			code = emit_move_return_value (ins, code);
			break;
		case OP_FCALL_MEMBASE:
		case OP_LCALL_MEMBASE:
		case OP_VCALL_MEMBASE:
		case OP_VCALL2_MEMBASE:
		case OP_VOIDCALL_MEMBASE:
		case OP_CALL_MEMBASE:
			call = (MonoCallInst*)ins;
			code = emit_save_sp_to_lmf (cfg, code);
			if (sparc_is_imm13 (ins->inst_offset)) {
				sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
			} else {
				sparc_set (code, ins->inst_offset, sparc_o7);
				sparc_ldi (code, ins->inst_basereg, sparc_o7, sparc_o7);
			}
			sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
			if (call->virtual)
				sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
			else
				sparc_nop (code);

			code = emit_vret_token (ins, code);
			code = emit_move_return_value (ins, code);
			break;
		case OP_SETFRET:
			if (mono_method_signature_internal (cfg->method)->ret->type == MONO_TYPE_R4)
				sparc_fdtos (code, ins->sreg1, sparc_f0);
			else {
#ifdef SPARCV9
				sparc_fmovd (code, ins->sreg1, ins->dreg);
#else
				/* FIXME: Why not use fmovd ? */
				sparc_fmovs (code, ins->sreg1, ins->dreg);
				sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
#endif
			}
			break;
		case OP_LOCALLOC: {
			guint32 size_reg;
			gint32 offset2;

#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
			/* Perform stack touching */
			NOT_IMPLEMENTED;
#endif

			/* Keep alignment */
			/* Add 4 to compensate for the rounding of localloc_offset */
			sparc_add_imm (code, FALSE, ins->sreg1, 4 + MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
			sparc_set (code, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1), sparc_o7);
			sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);

			if ((ins->flags & MONO_INST_INIT) && (ins->sreg1 == ins->dreg)) {
#ifdef SPARCV9
				size_reg = sparc_g4;
#else
				size_reg = sparc_g1;
#endif
				sparc_mov_reg_reg (code, ins->dreg, size_reg);
			}
			else
				size_reg = ins->sreg1;

			sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
			/* Keep %sp valid at all times */
			sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
			/* Round localloc_offset too so the result is at least 8 aligned */
			offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
			g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
			sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);

			if (ins->flags & MONO_INST_INIT) {
				guint32 *br [3];
				/* Initialize memory region */
				sparc_cmp_imm (code, size_reg, 0);
				br [0] = code;
				sparc_branch (code, 0, sparc_be, 0);
				/* delay slot */
				sparc_set (code, 0, sparc_o7);
				sparc_sub_imm (code, 0, size_reg, mono_hwcap_sparc_is_v9 ? 8 : 4, size_reg);
				/* start of loop */
				br [1] = code;
				if (mono_hwcap_sparc_is_v9)
					sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
				else
					sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
				sparc_cmp (code, sparc_o7, size_reg);
				br [2] = code;
				sparc_branch (code, 0, sparc_bl, 0);
				sparc_patch (br [2], br [1]);
				/* delay slot */
				sparc_add_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
				sparc_patch (br [0], code);
			}
			break;
		}
		case OP_LOCALLOC_IMM: {
			gint32 offset = ins->inst_imm;
			gint32 offset2;

#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
			/* Perform stack touching */
			NOT_IMPLEMENTED;
#endif

			/* To compensate for the rounding of localloc_offset */
			offset += sizeof (target_mgreg_t);
			offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
			if (sparc_is_imm13 (offset))
				sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
			else {
				sparc_set (code, offset, sparc_o7);
				sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
			}
			/* Round localloc_offset too so the result is at least 8 aligned */
			offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
			g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
			sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
			if ((ins->flags & MONO_INST_INIT) && (offset > 0)) {
				guint32 *br [2];
				int i;

				if (offset <= 16) {
					i = 0;
					while (i < offset) {
						if (mono_hwcap_sparc_is_v9) {
							sparc_stx_imm (code, sparc_g0, ins->dreg, i);
							i += 8;
						}
						else {
							sparc_st_imm (code, sparc_g0, ins->dreg, i);
							i += 4;
						}
					}
				}
				else {
					sparc_set (code, offset, sparc_o7);
					sparc_sub_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
					/* beginning of loop */
					br [0] = code;
					if (mono_hwcap_sparc_is_v9)
						sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
					else
						sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
					sparc_cmp_imm (code, sparc_o7, 0);
					br [1] = code;
					sparc_branch (code, 0, sparc_bne, 0);
					/* delay slot */
					sparc_sub_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
					sparc_patch (br [1], br [0]);
				}
			}
			break;
		}
		case OP_THROW:
			sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
			mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
					GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception));
			EMIT_CALL ();
			break;
		case OP_RETHROW:
			sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
			mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
					GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception));
			EMIT_CALL ();
			break;
		case OP_START_HANDLER: {
			/*
			 * The START_HANDLER instruction marks the beginning of a handler
			 * block. It is called using a call instruction, so %o7 contains
			 * the return address. Since the handler executes in the same stack
			 * frame as the method itself, we can't use save/restore to save
			 * the return address. Instead, we save it into a dedicated
			 * variable.
*/ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); if (!sparc_is_imm13 (spvar->inst_offset)) { sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG); sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG); } else sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset); break; } case OP_ENDFILTER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); if (!sparc_is_imm13 (spvar->inst_offset)) { sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG); sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7); } else sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7); sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0); /* Delay slot */ sparc_mov_reg_reg (code, ins->sreg1, sparc_o0); break; } case OP_ENDFINALLY: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); if (!sparc_is_imm13 (spvar->inst_offset)) { sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG); sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7); } else sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7); sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0); sparc_nop (code); break; } case OP_CALL_HANDLER: mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb); /* This is a jump inside the method, so call_simple works even on V9 */ sparc_call_simple (code, 0); sparc_nop (code); for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev) mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb); break; case OP_LABEL: ins->inst_c0 = (guint8*)code - cfg->native_code; break; case OP_RELAXED_NOP: case OP_NOP: case OP_DUMMY_USE: case OP_DUMMY_ICONST: case OP_DUMMY_I8CONST: case OP_DUMMY_R8CONST: case OP_DUMMY_R4CONST: case OP_NOT_REACHED: case OP_NOT_NULL: break; case OP_BR: //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins); if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins) break; if (ins->inst_target_bb->native_offset) { gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; g_assert (sparc_is_imm22 (disp)); sparc_branch (code, 1, sparc_ba, disp); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb); sparc_branch (code, 1, sparc_ba, 0); } sparc_nop (code); break; case OP_BR_REG: sparc_jmp (code, ins->sreg1, sparc_g0); sparc_nop (code); break; case OP_CEQ: case OP_CLT: case OP_CLT_UN: case OP_CGT: case OP_CGT_UN: if (v64 && (cfg->opt & MONO_OPT_CMOV)) { sparc_clr_reg (code, ins->dreg); sparc_movcc_imm (code, sparc_xcc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg); } else { sparc_clr_reg (code, ins->dreg); #ifdef SPARCV9 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), DEFAULT_ICC, 0, 2); #else sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2); #endif /* delay slot */ sparc_set (code, 1, ins->dreg); } break; case OP_ICEQ: case OP_ICLT: case OP_ICLT_UN: case OP_ICGT: case OP_ICGT_UN: if (v64 && (cfg->opt & MONO_OPT_CMOV)) { sparc_clr_reg (code, ins->dreg); sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg); } else { sparc_clr_reg (code, ins->dreg); sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), sparc_icc_short, 0, 2); /* delay slot */ sparc_set (code, 1, ins->dreg); } break; case OP_COND_EXC_EQ: case OP_COND_EXC_NE_UN: case OP_COND_EXC_LT: case OP_COND_EXC_LT_UN: case OP_COND_EXC_GT: case OP_COND_EXC_GT_UN: case OP_COND_EXC_GE: case OP_COND_EXC_GE_UN: case 
OP_COND_EXC_LE: case OP_COND_EXC_LE_UN: case OP_COND_EXC_OV: case OP_COND_EXC_NO: case OP_COND_EXC_C: case OP_COND_EXC_NC: case OP_COND_EXC_IEQ: case OP_COND_EXC_INE_UN: case OP_COND_EXC_ILT: case OP_COND_EXC_ILT_UN: case OP_COND_EXC_IGT: case OP_COND_EXC_IGT_UN: case OP_COND_EXC_IGE: case OP_COND_EXC_IGE_UN: case OP_COND_EXC_ILE: case OP_COND_EXC_ILE_UN: case OP_COND_EXC_IOV: case OP_COND_EXC_INO: case OP_COND_EXC_IC: case OP_COND_EXC_INC: #ifdef SPARCV9 NOT_IMPLEMENTED; #else EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1); #endif break; case OP_SPARC_COND_EXC_EQZ: EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1); break; case OP_SPARC_COND_EXC_GEZ: EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgez, ins->inst_p1); break; case OP_SPARC_COND_EXC_GTZ: EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgz, ins->inst_p1); break; case OP_SPARC_COND_EXC_LEZ: EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlez, ins->inst_p1); break; case OP_SPARC_COND_EXC_LTZ: EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlz, ins->inst_p1); break; case OP_SPARC_COND_EXC_NEZ: EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1); break; case OP_IBEQ: case OP_IBNE_UN: case OP_IBLT: case OP_IBLT_UN: case OP_IBGT: case OP_IBGT_UN: case OP_IBGE: case OP_IBGE_UN: case OP_IBLE: case OP_IBLE_UN: { if (mono_hwcap_sparc_is_v9) EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1); else EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1); break; } case OP_SPARC_BRZ: EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1); break; case OP_SPARC_BRLEZ: EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1); break; case OP_SPARC_BRLZ: EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1); break; case OP_SPARC_BRNZ: EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1); break; case OP_SPARC_BRGZ: EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1); break; case OP_SPARC_BRGEZ: EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1); break; /* floating point opcodes */ case OP_R8CONST: mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0); #ifdef SPARCV9 sparc_set_template (code, sparc_o7); #else sparc_sethi (code, 0, sparc_o7); #endif sparc_lddf_imm (code, sparc_o7, 0, ins->dreg); break; case OP_R4CONST: mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0); #ifdef SPARCV9 sparc_set_template (code, sparc_o7); #else sparc_sethi (code, 0, sparc_o7); #endif sparc_ldf_imm (code, sparc_o7, 0, FP_SCRATCH_REG); /* Extend to double */ sparc_fstod (code, FP_SCRATCH_REG, ins->dreg); break; case OP_STORER8_MEMBASE_REG: if (!sparc_is_imm13 (ins->inst_offset + 4)) { sparc_set (code, ins->inst_offset, sparc_o7); /* SPARCV9 handles misaligned fp loads/stores */ if (!v64 && (ins->inst_offset % 8)) { /* Misaligned */ sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7); sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0); sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4); } else sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); } else { if (!v64 && (ins->inst_offset % 8)) { /* Misaligned */ sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4); } else sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); } break; case OP_LOADR8_MEMBASE: EMIT_LOAD_MEMBASE (ins, lddf); break; case OP_STORER4_MEMBASE_REG: /* This requires a double->single conversion */ sparc_fdtos (code, ins->sreg1, FP_SCRATCH_REG); if (!sparc_is_imm13 (ins->inst_offset)) { sparc_set (code, ins->inst_offset, sparc_o7); sparc_stf (code, 
FP_SCRATCH_REG, ins->inst_destbasereg, sparc_o7); } else sparc_stf_imm (code, FP_SCRATCH_REG, ins->inst_destbasereg, ins->inst_offset); break; case OP_LOADR4_MEMBASE: { /* ldf needs a single precision register */ int dreg = ins->dreg; ins->dreg = FP_SCRATCH_REG; EMIT_LOAD_MEMBASE (ins, ldf); ins->dreg = dreg; /* Extend to double */ sparc_fstod (code, FP_SCRATCH_REG, ins->dreg); break; } case OP_ICONV_TO_R4: { MonoInst *spill = cfg->arch.float_spill_slot; gint32 reg = spill->inst_basereg; gint32 offset = spill->inst_offset; g_assert (spill->opcode == OP_REGOFFSET); #ifdef SPARCV9 if (!sparc_is_imm13 (offset)) { sparc_set (code, offset, sparc_o7); sparc_stx (code, ins->sreg1, reg, offset); sparc_lddf (code, reg, offset, FP_SCRATCH_REG); } else { sparc_stx_imm (code, ins->sreg1, reg, offset); sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG); } sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG); #else if (!sparc_is_imm13 (offset)) { sparc_set (code, offset, sparc_o7); sparc_st (code, ins->sreg1, reg, sparc_o7); sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG); } else { sparc_st_imm (code, ins->sreg1, reg, offset); sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG); } sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG); #endif sparc_fstod (code, FP_SCRATCH_REG, ins->dreg); break; } case OP_ICONV_TO_R8: { MonoInst *spill = cfg->arch.float_spill_slot; gint32 reg = spill->inst_basereg; gint32 offset = spill->inst_offset; g_assert (spill->opcode == OP_REGOFFSET); #ifdef SPARCV9 if (!sparc_is_imm13 (offset)) { sparc_set (code, offset, sparc_o7); sparc_stx (code, ins->sreg1, reg, sparc_o7); sparc_lddf (code, reg, sparc_o7, FP_SCRATCH_REG); } else { sparc_stx_imm (code, ins->sreg1, reg, offset); sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG); } sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg); #else if (!sparc_is_imm13 (offset)) { sparc_set (code, offset, sparc_o7); sparc_st (code, ins->sreg1, reg, sparc_o7); sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG); } else { sparc_st_imm (code, ins->sreg1, reg, offset); sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG); } sparc_fitod (code, FP_SCRATCH_REG, ins->dreg); #endif break; } case OP_FCONV_TO_I1: case OP_FCONV_TO_U1: case OP_FCONV_TO_I2: case OP_FCONV_TO_U2: case OP_FCONV_TO_I4: case OP_FCONV_TO_U4: { MonoInst *spill = cfg->arch.float_spill_slot; gint32 reg = spill->inst_basereg; gint32 offset = spill->inst_offset; g_assert (spill->opcode == OP_REGOFFSET); sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG); if (!sparc_is_imm13 (offset)) { sparc_set (code, offset, sparc_o7); sparc_stdf (code, FP_SCRATCH_REG, reg, sparc_o7); sparc_ld (code, reg, sparc_o7, ins->dreg); } else { sparc_stdf_imm (code, FP_SCRATCH_REG, reg, offset); sparc_ld_imm (code, reg, offset, ins->dreg); } switch (ins->opcode) { case OP_FCONV_TO_I1: case OP_FCONV_TO_U1: sparc_and_imm (code, 0, ins->dreg, 0xff, ins->dreg); break; case OP_FCONV_TO_I2: case OP_FCONV_TO_U2: sparc_set (code, 0xffff, sparc_o7); sparc_and (code, 0, ins->dreg, sparc_o7, ins->dreg); break; default: break; } break; } case OP_FCONV_TO_I8: case OP_FCONV_TO_U8: /* Emulated */ g_assert_not_reached (); break; case OP_FCONV_TO_R4: /* FIXME: Change precision ? 
*/ #ifdef SPARCV9 sparc_fmovd (code, ins->sreg1, ins->dreg); #else sparc_fmovs (code, ins->sreg1, ins->dreg); sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1); #endif break; case OP_LCONV_TO_R_UN: { /* Emulated */ g_assert_not_reached (); break; } case OP_LCONV_TO_OVF_I: case OP_LCONV_TO_OVF_I4_2: { guint32 *br [3], *label [1]; /* * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000 */ sparc_cmp_imm (code, ins->sreg1, 0); br [0] = code; sparc_branch (code, 1, sparc_bneg, 0); sparc_nop (code); /* positive */ /* ms word must be 0 */ sparc_cmp_imm (code, ins->sreg2, 0); br [1] = code; sparc_branch (code, 1, sparc_be, 0); sparc_nop (code); label [0] = code; EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException"); /* negative */ sparc_patch (br [0], code); /* ms word must 0xfffffff */ sparc_cmp_imm (code, ins->sreg2, -1); br [2] = code; sparc_branch (code, 1, sparc_bne, 0); sparc_nop (code); sparc_patch (br [2], label [0]); /* Ok */ sparc_patch (br [1], code); if (ins->sreg1 != ins->dreg) sparc_mov_reg_reg (code, ins->sreg1, ins->dreg); break; } case OP_FADD: sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_FSUB: sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_FMUL: sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_FDIV: sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg); break; case OP_FNEG: #ifdef SPARCV9 sparc_fnegd (code, ins->sreg1, ins->dreg); #else /* FIXME: why don't use fnegd ? */ sparc_fnegs (code, ins->sreg1, ins->dreg); #endif break; case OP_FREM: sparc_fdivd (code, ins->sreg1, ins->sreg2, FP_SCRATCH_REG); sparc_fmuld (code, ins->sreg2, FP_SCRATCH_REG, FP_SCRATCH_REG); sparc_fsubd (code, ins->sreg1, FP_SCRATCH_REG, ins->dreg); break; case OP_FCOMPARE: sparc_fcmpd (code, ins->sreg1, ins->sreg2); break; case OP_FCEQ: case OP_FCLT: case OP_FCLT_UN: case OP_FCGT: case OP_FCGT_UN: sparc_fcmpd (code, ins->sreg1, ins->sreg2); sparc_clr_reg (code, ins->dreg); switch (ins->opcode) { case OP_FCLT_UN: case OP_FCGT_UN: sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4); /* delay slot */ sparc_set (code, 1, ins->dreg); sparc_fbranch (code, 1, sparc_fbu, 2); /* delay slot */ sparc_set (code, 1, ins->dreg); break; default: sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2); /* delay slot */ sparc_set (code, 1, ins->dreg); } break; case OP_FBEQ: case OP_FBLT: case OP_FBGT: EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1); break; case OP_FBGE: { /* clt.un + brfalse */ guint32 *p = code; sparc_fbranch (code, 1, sparc_fbul, 0); /* delay slot */ sparc_nop (code); EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1); sparc_patch (p, (guint8*)code); break; } case OP_FBLE: { /* cgt.un + brfalse */ guint32 *p = code; sparc_fbranch (code, 1, sparc_fbug, 0); /* delay slot */ sparc_nop (code); EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1); sparc_patch (p, (guint8*)code); break; } case OP_FBNE_UN: EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1); EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1); break; case OP_FBLT_UN: EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1); EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1); break; case OP_FBGT_UN: EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1); EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1); break; case OP_FBGE_UN: EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1); EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1); break; case OP_FBLE_UN: EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1); EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1); break; case OP_CKFINITE: { MonoInst 
*spill = cfg->arch.float_spill_slot; gint32 reg = spill->inst_basereg; gint32 offset = spill->inst_offset; g_assert (spill->opcode == OP_REGOFFSET); if (!sparc_is_imm13 (offset)) { sparc_set (code, offset, sparc_o7); sparc_stdf (code, ins->sreg1, reg, sparc_o7); sparc_lduh (code, reg, sparc_o7, sparc_o7); } else { sparc_stdf_imm (code, ins->sreg1, reg, offset); sparc_lduh_imm (code, reg, offset, sparc_o7); } sparc_srl_imm (code, sparc_o7, 4, sparc_o7); sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7); sparc_cmp_imm (code, sparc_o7, 2047); EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "OverflowException"); #ifdef SPARCV9 sparc_fmovd (code, ins->sreg1, ins->dreg); #else sparc_fmovs (code, ins->sreg1, ins->dreg); sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1); #endif break; } case OP_MEMORY_BARRIER: sparc_membar (code, sparc_membar_all); break; case OP_LIVERANGE_START: { if (cfg->verbose_level > 1) printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code; break; } case OP_LIVERANGE_END: { if (cfg->verbose_level > 1) printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code; break; } case OP_GC_SAFE_POINT: break; default: #ifdef __GNUC__ g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__); #else g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode)); #endif g_assert_not_reached (); } if ((((guint8*)code) - code_start) > max_len) { g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)", mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start); g_assert_not_reached (); } cpos += max_len; last_ins = ins; } set_code_cursor (cfg, code); } void mono_arch_register_lowlevel_calls (void) { mono_register_jit_icall (mono_arch_get_lmf_addr, NULL, TRUE); } void mono_arch_patch_code (MonoCompile *cfg, MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors, MonoError *error) { MonoJumpInfo *patch_info; error_init (error); /* FIXME: Move part of this to arch independent code */ for (patch_info = ji; patch_info; patch_info = patch_info->next) { unsigned char *ip = patch_info->ip.i + code; gpointer target; target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors, error); return_if_nok (error); switch (patch_info->type) { case MONO_PATCH_INFO_NONE: continue; case MONO_PATCH_INFO_METHOD_JUMP: { guint32 *ip2 = (guint32*)ip; /* Might already been patched */ sparc_set_template (ip2, sparc_o7); break; } default: break; } sparc_patch ((guint32*)ip, target); } } #error obsolete tracing? 
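/*
 * Illustrative sketch, not part of the upstream file: a host-side C version
 * of the finiteness test that the OP_CKFINITE case above emits. The JIT
 * spills the double, loads its most significant 16 bits, shifts out the 4
 * mantissa bits and masks with 2047 (0x7ff); an all-ones IEEE 754 exponent
 * means NaN or +/-infinity. The helper name is hypothetical.
 */
#include <stdint.h>
#include <string.h>

static int
example_is_finite (double d)
{
	uint64_t bits;
	memcpy (&bits, &d, sizeof (bits));        /* bit-level view of the double */
	uint32_t top16 = (uint32_t)(bits >> 48);  /* sign + 11 exponent bits + 4 mantissa bits */
	uint32_t exp = (top16 >> 4) & 2047;       /* same srl/and sequence as the emitted code */
	return exp != 2047;                       /* exponent 0x7ff => NaN or +/-Inf */
}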
void* mono_arch_instrument_prolog (MonoCompile *cfg, MonoJitICallId func, void *p, gboolean enable_arguments) { int i; guint32 *code = (guint32*)p; MonoMethodSignature *sig = mono_method_signature_internal (cfg->method); CallInfo *cinfo; /* Save registers to stack */ for (i = 0; i < 6; ++i) sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (target_mgreg_t))); cinfo = get_call_info (cfg, sig, FALSE); /* Save float regs on V9, since they are caller saved */ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; gint32 stack_offset; stack_offset = ainfo->offset + ARGS_OFFSET; if (ainfo->storage == ArgInFloatReg) { if (!sparc_is_imm13 (stack_offset)) NOT_IMPLEMENTED; sparc_stf_imm (code, ainfo->reg, sparc_fp, stack_offset); } else if (ainfo->storage == ArgInDoubleReg) { /* The offset is guaranteed to be aligned by the ABI rules */ sparc_stdf_imm (code, ainfo->reg, sparc_fp, stack_offset); } } sparc_set (code, cfg->method, sparc_o0); sparc_add_imm (code, FALSE, sparc_fp, MONO_SPARC_STACK_BIAS, sparc_o1); mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (func)); EMIT_CALL (); /* Restore float regs on V9 */ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; gint32 stack_offset; stack_offset = ainfo->offset + ARGS_OFFSET; if (ainfo->storage == ArgInFloatReg) { if (!sparc_is_imm13 (stack_offset)) NOT_IMPLEMENTED; sparc_ldf_imm (code, sparc_fp, stack_offset, ainfo->reg); } else if (ainfo->storage == ArgInDoubleReg) { /* The offset is guaranteed to be aligned by the ABI rules */ sparc_lddf_imm (code, sparc_fp, stack_offset, ainfo->reg); } } g_free (cinfo); return code; } enum { SAVE_NONE, SAVE_STRUCT, SAVE_ONE, SAVE_TWO, SAVE_FP }; #error obsolete tracing? 
void* mono_arch_instrument_epilog (MonoCompile *cfg, MonoJitICallId func, void *p, gboolean enable_arguments) { guint32 *code = (guint32*)p; int save_mode = SAVE_NONE; MonoMethod *method = cfg->method; switch (mini_get_underlying_type (mono_method_signature_internal (method)->ret)->type) { case MONO_TYPE_VOID: /* special case string .ctor icall */ if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class) save_mode = SAVE_ONE; else save_mode = SAVE_NONE; break; case MONO_TYPE_I8: case MONO_TYPE_U8: #ifdef SPARCV9 save_mode = SAVE_ONE; #else save_mode = SAVE_TWO; #endif break; case MONO_TYPE_R4: case MONO_TYPE_R8: save_mode = SAVE_FP; break; case MONO_TYPE_VALUETYPE: save_mode = SAVE_STRUCT; break; default: save_mode = SAVE_ONE; break; } /* Save the result to the stack and also put it into the output registers */ switch (save_mode) { case SAVE_TWO: /* V8 only */ sparc_st_imm (code, sparc_i0, sparc_fp, 68); sparc_st_imm (code, sparc_i0, sparc_fp, 72); sparc_mov_reg_reg (code, sparc_i0, sparc_o1); sparc_mov_reg_reg (code, sparc_i1, sparc_o2); break; case SAVE_ONE: sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET); sparc_mov_reg_reg (code, sparc_i0, sparc_o1); break; case SAVE_FP: #ifdef SPARCV9 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET); #else sparc_stdf_imm (code, sparc_f0, sparc_fp, 72); sparc_ld_imm (code, sparc_fp, 72, sparc_o1); sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2); #endif break; case SAVE_STRUCT: #ifdef SPARCV9 sparc_mov_reg_reg (code, sparc_i0, sparc_o1); #else sparc_ld_imm (code, sparc_fp, 64, sparc_o1); #endif break; case SAVE_NONE: default: break; } sparc_set (code, cfg->method, sparc_o0); mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (func)); EMIT_CALL (); /* Restore result */ switch (save_mode) { case SAVE_TWO: sparc_ld_imm (code, sparc_fp, 68, sparc_i0); sparc_ld_imm (code, sparc_fp, 72, sparc_i0); break; case SAVE_ONE: sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0); break; case SAVE_FP: sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0); break; case SAVE_NONE: default: break; } return code; } guint8 * mono_arch_emit_prolog (MonoCompile *cfg) { MonoMethod *method = cfg->method; MonoMethodSignature *sig; MonoInst *inst; guint32 *code; CallInfo *cinfo; guint32 i, offset; cfg->code_size = 256; cfg->native_code = g_malloc (cfg->code_size); code = (guint32*)cfg->native_code; /* FIXME: Generate intermediate code instead */ offset = cfg->stack_offset; offset += (16 * sizeof (target_mgreg_t)); /* register save area */ #ifndef SPARCV9 offset += 4; /* struct/union return pointer */ #endif /* add parameter area size for called functions */ if (cfg->param_area < (6 * sizeof (target_mgreg_t))) /* Reserve space for the first 6 arguments even if it is unused */ offset += 6 * sizeof (target_mgreg_t); else offset += cfg->param_area; /* align the stack size */ offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT); /* * localloc'd memory is stored between the local variables (whose * size is given by cfg->stack_offset), and between the space reserved * by the ABI. 
*/ cfg->arch.localloc_offset = offset - cfg->stack_offset; cfg->stack_offset = offset; #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK /* Perform stack touching */ NOT_IMPLEMENTED; #endif if (!sparc_is_imm13 (- cfg->stack_offset)) { /* Can't use sparc_o7 here, since we're still in the caller's frame */ sparc_set (code, (- cfg->stack_offset), GP_SCRATCH_REG); sparc_save (code, sparc_sp, GP_SCRATCH_REG, sparc_sp); } else sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp); /* if (strstr (cfg->method->name, "foo")) { mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_sparc_break)); sparc_call_simple (code, 0); sparc_nop (code); } */ sig = mono_method_signature_internal (method); cinfo = get_call_info (cfg, sig, FALSE); /* Keep in sync with emit_load_volatile_arguments */ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = cinfo->args + i; gint32 stack_offset; MonoType *arg_type; inst = cfg->args [i]; if (sig->hasthis && (i == 0)) arg_type = mono_get_object_type (); else arg_type = sig->params [i - sig->hasthis]; stack_offset = ainfo->offset + ARGS_OFFSET; /* Save the split arguments so they will reside entirely on the stack */ if (ainfo->storage == ArgInSplitRegStack) { /* Save the register to the stack */ g_assert (inst->opcode == OP_REGOFFSET); if (!sparc_is_imm13 (stack_offset)) NOT_IMPLEMENTED; sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset); } if (!v64 && !m_type_is_byref (arg_type) && (arg_type->type == MONO_TYPE_R8)) { /* Save the argument to a dword aligned stack location */ /* * stack_offset contains the offset of the argument on the stack. * inst->inst_offset contains the dword aligned offset where the value * should be stored. */ if (ainfo->storage == ArgInIRegPair) { if (!sparc_is_imm13 (inst->inst_offset + 4)) NOT_IMPLEMENTED; sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset); sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4); } else if (ainfo->storage == ArgInSplitRegStack) { #ifdef SPARCV9 g_assert_not_reached (); #endif if (stack_offset != inst->inst_offset) { /* stack_offset is not dword aligned, so we need to make a copy */ sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset); sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7); sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4); } } else if (ainfo->storage == ArgOnStackPair) { #ifdef SPARCV9 g_assert_not_reached (); #endif if (stack_offset != inst->inst_offset) { /* stack_offset is not dword aligned, so we need to make a copy */ sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7); sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset); sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7); sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4); } } else g_assert_not_reached (); } else if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) { /* Argument in register, but need to be saved to stack */ if (!sparc_is_imm13 (stack_offset)) NOT_IMPLEMENTED; if ((stack_offset - ARGS_OFFSET) & 0x1) sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset); else if ((stack_offset - ARGS_OFFSET) & 0x2) sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset); else if ((stack_offset - ARGS_OFFSET) & 0x4) sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset); else { if (v64) sparc_stx_imm (code, 
sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset); else sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset); } } else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) { #ifdef SPARCV9 NOT_IMPLEMENTED; #endif /* Argument in regpair, but need to be saved to stack */ if (!sparc_is_imm13 (inst->inst_offset + 4)) NOT_IMPLEMENTED; sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset); sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4); } else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) { if (!sparc_is_imm13 (stack_offset)) NOT_IMPLEMENTED; sparc_stf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); } else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) { /* The offset is guaranteed to be aligned by the ABI rules */ sparc_stdf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset); } if ((ainfo->storage == ArgInFloatReg) && (inst->opcode == OP_REGVAR)) { /* Need to move into the a double precision register */ sparc_fstod (code, ainfo->reg, ainfo->reg - 1); } if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack)) if (inst->opcode == OP_REGVAR) /* FIXME: Load the argument into memory */ NOT_IMPLEMENTED; } g_free (cinfo); if (cfg->method->save_lmf) { gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset; /* Save ip */ mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL); sparc_set_template (code, sparc_o7); sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip)); /* Save sp */ sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp)); /* Save fp */ sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp)); /* Save method */ /* FIXME: add a relocation for this */ sparc_set (code, cfg->method, sparc_o7); sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method)); mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_get_lmf_addr)); EMIT_CALL (); code = (guint32*)mono_sparc_emit_save_lmf (code, lmf_offset); } #error obsolete tracing? if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) code = (guint32*)mono_arch_instrument_prolog (cfg, MONO_JIT_ICALL_mono_trace_enter_method, code, TRUE); set_code_cursor (cfg, code); return (guint8*)code; } void mono_arch_emit_epilog (MonoCompile *cfg) { MonoMethod *method = cfg->method; guint32 *code; int can_fold = 0; int max_epilog_size = 16 + 20 * 4; if (cfg->method->save_lmf) max_epilog_size += 128; if (mono_jit_trace_calls != NULL) max_epilog_size += 50; code = (guint32 *)realloc_code (cfg, max_epilog_size); #error obsolete tracing? 
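/*
 * Illustrative note, not part of the upstream file: the store-width dispatch
 * a few lines above homes an argument register with the widest naturally
 * aligned store its slot allows, since SPARC traps on misaligned accesses.
 * In outline, with off = stack_offset - ARGS_OFFSET as in the code above:
 *
 *   width = (off & 1) ? 1          // stb: slot is only byte aligned
 *         : (off & 2) ? 2          // sth: halfword aligned
 *         : (off & 4) ? 4          // st:  word aligned
 *         : (v64 ? 8 : 4);         // stx on v9, st on v8
 */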
if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) code = (guint32*)mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE); if (cfg->method->save_lmf) { gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset; code = mono_sparc_emit_restore_lmf (code, lmf_offset); } /* * The V8 ABI requires that calls to functions which return a structure * return to %i7+12 */ if (!v64 && mono_method_signature_internal (cfg->method)->pinvoke && MONO_TYPE_ISSTRUCT(mono_method_signature_internal (cfg->method)->ret)) sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0); else sparc_ret (code); /* Only fold last instruction into the restore if the exit block has an in count of 1 and the previous block hasn't been optimized away since it may have an in count > 1 */ if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset) can_fold = 1; /* * FIXME: The last instruction might have a branch pointing into it like in * int_ceq sparc_i0 <- */ can_fold = 0; /* Try folding last instruction into the restore */ if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) { /* or reg, imm, %i0 */ int reg = sparc_inst_rs1 (code [-2]); int imm = (((gint32)(sparc_inst_imm13 (code [-2]))) << 19) >> 19; code [-2] = code [-1]; code --; sparc_restore_imm (code, reg, imm, sparc_o0); } else if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) { /* or reg, reg, %i0 */ int reg1 = sparc_inst_rs1 (code [-2]); int reg2 = sparc_inst_rs2 (code [-2]); code [-2] = code [-1]; code --; sparc_restore (code, reg1, reg2, sparc_o0); } else sparc_restore_imm (code, sparc_g0, 0, sparc_g0); set_code_cursor (cfg, code); } void mono_arch_emit_exceptions (MonoCompile *cfg) { MonoJumpInfo *patch_info; guint32 *code; int nthrows = 0, i; int exc_count = 0; guint32 code_size; MonoClass *exc_classes [16]; guint8 *exc_throw_start [16], *exc_throw_end [16]; /* Compute needed space */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { if (patch_info->type == MONO_PATCH_INFO_EXC) exc_count++; } /* * make sure we have enough space for exceptions */ #ifdef SPARCV9 code_size = exc_count * (20 * 4); #else code_size = exc_count * 24; #endif code = (guint32*)realloc_code (cfg, code_size); for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { switch (patch_info->type) { case MONO_PATCH_INFO_EXC: { MonoClass *exc_class; guint32 *buf, *buf2; guint32 throw_ip, type_idx; gint32 disp; sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code); exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name); type_idx = m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF; throw_ip = patch_info->ip.i; /* Find a throw sequence for the same exception class */ for (i = 0; i < nthrows; ++i) if (exc_classes [i] == exc_class) break; if (i < nthrows) { guint32 throw_offset = (((guint8*)exc_throw_end [i] - cfg->native_code) - throw_ip) >> 2; if (!sparc_is_imm13 (throw_offset)) sparc_set32 (code, throw_offset, sparc_o1); disp = (exc_throw_start [i] - (guint8*)code) >> 2; g_assert (sparc_is_imm22 (disp)); sparc_branch (code, 0, sparc_ba, disp); if (sparc_is_imm13 (throw_offset)) sparc_set32 (code, throw_offset, sparc_o1); else sparc_nop (code); patch_info->type = MONO_PATCH_INFO_NONE; } else { /* Emit the 
template for setting o1 */ buf = code; if (sparc_is_imm13 (((((guint8*)code - cfg->native_code) - throw_ip) >> 2) - 8)) /* Can use a short form */ sparc_nop (code); else sparc_set_template (code, sparc_o1); buf2 = code; if (nthrows < 16) { exc_classes [nthrows] = exc_class; exc_throw_start [nthrows] = (guint8*)code; } /* mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_sparc_break)); EMIT_CALL(); */ /* first arg = type token */ /* Pass the type index to reduce the size of the sparc_set */ if (!sparc_is_imm13 (type_idx)) sparc_set32 (code, type_idx, sparc_o0); /* second arg = offset between the throw ip and the current ip */ /* On sparc, the saved ip points to the call instruction */ disp = (((guint8*)code - cfg->native_code) - throw_ip) >> 2; sparc_set32 (buf, disp, sparc_o1); while (buf < buf2) sparc_nop (buf); if (nthrows < 16) { exc_throw_end [nthrows] = (guint8*)code; nthrows ++; } patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception; patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID; patch_info->ip.i = (guint8*)code - cfg->native_code; EMIT_CALL (); if (sparc_is_imm13 (type_idx)) { /* Put it into the delay slot */ code --; buf = code; sparc_set32 (code, type_idx, sparc_o0); g_assert (code - buf == 1); } } break; } default: /* do nothing */ break; } set_code_cursor (cfg, code); } set_code_cursor (cfg, code); } gboolean lmf_addr_key_inited = FALSE; #ifdef MONO_SPARC_THR_TLS thread_key_t lmf_addr_key; #else pthread_key_t lmf_addr_key; #endif gpointer mono_arch_get_lmf_addr (void) { /* This is perf critical so we bypass the IO layer */ /* The thr_... functions seem to be somewhat faster */ #ifdef MONO_SPARC_THR_TLS gpointer res; thr_getspecific (lmf_addr_key, &res); return res; #else return pthread_getspecific (lmf_addr_key); #endif } #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK /* * There seems to be no way to determine stack boundaries under solaris, * so it's not possible to determine whenever a SIGSEGV is caused by stack * overflow or not. */ #error "--with-sigaltstack=yes not supported on solaris" #endif void mono_arch_tls_init (void) { MonoJitTlsData *jit_tls; if (!lmf_addr_key_inited) { int res; lmf_addr_key_inited = TRUE; #ifdef MONO_SPARC_THR_TLS res = thr_keycreate (&lmf_addr_key, NULL); #else res = pthread_key_create (&lmf_addr_key, NULL); #endif g_assert (res == 0); } jit_tls = mono_get_jit_tls (); #ifdef MONO_SPARC_THR_TLS thr_setspecific (lmf_addr_key, &jit_tls->lmf); #else pthread_setspecific (lmf_addr_key, &jit_tls->lmf); #endif } void mono_arch_finish_init (void) { } MonoInst* mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins = NULL; return ins; } /* * mono_arch_get_argument_info: * @csig: a method signature * @param_count: the number of parameters to consider * @arg_info: an array to store the result infos * * Gathers information on parameters such as size, alignment and * padding. arg_info should be large enought to hold param_count + 1 entries. * * Returns the size of the activation frame. 
*/ int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) { int k, align; CallInfo *cinfo; ArgInfo *ainfo; cinfo = get_call_info (NULL, csig, FALSE); if (csig->hasthis) { ainfo = &cinfo->args [0]; arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset; } for (k = 0; k < param_count; k++) { ainfo = &cinfo->args [k + csig->hasthis]; arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset; arg_info [k + 1].size = mono_type_size (csig->params [k], &align); } g_free (cinfo); return 0; } host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg) { /* FIXME: implement */ g_assert_not_reached (); } host_mgreg_t* mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg) { /* FIXME: implement */ g_assert_not_reached (); } gboolean mono_arch_opcode_supported (int opcode) { return FALSE; } gboolean mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_) { return FALSE; } gpointer mono_arch_load_function (MonoJitICallId jit_icall_id) { return NULL; }
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
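A minimal sketch of the lowering this query describes, under the assumption that the conversion is rewritten in a front-end decompose pass; the switch placement is illustrative, not the actual patch. Native-int float conversions become an explicit 32- or 64-bit form before reaching a back end, so back ends such as mini-x86.c below no longer need OP_FCONV_TO_I/OP_RCONV_TO_I cases:

	case OP_FCONV_TO_I: /* hypothetical placement; OP_RCONV_TO_I is handled the same way */
#if TARGET_SIZEOF_VOID_P == 4
		ins->opcode = OP_FCONV_TO_I4; /* 32-bit target: native int is int32 */
#else
		ins->opcode = OP_FCONV_TO_I8; /* 64-bit target: native int is int64 */
#endif
		break;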
./src/mono/mono/mini/mini-x86.c
/** * \file * x86 backend for the Mono code generator * * Authors: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * Patrik Torstensson * * Copyright 2003 Ximian, Inc. * Copyright 2003-2011 Novell Inc. * Copyright 2011 Xamarin Inc. * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include "mini.h" #include <string.h> #include <math.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <mono/metadata/abi-details.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/threads.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/gc-internals.h> #include <mono/metadata/tokentype.h> #include <mono/utils/mono-math.h> #include <mono/utils/mono-counters.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-memory-model.h> #include <mono/utils/mono-hwcap.h> #include <mono/utils/mono-threads.h> #include <mono/utils/unlocked.h> #include "mini-x86.h" #include "cpu-x86.h" #include "ir-emit.h" #include "mini-gc.h" #include "aot-runtime.h" #include "mini-runtime.h" #ifndef TARGET_WIN32 #ifdef MONO_XEN_OPT static gboolean optimize_for_xen = TRUE; #else #define optimize_for_xen 0 #endif #endif static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math") /* The single step trampoline */ static gpointer ss_trampoline; /* The breakpoint trampoline */ static gpointer bp_trampoline; #define ARGS_OFFSET 8 #ifdef TARGET_WIN32 /* Under windows, the default pinvoke calling convention is stdcall */ #define CALLCONV_IS_STDCALL(sig) ((sig)->pinvoke && ((sig)->call_convention == MONO_CALL_STDCALL || (sig)->call_convention == MONO_CALL_DEFAULT || (sig)->call_convention == MONO_CALL_THISCALL)) #else #define CALLCONV_IS_STDCALL(sig) ((sig)->pinvoke && ((sig)->call_convention == MONO_CALL_STDCALL || (sig)->call_convention == MONO_CALL_THISCALL)) #endif #define X86_IS_CALLEE_SAVED_REG(reg) (((reg) == X86_EBX) || ((reg) == X86_EDI) || ((reg) == X86_ESI)) #define OP_SEQ_POINT_BP_OFFSET 7 const char* mono_arch_regname (int reg) { switch (reg) { case X86_EAX: return "%eax"; case X86_EBX: return "%ebx"; case X86_ECX: return "%ecx"; case X86_EDX: return "%edx"; case X86_ESP: return "%esp"; case X86_EBP: return "%ebp"; case X86_EDI: return "%edi"; case X86_ESI: return "%esi"; } return "unknown"; } const char* mono_arch_fregname (int reg) { switch (reg) { case 0: return "%fr0"; case 1: return "%fr1"; case 2: return "%fr2"; case 3: return "%fr3"; case 4: return "%fr4"; case 5: return "%fr5"; case 6: return "%fr6"; case 7: return "%fr7"; default: return "unknown"; } } const char * mono_arch_xregname (int reg) { switch (reg) { case 0: return "%xmm0"; case 1: return "%xmm1"; case 2: return "%xmm2"; case 3: return "%xmm3"; case 4: return "%xmm4"; case 5: return "%xmm5"; case 6: return "%xmm6"; case 7: return "%xmm7"; default: return "unknown"; } } void mono_x86_patch (unsigned char* code, gpointer target) { mono_x86_patch_inline (code, target); } #define FLOAT_PARAM_REGS 0 static const guint32 thiscall_param_regs [] = { X86_ECX, X86_NREG }; static const guint32 *callconv_param_regs(MonoMethodSignature *sig) { if (!sig->pinvoke) return NULL; switch (sig->call_convention) { case MONO_CALL_THISCALL: return thiscall_param_regs; default: return NULL; } } #if defined(TARGET_WIN32) || defined(__APPLE__) || defined(__FreeBSD__) #define SMALL_STRUCTS_IN_REGS static X86_Reg_No return_regs [] = { X86_EAX, X86_EDX }; #endif static void inline 
add_general (guint32 *gr, const guint32 *param_regs, guint32 *stack_size, ArgInfo *ainfo) { ainfo->offset = *stack_size; if (!param_regs || param_regs [*gr] == X86_NREG) { ainfo->storage = ArgOnStack; ainfo->nslots = 1; (*stack_size) += sizeof (target_mgreg_t); } else { ainfo->storage = ArgInIReg; ainfo->reg = param_regs [*gr]; (*gr) ++; } } static void inline add_general_pair (guint32 *gr, const guint32 *param_regs , guint32 *stack_size, ArgInfo *ainfo) { ainfo->offset = *stack_size; g_assert(!param_regs || param_regs[*gr] == X86_NREG); ainfo->storage = ArgOnStack; (*stack_size) += sizeof (target_mgreg_t) * 2; ainfo->nslots = 2; } static void inline add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double) { ainfo->offset = *stack_size; if (*gr >= FLOAT_PARAM_REGS) { ainfo->storage = ArgOnStack; (*stack_size) += is_double ? 8 : 4; ainfo->nslots = is_double ? 2 : 1; } else { /* A double register */ if (is_double) ainfo->storage = ArgInDoubleSSEReg; else ainfo->storage = ArgInFloatSSEReg; ainfo->reg = *gr; (*gr) += 1; } } static void add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type, gboolean is_return, guint32 *gr, const guint32 *param_regs, guint32 *fr, guint32 *stack_size) { guint32 size; MonoClass *klass; klass = mono_class_from_mono_type_internal (type); size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, sig->pinvoke && !sig->marshalling_disabled); #if defined(TARGET_WIN32) /* * Standard C and C++ doesn't allow empty structs, empty structs will always have a size of 1 byte. * GCC have an extension to allow empty structs, https://gcc.gnu.org/onlinedocs/gcc/Empty-Structures.html. * This cause a little dilemma since runtime build using none GCC compiler will not be compatible with * GCC build C libraries and the other way around. On platforms where empty structs has size of 1 byte * it must be represented in call and cannot be dropped. */ if (size == 0 && MONO_TYPE_ISSTRUCT (type) && sig->pinvoke) { /* Empty structs (1 byte size) needs to be represented in a stack slot */ ainfo->pass_empty_struct = TRUE; size = 1; } #endif #ifdef SMALL_STRUCTS_IN_REGS if (sig->pinvoke && is_return) { MonoMarshalType *info; info = mono_marshal_load_type_info (klass); g_assert (info); ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone; /* Ignore empty struct return value, if used. */ if (info->num_fields == 0 && ainfo->pass_empty_struct) { ainfo->storage = ArgValuetypeInReg; return; } /* * Windows x86 ABI for returning structs of size 4 or 8 bytes (regardless of type) dictates that * values are passed in EDX:EAX register pairs, https://msdn.microsoft.com/en-us/library/984x0h58.aspx. * This is different compared to for example float or double return types (not in struct) that will be returned * in ST(0), https://msdn.microsoft.com/en-us/library/ha59cbfz.aspx. * * Apples OSX x86 ABI for returning structs of size 4 or 8 bytes uses a slightly different approach. * If a struct includes only one scalar value, it will be handled with the same rules as scalar values. * This means that structs with one float or double will be returned in ST(0). For more details, * https://developer.apple.com/library/mac/documentation/DeveloperTools/Conceptual/LowLevelABI/130-IA-32_Function_Calling_Conventions/IA32.html. 
*/ #if !defined(TARGET_WIN32) /* Special case structs with only a float member */ if (info->num_fields == 1) { int ftype = mini_get_underlying_type (info->fields [0].field->type)->type; if ((info->native_size == 8) && (ftype == MONO_TYPE_R8)) { ainfo->storage = ArgValuetypeInReg; ainfo->pair_storage [0] = ArgOnDoubleFpStack; return; } if ((info->native_size == 4) && (ftype == MONO_TYPE_R4)) { ainfo->storage = ArgValuetypeInReg; ainfo->pair_storage [0] = ArgOnFloatFpStack; return; } } #endif if ((info->native_size == 1) || (info->native_size == 2) || (info->native_size == 4) || (info->native_size == 8)) { ainfo->storage = ArgValuetypeInReg; ainfo->pair_storage [0] = ArgInIReg; ainfo->pair_regs [0] = return_regs [0]; if (info->native_size > 4) { ainfo->pair_storage [1] = ArgInIReg; ainfo->pair_regs [1] = return_regs [1]; } return; } } #endif if (param_regs && param_regs [*gr] != X86_NREG && !is_return) { g_assert (size <= 4); ainfo->storage = ArgValuetypeInReg; ainfo->reg = param_regs [*gr]; (*gr)++; return; } ainfo->offset = *stack_size; ainfo->storage = ArgOnStack; *stack_size += ALIGN_TO (size, sizeof (target_mgreg_t)); ainfo->nslots = ALIGN_TO (size, sizeof (target_mgreg_t)) / sizeof (target_mgreg_t); } /* * get_call_info: * * Obtain information about a call according to the calling convention. * For x86 ELF, see the "System V Application Binary Interface Intel386 * Architecture Processor Supplment, Fourth Edition" document for more * information. * For x86 win32, see https://msdn.microsoft.com/en-us/library/984x0h58.aspx. */ static CallInfo* get_call_info_internal (CallInfo *cinfo, MonoMethodSignature *sig) { guint32 i, gr, fr, pstart; const guint32 *param_regs; MonoType *ret_type; int n = sig->hasthis + sig->param_count; guint32 stack_size = 0; gboolean is_pinvoke = sig->pinvoke; gr = 0; fr = 0; cinfo->nargs = n; param_regs = callconv_param_regs(sig); /* return value */ { ret_type = mini_get_underlying_type (sig->ret); switch (ret_type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = X86_EAX; break; case MONO_TYPE_U8: case MONO_TYPE_I8: cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = X86_EAX; cinfo->ret.is_pair = TRUE; break; case MONO_TYPE_R4: cinfo->ret.storage = ArgOnFloatFpStack; break; case MONO_TYPE_R8: cinfo->ret.storage = ArgOnDoubleFpStack; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ret_type)) { cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = X86_EAX; break; } if (mini_is_gsharedvt_type (ret_type)) { cinfo->ret.storage = ArgOnStack; cinfo->vtype_retaddr = TRUE; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: { guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0; add_valuetype (sig, &cinfo->ret, ret_type, TRUE, &tmp_gr, NULL, &tmp_fr, &tmp_stacksize); if (cinfo->ret.storage == ArgOnStack) { cinfo->vtype_retaddr = TRUE; /* The caller passes the address where the value is stored */ } break; } case MONO_TYPE_VAR: case MONO_TYPE_MVAR: g_assert (mini_is_gsharedvt_type (ret_type)); cinfo->ret.storage = ArgOnStack; cinfo->vtype_retaddr = TRUE; break; case MONO_TYPE_VOID: cinfo->ret.storage = ArgNone; break; default: g_error ("Can't handle as return value 0x%x", ret_type->type); } } pstart = 0; /* * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after * the first argument, 
allowing 'this' to be always passed in the first arg reg. * Also do this if the first argument is a reference type, since virtual calls * are sometimes made using calli without sig->hasthis set, like in the delegate * invoke wrappers. */ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) { if (sig->hasthis) { add_general (&gr, param_regs, &stack_size, cinfo->args + 0); } else { add_general (&gr, param_regs, &stack_size, &cinfo->args [sig->hasthis + 0]); pstart = 1; } cinfo->vret_arg_offset = stack_size; add_general (&gr, NULL, &stack_size, &cinfo->ret); cinfo->vret_arg_index = 1; } else { /* this */ if (sig->hasthis) add_general (&gr, param_regs, &stack_size, cinfo->args + 0); if (cinfo->vtype_retaddr) add_general (&gr, NULL, &stack_size, &cinfo->ret); } if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) { fr = FLOAT_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, param_regs, &stack_size, &cinfo->sig_cookie); } for (i = pstart; i < sig->param_count; ++i) { ArgInfo *ainfo = &cinfo->args [sig->hasthis + i]; MonoType *ptype; if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* We allways pass the sig cookie on the stack for simplicity */ /* * Prevent implicit arguments + the sig cookie from being passed * in registers. */ fr = FLOAT_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, param_regs, &stack_size, &cinfo->sig_cookie); } if (m_type_is_byref (sig->params [i])) { add_general (&gr, param_regs, &stack_size, ainfo); continue; } ptype = mini_get_underlying_type (sig->params [i]); switch (ptype->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: add_general (&gr, param_regs, &stack_size, ainfo); break; case MONO_TYPE_I2: case MONO_TYPE_U2: add_general (&gr, param_regs, &stack_size, ainfo); break; case MONO_TYPE_I4: case MONO_TYPE_U4: add_general (&gr, param_regs, &stack_size, ainfo); break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: add_general (&gr, param_regs, &stack_size, ainfo); break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ptype)) { add_general (&gr, param_regs, &stack_size, ainfo); break; } if (mini_is_gsharedvt_type (ptype)) { /* gsharedvt arguments are passed by ref */ add_general (&gr, param_regs, &stack_size, ainfo); g_assert (ainfo->storage == ArgOnStack); ainfo->storage = ArgGSharedVt; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: add_valuetype (sig, ainfo, ptype, FALSE, &gr, param_regs, &fr, &stack_size); break; case MONO_TYPE_U8: case MONO_TYPE_I8: add_general_pair (&gr, param_regs, &stack_size, ainfo); break; case MONO_TYPE_R4: add_float (&fr, &stack_size, ainfo, FALSE); break; case MONO_TYPE_R8: add_float (&fr, &stack_size, ainfo, TRUE); break; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: /* gsharedvt arguments are passed by ref */ g_assert (mini_is_gsharedvt_type (ptype)); add_general (&gr, param_regs, &stack_size, ainfo); g_assert (ainfo->storage == ArgOnStack); ainfo->storage = ArgGSharedVt; break; default: g_error ("unexpected type 0x%x", ptype->type); g_assert_not_reached (); } } if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) { fr = FLOAT_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ 
add_general (&gr, param_regs, &stack_size, &cinfo->sig_cookie); } if (cinfo->vtype_retaddr) { /* if the function returns a struct on stack, the called method already does a ret $0x4 */ cinfo->callee_stack_pop = 4; } else if (CALLCONV_IS_STDCALL (sig)) { /* Have to compensate for the stack space popped by the native callee */ cinfo->callee_stack_pop = stack_size; } if (mono_do_x86_stack_align && (stack_size % MONO_ARCH_FRAME_ALIGNMENT) != 0) { cinfo->need_stack_align = TRUE; cinfo->stack_align_amount = MONO_ARCH_FRAME_ALIGNMENT - (stack_size % MONO_ARCH_FRAME_ALIGNMENT); stack_size += cinfo->stack_align_amount; } cinfo->stack_usage = stack_size; cinfo->reg_usage = gr; cinfo->freg_usage = fr; return cinfo; } static CallInfo* get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { int n = sig->hasthis + sig->param_count; CallInfo *cinfo; if (mp) cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n)); else cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n)); return get_call_info_internal (cinfo, sig); } static gboolean storage_in_ireg (ArgStorage storage) { return (storage == ArgInIReg || storage == ArgValuetypeInReg); } static int arg_need_temp (ArgInfo *ainfo) { /* * We always fetch the double value from the fpstack. In that case, we * need to have a separate tmp that is the double value casted to float */ if (ainfo->storage == ArgOnFloatFpStack) return sizeof (float); return 0; } static gpointer arg_get_storage (CallContext *ccontext, ArgInfo *ainfo) { switch (ainfo->storage) { case ArgOnStack: return ccontext->stack + ainfo->offset; case ArgOnDoubleFpStack: return &ccontext->fret; case ArgInIReg: /* If pair, the storage is for EDX:EAX */ return &ccontext->eax; default: g_error ("Arg storage type not yet supported"); } } static void arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest) { g_assert (ainfo->storage == ArgOnFloatFpStack); *(float*) dest = (float)ccontext->fret; } void mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { CallInfo *cinfo = get_call_info (NULL, sig); const MonoEECallbacks *interp_cb = mini_get_interp_callbacks (); gpointer storage; ArgInfo *ainfo; memset (ccontext, 0, sizeof (CallContext)); ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT); if (ccontext->stack_size) ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size); if (sig->ret->type != MONO_TYPE_VOID) { ainfo = &cinfo->ret; if (ainfo->storage == ArgOnStack) { /* This is a value type return. 
The pointer to vt storage is pushed as first argument */ g_assert (ainfo->offset == 0); g_assert (ainfo->nslots == 1); storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1); *(host_mgreg_t*)ccontext->stack = (host_mgreg_t)storage; } } g_assert (!sig->hasthis); for (int i = 0; i < sig->param_count; i++) { ainfo = &cinfo->args [i]; storage = arg_get_storage (ccontext, ainfo); interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage); } g_free (cinfo); } void mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { const MonoEECallbacks *interp_cb; CallInfo *cinfo; ArgInfo *ainfo; gpointer storage; /* No return value */ if (sig->ret->type == MONO_TYPE_VOID) return; interp_cb = mini_get_interp_callbacks (); cinfo = get_call_info (NULL, sig); ainfo = &cinfo->ret; /* Check if return value was stored directly at address passed in reg */ if (cinfo->ret.storage != ArgOnStack) { int temp_size = arg_need_temp (ainfo); if (temp_size) { storage = alloca (temp_size); arg_get_val (ccontext, ainfo, storage); } else { storage = arg_get_storage (ccontext, ainfo); } interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage); } g_free (cinfo); } /* * mono_arch_get_argument_info: * @csig: a method signature * @param_count: the number of parameters to consider * @arg_info: an array to store the result infos * * Gathers information on parameters such as size, alignment and * padding. arg_info should be large enought to hold param_count + 1 entries. * * Returns the size of the argument area on the stack. * This should be signal safe, since it is called from * mono_arch_unwind_frame (). * FIXME: The metadata calls might not be signal safe. */ int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) { int len, k, args_size = 0; int size, pad; guint32 align; int offset = 8; CallInfo *cinfo; int prev_stackarg; int num_regs; /* Avoid g_malloc as it is not signal safe */ len = sizeof (CallInfo) + (sizeof (ArgInfo) * (csig->param_count + 1)); cinfo = (CallInfo*)g_alloca (len); memset (cinfo, 0, len); cinfo = get_call_info_internal (cinfo, csig); arg_info [0].offset = offset; if (cinfo->vtype_retaddr && cinfo->vret_arg_index == 0) { args_size += sizeof (target_mgreg_t); offset += 4; } if (csig->hasthis && !storage_in_ireg (cinfo->args [0].storage)) { args_size += sizeof (target_mgreg_t); offset += 4; } if (cinfo->vtype_retaddr && cinfo->vret_arg_index == 1 && csig->hasthis) { /* Emitted after this */ args_size += sizeof (target_mgreg_t); offset += 4; } arg_info [0].size = args_size; prev_stackarg = 0; for (k = 0; k < param_count; k++) { size = mini_type_stack_size_full (csig->params [k], &align, csig->pinvoke && !csig->marshalling_disabled); if (storage_in_ireg (cinfo->args [csig->hasthis + k].storage)) { /* not in stack, we'll give it an offset at the end */ arg_info [k + 1].pad = 0; arg_info [k + 1].size = size; } else { /* ignore alignment for now */ align = 1; args_size += pad = (align - (args_size & (align - 1))) & (align - 1); arg_info [prev_stackarg].pad = pad; args_size += size; arg_info [k + 1].pad = 0; arg_info [k + 1].size = size; offset += pad; arg_info [k + 1].offset = offset; offset += size; prev_stackarg = k + 1; } if (k == 0 && cinfo->vtype_retaddr && cinfo->vret_arg_index == 1 && !csig->hasthis) { /* Emitted after the first arg */ args_size += sizeof (target_mgreg_t); offset += 4; } } if (mono_do_x86_stack_align && !CALLCONV_IS_STDCALL 
(csig)) align = MONO_ARCH_FRAME_ALIGNMENT; else align = 4; args_size += pad = (align - (args_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; /* Add offsets for any reg parameters */ num_regs = 0; if (csig->hasthis && storage_in_ireg (cinfo->args [0].storage)) arg_info [0].offset = args_size + 4 * num_regs++; for (k=0; k < param_count; k++) { if (storage_in_ireg (cinfo->args[csig->hasthis + k].storage)) { arg_info [k + 1].offset = args_size + 4 * num_regs++; } } return args_size; } #ifndef DISABLE_JIT gboolean mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_) { g_assert (caller_sig); g_assert (callee_sig); // Direct AOT calls usually go through the PLT/GOT. // Unless we can determine here if is_direct_callable will return TRUE? // But the PLT/GOT is addressed with nonvolatile ebx, which // gets restored before the jump. // See https://github.com/mono/mono/commit/f5373adc8a89d4b0d1d549fdd6d9adc3ded4b400 // See https://github.com/mono/mono/issues/11265 if (!virtual_ && cfg->compile_aot && !cfg->full_aot) return FALSE; CallInfo *caller_info = get_call_info (NULL, caller_sig); CallInfo *callee_info = get_call_info (NULL, callee_sig); /* * Tailcalls with more callee stack usage than the caller cannot be supported, since * the extra stack space would be left on the stack after the tailcall. */ gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage) && IS_SUPPORTED_TAILCALL (caller_info->ret.storage == callee_info->ret.storage); if (!res && !mono_tailcall_print_enabled ()) goto exit; // Limit stack_usage to 1G. res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (1 << 30)); res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (1 << 30)); exit: g_free (caller_info); g_free (callee_info); return res; } #endif /* * Initialize the cpu to execute managed code. */ void mono_arch_cpu_init (void) { /* spec compliance requires running with double precision */ #ifndef _MSC_VER guint16 fpcw; __asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw)); fpcw &= ~X86_FPCW_PRECC_MASK; fpcw |= X86_FPCW_PREC_DOUBLE; __asm__ __volatile__ ("fldcw %0\n": : "m" (fpcw)); __asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw)); #else _control87 (_PC_53, MCW_PC); #endif } /* * Initialize architecture specific code. */ void mono_arch_init (void) { if (!mono_aot_only) bp_trampoline = mini_get_breakpoint_trampoline (); } /* * Cleanup architecture specific code. */ void mono_arch_cleanup (void) { } /* * This function returns the optimizations supported on this cpu. 
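 *
 * For example, on a CPU with CMOV but without SSE2, this returns
 * MONO_OPT_CMOV (plus MONO_OPT_FCMOV if the FPU supports it) and sets
 * MONO_OPT_SSE2 in *exclude_mask, so callers can tell "ruled out by the
 * hardware" apart from "merely not requested".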
 */
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
	guint32 opts = 0;

	*exclude_mask = 0;

	if (mono_hwcap_x86_has_cmov) {
		opts |= MONO_OPT_CMOV;

		if (mono_hwcap_x86_has_fcmov)
			opts |= MONO_OPT_FCMOV;
		else
			*exclude_mask |= MONO_OPT_FCMOV;
	} else {
		*exclude_mask |= MONO_OPT_CMOV;
	}

	if (mono_hwcap_x86_has_sse2)
		opts |= MONO_OPT_SSE2;
	else
		*exclude_mask |= MONO_OPT_SSE2;

#ifdef MONO_ARCH_SIMD_INTRINSICS
	/* SIMD intrinsics require at least SSE2. */
	if (!mono_hwcap_x86_has_sse2)
		*exclude_mask |= MONO_OPT_SIMD;
#endif

	return opts;
}

MonoCPUFeatures
mono_arch_get_cpu_features (void)
{
	guint64 features = MONO_CPU_INITED;

	if (mono_hwcap_x86_has_sse1)
		features |= MONO_CPU_X86_SSE;
	if (mono_hwcap_x86_has_sse2)
		features |= MONO_CPU_X86_SSE2;
	if (mono_hwcap_x86_has_sse3)
		features |= MONO_CPU_X86_SSE3;
	if (mono_hwcap_x86_has_ssse3)
		features |= MONO_CPU_X86_SSSE3;
	if (mono_hwcap_x86_has_sse41)
		features |= MONO_CPU_X86_SSE41;
	if (mono_hwcap_x86_has_sse42)
		features |= MONO_CPU_X86_SSE42;

	return (MonoCPUFeatures)features;
}

/*
 * Determine whether the trap whose info is in SIGINFO is caused by
 * integer overflow.
 */
gboolean
mono_arch_is_int_overflow (void *sigctx, void *info)
{
	MonoContext ctx;
	guint8* ip;

	mono_sigctx_to_monoctx (sigctx, &ctx);

	ip = (guint8*)ctx.eip;

	if ((ip [0] == 0xf7) && (x86_modrm_mod (ip [1]) == 0x3) && (x86_modrm_reg (ip [1]) == 0x7)) {
		gint32 reg;

		/* idiv REG */
		switch (x86_modrm_rm (ip [1])) {
		case X86_EAX:
			reg = ctx.eax;
			break;
		case X86_ECX:
			reg = ctx.ecx;
			break;
		case X86_EDX:
			reg = ctx.edx;
			break;
		case X86_EBX:
			reg = ctx.ebx;
			break;
		case X86_ESI:
			reg = ctx.esi;
			break;
		case X86_EDI:
			reg = ctx.edi;
			break;
		default:
			g_assert_not_reached ();
			reg = -1;
		}

		if (reg == -1)
			return TRUE;
	}

	return FALSE;
}

GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
	GList *vars = NULL;
	int i;

	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		/* unused vars */
		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
			continue;

		if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
		    (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
			continue;

		/* we don't allocate I1 to registers because there is no simple way to sign extend
		 * 8bit quantities in caller saved registers on x86 */
		if (mono_is_regsize_var (ins->inst_vtype) && (ins->inst_vtype->type != MONO_TYPE_I1)) {
			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
			g_assert (i == vmv->idx);
			vars = g_list_prepend (vars, vmv);
		}
	}

	vars = mono_varlist_sort (cfg, vars, 0);

	return vars;
}

GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
	GList *regs = NULL;

	/* we can use 3 registers for global allocation */
	regs = g_list_prepend (regs, (gpointer)X86_EBX);
	regs = g_list_prepend (regs, (gpointer)X86_ESI);
	regs = g_list_prepend (regs, (gpointer)X86_EDI);

	return regs;
}

/*
 * mono_arch_regalloc_cost:
 *
 * Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
	MonoInst *ins = cfg->varinfo [vmv->idx];

	if (cfg->method->save_lmf)
		/* The register is already saved */
		return (ins->opcode == OP_ARG) ? 1 : 0;
	else
		/* push+pop+possible load if it is an argument */
		return (ins->opcode == OP_ARG) ?
3 : 2; } static void set_needs_stack_frame (MonoCompile *cfg, gboolean flag) { static int inited = FALSE; static int count = 0; if (cfg->arch.need_stack_frame_inited) { g_assert (cfg->arch.need_stack_frame == flag); return; } cfg->arch.need_stack_frame = flag; cfg->arch.need_stack_frame_inited = TRUE; if (flag) return; if (!inited) { mono_counters_register ("Could eliminate stack frame", MONO_COUNTER_INT|MONO_COUNTER_JIT, &count); inited = TRUE; } ++count; //g_print ("will eliminate %s.%s.%s\n", cfg->method->klass->name_space, cfg->method->klass->name, cfg->method->name); } static gboolean needs_stack_frame (MonoCompile *cfg) { MonoMethodSignature *sig; MonoMethodHeader *header; gboolean result = FALSE; #if defined (__APPLE__) /*OSX requires stack frame code to have the correct alignment. */ return TRUE; #endif if (cfg->arch.need_stack_frame_inited) return cfg->arch.need_stack_frame; header = cfg->header; sig = mono_method_signature_internal (cfg->method); if (cfg->disable_omit_fp) result = TRUE; else if (cfg->flags & MONO_CFG_HAS_ALLOCA) result = TRUE; else if (cfg->method->save_lmf) result = TRUE; else if (cfg->stack_offset) result = TRUE; else if (cfg->param_area) result = TRUE; else if (cfg->flags & (MONO_CFG_HAS_CALLS | MONO_CFG_HAS_ALLOCA | MONO_CFG_HAS_TAILCALL)) result = TRUE; else if (header->num_clauses) result = TRUE; else if (sig->param_count + sig->hasthis) result = TRUE; else if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) result = TRUE; set_needs_stack_frame (cfg, result); return cfg->arch.need_stack_frame; } /* * Set var information according to the calling convention. X86 version. * The locals var stuff should most likely be split in another method. */ void mono_arch_allocate_vars (MonoCompile *cfg) { MonoMethodSignature *sig; MonoInst *inst; guint32 locals_stack_size, locals_stack_align; int i, offset; gint32 *offsets; CallInfo *cinfo; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; cfg->frame_reg = X86_EBP; offset = 0; if (cfg->has_atomic_add_i4 || cfg->has_atomic_exchange_i4) { /* The opcode implementations use callee-saved regs as scratch regs by pushing and pop-ing them, but that is not async safe */ cfg->used_int_regs |= (1 << X86_EBX) | (1 << X86_EDI) | (1 << X86_ESI); } /* Reserve space to save LMF and caller saved registers */ if (cfg->method->save_lmf) { /* The LMF var is allocated normally */ } else { if (cfg->used_int_regs & (1 << X86_EBX)) { offset += 4; } if (cfg->used_int_regs & (1 << X86_EDI)) { offset += 4; } if (cfg->used_int_regs & (1 << X86_ESI)) { offset += 4; } } switch (cinfo->ret.storage) { case ArgValuetypeInReg: /* Allocate a local to hold the result, the epilog will copy it to the correct place */ offset += 8; cfg->ret->opcode = OP_REGOFFSET; cfg->ret->inst_basereg = X86_EBP; cfg->ret->inst_offset = - offset; break; default: break; } /* Allocate a local for any register arguments that need them. 
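 * (an argument that arrives in a register but was not itself given a
 * register by the allocator gets an EBP-relative slot here, so the prolog
 * can spill it and later code can address it uniformly)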
*/ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = &cinfo->args [i]; inst = cfg->args [i]; if (inst->opcode != OP_REGVAR && storage_in_ireg (ainfo->storage)) { offset += 4; cfg->args[i]->opcode = OP_REGOFFSET; cfg->args[i]->inst_basereg = X86_EBP; cfg->args[i]->inst_offset = - offset; } } /* Allocate locals */ offsets = mono_allocate_stack_slots (cfg, TRUE, &locals_stack_size, &locals_stack_align); if (locals_stack_size > MONO_ARCH_MAX_FRAME_SIZE) { char *mname = mono_method_full_name (cfg->method, TRUE); mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s stack is too big.", mname)); g_free (mname); return; } if (locals_stack_align) { int prev_offset = offset; offset += (locals_stack_align - 1); offset &= ~(locals_stack_align - 1); while (prev_offset < offset) { prev_offset += 4; mini_gc_set_slot_type_from_fp (cfg, - prev_offset, SLOT_NOREF); } } cfg->locals_min_stack_offset = - (offset + locals_stack_size); cfg->locals_max_stack_offset = - offset; /* * EBP is at alignment 8 % MONO_ARCH_FRAME_ALIGNMENT, so if we * have locals larger than 8 bytes we need to make sure that * they have the appropriate offset. */ if (MONO_ARCH_FRAME_ALIGNMENT > 8 && locals_stack_align > 8) { int extra_size = MONO_ARCH_FRAME_ALIGNMENT - sizeof (target_mgreg_t) * 2; offset += extra_size; locals_stack_size += extra_size; } for (i = cfg->locals_start; i < cfg->num_varinfo; i++) { if (offsets [i] != -1) { MonoInst *inst = cfg->varinfo [i]; inst->opcode = OP_REGOFFSET; inst->inst_basereg = X86_EBP; inst->inst_offset = - (offset + offsets [i]); //printf ("allocated local %d to ", i); mono_print_tree_nl (inst); } } offset += locals_stack_size; /* * Allocate arguments+return value */ switch (cinfo->ret.storage) { case ArgOnStack: if (cfg->vret_addr) { /* * In the new IR, the cfg->vret_addr variable represents the * vtype return value. */ cfg->vret_addr->opcode = OP_REGOFFSET; cfg->vret_addr->inst_basereg = cfg->frame_reg; cfg->vret_addr->inst_offset = cinfo->ret.offset + ARGS_OFFSET; if (G_UNLIKELY (cfg->verbose_level > 1)) { printf ("vret_addr ="); mono_print_ins (cfg->vret_addr); } } else { cfg->ret->opcode = OP_REGOFFSET; cfg->ret->inst_basereg = X86_EBP; cfg->ret->inst_offset = cinfo->ret.offset + ARGS_OFFSET; } break; case ArgValuetypeInReg: break; case ArgInIReg: cfg->ret->opcode = OP_REGVAR; cfg->ret->inst_c0 = cinfo->ret.reg; cfg->ret->dreg = cinfo->ret.reg; break; case ArgNone: case ArgOnFloatFpStack: case ArgOnDoubleFpStack: break; default: g_assert_not_reached (); } if (sig->call_convention == MONO_CALL_VARARG) { g_assert (cinfo->sig_cookie.storage == ArgOnStack); cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET; } for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = &cinfo->args [i]; inst = cfg->args [i]; if (inst->opcode != OP_REGVAR) { if (storage_in_ireg (ainfo->storage)) { /* We already allocated locals for register arguments. 
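 * (that happened in the loop near the top of this function, so there is
 * nothing left to do for them here)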
*/ } else { inst->opcode = OP_REGOFFSET; inst->inst_basereg = X86_EBP; inst->inst_offset = ainfo->offset + ARGS_OFFSET; } } } cfg->stack_offset = offset; } void mono_arch_create_vars (MonoCompile *cfg) { MonoType *sig_ret; MonoMethodSignature *sig; CallInfo *cinfo; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; sig_ret = mini_get_underlying_type (sig->ret); if (cinfo->ret.storage == ArgValuetypeInReg) cfg->ret_var_is_local = TRUE; if ((cinfo->ret.storage != ArgValuetypeInReg) && (MONO_TYPE_ISSTRUCT (sig_ret) || mini_is_gsharedvt_variable_type (sig_ret))) { cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG); } if (cfg->gen_sdb_seq_points) { MonoInst *ins; ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.ss_tramp_var = ins; ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.bp_tramp_var = ins; } if (cfg->method->save_lmf) { cfg->create_lmf_var = TRUE; cfg->lmf_ir = TRUE; } cfg->arch_eh_jit_info = 1; } /* * It is expensive to adjust esp for each individual fp argument pushed on the stack * so we try to do it just once when we have multiple fp arguments in a row. * We don't use this mechanism generally because for int arguments the generated code * is slightly bigger and new generation cpus optimize away the dependency chains * created by push instructions on the esp value. * fp_arg_setup is the first argument in the execution sequence where the esp register * is modified. */ static G_GNUC_UNUSED int collect_fp_stack_space (MonoMethodSignature *sig, int start_arg, int *fp_arg_setup) { int fp_space = 0; MonoType *t; for (; start_arg < sig->param_count; ++start_arg) { t = mini_get_underlying_type (sig->params [start_arg]); if (!m_type_is_byref (t) && t->type == MONO_TYPE_R8) { fp_space += sizeof (double); *fp_arg_setup = start_arg; } else { break; } } return fp_space; } static void emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo) { MonoMethodSignature *tmp_sig; int sig_reg; /* * mono_ArgIterator_Setup assumes the signature cookie is * passed first and all the arguments which were before it are * passed on the stack after the signature. So compensate by * passing a different signature. 
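 *
 * For example, for a vararg call f (fmt, a, b) whose signature has
 * sentinelpos 1, tmp_sig describes only the variadic tail (a, b); the
 * cookie stored on the stack lets mono_ArgIterator_Setup locate those
 * trailing arguments.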
*/ tmp_sig = mono_metadata_signature_dup (call->signature); tmp_sig->param_count -= call->signature->sentinelpos; tmp_sig->sentinelpos = 0; memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*)); if (cfg->compile_aot) { sig_reg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, cinfo->sig_cookie.offset, sig_reg); } else { MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, X86_ESP, cinfo->sig_cookie.offset, (gsize)tmp_sig); } } #ifdef ENABLE_LLVM LLVMCallInfo* mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig) { int i, n; CallInfo *cinfo; ArgInfo *ainfo; LLVMCallInfo *linfo; MonoType *t, *sig_ret; n = sig->param_count + sig->hasthis; cinfo = get_call_info (cfg->mempool, sig); sig_ret = sig->ret; linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n)); /* * LLVM always uses the native ABI while we use our own ABI, the * only difference is the handling of vtypes: * - we only pass/receive them in registers in some cases, and only * in 1 or 2 integer registers. */ if (cinfo->ret.storage == ArgValuetypeInReg) { if (sig->pinvoke) { cfg->exception_message = g_strdup ("pinvoke + vtypes"); cfg->disable_llvm = TRUE; return linfo; } cfg->exception_message = g_strdup ("vtype ret in call"); cfg->disable_llvm = TRUE; /* linfo->ret.storage = LLVMArgVtypeInReg; for (j = 0; j < 2; ++j) linfo->ret.pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, cinfo->ret.pair_storage [j]); */ } if (mini_type_is_vtype (sig_ret) && cinfo->ret.storage == ArgInIReg) { /* Vtype returned using a hidden argument */ linfo->ret.storage = LLVMArgVtypeRetAddr; linfo->vret_arg_index = cinfo->vret_arg_index; } if (mini_type_is_vtype (sig_ret) && cinfo->ret.storage != ArgInIReg) { // FIXME: cfg->exception_message = g_strdup ("vtype ret in call"); cfg->disable_llvm = TRUE; } for (i = 0; i < n; ++i) { ainfo = cinfo->args + i; if (i >= sig->hasthis) t = sig->params [i - sig->hasthis]; else t = mono_get_int_type (); linfo->args [i].storage = LLVMArgNone; switch (ainfo->storage) { case ArgInIReg: linfo->args [i].storage = LLVMArgNormal; break; case ArgInDoubleSSEReg: case ArgInFloatSSEReg: linfo->args [i].storage = LLVMArgNormal; break; case ArgOnStack: if (mini_type_is_vtype (t)) { if (mono_class_value_size (mono_class_from_mono_type_internal (t), NULL) == 0) /* LLVM seems to allocate argument space for empty structures too */ linfo->args [i].storage = LLVMArgNone; else linfo->args [i].storage = LLVMArgVtypeByVal; } else { linfo->args [i].storage = LLVMArgNormal; } break; case ArgValuetypeInReg: if (sig->pinvoke) { cfg->exception_message = g_strdup ("pinvoke + vtypes"); cfg->disable_llvm = TRUE; return linfo; } cfg->exception_message = g_strdup ("vtype arg"); cfg->disable_llvm = TRUE; /* linfo->args [i].storage = LLVMArgVtypeInReg; for (j = 0; j < 2; ++j) linfo->args [i].pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, ainfo->pair_storage [j]); */ break; case ArgGSharedVt: linfo->args [i].storage = LLVMArgGSharedVt; break; default: cfg->exception_message = g_strdup ("ainfo->storage"); cfg->disable_llvm = TRUE; break; } } return linfo; } #endif static void emit_gc_param_slot_def (MonoCompile *cfg, int sp_offset, MonoType *t) { if (cfg->compute_gc_maps) { MonoInst *def; /* Needs checking if the feature will be enabled again */ g_assert_not_reached (); /* On x86, the offsets are from the sp value 
before the start of the call sequence */ if (t == NULL) t = mono_get_int_type (); EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, sp_offset, t); } } void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call) { MonoType *sig_ret; MonoInst *arg, *in; MonoMethodSignature *sig; int i, j, n; CallInfo *cinfo; int sentinelpos = 0, sp_offset = 0; sig = call->signature; n = sig->param_count + sig->hasthis; sig_ret = mini_get_underlying_type (sig->ret); cinfo = get_call_info (cfg->mempool, sig); call->call_info = cinfo; if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) sentinelpos = sig->sentinelpos + (sig->hasthis ? 1 : 0); if (sig_ret && MONO_TYPE_ISSTRUCT (sig_ret)) { if (cinfo->ret.storage == ArgValuetypeInReg && cinfo->ret.pair_storage[0] != ArgNone ) { /* * Tell the JIT to use a more efficient calling convention: call using * OP_CALL, compute the result location after the call, and save the * result there. */ call->vret_in_reg = TRUE; #if defined (__APPLE__) if (cinfo->ret.pair_storage [0] == ArgOnDoubleFpStack || cinfo->ret.pair_storage [0] == ArgOnFloatFpStack) call->vret_in_reg_fp = TRUE; #endif if (call->vret_var) NULLIFY_INS (call->vret_var); } } // FIXME: Emit EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF everywhere /* Handle the case where there are no implicit arguments */ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sentinelpos)) { emit_sig_cookie (cfg, call, cinfo); sp_offset = cinfo->sig_cookie.offset; emit_gc_param_slot_def (cfg, sp_offset, NULL); } /* Arguments are pushed in the reverse order */ for (i = n - 1; i >= 0; i --) { ArgInfo *ainfo = cinfo->args + i; MonoType *orig_type, *t; int argsize; if (cinfo->vtype_retaddr && cinfo->vret_arg_index == 1 && i == 0) { MonoInst *vtarg; /* Push the vret arg before the first argument */ MONO_INST_NEW (cfg, vtarg, OP_STORE_MEMBASE_REG); vtarg->type = STACK_MP; vtarg->inst_destbasereg = X86_ESP; vtarg->sreg1 = call->vret_var->dreg; vtarg->inst_offset = cinfo->ret.offset; MONO_ADD_INS (cfg->cbb, vtarg); emit_gc_param_slot_def (cfg, cinfo->ret.offset, NULL); } if (i >= sig->hasthis) t = sig->params [i - sig->hasthis]; else t = mono_get_int_type (); orig_type = t; t = mini_get_underlying_type (t); MONO_INST_NEW (cfg, arg, OP_X86_PUSH); in = call->args [i]; arg->cil_code = in->cil_code; arg->sreg1 = in->dreg; arg->type = in->type; g_assert (in->dreg != -1); if (ainfo->storage == ArgGSharedVt) { arg->opcode = OP_OUTARG_VT; arg->sreg1 = in->dreg; arg->klass = in->klass; arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo)); sp_offset += 4; MONO_ADD_INS (cfg->cbb, arg); } else if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(t))) { guint32 align; guint32 size; g_assert (in->klass); if (t->type == MONO_TYPE_TYPEDBYREF) { size = MONO_ABI_SIZEOF (MonoTypedRef); align = sizeof (target_mgreg_t); } else { size = mini_type_stack_size_full (m_class_get_byval_arg (in->klass), &align, sig->pinvoke && !sig->marshalling_disabled); } if (size > 0 || ainfo->pass_empty_struct) { arg->opcode = OP_OUTARG_VT; arg->sreg1 = in->dreg; arg->klass = in->klass; arg->backend.size = size; arg->inst_p0 = call; arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, arg); if (ainfo->storage != ArgValuetypeInReg) { emit_gc_param_slot_def (cfg, ainfo->offset, orig_type); } } } else { switch (ainfo->storage) { case ArgOnStack: if (!m_type_is_byref (t)) { if (t->type == MONO_TYPE_R4) { 
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, X86_ESP, ainfo->offset, in->dreg); argsize = 4; } else if (t->type == MONO_TYPE_R8) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, X86_ESP, ainfo->offset, in->dreg); argsize = 8; } else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_U8) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, ainfo->offset + 4, MONO_LVREG_MS (in->dreg)); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, ainfo->offset, MONO_LVREG_LS (in->dreg)); argsize = 4; } else { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, ainfo->offset, in->dreg); argsize = 4; } } else { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, ainfo->offset, in->dreg); argsize = 4; } break; case ArgInIReg: arg->opcode = OP_MOVE; arg->dreg = ainfo->reg; MONO_ADD_INS (cfg->cbb, arg); argsize = 0; break; default: g_assert_not_reached (); } if (cfg->compute_gc_maps) { if (argsize == 4) { /* FIXME: The == STACK_OBJ check might be fragile ? */ if (sig->hasthis && i == 0 && call->args [i]->type == STACK_OBJ) { /* this */ if (call->need_unbox_trampoline) /* The unbox trampoline transforms this into a managed pointer */ emit_gc_param_slot_def (cfg, ainfo->offset, mono_class_get_byref_type (mono_defaults.int_class)); else emit_gc_param_slot_def (cfg, ainfo->offset, mono_get_object_type ()); } else { emit_gc_param_slot_def (cfg, ainfo->offset, orig_type); } } else { /* i8/r8 */ for (j = 0; j < argsize; j += 4) emit_gc_param_slot_def (cfg, ainfo->offset + j, NULL); } } } if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sentinelpos)) { /* Emit the signature cookie just before the implicit arguments */ emit_sig_cookie (cfg, call, cinfo); emit_gc_param_slot_def (cfg, cinfo->sig_cookie.offset, NULL); } } if (sig_ret && (MONO_TYPE_ISSTRUCT (sig_ret) || cinfo->vtype_retaddr)) { MonoInst *vtarg; if (cinfo->ret.storage == ArgValuetypeInReg) { /* Already done */ } else if (cinfo->ret.storage == ArgInIReg) { NOT_IMPLEMENTED; /* The return address is passed in a register */ MONO_INST_NEW (cfg, vtarg, OP_MOVE); vtarg->sreg1 = call->inst.dreg; vtarg->dreg = mono_alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, vtarg); mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE); } else if (cinfo->vtype_retaddr && cinfo->vret_arg_index == 0) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, cinfo->ret.offset, call->vret_var->dreg); emit_gc_param_slot_def (cfg, cinfo->ret.offset, NULL); } } call->stack_usage = cinfo->stack_usage; call->stack_align_amount = cinfo->stack_align_amount; } void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src) { MonoCallInst *call = (MonoCallInst*)ins->inst_p0; ArgInfo *ainfo = (ArgInfo*)ins->inst_p1; int size = ins->backend.size; if (ainfo->storage == ArgValuetypeInReg) { int dreg = mono_alloc_ireg (cfg); switch (size) { case 1: MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, 0); break; case 2: MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, 0); break; case 4: MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 0); break; case 3: /* FIXME */ default: g_assert_not_reached (); } mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, FALSE); } else { if (cfg->gsharedvt && mini_is_gsharedvt_klass (ins->klass)) { /* Pass by addr */ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, ainfo->offset, src->dreg); } else if (size <= 4) { int dreg = mono_alloc_ireg (cfg); if 
(ainfo->pass_empty_struct) { //Pass empty struct value as 0 on platforms representing empty structs as 1 byte. MONO_EMIT_NEW_ICONST (cfg, dreg, 0); } else { MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 0); } MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, ainfo->offset, dreg); } else if (size <= 20) { mini_emit_memcpy (cfg, X86_ESP, ainfo->offset, src->dreg, 0, size, 4); } else { // FIXME: Code growth mini_emit_memcpy (cfg, X86_ESP, ainfo->offset, src->dreg, 0, size, 4); } } } void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val) { MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret); if (!m_type_is_byref (ret)) { if (ret->type == MONO_TYPE_R4) { if (COMPILE_LLVM (cfg)) MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg); /* Nothing to do */ return; } else if (ret->type == MONO_TYPE_R8) { if (COMPILE_LLVM (cfg)) MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg); /* Nothing to do */ return; } else if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) { if (COMPILE_LLVM (cfg)) MONO_EMIT_NEW_UNALU (cfg, OP_LMOVE, cfg->ret->dreg, val->dreg); else { MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, X86_EAX, MONO_LVREG_LS (val->dreg)); MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, X86_EDX, MONO_LVREG_MS (val->dreg)); } return; } } MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg); } #define EMIT_COND_BRANCH(ins,cond,sign) \ if (ins->inst_true_bb->native_offset) { \ x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \ } else { \ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \ if ((cfg->opt & MONO_OPT_BRANCH) && \ x86_is_imm8 (ins->inst_true_bb->max_offset - cpos)) \ x86_branch8 (code, cond, 0, sign); \ else \ x86_branch32 (code, cond, 0, sign); \ } /* * Emit an exception if condition is fail and * if possible do a directly branch to target */ #define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \ do { \ MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \ if (tins == NULL) { \ mono_add_patch_info (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_EXC, exc_name); \ x86_branch32 (code, cond, 0, signed); \ } else { \ EMIT_COND_BRANCH (tins, cond, signed); \ } \ } while (0); #define EMIT_FPCOMPARE(code) do { \ x86_fcompp (code); \ x86_fnstsw (code); \ } while (0); static guint8* x86_align_and_patch (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data) { gboolean needs_paddings = TRUE; guint32 pad_size; MonoJumpInfo *jinfo = NULL; if (cfg->abs_patches) { jinfo = (MonoJumpInfo*)g_hash_table_lookup (cfg->abs_patches, data); if (jinfo && (jinfo->type == MONO_PATCH_INFO_JIT_ICALL_ADDR || jinfo->type == MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR)) needs_paddings = FALSE; } if (cfg->compile_aot) needs_paddings = FALSE; /*The address must be 4 bytes aligned to avoid spanning multiple cache lines. This is required for code patching to be safe on SMP machines. 
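 * (code + 1 skips the one-byte call opcode emitted next, so it is the
 * 4-byte displacement that ends up aligned and can be patched atomically)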
 */
	pad_size = (guint32)(code + 1 - cfg->native_code) & 0x3;
	if (needs_paddings && pad_size)
		x86_padding (code, 4 - pad_size);

	mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data);

	return code;
}

static guint8*
emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data)
{
	code = x86_align_and_patch (cfg, code, patch_type, data);

	x86_call_code (code, 0);

	return code;
}

#define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_IADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_ISBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB_IMM)))

/*
 * mono_peephole_pass_1:
 *
 * Perform peephole opts which should/can be performed before local regalloc
 */
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		MonoInst *last_ins = mono_inst_prev (ins, FILTER_IL_SEQ_POINT);

		switch (ins->opcode) {
		case OP_IADD_IMM:
		case OP_ADD_IMM:
			if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS)) {
				/*
				 * X86_LEA is like ADD, but doesn't have the
				 * sreg1==dreg restriction.
				 */
				ins->opcode = OP_X86_LEA_MEMBASE;
				ins->inst_basereg = ins->sreg1;
			} else if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
				ins->opcode = OP_X86_INC_REG;
			break;
		case OP_SUB_IMM:
		case OP_ISUB_IMM:
			if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS)) {
				ins->opcode = OP_X86_LEA_MEMBASE;
				ins->inst_basereg = ins->sreg1;
				ins->inst_imm = -ins->inst_imm;
			} else if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
				ins->opcode = OP_X86_DEC_REG;
			break;
		case OP_COMPARE_IMM:
		case OP_ICOMPARE_IMM:
			/* OP_COMPARE_IMM (reg, 0)
			 * -->
			 * OP_X86_TEST_NULL (reg)
			 */
			if (!ins->inst_imm)
				ins->opcode = OP_X86_TEST_NULL;
			break;
		case OP_X86_COMPARE_MEMBASE_IMM:
			/*
			 * OP_STORE_MEMBASE_REG reg, offset(basereg)
			 * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
			 * -->
			 * OP_STORE_MEMBASE_REG reg, offset(basereg)
			 * OP_COMPARE_IMM reg, imm
			 *
			 * Note: if imm = 0 then OP_COMPARE_IMM is replaced with OP_X86_TEST_NULL
			 */
			if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				ins->opcode = OP_COMPARE_IMM;
				ins->sreg1 = last_ins->sreg1;

				/* check if we can remove cmp reg,0 with test null */
				if (!ins->inst_imm)
					ins->opcode = OP_X86_TEST_NULL;
			}
			break;
		case OP_X86_PUSH_MEMBASE:
			if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG ||
			                 last_ins->opcode == OP_STORE_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				ins->opcode = OP_X86_PUSH;
				ins->sreg1 = last_ins->sreg1;
			}
			break;
		}

		mono_peephole_ins (bb, ins);
	}
}

void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_ICONST:
			/* reg = 0 -> XOR (reg, reg) */
			/* XOR sets cflags on x86, so we can't do it always */
			if (ins->inst_c0 == 0 && (!ins->next || (ins->next && INST_IGNORES_CFLAGS (ins->next->opcode)))) {
				MonoInst *ins2;

				ins->opcode = OP_IXOR;
				ins->sreg1 = ins->dreg;
				ins->sreg2 = ins->dreg;

				/*
				 * Convert succeeding STORE_MEMBASE_IMM 0 ins to STORE_MEMBASE_REG
				 * since it takes 3 bytes instead of 7.
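				 * e.g. "xor eax,eax; mov dword [ebp-8], 0" becomes
				 * "xor eax,eax; mov [ebp-8], eax".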
				 */
				for (ins2 = mono_inst_next (ins, FILTER_IL_SEQ_POINT); ins2; ins2 = ins2->next) {
					if ((ins2->opcode == OP_STORE_MEMBASE_IMM) && (ins2->inst_imm == 0)) {
						ins2->opcode = OP_STORE_MEMBASE_REG;
						ins2->sreg1 = ins->dreg;
					} else if ((ins2->opcode == OP_STOREI4_MEMBASE_IMM) && (ins2->inst_imm == 0)) {
						ins2->opcode = OP_STOREI4_MEMBASE_REG;
						ins2->sreg1 = ins->dreg;
					} else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM)) {
						/* Continue iteration */
					} else
						break;
				}
			}
			break;
		case OP_IADD_IMM:
		case OP_ADD_IMM:
			if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
				ins->opcode = OP_X86_INC_REG;
			break;
		case OP_ISUB_IMM:
		case OP_SUB_IMM:
			if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
				ins->opcode = OP_X86_DEC_REG;
			break;
		}

		mono_peephole_ins (bb, ins);
	}
}

#define NEW_INS(cfg,ins,dest,op) do {	\
		MONO_INST_NEW ((cfg), (dest), (op)); \
		(dest)->cil_code = (ins)->cil_code; \
		mono_bblock_insert_before_ins (bb, ins, (dest)); \
	} while (0)

/*
 * mono_arch_lowering_pass:
 *
 * Converts complex opcodes into simpler ones so that each IR instruction
 * corresponds to one machine instruction.
 */
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *next;

	/*
	 * FIXME: Need to add more instructions, but the current machine
	 * description can't model some parts of the composite instructions like
	 * cdq.
	 */
	MONO_BB_FOR_EACH_INS_SAFE (bb, next, ins) {
		switch (ins->opcode) {
		case OP_IREM_IMM:
		case OP_IDIV_IMM:
		case OP_IDIV_UN_IMM:
		case OP_IREM_UN_IMM:
			/*
			 * Keep the cases where we could generate optimized code, otherwise convert
			 * to the non-imm variant.
			 */
			if ((ins->opcode == OP_IREM_IMM) && mono_is_power_of_two (ins->inst_imm) >= 0)
				break;
			mono_decompose_op_imm (cfg, bb, ins);
			break;
#ifdef MONO_ARCH_SIMD_INTRINSICS
		case OP_EXPAND_I1: {
			MonoInst *temp;
			int temp_reg1 = mono_alloc_ireg (cfg);
			int temp_reg2 = mono_alloc_ireg (cfg);
			int original_reg = ins->sreg1;

			NEW_INS (cfg, ins, temp, OP_ICONV_TO_U1);
			temp->sreg1 = original_reg;
			temp->dreg = temp_reg1;

			NEW_INS (cfg, ins, temp, OP_SHL_IMM);
			temp->sreg1 = temp_reg1;
			temp->dreg = temp_reg2;
			temp->inst_imm = 8;

			NEW_INS (cfg, ins, temp, OP_IOR);
			temp->sreg1 = temp->dreg = temp_reg2;
			temp->sreg2 = temp_reg1;

			ins->opcode = OP_EXPAND_I2;
			ins->sreg1 = temp_reg2;
		}
			break;
#endif
		default:
			break;
		}
	}

	bb->max_vreg = cfg->next_vreg;
}

static const int
branch_cc_table [] = {
	X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
	X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
	X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
};

/* Maps CMP_... constants to X86_CC_...
   constants */
static const int
cc_table [] = {
	X86_CC_EQ, X86_CC_NE, X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT,
	X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT
};

static const int
cc_signed_table [] = {
	TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
	FALSE, FALSE, FALSE, FALSE
};

static unsigned char*
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int size, gboolean is_signed)
{
#define XMM_TEMP_REG 0
	/* This SSE2 optimization must not be done with OPT_SIMD in place as it clobbers xmm0. */
	/* The xmm pass decomposes OP_FCONV_ ops anyway. */
	if (cfg->opt & MONO_OPT_SSE2 && size < 8 && !(cfg->opt & MONO_OPT_SIMD)) {
		/* optimize by assigning a local var for this use so we avoid
		 * the stack manipulations */
		x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
		x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
		x86_movsd_reg_membase (code, XMM_TEMP_REG, X86_ESP, 0);
		x86_cvttsd2si (code, dreg, XMM_TEMP_REG);
		x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
		if (size == 1)
			x86_widen_reg (code, dreg, dreg, is_signed, FALSE);
		else if (size == 2)
			x86_widen_reg (code, dreg, dreg, is_signed, TRUE);
		return code;
	}
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
	x86_fnstcw_membase(code, X86_ESP, 0);
	x86_mov_reg_membase (code, dreg, X86_ESP, 0, 2);
	x86_alu_reg_imm (code, X86_OR, dreg, 0xc00);
	x86_mov_membase_reg (code, X86_ESP, 2, dreg, 2);
	x86_fldcw_membase (code, X86_ESP, 2);
	if (size == 8) {
		x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
		x86_fist_pop_membase (code, X86_ESP, 0, TRUE);
		x86_pop_reg (code, dreg);
		/* FIXME: need the high register
		 * x86_pop_reg (code, dreg_high);
		 */
	} else {
		x86_push_reg (code, X86_EAX); // SP = SP - 4
		x86_fist_pop_membase (code, X86_ESP, 0, FALSE);
		x86_pop_reg (code, dreg);
	}
	x86_fldcw_membase (code, X86_ESP, 0);
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);

	if (size == 1)
		x86_widen_reg (code, dreg, dreg, is_signed, FALSE);
	else if (size == 2)
		x86_widen_reg (code, dreg, dreg, is_signed, TRUE);
	return code;
}

static unsigned char*
mono_emit_stack_alloc (MonoCompile *cfg, guchar *code, MonoInst* tree)
{
	int sreg = tree->sreg1;
	int need_touch = FALSE;

#if defined (TARGET_WIN32) || defined (MONO_ARCH_SIGSEGV_ON_ALTSTACK)
	need_touch = TRUE;
#endif

	if (need_touch) {
		guint8* br[5];

		/*
		 * Under Windows:
		 * If requested stack size is larger than one page,
		 * perform stack-touch operation
		 */
		/*
		 * Generate stack probe code.
		 * Under Windows, it is necessary to allocate one page at a time,
		 * "touching" stack after each successful sub-allocation. This is
		 * because of the way stack growth is implemented - there is a
		 * guard page before the lowest stack page that is currently committed.
		 * Stack normally grows sequentially so OS traps access to the
		 * guard page and commits more pages when needed.
		 */
		x86_test_reg_imm (code, sreg, ~0xFFF);
		br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);

		br[2] = code; /* loop */
		x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000);
		x86_test_membase_reg (code, X86_ESP, 0, X86_ESP);

		/*
		 * By the end of the loop, sreg is smaller than 0x1000, so the init routine
		 * that follows only initializes the last part of the area.
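		 * For example, a 0x2500-byte alloca touches two full 0x1000-byte
		 * pages in the loop and leaves sreg == 0x500 for the final
		 * "sub esp, sreg" tail allocation below.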
*/ /* Same as the init code below with size==0x1000 */ if (tree->flags & MONO_INST_INIT) { x86_push_reg (code, X86_EAX); x86_push_reg (code, X86_ECX); x86_push_reg (code, X86_EDI); x86_mov_reg_imm (code, X86_ECX, (0x1000 >> 2)); x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX); if (cfg->param_area) x86_lea_membase (code, X86_EDI, X86_ESP, 12 + ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT)); else x86_lea_membase (code, X86_EDI, X86_ESP, 12); x86_cld (code); x86_prefix (code, X86_REP_PREFIX); x86_stosl (code); x86_pop_reg (code, X86_EDI); x86_pop_reg (code, X86_ECX); x86_pop_reg (code, X86_EAX); } x86_alu_reg_imm (code, X86_SUB, sreg, 0x1000); x86_alu_reg_imm (code, X86_CMP, sreg, 0x1000); br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE); x86_patch (br[3], br[2]); x86_test_reg_reg (code, sreg, sreg); br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE); x86_alu_reg_reg (code, X86_SUB, X86_ESP, sreg); br[1] = code; x86_jump8 (code, 0); x86_patch (br[0], code); x86_alu_reg_reg (code, X86_SUB, X86_ESP, sreg); x86_patch (br[1], code); x86_patch (br[4], code); } else x86_alu_reg_reg (code, X86_SUB, X86_ESP, tree->sreg1); if (tree->flags & MONO_INST_INIT) { int offset = 0; if (tree->dreg != X86_EAX && sreg != X86_EAX) { x86_push_reg (code, X86_EAX); offset += 4; } if (tree->dreg != X86_ECX && sreg != X86_ECX) { x86_push_reg (code, X86_ECX); offset += 4; } if (tree->dreg != X86_EDI && sreg != X86_EDI) { x86_push_reg (code, X86_EDI); offset += 4; } x86_shift_reg_imm (code, X86_SHR, sreg, 2); x86_mov_reg_reg (code, X86_ECX, sreg); x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX); if (cfg->param_area) x86_lea_membase (code, X86_EDI, X86_ESP, offset + ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT)); else x86_lea_membase (code, X86_EDI, X86_ESP, offset); x86_cld (code); x86_prefix (code, X86_REP_PREFIX); x86_stosl (code); if (tree->dreg != X86_EDI && sreg != X86_EDI) x86_pop_reg (code, X86_EDI); if (tree->dreg != X86_ECX && sreg != X86_ECX) x86_pop_reg (code, X86_ECX); if (tree->dreg != X86_EAX && sreg != X86_EAX) x86_pop_reg (code, X86_EAX); } return code; } static guint8* emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code) { /* Move return value to the target register */ switch (ins->opcode) { case OP_CALL: case OP_CALL_REG: case OP_CALL_MEMBASE: x86_mov_reg_reg (code, ins->dreg, X86_EAX); break; default: break; } return code; } #ifdef TARGET_MACH static int tls_gs_offset; #endif gboolean mono_arch_have_fast_tls (void) { #ifdef TARGET_MACH static gboolean have_fast_tls = FALSE; static gboolean inited = FALSE; guint32 *ins; if (mini_debug_options.use_fallback_tls) return FALSE; if (inited) return have_fast_tls; ins = (guint32*)pthread_getspecific; /* * We're looking for these two instructions: * * mov 0x4(%esp),%eax * mov %gs:[offset](,%eax,4),%eax */ have_fast_tls = ins [0] == 0x0424448b && ins [1] == 0x85048b65; tls_gs_offset = ins [2]; inited = TRUE; return have_fast_tls; #elif defined(TARGET_ANDROID) return FALSE; #else if (mini_debug_options.use_fallback_tls) return FALSE; return TRUE; #endif } static guint8* mono_x86_emit_tls_get (guint8* code, int dreg, int tls_offset) { #if defined (TARGET_MACH) x86_prefix (code, X86_GS_PREFIX); x86_mov_reg_mem (code, dreg, tls_gs_offset + (tls_offset * 4), 4); #elif defined (TARGET_WIN32) /* * See the Under the Hood article in the May 1996 issue of Microsoft Systems * Journal and/or a disassembly of the TlsGet () function. 
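 *
 * In short: fs:[0x18] holds the linear TEB address, the 64 inline TLS
 * slots start at TEB+0xE10 (3600 below), and TlsExpansionSlots (used for
 * offsets >= 64) lives at TEB+0xF94.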
*/ x86_prefix (code, X86_FS_PREFIX); x86_mov_reg_mem (code, dreg, 0x18, 4); if (tls_offset < 64) { x86_mov_reg_membase (code, dreg, dreg, 3600 + (tls_offset * 4), 4); } else { guint8 *buf [16]; g_assert (tls_offset < 0x440); /* Load TEB->TlsExpansionSlots */ x86_mov_reg_membase (code, dreg, dreg, 0xf94, 4); x86_test_reg_reg (code, dreg, dreg); buf [0] = code; x86_branch (code, X86_CC_EQ, code, TRUE); x86_mov_reg_membase (code, dreg, dreg, (tls_offset * 4) - 0x100, 4); x86_patch (buf [0], code); } #else if (optimize_for_xen) { x86_prefix (code, X86_GS_PREFIX); x86_mov_reg_mem (code, dreg, 0, 4); x86_mov_reg_membase (code, dreg, dreg, tls_offset, 4); } else { x86_prefix (code, X86_GS_PREFIX); x86_mov_reg_mem (code, dreg, tls_offset, 4); } #endif return code; } static guint8* mono_x86_emit_tls_set (guint8* code, int sreg, int tls_offset) { #if defined (TARGET_MACH) x86_prefix (code, X86_GS_PREFIX); x86_mov_mem_reg (code, tls_gs_offset + (tls_offset * 4), sreg, 4); #elif defined (TARGET_WIN32) g_assert_not_reached (); #else x86_prefix (code, X86_GS_PREFIX); x86_mov_mem_reg (code, tls_offset, sreg, 4); #endif return code; } /* * emit_setup_lmf: * * Emit code to initialize an LMF structure at LMF_OFFSET. */ static guint8* emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset) { /* save all caller saved regs */ x86_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebx), X86_EBX, sizeof (target_mgreg_t)); mono_emit_unwind_op_offset (cfg, code, X86_EBX, - cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebx)); x86_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, edi), X86_EDI, sizeof (target_mgreg_t)); mono_emit_unwind_op_offset (cfg, code, X86_EDI, - cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, edi)); x86_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, esi), X86_ESI, sizeof (target_mgreg_t)); mono_emit_unwind_op_offset (cfg, code, X86_ESI, - cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, esi)); x86_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebp), X86_EBP, sizeof (target_mgreg_t)); /* save the current IP */ if (cfg->compile_aot) { /* This pushes the current ip */ x86_call_imm (code, 0); x86_pop_reg (code, X86_EAX); } else { mono_add_patch_info (cfg, code + 1 - cfg->native_code, MONO_PATCH_INFO_IP, NULL); x86_mov_reg_imm (code, X86_EAX, 0); } x86_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, eip), X86_EAX, sizeof (target_mgreg_t)); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, eip), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebp), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, esi), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, edi), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebx), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, esp), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, method), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + 
MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF);

	return code;
}

#ifdef TARGET_WIN32

#define TEB_LAST_ERROR_OFFSET 0x34

static guint8*
emit_get_last_error (guint8* code, int dreg)
{
	/* The thread's last error value is located at TEB_LAST_ERROR_OFFSET. */
	x86_prefix (code, X86_FS_PREFIX);
	x86_mov_reg_mem (code, dreg, TEB_LAST_ERROR_OFFSET, sizeof (guint32));
	return code;
}

#else

static guint8*
emit_get_last_error (guint8* code, int dreg)
{
	g_assert_not_reached ();
}

#endif

/* benchmark and set based on cpu */
#define LOOP_ALIGNMENT 8
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)

#ifndef DISABLE_JIT

void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins;
	MonoCallInst *call;
	guint8 *code = cfg->native_code + cfg->code_len;

	if (cfg->opt & MONO_OPT_LOOP) {
		int pad, align = LOOP_ALIGNMENT;
		/* set alignment depending on cpu */
		if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) {
			pad = align - pad;
			/*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
			x86_padding (code, pad);
			cfg->code_len += pad;
			bb->native_offset = cfg->code_len;
		}
	}

	if (cfg->verbose_level > 2)
		g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);

	int cpos = bb->max_offset;

	set_code_cursor (cfg, code);

	mono_debug_open_block (cfg, bb, code - cfg->native_code);

	if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num)
		x86_breakpoint (code);

	MONO_BB_FOR_EACH_INS (bb, ins) {
		const guint offset = code - cfg->native_code;
		set_code_cursor (cfg, code);
		int max_len = ins_get_size (ins->opcode);
		code = realloc_code (cfg, max_len);

		if (cfg->debug_info)
			mono_debug_record_line_number (cfg, ins, offset);

		switch (ins->opcode) {
		case OP_BIGMUL:
			x86_mul_reg (code, ins->sreg2, TRUE);
			break;
		case OP_BIGMUL_UN:
			x86_mul_reg (code, ins->sreg2, FALSE);
			break;
		case OP_X86_SETEQ_MEMBASE:
		case OP_X86_SETNE_MEMBASE:
			x86_set_membase (code, ins->opcode == OP_X86_SETEQ_MEMBASE ?
X86_CC_EQ : X86_CC_NE, ins->inst_basereg, ins->inst_offset, TRUE); break; case OP_STOREI1_MEMBASE_IMM: x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1); break; case OP_STOREI2_MEMBASE_IMM: x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2); break; case OP_STORE_MEMBASE_IMM: case OP_STOREI4_MEMBASE_IMM: x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_STOREI1_MEMBASE_REG: x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1); break; case OP_STOREI2_MEMBASE_REG: x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2); break; case OP_STORE_MEMBASE_REG: case OP_STOREI4_MEMBASE_REG: x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4); break; case OP_LOADU4_MEM: x86_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4); break; case OP_LOAD_MEM: case OP_LOADI4_MEM: /* These are created by the cprop pass so they use inst_imm as the source */ x86_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4); break; case OP_LOADU1_MEM: x86_widen_mem (code, ins->dreg, ins->inst_imm, FALSE, FALSE); break; case OP_LOADU2_MEM: x86_widen_mem (code, ins->dreg, ins->inst_imm, FALSE, TRUE); break; case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: case OP_LOADU4_MEMBASE: x86_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4); break; case OP_LOADU1_MEMBASE: x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE); break; case OP_LOADI1_MEMBASE: x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE); break; case OP_LOADU2_MEMBASE: x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE); break; case OP_LOADI2_MEMBASE: x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE); break; case OP_ICONV_TO_I1: case OP_SEXT_I1: x86_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE); break; case OP_ICONV_TO_I2: case OP_SEXT_I2: x86_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE); break; case OP_ICONV_TO_U1: x86_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE); break; case OP_ICONV_TO_U2: x86_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE); break; case OP_COMPARE: case OP_ICOMPARE: x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); break; case OP_COMPARE_IMM: case OP_ICOMPARE_IMM: x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm); break; case OP_X86_COMPARE_MEMBASE_REG: x86_alu_membase_reg (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2); break; case OP_X86_COMPARE_MEMBASE_IMM: x86_alu_membase_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm); break; case OP_X86_COMPARE_MEMBASE8_IMM: x86_alu_membase8_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm); break; case OP_X86_COMPARE_REG_MEMBASE: x86_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_X86_COMPARE_MEM_IMM: x86_alu_mem_imm (code, X86_CMP, ins->inst_offset, ins->inst_imm); break; case OP_X86_TEST_NULL: x86_test_reg_reg (code, ins->sreg1, ins->sreg1); break; case OP_X86_ADD_MEMBASE_IMM: x86_alu_membase_imm (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm); break; case OP_X86_ADD_REG_MEMBASE: x86_alu_reg_membase (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_X86_SUB_MEMBASE_IMM: x86_alu_membase_imm (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm); break; 
case OP_X86_SUB_REG_MEMBASE: x86_alu_reg_membase (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_X86_AND_MEMBASE_IMM: x86_alu_membase_imm (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm); break; case OP_X86_OR_MEMBASE_IMM: x86_alu_membase_imm (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm); break; case OP_X86_XOR_MEMBASE_IMM: x86_alu_membase_imm (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm); break; case OP_X86_ADD_MEMBASE_REG: x86_alu_membase_reg (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2); break; case OP_X86_SUB_MEMBASE_REG: x86_alu_membase_reg (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2); break; case OP_X86_AND_MEMBASE_REG: x86_alu_membase_reg (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2); break; case OP_X86_OR_MEMBASE_REG: x86_alu_membase_reg (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->sreg2); break; case OP_X86_XOR_MEMBASE_REG: x86_alu_membase_reg (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2); break; case OP_X86_INC_MEMBASE: x86_inc_membase (code, ins->inst_basereg, ins->inst_offset); break; case OP_X86_INC_REG: x86_inc_reg (code, ins->dreg); break; case OP_X86_DEC_MEMBASE: x86_dec_membase (code, ins->inst_basereg, ins->inst_offset); break; case OP_X86_DEC_REG: x86_dec_reg (code, ins->dreg); break; case OP_X86_MUL_REG_MEMBASE: x86_imul_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_X86_AND_REG_MEMBASE: x86_alu_reg_membase (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_X86_OR_REG_MEMBASE: x86_alu_reg_membase (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_X86_XOR_REG_MEMBASE: x86_alu_reg_membase (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_BREAK: x86_breakpoint (code); break; case OP_RELAXED_NOP: x86_prefix (code, X86_REP_PREFIX); x86_nop (code); break; case OP_HARD_NOP: x86_nop (code); break; case OP_NOP: case OP_DUMMY_USE: case OP_DUMMY_ICONST: case OP_DUMMY_R8CONST: case OP_DUMMY_R4CONST: case OP_NOT_REACHED: case OP_NOT_NULL: break; case OP_IL_SEQ_POINT: mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); break; case OP_SEQ_POINT: { int i; if (cfg->compile_aot) NOT_IMPLEMENTED; /* Have to use ecx as a temp reg since this can occur after OP_SETRET */ /* * We do this _before_ the breakpoint, so single stepping after * a breakpoint is hit will step to the next IL offset. */ if (ins->flags & MONO_INST_SINGLE_STEP_LOC) { MonoInst *var = cfg->arch.ss_tramp_var; guint8 *br [1]; g_assert (var); g_assert (var->opcode == OP_REGOFFSET); /* Load ss_tramp_var */ /* This is equal to &ss_trampoline */ x86_mov_reg_membase (code, X86_ECX, var->inst_basereg, var->inst_offset, sizeof (target_mgreg_t)); x86_mov_reg_membase (code, X86_ECX, X86_ECX, 0, sizeof (target_mgreg_t)); x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0); br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE); x86_call_reg (code, X86_ECX); x86_patch (br [0], code); } /* * Many parts of sdb depend on the ip after the single step trampoline call to be equal to the seq point offset. * This means we have to put the loading of bp_tramp_var after the offset. 
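			 * (sdb maps the native IP back to an IL offset by looking at the
			 * return address of that call, so the seq point must be recorded
			 * at exactly this spot)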
			 */
			mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);

			MonoInst *var = cfg->arch.bp_tramp_var;

			g_assert (var);
			g_assert (var->opcode == OP_REGOFFSET);
			/* Load the address of the bp trampoline */
			/* This needs to be constant size */
			guint8 *start = code;
			x86_mov_reg_membase (code, X86_ECX, var->inst_basereg, var->inst_offset, 4);
			if (code < start + OP_SEQ_POINT_BP_OFFSET) {
				int size = start + OP_SEQ_POINT_BP_OFFSET - code;
				x86_padding (code, size);
			}
			/*
			 * A placeholder for a possible breakpoint inserted by
			 * mono_arch_set_breakpoint ().
			 */
			for (i = 0; i < 2; ++i)
				x86_nop (code);
			/*
			 * Add an additional nop so skipping the bp doesn't cause the ip to point
			 * to another IL offset.
			 */
			x86_nop (code);
			break;
		}
		case OP_ADDCC:
		case OP_IADDCC:
		case OP_IADD:
			x86_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
			break;
		case OP_ADC:
		case OP_IADC:
			x86_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
			break;
		case OP_ADDCC_IMM:
		case OP_ADD_IMM:
		case OP_IADD_IMM:
			x86_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
			break;
		case OP_ADC_IMM:
		case OP_IADC_IMM:
			x86_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
			break;
		case OP_SUBCC:
		case OP_ISUBCC:
		case OP_ISUB:
			x86_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
			break;
		case OP_SBB:
		case OP_ISBB:
			x86_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
			break;
		case OP_SUBCC_IMM:
		case OP_SUB_IMM:
		case OP_ISUB_IMM:
			x86_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
			break;
		case OP_SBB_IMM:
		case OP_ISBB_IMM:
			x86_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
			break;
		case OP_IAND:
			x86_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
			break;
		case OP_AND_IMM:
		case OP_IAND_IMM:
			x86_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
			break;
		case OP_IDIV:
		case OP_IREM:
			/*
			 * The code is the same for div/rem, the allocator will allocate dreg
			 * to EAX/EDX as appropriate.
			 */
			if (ins->sreg2 == X86_EDX) {
				/* cdq clobbers this */
				x86_push_reg (code, ins->sreg2);
				x86_cdq (code);
				x86_div_membase (code, X86_ESP, 0, TRUE);
				x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
			} else {
				x86_cdq (code);
				x86_div_reg (code, ins->sreg2, TRUE);
			}
			break;
		case OP_IDIV_UN:
		case OP_IREM_UN:
			if (ins->sreg2 == X86_EDX) {
				x86_push_reg (code, ins->sreg2);
				x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX);
				x86_div_membase (code, X86_ESP, 0, FALSE);
				x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
			} else {
				x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX);
				x86_div_reg (code, ins->sreg2, FALSE);
			}
			break;
		case OP_DIV_IMM:
			x86_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
			x86_cdq (code);
			x86_div_reg (code, ins->sreg2, TRUE);
			break;
		case OP_IREM_IMM: {
			int power = mono_is_power_of_two (ins->inst_imm);

			g_assert (ins->sreg1 == X86_EAX);
			g_assert (ins->dreg == X86_EAX);
			g_assert (power >= 0);

			if (power == 1) {
				/* Based on http://compilers.iecc.com/comparch/article/93-04-079 */
				x86_cdq (code);
				x86_alu_reg_imm (code, X86_AND, X86_EAX, 1);
				/*
				 * If the dividend is >= 0, this does nothing. If it is negative,
				 * it transforms %eax=0 into %eax=0, and %eax=1 into %eax=-1.
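				 * Worked example for n % 2: for n = -7, cdq sets edx = -1;
				 * "and eax, 1" leaves eax = 1; the xor/sub pair below then
				 * yields eax = -1, matching truncated-remainder semantics.
				 * For n = 7, edx = 0 and the xor/sub pair is a no-op.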
				 */
				x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EDX);
				x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
			} else if (power == 0) {
				x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
			} else {
				/* Based on gcc code */

				/* Add compensation for negative dividends */
				x86_cdq (code);
				x86_shift_reg_imm (code, X86_SHR, X86_EDX, 32 - power);
				x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_EDX);
				/* Compute remainder */
				x86_alu_reg_imm (code, X86_AND, X86_EAX, (1 << power) - 1);
				/* Remove compensation */
				x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
			}
			break;
		}
		case OP_IOR:
			x86_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
			break;
		case OP_OR_IMM:
		case OP_IOR_IMM:
			x86_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
			break;
		case OP_IXOR:
			x86_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
			break;
		case OP_XOR_IMM:
		case OP_IXOR_IMM:
			x86_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
			break;
		case OP_ISHL:
			g_assert (ins->sreg2 == X86_ECX);
			x86_shift_reg (code, X86_SHL, ins->dreg);
			break;
		case OP_ISHR:
			g_assert (ins->sreg2 == X86_ECX);
			x86_shift_reg (code, X86_SAR, ins->dreg);
			break;
		case OP_SHR_IMM:
		case OP_ISHR_IMM:
			x86_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
			break;
		case OP_SHR_UN_IMM:
		case OP_ISHR_UN_IMM:
			x86_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
			break;
		case OP_ISHR_UN:
			g_assert (ins->sreg2 == X86_ECX);
			x86_shift_reg (code, X86_SHR, ins->dreg);
			break;
		case OP_SHL_IMM:
		case OP_ISHL_IMM:
			x86_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
			break;
		case OP_LSHL: {
			guint8 *jump_to_end;

			/* handle shifts below 32 bits */
			x86_shld_reg (code, ins->backend.reg3, ins->sreg1);
			x86_shift_reg (code, X86_SHL, ins->sreg1);

			x86_test_reg_imm (code, X86_ECX, 32);
			jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);

			/* handle shifts over 31 bits */
			x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1);
			x86_clear_reg (code, ins->sreg1);

			x86_patch (jump_to_end, code);
		}
			break;
		case OP_LSHR: {
			guint8 *jump_to_end;

			/* handle shifts below 32 bits */
			x86_shrd_reg (code, ins->sreg1, ins->backend.reg3);
			x86_shift_reg (code, X86_SAR, ins->backend.reg3);

			x86_test_reg_imm (code, X86_ECX, 32);
			jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

			/* handle shifts over 31 bits */
			x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3);
			x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 31);

			x86_patch (jump_to_end, code);
		}
			break;
		case OP_LSHR_UN: {
			guint8 *jump_to_end;

			/* handle shifts below 32 bits */
			x86_shrd_reg (code, ins->sreg1, ins->backend.reg3);
			x86_shift_reg (code, X86_SHR, ins->backend.reg3);

			x86_test_reg_imm (code, X86_ECX, 32);
			jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

			/* handle shifts over 31 bits */
			x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3);
			x86_clear_reg (code, ins->backend.reg3);

			x86_patch (jump_to_end, code);
		}
			break;
		case OP_LSHL_IMM:
			if (ins->inst_imm >= 32) {
				x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1);
				x86_clear_reg (code, ins->sreg1);
				x86_shift_reg_imm (code, X86_SHL, ins->backend.reg3, ins->inst_imm - 32);
			} else {
				x86_shld_reg_imm (code, ins->backend.reg3, ins->sreg1, ins->inst_imm);
				x86_shift_reg_imm (code, X86_SHL, ins->sreg1, ins->inst_imm);
			}
			break;
		case OP_LSHR_IMM:
			if (ins->inst_imm >= 32) {
				x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3);
				x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 0x1f);
				x86_shift_reg_imm (code, X86_SAR, ins->sreg1, ins->inst_imm - 32);
			} else {
				x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm);
				x86_shift_reg_imm (code, X86_SAR,
case OP_LSHL_IMM: if (ins->inst_imm >= 32) { x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1); x86_clear_reg (code, ins->sreg1); x86_shift_reg_imm (code, X86_SHL, ins->backend.reg3, ins->inst_imm - 32); } else { x86_shld_reg_imm (code, ins->backend.reg3, ins->sreg1, ins->inst_imm); x86_shift_reg_imm (code, X86_SHL, ins->sreg1, ins->inst_imm); } break; case OP_LSHR_IMM: if (ins->inst_imm >= 32) { x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3); x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 0x1f); x86_shift_reg_imm (code, X86_SAR, ins->sreg1, ins->inst_imm - 32); } else { x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm); x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, ins->inst_imm); } break; case OP_LSHR_UN_IMM: if (ins->inst_imm >= 32) { x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3); x86_clear_reg (code, ins->backend.reg3); x86_shift_reg_imm (code, X86_SHR, ins->sreg1, ins->inst_imm - 32); } else { x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm); x86_shift_reg_imm (code, X86_SHR, ins->backend.reg3, ins->inst_imm); } break; case OP_INOT: x86_not_reg (code, ins->sreg1); break; case OP_INEG: x86_neg_reg (code, ins->sreg1); break; case OP_IMUL: x86_imul_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MUL_IMM: case OP_IMUL_IMM: switch (ins->inst_imm) { case 2: /* MOV r1, r2 */ /* ADD r1, r1 */ x86_mov_reg_reg (code, ins->dreg, ins->sreg1); x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg); break; case 3: /* LEA r1, [r2 + r2*2] */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1); break; case 5: /* LEA r1, [r2 + r2*4] */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); break; case 6: /* LEA r1, [r2 + r2*2] */ /* ADD r1, r1 */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1); x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg); break; case 9: /* LEA r1, [r2 + r2*8] */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 3); break; case 10: /* LEA r1, [r2 + r2*4] */ /* ADD r1, r1 */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg); break; case 12: /* LEA r1, [r2 + r2*2] */ /* SHL r1, 2 */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1); x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2); break; case 25: /* LEA r1, [r2 + r2*4] */ /* LEA r1, [r1 + r1*4] */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2); break; case 100: /* LEA r1, [r2 + r2*4] */ /* SHL r1, 2 */ /* LEA r1, [r1 + r1*4] */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2); x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2); break; default: x86_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm); break; } break;
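/*
 * Background for the multiply-by-constant table above: LEA computes
 * base + index * {1, 2, 4, 8} in a single instruction, so x * 5 becomes
 * lea r, [x + x*4], and composite constants are factored, e.g.
 * 100 = 5 * 4 * 5:
 *
 *     lea r, [x + x*4]   ; r = x * 5
 *     shl r, 2           ; r = x * 20
 *     lea r, [r + r*4]   ; r = x * 100
 *
 * Schematic assembly, not literal emitter output.
 */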
case OP_IMUL_OVF: x86_imul_reg_reg (code, ins->sreg1, ins->sreg2); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, ins->inst_exc_name); break; case OP_IMUL_OVF_UN: { /* the mul operation and the exception check should most likely be split */ int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE; /*g_assert (ins->sreg2 == X86_EAX); g_assert (ins->dreg == X86_EAX);*/ if (ins->sreg2 == X86_EAX) { non_eax_reg = ins->sreg1; } else if (ins->sreg1 == X86_EAX) { non_eax_reg = ins->sreg2; } else { /* no need to save since we're going to store to it anyway */ if (ins->dreg != X86_EAX) { saved_eax = TRUE; x86_push_reg (code, X86_EAX); } x86_mov_reg_reg (code, X86_EAX, ins->sreg1); non_eax_reg = ins->sreg2; } if (ins->dreg == X86_EDX) { if (!saved_eax) { saved_eax = TRUE; x86_push_reg (code, X86_EAX); } } else { saved_edx = TRUE; x86_push_reg (code, X86_EDX); } x86_mul_reg (code, non_eax_reg, FALSE); /* save before the check since pop and mov don't change the flags */ x86_mov_reg_reg (code, ins->dreg, X86_EAX); if (saved_edx) x86_pop_reg (code, X86_EDX); if (saved_eax) x86_pop_reg (code, X86_EAX); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, ins->inst_exc_name); break; } case OP_ICONST: x86_mov_reg_imm (code, ins->dreg, ins->inst_c0); break; case OP_AOTCONST: g_assert_not_reached (); mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0); x86_mov_reg_imm (code, ins->dreg, 0); break; case OP_JUMP_TABLE: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0); x86_mov_reg_imm (code, ins->dreg, 0); break; case OP_LOAD_GOTADDR: g_assert (ins->dreg == MONO_ARCH_GOT_REG); code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL); break; case OP_GOT_ENTRY: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_right->inst_i1, ins->inst_right->inst_p0); x86_mov_reg_membase (code, ins->dreg, ins->inst_basereg, 0xf0f0f0f0, 4); break; case OP_X86_PUSH_GOT_ENTRY: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_right->inst_i1, ins->inst_right->inst_p0); x86_push_membase (code, ins->inst_basereg, 0xf0f0f0f0); break; case OP_MOVE: x86_mov_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_TAILCALL_PARAMETER: /* This opcode helps compute sizes, i.e. of the subsequent OP_TAILCALL, but contributes no code. */ g_assert (ins->next); break; case OP_TAILCALL: case OP_TAILCALL_MEMBASE: case OP_TAILCALL_REG: { call = (MonoCallInst*)ins; int pos = 0, i; gboolean const tailcall_membase = ins->opcode == OP_TAILCALL_MEMBASE; gboolean const tailcall_reg = (ins->opcode == OP_TAILCALL_REG); int const sreg1 = ins->sreg1; gboolean const sreg1_ecx = sreg1 == X86_ECX; gboolean const tailcall_membase_ecx = tailcall_membase && sreg1_ecx; gboolean const tailcall_membase_not_ecx = tailcall_membase && !sreg1_ecx; max_len += (call->stack_usage - call->stack_align_amount) / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER); code = realloc_code (cfg, max_len); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; g_assert (!cfg->method->save_lmf); /* Ecx is volatile, not used for parameters, or rgctx/imt (edx). It is also not used for the return value, though that does not matter. Ecx is preserved across the tailcall formation. Eax could also be used here at the cost of a push/pop moving the parameters. Edx must be preserved as it is rgctx/imt. If ecx happens to be the base of the tailcall_membase, then just end with jmp [ecx+offset] -- one instruction; if ecx is not the base, then move ecx, [reg+offset] and later jmp [ecx] -- two instructions. */
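/*
 * Schematically (not literal emitter output), the tailcall shapes produced
 * below are:
 *
 *     mov ecx, [sreg1 + offset]   ; only when the target is loaded from
 *                                 ; memory and ecx is not already the base
 *     ; restore callee-saved regs, copy outgoing args over the incoming area
 *     leave
 *     jmp ecx                     ; or jmp [ecx + offset] when ecx is the
 *                                 ; base, or a patched jmp imm32 for direct
 *                                 ; method targets
 */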
if (tailcall_reg) { g_assert (sreg1 > -1); x86_mov_reg_reg (code, X86_ECX, sreg1); } else if (tailcall_membase_not_ecx) { g_assert (sreg1 > -1); x86_mov_reg_membase (code, X86_ECX, sreg1, ins->inst_offset, 4); } /* restore callee saved registers */ for (i = 0; i < X86_NREG; ++i) if (X86_IS_CALLEE_SAVED_REG (i) && cfg->used_int_regs & (1 << i)) pos -= 4; if (cfg->used_int_regs & (1 << X86_ESI)) { x86_mov_reg_membase (code, X86_ESI, X86_EBP, pos, 4); pos += 4; } if (cfg->used_int_regs & (1 << X86_EDI)) { x86_mov_reg_membase (code, X86_EDI, X86_EBP, pos, 4); pos += 4; } if (cfg->used_int_regs & (1 << X86_EBX)) { x86_mov_reg_membase (code, X86_EBX, X86_EBP, pos, 4); pos += 4; } /* Copy arguments on the stack to our argument area */ // FIXME use rep mov for constant code size, before nonvolatiles // restored, first saving esi, edi into volatiles for (i = 0; i < call->stack_usage - call->stack_align_amount; i += 4) { x86_mov_reg_membase (code, X86_EAX, X86_ESP, i, 4); x86_mov_membase_reg (code, X86_EBP, 8 + i, X86_EAX, 4); } /* restore ESP/EBP */ x86_leave (code); if (tailcall_membase_ecx) { x86_jump_membase (code, X86_ECX, ins->inst_offset); } else if (tailcall_reg || tailcall_membase_not_ecx) { x86_jump_reg (code, X86_ECX); } else { // FIXME Patch data instead of code. code = x86_align_and_patch (cfg, code, MONO_PATCH_INFO_METHOD_JUMP, call->method); x86_jump32 (code, 0); } ins->flags |= MONO_INST_GC_CALLSITE; break; } case OP_CHECK_THIS: /* ensure ins->sreg1 is not NULL * note that cmp DWORD PTR [eax], eax is one byte shorter than * cmp DWORD PTR [eax], 0 */ x86_alu_membase_reg (code, X86_CMP, ins->sreg1, 0, ins->sreg1); break; case OP_ARGLIST: { int hreg = ins->sreg1 == X86_EAX? X86_ECX: X86_EAX; x86_push_reg (code, hreg); x86_lea_membase (code, hreg, X86_EBP, cfg->sig_cookie); x86_mov_membase_reg (code, ins->sreg1, 0, hreg, 4); x86_pop_reg (code, hreg); break; } case OP_FCALL: case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: case OP_CALL: case OP_FCALL_REG: case OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: case OP_FCALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: { CallInfo *cinfo; call = (MonoCallInst*)ins; cinfo = call->call_info; switch (ins->opcode) { case OP_FCALL: case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: case OP_CALL: { const MonoJumpInfoTarget patch = mono_call_to_patch (call); code = emit_call (cfg, code, patch.type, patch.target); break; } case OP_FCALL_REG: case OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: x86_call_reg (code, ins->sreg1); break; case OP_FCALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: x86_call_membase (code, ins->sreg1, ins->inst_offset); break; default: g_assert_not_reached (); break; } ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; if (cinfo->callee_stack_pop) { /* Have to compensate for the stack space popped by the callee */ x86_alu_reg_imm (code, X86_SUB, X86_ESP, cinfo->callee_stack_pop); } code = emit_move_return_value (cfg, ins, code); break; } case OP_X86_LEA: x86_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount); break; case OP_X86_LEA_MEMBASE: x86_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_X86_XCHG: x86_xchg_reg_reg (code, ins->sreg1, 
ins->sreg2, 4); break; case OP_LOCALLOC: /* keep alignment */ x86_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_LOCALLOC_ALIGNMENT - 1); x86_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1)); code = mono_emit_stack_alloc (cfg, code, ins); x86_mov_reg_reg (code, ins->dreg, X86_ESP); if (cfg->param_area) x86_alu_reg_imm (code, X86_ADD, ins->dreg, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT)); break; case OP_LOCALLOC_IMM: { guint32 size = ins->inst_imm; size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1); if (ins->flags & MONO_INST_INIT) { /* FIXME: Optimize this */ x86_mov_reg_imm (code, ins->dreg, size); ins->sreg1 = ins->dreg; code = mono_emit_stack_alloc (cfg, code, ins); x86_mov_reg_reg (code, ins->dreg, X86_ESP); } else { x86_alu_reg_imm (code, X86_SUB, X86_ESP, size); x86_mov_reg_reg (code, ins->dreg, X86_ESP); } if (cfg->param_area) x86_alu_reg_imm (code, X86_ADD, ins->dreg, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT)); break; } case OP_THROW: { x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4); x86_push_reg (code, ins->sreg1); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception)); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; break; } case OP_RETHROW: { x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4); x86_push_reg (code, ins->sreg1); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception)); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; break; } case OP_CALL_HANDLER: x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb); x86_call_imm (code, 0); for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev) mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb); x86_alu_reg_imm (code, X86_ADD, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4); break; case OP_START_HANDLER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); x86_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, X86_ESP, 4); if (cfg->param_area) x86_alu_reg_imm (code, X86_SUB, X86_ESP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT)); break; } case OP_ENDFINALLY: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); x86_mov_reg_membase (code, X86_ESP, spvar->inst_basereg, spvar->inst_offset, 4); x86_ret (code); break; } case OP_ENDFILTER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); x86_mov_reg_membase (code, X86_ESP, spvar->inst_basereg, spvar->inst_offset, 4); /* The local allocator will put the result into EAX */ x86_ret (code); break; } case OP_GET_EX_OBJ: x86_mov_reg_reg (code, ins->dreg, X86_EAX); break; case OP_LABEL: ins->inst_c0 = code - cfg->native_code; break; case OP_BR: if (ins->inst_target_bb->native_offset) { x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb); if ((cfg->opt & MONO_OPT_BRANCH) && x86_is_imm8 (ins->inst_target_bb->max_offset - cpos)) x86_jump8 (code, 0); else x86_jump32 (code, 0); } break; case OP_BR_REG: x86_jump_reg (code, ins->sreg1); break; case OP_ICNEQ: case OP_ICGE: case OP_ICLE: case OP_ICGE_UN: case OP_ICLE_UN: case OP_CEQ: case 
OP_CLT: case OP_CLT_UN: case OP_CGT: case OP_CGT_UN: case OP_CNE: case OP_ICEQ: case OP_ICLT: case OP_ICLT_UN: case OP_ICGT: case OP_ICGT_UN: x86_set_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, cc_signed_table [mono_opcode_to_cond (ins->opcode)]); x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); break; case OP_COND_EXC_EQ: case OP_COND_EXC_NE_UN: case OP_COND_EXC_LT: case OP_COND_EXC_LT_UN: case OP_COND_EXC_GT: case OP_COND_EXC_GT_UN: case OP_COND_EXC_GE: case OP_COND_EXC_GE_UN: case OP_COND_EXC_LE: case OP_COND_EXC_LE_UN: case OP_COND_EXC_IEQ: case OP_COND_EXC_INE_UN: case OP_COND_EXC_ILT: case OP_COND_EXC_ILT_UN: case OP_COND_EXC_IGT: case OP_COND_EXC_IGT_UN: case OP_COND_EXC_IGE: case OP_COND_EXC_IGE_UN: case OP_COND_EXC_ILE: case OP_COND_EXC_ILE_UN: EMIT_COND_SYSTEM_EXCEPTION (cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], (const char*)ins->inst_p1); break; case OP_COND_EXC_OV: case OP_COND_EXC_NO: case OP_COND_EXC_C: case OP_COND_EXC_NC: EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], (ins->opcode < OP_COND_EXC_NE_UN), (const char*)ins->inst_p1); break; case OP_COND_EXC_IOV: case OP_COND_EXC_INO: case OP_COND_EXC_IC: case OP_COND_EXC_INC: EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_IEQ], (ins->opcode < OP_COND_EXC_INE_UN), (const char*)ins->inst_p1); break; case OP_IBEQ: case OP_IBNE_UN: case OP_IBLT: case OP_IBLT_UN: case OP_IBGT: case OP_IBGT_UN: case OP_IBGE: case OP_IBGE_UN: case OP_IBLE: case OP_IBLE_UN: EMIT_COND_BRANCH (ins, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)]); break; case OP_CMOV_IEQ: case OP_CMOV_IGE: case OP_CMOV_IGT: case OP_CMOV_ILE: case OP_CMOV_ILT: case OP_CMOV_INE_UN: case OP_CMOV_IGE_UN: case OP_CMOV_IGT_UN: case OP_CMOV_ILE_UN: case OP_CMOV_ILT_UN: g_assert (ins->dreg == ins->sreg1); x86_cmov_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, ins->sreg2); break; /* floating point opcodes */ case OP_R8CONST: { double d = *(double *)ins->inst_p0; if ((d == 0.0) && (mono_signbit (d) == 0)) { x86_fldz (code); } else if (d == 1.0) { x86_fld1 (code); } else { if (cfg->compile_aot) { guint32 *val = (guint32*)&d; x86_push_imm (code, val [1]); x86_push_imm (code, val [0]); x86_fld_membase (code, X86_ESP, 0, TRUE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8); } else { mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_R8, ins->inst_p0); x86_fld (code, NULL, TRUE); } } break; } case OP_R4CONST: { float f = *(float *)ins->inst_p0; if ((f == 0.0) && (mono_signbit (f) == 0)) { x86_fldz (code); } else if (f == 1.0) { x86_fld1 (code); } else { if (cfg->compile_aot) { guint32 val = *(guint32*)&f; x86_push_imm (code, val); x86_fld_membase (code, X86_ESP, 0, FALSE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4); } else { mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_R4, ins->inst_p0); x86_fld (code, NULL, FALSE); } } break; } case OP_STORER8_MEMBASE_REG: x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, TRUE, TRUE); break; case OP_LOADR8_MEMBASE: x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE); break; case OP_STORER4_MEMBASE_REG: x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, FALSE, TRUE); break; case OP_LOADR4_MEMBASE: x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, FALSE); break; case OP_ICONV_TO_R4: x86_push_reg (code, 
ins->sreg1); x86_fild_membase (code, X86_ESP, 0, FALSE); /* Change precision */ x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE); x86_fld_membase (code, X86_ESP, 0, FALSE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4); break; case OP_ICONV_TO_R8: x86_push_reg (code, ins->sreg1); x86_fild_membase (code, X86_ESP, 0, FALSE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4); break; case OP_ICONV_TO_R_UN: x86_push_imm (code, 0); x86_push_reg (code, ins->sreg1); x86_fild_membase (code, X86_ESP, 0, TRUE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8); break; case OP_X86_FP_LOAD_I8: x86_fild_membase (code, ins->inst_basereg, ins->inst_offset, TRUE); break; case OP_X86_FP_LOAD_I4: x86_fild_membase (code, ins->inst_basereg, ins->inst_offset, FALSE); break; case OP_FCONV_TO_R4: /* Change precision */ x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4); x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE); x86_fld_membase (code, X86_ESP, 0, FALSE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4); break; case OP_FCONV_TO_I1: code = emit_float_to_int (cfg, code, ins->dreg, 1, TRUE); break; case OP_FCONV_TO_U1: code = emit_float_to_int (cfg, code, ins->dreg, 1, FALSE); break; case OP_FCONV_TO_I2: code = emit_float_to_int (cfg, code, ins->dreg, 2, TRUE); break; case OP_FCONV_TO_U2: code = emit_float_to_int (cfg, code, ins->dreg, 2, FALSE); break; case OP_FCONV_TO_I4: case OP_FCONV_TO_I: code = emit_float_to_int (cfg, code, ins->dreg, 4, TRUE); break; case OP_FCONV_TO_I8: x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4); x86_fnstcw_membase (code, X86_ESP, 0); x86_mov_reg_membase (code, ins->dreg, X86_ESP, 0, 2); x86_alu_reg_imm (code, X86_OR, ins->dreg, 0xc00); x86_mov_membase_reg (code, X86_ESP, 2, ins->dreg, 2); x86_fldcw_membase (code, X86_ESP, 2); x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8); x86_fist_pop_membase (code, X86_ESP, 0, TRUE); x86_pop_reg (code, ins->dreg); x86_pop_reg (code, ins->backend.reg3); x86_fldcw_membase (code, X86_ESP, 0); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4); break;
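/*
 * Background for the fnstcw/fldcw dance above: bits 10-11 of the x87
 * control word select the rounding mode, and OR-ing in 0xC00 sets
 * RC = 11b (truncate toward zero). The FPU defaults to round-to-nearest,
 * so the control word is switched temporarily so that fistp matches the
 * C semantics of (gint64)d, and the original control word is restored
 * afterwards.
 */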
case OP_LCONV_TO_R8_2: x86_push_reg (code, ins->sreg2); x86_push_reg (code, ins->sreg1); x86_fild_membase (code, X86_ESP, 0, TRUE); /* Change precision */ x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE); x86_fld_membase (code, X86_ESP, 0, TRUE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8); break; case OP_LCONV_TO_R4_2: x86_push_reg (code, ins->sreg2); x86_push_reg (code, ins->sreg1); x86_fild_membase (code, X86_ESP, 0, TRUE); /* Change precision */ x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE); x86_fld_membase (code, X86_ESP, 0, FALSE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8); break; case OP_LCONV_TO_R_UN_2: { static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 }; guint8 *br; /* load 64bit integer to FP stack */ x86_push_reg (code, ins->sreg2); x86_push_reg (code, ins->sreg1); x86_fild_membase (code, X86_ESP, 0, TRUE); /* test if lreg is negative */ x86_test_reg_reg (code, ins->sreg2, ins->sreg2); br = code; x86_branch8 (code, X86_CC_GEZ, 0, TRUE); /* add correction constant mn */ if (cfg->compile_aot) { x86_push_imm (code, (((guint32)mn [9]) << 24) | ((guint32)mn [8] << 16) | ((guint32)mn [7] << 8) | ((guint32)mn [6])); x86_push_imm (code, (((guint32)mn [5]) << 24) | ((guint32)mn [4] << 16) | ((guint32)mn [3] << 8) | ((guint32)mn [2])); x86_push_imm (code, (((guint32)mn [1]) << 24) | ((guint32)mn [0] << 16)); x86_fld80_membase (code, X86_ESP, 2); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 12); } else { x86_fld80_mem (code, (gsize)&mn); } x86_fp_op_reg (code, X86_FADD, 1, TRUE); x86_patch (br, code); /* Change precision */ x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE); x86_fld_membase (code, X86_ESP, 0, TRUE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8); break; } case OP_LCONV_TO_OVF_I: case OP_LCONV_TO_OVF_I4_2: { guint8 *br [3], *label [1]; MonoInst *tins; /* Valid ints: 0xFFFFFFFF:80000000 to 0x00000000:7FFFFFFF */ x86_test_reg_reg (code, ins->sreg1, ins->sreg1); /* If the low word top bit is set, see if we are negative */ br [0] = code; x86_branch8 (code, X86_CC_LT, 0, TRUE); /* We are not negative (no top bit set); check that the top word is zero */ x86_test_reg_reg (code, ins->sreg2, ins->sreg2); br [1] = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE); label [0] = code; /* throw exception */ tins = mono_branch_optimize_exception_target (cfg, bb, "OverflowException"); if (tins) { mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, tins->inst_true_bb); if ((cfg->opt & MONO_OPT_BRANCH) && x86_is_imm8 (tins->inst_true_bb->max_offset - cpos)) x86_jump8 (code, 0); else x86_jump32 (code, 0); } else { mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException"); x86_jump32 (code, 0); } x86_patch (br [0], code); /* our top bit is set, check that the top word is 0xffffffff */ x86_alu_reg_imm (code, X86_CMP, ins->sreg2, 0xffffffff); x86_patch (br [1], code); /* nope, emit exception */ br [2] = code; x86_branch8 (code, X86_CC_NE, 0, TRUE); x86_patch (br [2], label [0]); x86_mov_reg_reg (code, ins->dreg, ins->sreg1); break; }
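/*
 * Equivalent check in C (illustrative): the register pair sreg2:sreg1
 * holds a gint64 v, and the conversion above is valid iff sign-extending
 * the low word reproduces the full value:
 *
 *     gint32 lo = (gint32)v;
 *     if ((gint64)lo != v)
 *         throw_overflow ();   // hypothetical name for the patched exception path
 *     return lo;
 */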
case OP_FMOVE: /* Not needed on the fp stack */ break; case OP_MOVE_F_TO_I4: x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE, TRUE); x86_mov_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, 4); break; case OP_MOVE_I4_TO_F: x86_mov_membase_reg (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1, 4); x86_fld_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE); break; case OP_FADD: x86_fp_op_reg (code, X86_FADD, 1, TRUE); break; case OP_FSUB: x86_fp_op_reg (code, X86_FSUB, 1, TRUE); break; case OP_FMUL: x86_fp_op_reg (code, X86_FMUL, 1, TRUE); break; case OP_FDIV: x86_fp_op_reg (code, X86_FDIV, 1, TRUE); break; case OP_FNEG: x86_fchs (code); break; case OP_ABS: x86_fabs (code); break; case OP_TAN: { /* it really doesn't make sense to inline all this code, it's here just to show that things may not be as simple as they appear */ guchar *check_pos, *end_tan, *pop_jump; x86_push_reg (code, X86_EAX); x86_fptan (code); x86_fnstsw (code); x86_test_reg_imm (code, X86_EAX, X86_FP_C2); check_pos = code; x86_branch8 (code, X86_CC_NE, 0, FALSE); x86_fstp (code, 0); /* pop the 1.0 */ end_tan = code; x86_jump8 (code, 0); x86_fldpi (code); x86_fp_op (code, X86_FADD, 0); x86_fxch (code, 1); x86_fprem1 (code); x86_fstsw (code); x86_test_reg_imm (code, X86_EAX, X86_FP_C2); pop_jump = code; x86_branch8 (code, X86_CC_NE, 0, FALSE); x86_fstp (code, 1); x86_fptan (code); x86_patch (pop_jump, code); x86_fstp (code, 0); /* pop the 1.0 */ x86_patch (check_pos, code); x86_patch (end_tan, code); x86_fldz (code); x86_fp_op_reg (code, X86_FADD, 1, TRUE); x86_pop_reg (code, X86_EAX); break; } case OP_ATAN: x86_fld1 (code); x86_fpatan (code); x86_fldz (code); x86_fp_op_reg (code, X86_FADD, 1, TRUE); break; case OP_SQRT: x86_fsqrt (code); break; case OP_ROUND: x86_frndint (code); break; case OP_IMIN: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); x86_cmov_reg (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2); break; case OP_IMIN_UN: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); x86_cmov_reg (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2); break; case OP_IMAX: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); x86_cmov_reg (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2); break; case OP_IMAX_UN: g_assert (cfg->opt & MONO_OPT_CMOV); g_assert (ins->dreg == ins->sreg1); x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); x86_cmov_reg (code, X86_CC_LT, FALSE, ins->dreg, ins->sreg2); break; case OP_X86_FPOP: x86_fstp (code, 0); break; case OP_X86_FXCH: x86_fxch (code, ins->inst_imm); break; case OP_FREM: { guint8 *l1, *l2; x86_push_reg (code, X86_EAX); /* we need to exchange ST(0) with ST(1) */ x86_fxch (code, 1); /* this requires a loop, because fprem sometimes returns a partial remainder */ l1 = code; /* looks like MS is using fprem instead of the IEEE compatible fprem1 */ /* x86_fprem1 (code); */ x86_fprem (code); x86_fnstsw (code); x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_C2); l2 = code; x86_branch8 (code, X86_CC_NE, 0, FALSE); x86_patch (l2, l1); /* pop result */ x86_fstp (code, 1); x86_pop_reg (code, X86_EAX); break; } case OP_FCOMPARE: if (cfg->opt & MONO_OPT_FCMOV) { x86_fcomip (code, 1); x86_fstp (code, 0); break; } /* this overwrites EAX */ EMIT_FPCOMPARE(code); x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK); break; case OP_FCEQ: case OP_FCNEQ: if (cfg->opt & MONO_OPT_FCMOV) { /* zeroing the register at the start results in shorter and faster code (we can also remove the widening op) */ guchar *unordered_check; x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); x86_fcomip (code, 1); x86_fstp (code, 0); unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); if (ins->opcode == OP_FCEQ) { x86_set_reg (code, X86_CC_EQ, ins->dreg, FALSE); x86_patch (unordered_check, code); } else { guchar *jump_to_end; x86_set_reg (code, X86_CC_NE, ins->dreg, FALSE); jump_to_end = code; x86_jump8 (code, 0); x86_patch (unordered_check, code); x86_inc_reg (code, ins->dreg); x86_patch (jump_to_end, code); } break; } if (ins->dreg != X86_EAX) x86_push_reg (code, X86_EAX); EMIT_FPCOMPARE(code); x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK); x86_alu_reg_imm (code, X86_CMP, 
X86_EAX, 0x4000); x86_set_reg (code, ins->opcode == OP_FCEQ ? X86_CC_EQ : X86_CC_NE, ins->dreg, TRUE); x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); if (ins->dreg != X86_EAX) x86_pop_reg (code, X86_EAX); break; case OP_FCLT: case OP_FCLT_UN: if (cfg->opt & MONO_OPT_FCMOV) { /* zeroing the register at the start results in * shorter and faster code (we can also remove the widening op) */ x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); x86_fcomip (code, 1); x86_fstp (code, 0); if (ins->opcode == OP_FCLT_UN) { guchar *unordered_check = code; guchar *jump_to_end; x86_branch8 (code, X86_CC_P, 0, FALSE); x86_set_reg (code, X86_CC_GT, ins->dreg, FALSE); jump_to_end = code; x86_jump8 (code, 0); x86_patch (unordered_check, code); x86_inc_reg (code, ins->dreg); x86_patch (jump_to_end, code); } else { x86_set_reg (code, X86_CC_GT, ins->dreg, FALSE); } break; } if (ins->dreg != X86_EAX) x86_push_reg (code, X86_EAX); EMIT_FPCOMPARE(code); x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK); if (ins->opcode == OP_FCLT_UN) { guchar *is_not_zero_check, *end_jump; is_not_zero_check = code; x86_branch8 (code, X86_CC_NZ, 0, TRUE); end_jump = code; x86_jump8 (code, 0); x86_patch (is_not_zero_check, code); x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK); x86_patch (end_jump, code); } x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE); x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); if (ins->dreg != X86_EAX) x86_pop_reg (code, X86_EAX); break; case OP_FCLE: { guchar *unordered_check; guchar *jump_to_end; if (cfg->opt & MONO_OPT_FCMOV) { /* zeroing the register at the start results in * shorter and faster code (we can also remove the widening op) */ x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); x86_fcomip (code, 1); x86_fstp (code, 0); unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); x86_set_reg (code, X86_CC_NB, ins->dreg, FALSE); x86_patch (unordered_check, code); break; } if (ins->dreg != X86_EAX) x86_push_reg (code, X86_EAX); EMIT_FPCOMPARE(code); x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK); x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4500); unordered_check = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE); x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0); x86_set_reg (code, X86_CC_NE, ins->dreg, TRUE); x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); jump_to_end = code; x86_jump8 (code, 0); x86_patch (unordered_check, code); x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); x86_patch (jump_to_end, code); if (ins->dreg != X86_EAX) x86_pop_reg (code, X86_EAX); break; } case OP_FCGT: case OP_FCGT_UN: if (cfg->opt & MONO_OPT_FCMOV) { /* zeroing the register at the start results in * shorter and faster code (we can also remove the widening op) */ guchar *unordered_check; x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); x86_fcomip (code, 1); x86_fstp (code, 0); if (ins->opcode == OP_FCGT) { unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); x86_set_reg (code, X86_CC_LT, ins->dreg, FALSE); x86_patch (unordered_check, code); } else { x86_set_reg (code, X86_CC_LT, ins->dreg, FALSE); } break; } if (ins->dreg != X86_EAX) x86_push_reg (code, X86_EAX); EMIT_FPCOMPARE(code); x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK); x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0); if (ins->opcode == OP_FCGT_UN) { guchar *is_not_zero_check, *end_jump; is_not_zero_check = code; x86_branch8 (code, X86_CC_NZ, 0, TRUE); end_jump = code; x86_jump8 (code, 0); x86_patch (is_not_zero_check, code); x86_alu_reg_imm (code, 
X86_CMP, X86_EAX, X86_FP_CC_MASK); x86_patch (end_jump, code); } x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE); x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); if (ins->dreg != X86_EAX) x86_pop_reg (code, X86_EAX); break; case OP_FCGE: { guchar *unordered_check; guchar *jump_to_end; if (cfg->opt & MONO_OPT_FCMOV) { /* zeroing the register at the start results in shorter and faster code (we can also remove the widening op) */ x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); x86_fcomip (code, 1); x86_fstp (code, 0); unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); x86_set_reg (code, X86_CC_NA, ins->dreg, FALSE); x86_patch (unordered_check, code); break; } if (ins->dreg != X86_EAX) x86_push_reg (code, X86_EAX); EMIT_FPCOMPARE(code); x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK); x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4500); unordered_check = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE); x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0); x86_set_reg (code, X86_CC_GE, ins->dreg, TRUE); x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); jump_to_end = code; x86_jump8 (code, 0); x86_patch (unordered_check, code); x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); x86_patch (jump_to_end, code); if (ins->dreg != X86_EAX) x86_pop_reg (code, X86_EAX); break; } case OP_FBEQ: if (cfg->opt & MONO_OPT_FCMOV) { guchar *jump = code; x86_branch8 (code, X86_CC_P, 0, TRUE); EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE); x86_patch (jump, code); break; } x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4000); EMIT_COND_BRANCH (ins, X86_CC_EQ, TRUE); break; case OP_FBNE_UN: /* Branch if C013 != 100 */ if (cfg->opt & MONO_OPT_FCMOV) { /* branch if !ZF or (PF|CF) */ EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE); EMIT_COND_BRANCH (ins, X86_CC_P, FALSE); EMIT_COND_BRANCH (ins, X86_CC_B, FALSE); break; } x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C3); EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE); break; case OP_FBLT: if (cfg->opt & MONO_OPT_FCMOV) { EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE); break; } EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE); break; case OP_FBLT_UN: if (cfg->opt & MONO_OPT_FCMOV) { EMIT_COND_BRANCH (ins, X86_CC_P, FALSE); EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE); break; } if (ins->opcode == OP_FBLT_UN) { guchar *is_not_zero_check, *end_jump; is_not_zero_check = code; x86_branch8 (code, X86_CC_NZ, 0, TRUE); end_jump = code; x86_jump8 (code, 0); x86_patch (is_not_zero_check, code); x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK); x86_patch (end_jump, code); } EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE); break; case OP_FBGT: case OP_FBGT_UN: if (cfg->opt & MONO_OPT_FCMOV) { if (ins->opcode == OP_FBGT) { guchar *br1; /* skip branch if C1=1 */ br1 = code; x86_branch8 (code, X86_CC_P, 0, FALSE); /* branch if (C0 | C3) = 1 */ EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE); x86_patch (br1, code); } else { EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE); } break; } x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0); if (ins->opcode == OP_FBGT_UN) { guchar *is_not_zero_check, *end_jump; is_not_zero_check = code; x86_branch8 (code, X86_CC_NZ, 0, TRUE); end_jump = code; x86_jump8 (code, 0); x86_patch (is_not_zero_check, code); x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK); x86_patch (end_jump, code); } EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE); break;
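/*
 * Reference for the x87 branch opcodes in this region: after an x87
 * compare the status word encodes (C3, C2, C0) as
 *
 *     ST(0) > src  ->  0, 0, 0
 *     ST(0) < src  ->  0, 0, 1
 *     ST(0) = src  ->  1, 0, 0
 *     unordered    ->  1, 1, 1
 *
 * and with fcomip the same bits land directly in ZF/PF/CF, which is why
 * the FCMOV paths test X86_CC_P (PF, i.e. C2) first to filter out NaNs.
 */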
case OP_FBGE: /* Branch if C013 == 100 or 001 */ if (cfg->opt & MONO_OPT_FCMOV) { guchar *br1; /* skip branch if C1=1 */ br1 = code; x86_branch8 (code, X86_CC_P, 0, FALSE); /* branch if (C0 | C3) = 1 */ EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE); x86_patch (br1, code); break; } x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0); EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE); x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C3); EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE); break; case OP_FBGE_UN: /* Branch if C013 == 000 */ if (cfg->opt & MONO_OPT_FCMOV) { EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE); break; } EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE); break; case OP_FBLE: /* Branch if C013=000 or 100 */ if (cfg->opt & MONO_OPT_FCMOV) { guchar *br1; /* skip branch if C1=1 */ br1 = code; x86_branch8 (code, X86_CC_P, 0, FALSE); /* branch if C0=0 */ EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE); x86_patch (br1, code); break; } x86_alu_reg_imm (code, X86_AND, X86_EAX, (X86_FP_C0|X86_FP_C1)); x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0); EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE); break; case OP_FBLE_UN: /* Branch if C013 != 001 */ if (cfg->opt & MONO_OPT_FCMOV) { EMIT_COND_BRANCH (ins, X86_CC_P, FALSE); EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE); break; } x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0); EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE); break; case OP_CKFINITE: { guchar *br1; x86_push_reg (code, X86_EAX); x86_fxam (code); x86_fnstsw (code); x86_alu_reg_imm (code, X86_AND, X86_EAX, 0x4100); x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0); x86_pop_reg (code, X86_EAX); /* Have to clean up the fp stack before throwing the exception */ br1 = code; x86_branch8 (code, X86_CC_NE, 0, FALSE); x86_fstp (code, 0); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException"); x86_patch (br1, code); break; } case OP_TLS_GET: { code = mono_x86_emit_tls_get (code, ins->dreg, ins->inst_offset); break; } case OP_TLS_SET: { code = mono_x86_emit_tls_set (code, ins->sreg1, ins->inst_offset); break; } case OP_MEMORY_BARRIER: { if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) { x86_prefix (code, X86_LOCK_PREFIX); x86_alu_membase_imm (code, X86_ADD, X86_ESP, 0, 0); } break; }
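/*
 * C models of the interlocked operations below, written with GCC builtins
 * purely for illustration (function names hypothetical):
 *
 *     // OP_ATOMIC_ADD_I4: lock xadd returns the old value; the add that
 *     // follows turns it into the new one
 *     int atomic_add (volatile int *p, int v) { return __sync_add_and_fetch (p, v); }
 *
 *     // OP_ATOMIC_EXCHANGE_I4: a lock cmpxchg retry loop returning the old value
 *     int atomic_xchg (volatile int *p, int v)
 *     {
 *         int old;
 *         do { old = *p; } while (!__sync_bool_compare_and_swap (p, old, v));
 *         return old;
 *     }
 */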
case OP_ATOMIC_ADD_I4: { int dreg = ins->dreg; g_assert (cfg->has_atomic_add_i4); /* hack: limit in regalloc, dreg != sreg1 && dreg != sreg2 */ if (ins->sreg2 == dreg) { if (dreg == X86_EBX) { dreg = X86_EDI; if (ins->inst_basereg == X86_EDI) dreg = X86_ESI; } else { dreg = X86_EBX; if (ins->inst_basereg == X86_EBX) dreg = X86_EDI; } } else if (ins->inst_basereg == dreg) { if (dreg == X86_EBX) { dreg = X86_EDI; if (ins->sreg2 == X86_EDI) dreg = X86_ESI; } else { dreg = X86_EBX; if (ins->sreg2 == X86_EBX) dreg = X86_EDI; } } if (dreg != ins->dreg) { x86_push_reg (code, dreg); } x86_mov_reg_reg (code, dreg, ins->sreg2); x86_prefix (code, X86_LOCK_PREFIX); x86_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, 4); /* dreg contains the old value, add with sreg2 value */ x86_alu_reg_reg (code, X86_ADD, dreg, ins->sreg2); if (ins->dreg != dreg) { x86_mov_reg_reg (code, ins->dreg, dreg); x86_pop_reg (code, dreg); } break; } case OP_ATOMIC_EXCHANGE_I4: { guchar *br[2]; int sreg2 = ins->sreg2; int breg = ins->inst_basereg; g_assert (cfg->has_atomic_exchange_i4); /* cmpxchg uses eax as the comparand; we need to make sure we can use it - a hack to overcome limits in the x86 reg allocator (req: dreg == eax and sreg2 != eax and breg != eax) */ g_assert (ins->dreg == X86_EAX); /* We need the EAX reg for the cmpxchg */ if (ins->sreg2 == X86_EAX) { sreg2 = (breg == X86_EDX) ? X86_EBX : X86_EDX; x86_push_reg (code, sreg2); x86_mov_reg_reg (code, sreg2, X86_EAX); } if (breg == X86_EAX) { breg = (sreg2 == X86_ESI) ? X86_EDI : X86_ESI; x86_push_reg (code, breg); x86_mov_reg_reg (code, breg, X86_EAX); } x86_mov_reg_membase (code, X86_EAX, breg, ins->inst_offset, 4); br [0] = code; x86_prefix (code, X86_LOCK_PREFIX); x86_cmpxchg_membase_reg (code, breg, ins->inst_offset, sreg2); br [1] = code; x86_branch8 (code, X86_CC_NE, -1, FALSE); x86_patch (br [1], br [0]); if (breg != ins->inst_basereg) x86_pop_reg (code, breg); if (ins->sreg2 != sreg2) x86_pop_reg (code, sreg2); break; } case OP_ATOMIC_CAS_I4: { g_assert (ins->dreg == X86_EAX); g_assert (ins->sreg3 == X86_EAX); g_assert (ins->sreg1 != X86_EAX); g_assert (ins->sreg1 != ins->sreg2); x86_prefix (code, X86_LOCK_PREFIX); x86_cmpxchg_membase_reg (code, ins->sreg1, ins->inst_offset, ins->sreg2); break; } case OP_ATOMIC_LOAD_I1: { x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE); break; } case OP_ATOMIC_LOAD_U1: { x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE); break; } case OP_ATOMIC_LOAD_I2: { x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE); break; } case OP_ATOMIC_LOAD_U2: { x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE); break; } case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_LOAD_U4: { x86_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4); break; } case OP_ATOMIC_LOAD_R4: case OP_ATOMIC_LOAD_R8: { x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, ins->opcode == OP_ATOMIC_LOAD_R8); break; } case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_U1: case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_U2: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U4: { int size; switch (ins->opcode) { case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_U1: size = 1; break; case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_U2: size = 2; break; case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U4: size = 4; break; } x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, size); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) x86_mfence (code); break; } case OP_ATOMIC_STORE_R4: case OP_ATOMIC_STORE_R8: { x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, ins->opcode == OP_ATOMIC_STORE_R8, TRUE); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) x86_mfence (code); break; }
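/*
 * A C model of the write barrier emitted below, using the case's own
 * local names (illustrative only):
 *
 *     if (!card_table_nursery_check || (value >> nursery_shift) == (nursery_start >> nursery_shift))
 *         card_table [(ptr >> card_table_shift) & card_table_mask] = 1;
 *
 * i.e. the card covering ptr is dirtied only when value points into the
 * nursery (or when the nursery check is disabled).
 */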
case OP_CARD_TABLE_WBARRIER: { int ptr = ins->sreg1; int value = ins->sreg2; guchar *br = NULL; int nursery_shift, card_table_shift; gpointer card_table_mask; size_t nursery_size; gulong card_table = (gsize)mono_gc_get_card_table (&card_table_shift, &card_table_mask); gulong nursery_start = (gsize)mono_gc_get_nursery (&nursery_shift, &nursery_size); gboolean card_table_nursery_check = mono_gc_card_table_nursery_check (); /* We need one register we can clobber: we choose EDX and make sreg1 fixed to EAX to work around limitations in the local register allocator. sreg2 might get allocated to EDX, but that is not a problem since we use it before clobbering EDX. */ g_assert (ins->sreg1 == X86_EAX);
/*
 * This is the code we produce:
 *
 *     edx = value
 *     edx >>= nursery_shift
 *     cmp edx, (nursery_start >> nursery_shift)
 *     jne done
 *     edx = ptr
 *     edx >>= card_table_shift
 *     card_table [edx] = 1
 * done:
 */
if (card_table_nursery_check) { if (value != X86_EDX) x86_mov_reg_reg (code, X86_EDX, value); x86_shift_reg_imm (code, X86_SHR, X86_EDX, nursery_shift); x86_alu_reg_imm (code, X86_CMP, X86_EDX, nursery_start >> nursery_shift); br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE); } x86_mov_reg_reg (code, X86_EDX, ptr); x86_shift_reg_imm (code, X86_SHR, X86_EDX, card_table_shift); if (card_table_mask) x86_alu_reg_imm (code, X86_AND, X86_EDX, (gsize)card_table_mask); x86_mov_membase_imm (code, X86_EDX, card_table, 1, 1); if (card_table_nursery_check) x86_patch (br, code); break; } #ifdef MONO_ARCH_SIMD_INTRINSICS case OP_ADDPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_ADD, ins->sreg1, ins->sreg2); break; case OP_DIVPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_DIV, ins->sreg1, ins->sreg2); break; case OP_MULPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_MUL, ins->sreg1, ins->sreg2); break; case OP_SUBPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_SUB, ins->sreg1, ins->sreg2); break; case OP_MAXPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_MAX, ins->sreg1, ins->sreg2); break; case OP_MINPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_MIN, ins->sreg1, ins->sreg2); break; case OP_COMPPS: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7); x86_sse_alu_ps_reg_reg_imm (code, X86_SSE_COMP, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ANDPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_AND, ins->sreg1, ins->sreg2); break; case OP_ANDNPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_ANDN, ins->sreg1, ins->sreg2); break; case OP_ORPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_OR, ins->sreg1, ins->sreg2); break; case OP_XORPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_XOR, ins->sreg1, ins->sreg2); break; case OP_SQRTPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_SQRT, ins->dreg, ins->sreg1); break; case OP_RSQRTPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_RSQRT, ins->dreg, ins->sreg1); break; case OP_RCPPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_RCP, ins->dreg, ins->sreg1); break; case OP_ADDSUBPS: x86_sse_alu_sd_reg_reg (code, X86_SSE_ADDSUB, ins->sreg1, ins->sreg2); break; case OP_HADDPS: x86_sse_alu_sd_reg_reg (code, X86_SSE_HADD, ins->sreg1, ins->sreg2); break; case OP_HSUBPS: x86_sse_alu_sd_reg_reg (code, X86_SSE_HSUB, ins->sreg1, ins->sreg2); break; case OP_DUPPS_HIGH: x86_sse_alu_ss_reg_reg (code, X86_SSE_MOVSHDUP, ins->dreg, ins->sreg1); break; case OP_DUPPS_LOW: x86_sse_alu_ss_reg_reg (code, X86_SSE_MOVSLDUP, ins->dreg, ins->sreg1); break; case OP_PSHUFLEW_HIGH: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); x86_pshufw_reg_reg (code, ins->dreg, ins->sreg1, ins->inst_c0, 1); break; case OP_PSHUFLEW_LOW: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); x86_pshufw_reg_reg (code, ins->dreg, ins->sreg1, ins->inst_c0, 0); break; case OP_PSHUFLED: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_SHUFPS: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); x86_sse_alu_reg_reg_imm8 (code, X86_SSE_SHUFP, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_SHUFPD: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0x3); x86_sse_alu_pd_reg_reg_imm8 (code, X86_SSE_SHUFP, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ADDPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_ADD, ins->sreg1, ins->sreg2); break; case
OP_DIVPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_DIV, ins->sreg1, ins->sreg2); break; case OP_MULPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_MUL, ins->sreg1, ins->sreg2); break; case OP_SUBPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_SUB, ins->sreg1, ins->sreg2); break; case OP_MAXPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_MAX, ins->sreg1, ins->sreg2); break; case OP_MINPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_MIN, ins->sreg1, ins->sreg2); break; case OP_COMPPD: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7); x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_COMP, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ANDPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_AND, ins->sreg1, ins->sreg2); break; case OP_ANDNPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_ANDN, ins->sreg1, ins->sreg2); break; case OP_ORPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_OR, ins->sreg1, ins->sreg2); break; case OP_XORPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_XOR, ins->sreg1, ins->sreg2); break; case OP_SQRTPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_SQRT, ins->dreg, ins->sreg1); break; case OP_ADDSUBPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_ADDSUB, ins->sreg1, ins->sreg2); break; case OP_HADDPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_HADD, ins->sreg1, ins->sreg2); break; case OP_HSUBPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_HSUB, ins->sreg1, ins->sreg2); break; case OP_DUPPD: x86_sse_alu_sd_reg_reg (code, X86_SSE_MOVDDUP, ins->dreg, ins->sreg1); break; case OP_EXTRACT_MASK: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMOVMSKB, ins->dreg, ins->sreg1); break; case OP_PAND: x86_sse_alu_pd_reg_reg (code, X86_SSE_PAND, ins->sreg1, ins->sreg2); break; case OP_POR: x86_sse_alu_pd_reg_reg (code, X86_SSE_POR, ins->sreg1, ins->sreg2); break; case OP_PXOR: x86_sse_alu_pd_reg_reg (code, X86_SSE_PXOR, ins->sreg1, ins->sreg2); break; case OP_PADDB: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDB, ins->sreg1, ins->sreg2); break; case OP_PADDW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDW, ins->sreg1, ins->sreg2); break; case OP_PADDD: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDD, ins->sreg1, ins->sreg2); break; case OP_PADDQ: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDQ, ins->sreg1, ins->sreg2); break; case OP_PSUBB: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBB, ins->sreg1, ins->sreg2); break; case OP_PSUBW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBW, ins->sreg1, ins->sreg2); break; case OP_PSUBD: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBD, ins->sreg1, ins->sreg2); break; case OP_PSUBQ: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBQ, ins->sreg1, ins->sreg2); break; case OP_PMAXB_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMAXUB, ins->sreg1, ins->sreg2); break; case OP_PMAXW_UN: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXUW, ins->sreg1, ins->sreg2); break; case OP_PMAXD_UN: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXUD, ins->sreg1, ins->sreg2); break; case OP_PMAXB: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXSB, ins->sreg1, ins->sreg2); break; case OP_PMAXW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMAXSW, ins->sreg1, ins->sreg2); break; case OP_PMAXD: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXSD, ins->sreg1, ins->sreg2); break; case OP_PAVGB_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PAVGB, ins->sreg1, ins->sreg2); break; case OP_PAVGW_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PAVGW, ins->sreg1, ins->sreg2); break; case OP_PMINB_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMINUB, ins->sreg1, ins->sreg2); break; case OP_PMINW_UN: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINUW, ins->sreg1, ins->sreg2); break; case OP_PMIND_UN: x86_sse_alu_sse41_reg_reg (code, 
X86_SSE_PMINUD, ins->sreg1, ins->sreg2); break; case OP_PMINB: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINSB, ins->sreg1, ins->sreg2); break; case OP_PMINW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMINSW, ins->sreg1, ins->sreg2); break; case OP_PMIND: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINSD, ins->sreg1, ins->sreg2); break; case OP_PCMPEQB: x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQB, ins->sreg1, ins->sreg2); break; case OP_PCMPEQW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQW, ins->sreg1, ins->sreg2); break; case OP_PCMPEQD: x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQD, ins->sreg1, ins->sreg2); break; case OP_PCMPEQQ: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PCMPEQQ, ins->sreg1, ins->sreg2); break; case OP_PCMPGTB: x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTB, ins->sreg1, ins->sreg2); break; case OP_PCMPGTW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTW, ins->sreg1, ins->sreg2); break; case OP_PCMPGTD: x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTD, ins->sreg1, ins->sreg2); break; case OP_PCMPGTQ: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PCMPGTQ, ins->sreg1, ins->sreg2); break; case OP_PSUM_ABS_DIFF: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSADBW, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWB: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLBW, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLWD, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWD: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLDQ, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWQ: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLQDQ, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_UNPCKL, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_UNPCKL, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHB: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHBW, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHWD, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHD: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHDQ, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHQ: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHQDQ, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_UNPCKH, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_UNPCKH, ins->sreg1, ins->sreg2); break; case OP_PACKW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKSSWB, ins->sreg1, ins->sreg2); break; case OP_PACKD: x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKSSDW, ins->sreg1, ins->sreg2); break; case OP_PACKW_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKUSWB, ins->sreg1, ins->sreg2); break; case OP_PACKD_UN: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PACKUSDW, ins->sreg1, ins->sreg2); break; case OP_PADDB_SAT_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDUSB, ins->sreg1, ins->sreg2); break; case OP_PSUBB_SAT_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBUSB, ins->sreg1, ins->sreg2); break; case OP_PADDW_SAT_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDUSW, ins->sreg1, ins->sreg2); break; case OP_PSUBW_SAT_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBUSW, ins->sreg1, ins->sreg2); break; case OP_PADDB_SAT: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDSB, ins->sreg1, ins->sreg2); break; case OP_PSUBB_SAT: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBSB, ins->sreg1, ins->sreg2); break; case OP_PADDW_SAT: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDSW, ins->sreg1, ins->sreg2); break; case OP_PSUBW_SAT: 
x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBSW, ins->sreg1, ins->sreg2); break; case OP_PMULW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULLW, ins->sreg1, ins->sreg2); break; case OP_PMULD: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMULLD, ins->sreg1, ins->sreg2); break; case OP_PMULQ: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULUDQ, ins->sreg1, ins->sreg2); break; case OP_PMULW_HIGH_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULHUW, ins->sreg1, ins->sreg2); break; case OP_PMULW_HIGH: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULHW, ins->sreg1, ins->sreg2); break; case OP_PSHRW: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SHR, ins->dreg, ins->inst_imm); break; case OP_PSHRW_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSRLW_REG, ins->dreg, ins->sreg2); break; case OP_PSARW: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SAR, ins->dreg, ins->inst_imm); break; case OP_PSARW_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSRAW_REG, ins->dreg, ins->sreg2); break; case OP_PSHLW: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SHL, ins->dreg, ins->inst_imm); break; case OP_PSHLW_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSLLW_REG, ins->dreg, ins->sreg2); break; case OP_PSHRD: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SHR, ins->dreg, ins->inst_imm); break; case OP_PSHRD_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSRLD_REG, ins->dreg, ins->sreg2); break; case OP_PSARD: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SAR, ins->dreg, ins->inst_imm); break; case OP_PSARD_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSRAD_REG, ins->dreg, ins->sreg2); break; case OP_PSHLD: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SHL, ins->dreg, ins->inst_imm); break; case OP_PSHLD_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSLLD_REG, ins->dreg, ins->sreg2); break; case OP_PSHRQ: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTQ, X86_SSE_SHR, ins->dreg, ins->inst_imm); break; case OP_PSHRQ_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSRLQ_REG, ins->dreg, ins->sreg2); break; case OP_PSHLQ: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTQ, X86_SSE_SHL, ins->dreg, ins->inst_imm); break; case OP_PSHLQ_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSLLQ_REG, ins->dreg, ins->sreg2); break; case OP_ICONV_TO_X: x86_movd_xreg_reg (code, ins->dreg, ins->sreg1); break; case OP_EXTRACT_I4: x86_movd_reg_xreg (code, ins->dreg, ins->sreg1); break; case OP_EXTRACT_I1: x86_movd_reg_xreg (code, ins->dreg, ins->sreg1); if (ins->inst_c0) x86_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_c0 * 8); x86_widen_reg (code, ins->dreg, ins->dreg, ins->inst_c1 == MONO_TYPE_I1, FALSE); break; case OP_EXTRACT_I2: x86_movd_reg_xreg (code, ins->dreg, ins->sreg1); if (ins->inst_c0) x86_shift_reg_imm (code, X86_SHR, ins->dreg, 16); x86_widen_reg (code, ins->dreg, ins->dreg, ins->inst_c1 == MONO_TYPE_I2, TRUE); break; case OP_EXTRACT_R8: if (ins->inst_c0) x86_sse_alu_pd_membase_reg (code, X86_SSE_MOVHPD_MEMBASE_REG, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1); else x86_sse_alu_sd_membase_reg (code, X86_SSE_MOVSD_MEMBASE_REG, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1); x86_fld_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE); break; case OP_INSERT_I2: x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_EXTRACTX_U2: x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PEXTRW, ins->dreg, ins->sreg1, ins->inst_c0); break;
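/*
 * SSE2 offers PINSRW/PEXTRW for 16-bit lanes but no byte insert, so the
 * slow path below merges a byte into its containing word; in C terms
 * (illustrative, names hypothetical):
 *
 *     guint16 w = extracted_word;                    // PEXTRW result
 *     if (byte_index & 1)
 *         w = (w & 0x00FF) | ((guint16)value << 8);  // odd index: high byte
 *     else
 *         w = (w & 0xFF00) | value;                  // even index: low byte
 *     // PINSRW then writes w back into lane byte_index / 2
 */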
case OP_INSERTX_U1_SLOW: /* sreg1 is the extracted ireg (scratch), sreg2 is the ireg to be inserted (scratch), dreg is the xreg that receives the value */ /* clear the bits from the extracted word */ x86_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_c0 & 1 ? 0x00FF : 0xFF00); /* shift the value to insert if needed */ if (ins->inst_c0 & 1) x86_shift_reg_imm (code, X86_SHL, ins->sreg2, 8); /* join them together */ x86_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2); x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, ins->inst_c0 / 2); break; case OP_INSERTX_I4_SLOW: x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg2, ins->inst_c0 * 2); x86_shift_reg_imm (code, X86_SHR, ins->sreg2, 16); x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg2, ins->inst_c0 * 2 + 1); break; case OP_INSERTX_R4_SLOW: x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE, TRUE); /* TODO: if inst_c0 == 0 use movss */ x86_sse_alu_pd_reg_membase_imm (code, X86_SSE_PINSRW, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset + 0, ins->inst_c0 * 2); x86_sse_alu_pd_reg_membase_imm (code, X86_SSE_PINSRW, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset + 2, ins->inst_c0 * 2 + 1); break; case OP_INSERTX_R8_SLOW: x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE); if (cfg->verbose_level) printf ("CONVERTING an OP_INSERTX_R8_SLOW %d offset %x\n", ins->inst_c0, offset); if (ins->inst_c0) x86_sse_alu_pd_reg_membase (code, X86_SSE_MOVHPD_REG_MEMBASE, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset); else x86_movsd_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset); break; case OP_STOREX_MEMBASE_REG: case OP_STOREX_MEMBASE: x86_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1); break; case OP_LOADX_MEMBASE: x86_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_LOADX_ALIGNED_MEMBASE: x86_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_STOREX_ALIGNED_MEMBASE_REG: x86_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1); break; case OP_STOREX_NTA_MEMBASE_REG: x86_sse_alu_reg_membase (code, X86_SSE_MOVNTPS, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_PREFETCH_MEMBASE: x86_sse_alu_reg_membase (code, X86_SSE_PREFETCH, ins->backend.arg_info, ins->sreg1, ins->inst_offset); break; case OP_XMOVE: /* FIXME: the peephole pass should have killed this */ if (ins->dreg != ins->sreg1) x86_movaps_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_XZERO: x86_sse_alu_pd_reg_reg (code, X86_SSE_PXOR, ins->dreg, ins->dreg); break; case OP_XONES: x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQB, ins->dreg, ins->dreg); break; case OP_FCONV_TO_R8_X: x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE); x86_movsd_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset); break; case OP_XCONV_R8_TO_I4: x86_cvttsd2si (code, ins->dreg, ins->sreg1); switch (ins->backend.source_opcode) { case OP_FCONV_TO_I1: x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE); break; case OP_FCONV_TO_U1: x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); break; case OP_FCONV_TO_I2: x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE); break; case OP_FCONV_TO_U2: x86_widen_reg 
		case OP_FCONV_TO_R8_X:
			x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE);
			x86_movsd_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
			break;
		case OP_XCONV_R8_TO_I4:
			x86_cvttsd2si (code, ins->dreg, ins->sreg1);
			switch (ins->backend.source_opcode) {
			case OP_FCONV_TO_I1:
				x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE);
				break;
			case OP_FCONV_TO_U1:
				x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
				break;
			case OP_FCONV_TO_I2:
				x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE);
				break;
			case OP_FCONV_TO_U2:
				x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE);
				break;
			}
			break;
		case OP_EXPAND_I2:
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, 0);
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, 1);
			x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0);
			break;
		case OP_EXPAND_I4:
			x86_movd_xreg_reg (code, ins->dreg, ins->sreg1);
			x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0);
			break;
		case OP_EXPAND_R4:
			x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE, TRUE);
			x86_movd_xreg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
			x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0);
			break;
		case OP_EXPAND_R8:
			x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE);
			x86_movsd_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
			x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0x44);
			break;
		case OP_CVTDQ2PD:
			x86_sse_alu_ss_reg_reg (code, X86_SSE_CVTDQ2PD, ins->dreg, ins->sreg1);
			break;
		case OP_CVTDQ2PS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_CVTDQ2PS, ins->dreg, ins->sreg1);
			break;
		case OP_CVTPD2DQ:
			x86_sse_alu_sd_reg_reg (code, X86_SSE_CVTPD2DQ, ins->dreg, ins->sreg1);
			break;
		case OP_CVTPD2PS:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_CVTPD2PS, ins->dreg, ins->sreg1);
			break;
		case OP_CVTPS2DQ:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_CVTPS2DQ, ins->dreg, ins->sreg1);
			break;
		case OP_CVTPS2PD:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_CVTPS2PD, ins->dreg, ins->sreg1);
			break;
		case OP_CVTTPD2DQ:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_CVTTPD2DQ, ins->dreg, ins->sreg1);
			break;
		case OP_CVTTPS2DQ:
			x86_sse_alu_ss_reg_reg (code, X86_SSE_CVTTPS2DQ, ins->dreg, ins->sreg1);
			break;
#endif
		case OP_LIVERANGE_START: {
			if (cfg->verbose_level > 1)
				printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
			MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
			break;
		}
		case OP_LIVERANGE_END: {
			if (cfg->verbose_level > 1)
				printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
			MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
			break;
		}
		case OP_GC_SAFE_POINT: {
			guint8 *br [1];

			x86_test_membase_imm (code, ins->sreg1, 0, 1);
			br[0] = code;
			x86_branch8 (code, X86_CC_EQ, 0, FALSE);
			code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
			x86_patch (br [0], code);
			break;
		}
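		/*
		 * The safe point emitted above keeps the common path to a test+branch:
		 * the poll word addressed by sreg1 is only set when the runtime wants
		 * this thread to call mono_threads_state_poll (), e.g. to suspend it
		 * for a stop-the-world phase.
		 */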
		case OP_GC_LIVENESS_DEF:
		case OP_GC_LIVENESS_USE:
		case OP_GC_PARAM_SLOT_LIVENESS_DEF:
			ins->backend.pc_offset = code - cfg->native_code;
			break;
		case OP_GC_SPILL_SLOT_LIVENESS_DEF:
			ins->backend.pc_offset = code - cfg->native_code;
			bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
			break;
		case OP_GET_SP:
			x86_mov_reg_reg (code, ins->dreg, X86_ESP);
			break;
		case OP_SET_SP:
			x86_mov_reg_reg (code, X86_ESP, ins->sreg1);
			break;
		case OP_FILL_PROF_CALL_CTX:
			x86_mov_membase_reg (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, esp), X86_ESP, sizeof (target_mgreg_t));
			x86_mov_membase_reg (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, ebp), X86_EBP, sizeof (target_mgreg_t));
			x86_mov_membase_reg (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, ebx), X86_EBX, sizeof (target_mgreg_t));
			x86_mov_membase_reg (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, esi), X86_ESI, sizeof (target_mgreg_t));
			x86_mov_membase_reg (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, edi), X86_EDI, sizeof (target_mgreg_t));
			break;
		case OP_GET_LAST_ERROR:
			code = emit_get_last_error (code, ins->dreg);
			break;
		default:
			g_warning ("unknown opcode %s\n", mono_inst_name (ins->opcode));
			g_assert_not_reached ();
		}

		if (G_UNLIKELY ((code - cfg->native_code - offset) > max_len)) {
			g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
					   mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
			g_assert_not_reached ();
		}

		cpos += max_len;
	}

	set_code_cursor (cfg, code);
}

#endif /* DISABLE_JIT */

void
mono_arch_register_lowlevel_calls (void)
{
}

void
mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
	unsigned char *ip = ji->ip.i + code;

	switch (ji->type) {
	case MONO_PATCH_INFO_IP:
		*((gconstpointer *)(ip)) = target;
		break;
	case MONO_PATCH_INFO_ABS:
	case MONO_PATCH_INFO_METHOD:
	case MONO_PATCH_INFO_METHOD_JUMP:
	case MONO_PATCH_INFO_JIT_ICALL_ID:
	case MONO_PATCH_INFO_BB:
	case MONO_PATCH_INFO_LABEL:
	case MONO_PATCH_INFO_RGCTX_FETCH:
	case MONO_PATCH_INFO_JIT_ICALL_ADDR:
	case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR:
		x86_patch (ip, (unsigned char*)target);
		break;
	case MONO_PATCH_INFO_NONE:
		break;
	case MONO_PATCH_INFO_R4:
	case MONO_PATCH_INFO_R8: {
		guint32 offset = mono_arch_get_patch_offset (ip);
		*((gconstpointer *)(ip + offset)) = target;
		break;
	}
	default: {
		guint32 offset = mono_arch_get_patch_offset (ip);
		*((gconstpointer *)(ip + offset)) = target;
		break;
	}
	}
}
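/*
 * Note on the patch kinds handled above: branch/call targets go through
 * x86_patch (), which rewrites the relative displacement in place, while the
 * R4/R8 cases (and the default case) store an absolute pointer at the offset
 * that mono_arch_get_patch_offset () derives from the instruction encoding.
 */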
static G_GNUC_UNUSED void
stack_unaligned (MonoMethod *m, gpointer caller)
{
	printf ("%s\n", mono_method_full_name (m, TRUE));
	g_assert_not_reached ();
}

#ifndef DISABLE_JIT

guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	MonoBasicBlock *bb;
	MonoMethodSignature *sig;
	MonoInst *inst;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	int alloc_size, pos, max_offset, i, cfa_offset;
	guint8 *code;
	gboolean need_stack_frame;

	cfg->code_size = MAX (cfg->header->code_size * 4, 10240);

	code = cfg->native_code = g_malloc (cfg->code_size);

#if 0
	{
		guint8 *br [16];

		/* Check that the stack is aligned on osx */
		x86_mov_reg_reg (code, X86_EAX, X86_ESP);
		x86_alu_reg_imm (code, X86_AND, X86_EAX, 15);
		x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0xc);
		br [0] = code;
		x86_branch_disp (code, X86_CC_Z, 0, FALSE);
		x86_push_membase (code, X86_ESP, 0);
		x86_push_imm (code, cfg->method);
		x86_mov_reg_imm (code, X86_EAX, stack_unaligned);
		x86_call_reg (code, X86_EAX);
		x86_patch (br [0], code);
	}
#endif

	/* Offset between RSP and the CFA */
	cfa_offset = 0;

	// CFA = sp + 4
	cfa_offset = 4;
	mono_emit_unwind_op_def_cfa (cfg, code, X86_ESP, cfa_offset);
	// IP saved at CFA - 4
	/* There is no IP reg on x86 */
	mono_emit_unwind_op_offset (cfg, code, X86_NREG, -cfa_offset);
	mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);

	need_stack_frame = needs_stack_frame (cfg);

	if (need_stack_frame) {
		x86_push_reg (code, X86_EBP);
		cfa_offset += 4;
		mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
		mono_emit_unwind_op_offset (cfg, code, X86_EBP, - cfa_offset);
		x86_mov_reg_reg (code, X86_EBP, X86_ESP);
		mono_emit_unwind_op_def_cfa_reg (cfg, code, X86_EBP);
		/* These are handled automatically by the stack marking code */
		mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
	} else {
		cfg->frame_reg = X86_ESP;
	}

	cfg->stack_offset += cfg->param_area;
	cfg->stack_offset = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);

	alloc_size = cfg->stack_offset;
	pos = 0;

	if (!method->save_lmf) {
		if (cfg->used_int_regs & (1 << X86_EBX)) {
			x86_push_reg (code, X86_EBX);
			pos += 4;
			cfa_offset += 4;
			mono_emit_unwind_op_offset (cfg, code, X86_EBX, - cfa_offset);
			/* These are handled automatically by the stack marking code */
			mini_gc_set_slot_type_from_cfa (cfg, - cfa_offset, SLOT_NOREF);
		}

		if (cfg->used_int_regs & (1 << X86_EDI)) {
			x86_push_reg (code, X86_EDI);
			pos += 4;
			cfa_offset += 4;
			mono_emit_unwind_op_offset (cfg, code, X86_EDI, - cfa_offset);
			mini_gc_set_slot_type_from_cfa (cfg, - cfa_offset, SLOT_NOREF);
		}

		if (cfg->used_int_regs & (1 << X86_ESI)) {
			x86_push_reg (code, X86_ESI);
			pos += 4;
			cfa_offset += 4;
			mono_emit_unwind_op_offset (cfg, code, X86_ESI, - cfa_offset);
			mini_gc_set_slot_type_from_cfa (cfg, - cfa_offset, SLOT_NOREF);
		}
	}

	alloc_size -= pos;

	/* the original alloc_size is already aligned: there is %ebp and retip pushed, so realign */
	if (mono_do_x86_stack_align && need_stack_frame) {
		int tot = alloc_size + pos + 4; /* ret ip */
		if (need_stack_frame)
			tot += 4; /* ebp */
		tot &= MONO_ARCH_FRAME_ALIGNMENT - 1;
		if (tot) {
			alloc_size += MONO_ARCH_FRAME_ALIGNMENT - tot;
			for (i = 0; i < MONO_ARCH_FRAME_ALIGNMENT - tot; i += sizeof (target_mgreg_t))
				mini_gc_set_slot_type_from_fp (cfg, - (alloc_size + pos - i), SLOT_NOREF);
		}
	}

	cfg->arch.sp_fp_offset = alloc_size + pos;

	if (alloc_size) {
		/* See mono_emit_stack_alloc */
#if defined (TARGET_WIN32) || defined (MONO_ARCH_SIGSEGV_ON_ALTSTACK)
		guint32 remaining_size = alloc_size;
		/*FIXME handle unbounded code expansion, we should use a loop in case of more than X iterations*/
		guint32 required_code_size = ((remaining_size / 0x1000) + 1) * 8; /*8 is the max size of x86_alu_reg_imm + x86_test_membase_reg*/
		set_code_cursor (cfg, code);
		code = realloc_code (cfg, required_code_size);
		while (remaining_size >= 0x1000) {
			x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000);
			x86_test_membase_reg (code, X86_ESP, 0, X86_ESP);
			remaining_size -= 0x1000;
		}
		if (remaining_size)
			x86_alu_reg_imm (code, X86_SUB, X86_ESP, remaining_size);
#else
		x86_alu_reg_imm (code, X86_SUB, X86_ESP, alloc_size);
#endif

		g_assert (need_stack_frame);
	}

	if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED ||
			cfg->method->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE) {
		x86_alu_reg_imm (code, X86_AND, X86_ESP, -MONO_ARCH_FRAME_ALIGNMENT);
	}

#if DEBUG_STACK_ALIGNMENT
	/* check the stack is aligned */
	if (need_stack_frame && method->wrapper_type == MONO_WRAPPER_NONE) {
		x86_mov_reg_reg (code, X86_ECX, X86_ESP);
		x86_alu_reg_imm (code, X86_AND, X86_ECX, MONO_ARCH_FRAME_ALIGNMENT - 1);
		x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
		x86_branch_disp (code, X86_CC_EQ, 3, FALSE);
		x86_breakpoint (code);
	}
#endif
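	/*
	 * On targets with stack guard pages (the TARGET_WIN32 /
	 * MONO_ARCH_SIGSEGV_ON_ALTSTACK branch above), large frames are committed
	 * one 0x1000-byte page at a time: each SUB is followed by a dummy TEST so
	 * every guard page is touched in order instead of being skipped by a
	 * single large adjustment.
	 */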
	/* compute max_offset in order to use short forward jumps */
	max_offset = 0;
	if (cfg->opt & MONO_OPT_BRANCH) {
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
			MonoInst *ins;
			bb->max_offset = max_offset;

			/* max alignment for loops */
			if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
				max_offset += LOOP_ALIGNMENT;
			MONO_BB_FOR_EACH_INS (bb, ins) {
				if (ins->opcode == OP_LABEL)
					ins->inst_c1 = max_offset;
				max_offset += ins_get_size (ins->opcode);
			}
		}
	}

	/* store runtime generic context */
	if (cfg->rgctx_var) {
		g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET && cfg->rgctx_var->inst_basereg == X86_EBP);

		x86_mov_membase_reg (code, X86_EBP, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, 4);
	}

	if (method->save_lmf)
		code = emit_setup_lmf (cfg, code, cfg->lmf_var->inst_offset, cfa_offset);

	{
		MonoInst *ins;

		if (cfg->arch.ss_tramp_var) {
			/* Initialize ss_tramp_var */
			ins = cfg->arch.ss_tramp_var;
			g_assert (ins->opcode == OP_REGOFFSET);
			g_assert (!cfg->compile_aot);
			x86_mov_membase_imm (code, ins->inst_basereg, ins->inst_offset, (gsize)&ss_trampoline, 4);
		}

		if (cfg->arch.bp_tramp_var) {
			/* Initialize bp_tramp_var */
			ins = cfg->arch.bp_tramp_var;
			g_assert (ins->opcode == OP_REGOFFSET);
			g_assert (!cfg->compile_aot);
			x86_mov_membase_imm (code, ins->inst_basereg, ins->inst_offset, (gsize)&bp_trampoline, 4);
		}
	}

	/* load arguments allocated to register from the stack */
	sig = mono_method_signature_internal (method);
	pos = 0;

	cinfo = cfg->arch.cinfo;

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		inst = cfg->args [pos];
		ainfo = &cinfo->args [pos];
		if (inst->opcode == OP_REGVAR) {
			if (storage_in_ireg (ainfo->storage)) {
				x86_mov_reg_reg (code, inst->dreg, ainfo->reg);
			} else {
				g_assert (need_stack_frame);
				x86_mov_reg_membase (code, inst->dreg, X86_EBP, ainfo->offset + ARGS_OFFSET, 4);
			}
			if (cfg->verbose_level > 2)
				g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
		} else {
			if (storage_in_ireg (ainfo->storage)) {
				x86_mov_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg, 4);
			}
		}
		pos++;
	}

	set_code_cursor (cfg, code);

	return code;
}

#endif
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig = mono_method_signature_internal (method);
	int i, quad, pos;
	guint32 stack_to_pop;
	guint8 *code;
	int max_epilog_size = 16;
	CallInfo *cinfo;
	gboolean need_stack_frame = needs_stack_frame (cfg);

	if (cfg->method->save_lmf)
		max_epilog_size += 128;

	code = realloc_code (cfg, max_epilog_size);

	/* the code restoring the registers must be kept in sync with OP_TAILCALL */
	pos = 0;

	if (method->save_lmf) {
		gint32 lmf_offset = cfg->lmf_var->inst_offset;

		/* restore caller saved regs */
		if (cfg->used_int_regs & (1 << X86_EBX)) {
			x86_mov_reg_membase (code, X86_EBX, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebx), 4);
		}

		if (cfg->used_int_regs & (1 << X86_EDI)) {
			x86_mov_reg_membase (code, X86_EDI, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, edi), 4);
		}
		if (cfg->used_int_regs & (1 << X86_ESI)) {
			x86_mov_reg_membase (code, X86_ESI, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, esi), 4);
		}

		/* EBP is restored by LEAVE */
	} else {
		for (i = 0; i < X86_NREG; ++i) {
			if ((cfg->used_int_regs & X86_CALLER_REGS & (1 << i)) && (i != X86_EBP)) {
				pos -= 4;
			}
		}

		g_assert (!pos || need_stack_frame);
		if (pos) {
			x86_lea_membase (code, X86_ESP, X86_EBP, pos);
		}

		if (cfg->used_int_regs & (1 << X86_ESI)) {
			x86_pop_reg (code, X86_ESI);
		}
		if (cfg->used_int_regs & (1 << X86_EDI)) {
			x86_pop_reg (code, X86_EDI);
		}
		if (cfg->used_int_regs & (1 << X86_EBX)) {
			x86_pop_reg (code, X86_EBX);
		}
	}

	/* Load returned vtypes into registers if needed */
	cinfo = cfg->arch.cinfo;
	if (cinfo->ret.storage == ArgValuetypeInReg) {
		for (quad = 0; quad < 2; quad ++) {
			switch (cinfo->ret.pair_storage [quad]) {
			case ArgInIReg:
				x86_mov_reg_membase (code, cinfo->ret.pair_regs [quad], cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (target_mgreg_t)), 4);
				break;
			case ArgOnFloatFpStack:
				x86_fld_membase (code, cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (target_mgreg_t)), FALSE);
				break;
			case ArgOnDoubleFpStack:
				x86_fld_membase (code, cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (target_mgreg_t)), TRUE);
				break;
			case ArgNone:
				break;
			default:
				g_assert_not_reached ();
			}
		}
	}

	if (need_stack_frame)
		x86_leave (code);

	if (CALLCONV_IS_STDCALL (sig)) {
		MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, sig->param_count + 1);

		stack_to_pop = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
	} else if (cinfo->callee_stack_pop)
		stack_to_pop = cinfo->callee_stack_pop;
	else
		stack_to_pop = 0;

	if (stack_to_pop) {
		g_assert (need_stack_frame);
		x86_ret_imm (code, stack_to_pop);
	} else {
		x86_ret (code);
	}

	set_code_cursor (cfg, code);
}
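/*
 * stack_to_pop above implements the callee-pops conventions: stdcall/thiscall
 * P/Invokes pop their whole argument area and vtype returns pop the hidden
 * return-value pointer slot, so those epilogs end in "ret imm16" rather than
 * a plain "ret".
 */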
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;
	int nthrows, i;
	guint8 *code;
	MonoClass *exc_classes [16];
	guint8 *exc_throw_start [16], *exc_throw_end [16];
	guint32 code_size;
	int exc_count = 0;

	/* Compute needed space */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		if (patch_info->type == MONO_PATCH_INFO_EXC)
			exc_count++;
	}

	/*
	 * make sure we have enough space for exceptions
	 * 16 is the size of two push_imm instructions and a call
	 */
	if (cfg->compile_aot)
		code_size = exc_count * 32;
	else
		code_size = exc_count * 16;

	code = realloc_code (cfg, code_size);

	nthrows = 0;
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_EXC: {
			MonoClass *exc_class;
			guint8 *buf, *buf2;
			guint32 throw_ip;

			x86_patch (patch_info->ip.i + cfg->native_code, code);

			exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);
			throw_ip = patch_info->ip.i;

			/* Find a throw sequence for the same exception class */
			for (i = 0; i < nthrows; ++i)
				if (exc_classes [i] == exc_class)
					break;
			if (i < nthrows) {
				x86_push_imm (code, (exc_throw_end [i] - cfg->native_code) - throw_ip);
				x86_jump_code (code, exc_throw_start [i]);
				patch_info->type = MONO_PATCH_INFO_NONE;
			} else {
				guint32 size;

				/* Compute size of code following the push <OFFSET> */
				size = 5 + 5;

				/*This is aligned to 16 bytes by the callee. This way we save a few bytes here.*/

				if ((code - cfg->native_code) - throw_ip < 126 - size) {
					/* Use the shorter form */
					buf = buf2 = code;
					x86_push_imm (code, 0);
				} else {
					buf = code;
					x86_push_imm (code, 0xf0f0f0f0);
					buf2 = code;
				}

				if (nthrows < 16) {
					exc_classes [nthrows] = exc_class;
					exc_throw_start [nthrows] = code;
				}

				x86_push_imm (code, m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF);
				patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception;
				patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID;
				patch_info->ip.i = code - cfg->native_code;
				x86_call_code (code, 0);
				x86_push_imm (buf, (code - cfg->native_code) - throw_ip);
				while (buf < buf2)
					x86_nop (buf);

				if (nthrows < 16) {
					exc_throw_end [nthrows] = code;
					nthrows ++;
				}
			}
			break;
		}
		default:
			/* do nothing */
			break;
		}
		set_code_cursor (cfg, code);
	}
	set_code_cursor (cfg, code);
}

MONO_NEVER_INLINE
void
mono_arch_flush_icache (guint8 *code, gint size)
{
	/* call/ret required (or likely other control transfer) */
}

void
mono_arch_flush_register_windows (void)
{
}

gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
	return TRUE;
}

void
mono_arch_finish_init (void)
{
	char *mono_no_tls = g_getenv ("MONO_NO_TLS");
	if (!mono_no_tls) {
#ifndef TARGET_WIN32
#if MONO_XEN_OPT
		optimize_for_xen = access ("/proc/xen", F_OK) == 0;
#endif
#endif
	} else {
		g_free (mono_no_tls);
	}
}

// Linear handler, the bsearch head compare is shorter
//[2 + 4] x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
//[1 + 1] x86_branch8(inst,cond,imm,is_signed)
//        x86_patch(ins,target)
//[1 + 5] x86_jump_mem(inst,mem)

#define CMP_SIZE 6
#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 5
#define JUMP_IMM_SIZE 6
#define ENABLE_WRONG_METHOD_CHECK 0
#define DEBUG_IMT 0

static int
imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
{
	int i, distance = 0;
	for (i = start; i < target; ++i)
		distance += imt_entries [i]->chunk_size;
	return distance;
}
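/*
 * The chunk_size bookkeeping is what lets the IMT trampoline builder pick
 * branch encodings up front: imt_branch_distance () sums the worst-case
 * chunk sizes (derived from CMP_SIZE/BR_*_SIZE/JUMP_IMM_SIZE) between two
 * entries, and the builder below emits an 8-bit branch only when that
 * distance passes x86_is_imm8 ().
 */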
/*
 * LOCKING: called with the domain lock held
 */
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp)
{
	int i;
	int size = 0;
	guint8 *code, *start;
	GSList *unwind_ops;
	MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass);

	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
			} else {
				if (fail_tramp) {
					item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + JUMP_IMM_SIZE * 2;
				} else {
					item->chunk_size += JUMP_IMM_SIZE;
#if ENABLE_WRONG_METHOD_CHECK
					item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
#endif
				}
			}
		} else {
			item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		}
		size += item->chunk_size;
	}
	if (fail_tramp) {
		code = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, size);
	} else {
		code = mono_mem_manager_code_reserve (mem_manager, size);
	}
	start = code;

	unwind_ops = mono_arch_get_cie_program ();

	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		item->code_target = code;
		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done)
					x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gsize)item->key);
				item->jmp_code = code;
				x86_branch8 (code, X86_CC_NE, 0, FALSE);
				if (item->has_target_code)
					x86_jump_code (code, item->value.target_code);
				else
					x86_jump_mem (code, (gsize)&vtable->vtable [item->value.vtable_slot]);
			} else {
				if (fail_tramp) {
					x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gsize)item->key);
					item->jmp_code = code;
					x86_branch8 (code, X86_CC_NE, 0, FALSE);
					if (item->has_target_code)
						x86_jump_code (code, item->value.target_code);
					else
						x86_jump_mem (code, (gsize)&vtable->vtable [item->value.vtable_slot]);
					x86_patch (item->jmp_code, code);
					x86_jump_code (code, fail_tramp);
					item->jmp_code = NULL;
				} else {
					/* enable the commented code to assert on wrong method */
#if ENABLE_WRONG_METHOD_CHECK
					x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gsize)item->key);
					item->jmp_code = code;
					x86_branch8 (code, X86_CC_NE, 0, FALSE);
#endif
					if (item->has_target_code)
						x86_jump_code (code, item->value.target_code);
					else
						x86_jump_mem (code, (gsize)&vtable->vtable [item->value.vtable_slot]);
#if ENABLE_WRONG_METHOD_CHECK
					x86_patch (item->jmp_code, code);
					x86_breakpoint (code);
					item->jmp_code = NULL;
#endif
				}
			}
		} else {
			x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gsize)item->key);
			item->jmp_code = code;
			if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx)))
				x86_branch8 (code, X86_CC_GE, 0, FALSE);
			else
				x86_branch32 (code, X86_CC_GE, 0, FALSE);
		}
	}

	/* patch the branches to get to the target items */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx) {
				x86_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
			}
		}
	}

	if (!fail_tramp)
		UnlockedAdd (&mono_stats.imt_trampolines_size, code - start);
	g_assertf (code - start <= size, "%d %d", (int)(code - start), size);

#if DEBUG_IMT
	{
		char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count);
		mono_disassemble_code (NULL, (guint8*)start, code - start, buff);
		g_free (buff);
	}
#endif
	if (mono_jit_map_is_enabled ()) {
		char *buff;
		if (vtable)
			buff = g_strdup_printf ("imt_%s_%s_entries_%d", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count);
		else
			buff = g_strdup_printf ("imt_trampoline_entries_%d", count);
		mono_emit_jit_tramp (start, code - start, buff);
		g_free (buff);
	}

	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), mem_manager);

	return start;
}
MonoMethod*
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
	return (MonoMethod*) regs [MONO_ARCH_IMT_REG];
}

MonoVTable*
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
	return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}

GSList*
mono_arch_get_cie_program (void)
{
	GSList *l = NULL;

	mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, X86_ESP, 4);
	mono_add_unwind_op_offset (l, (guint8*)NULL, (guint8*)NULL, X86_NREG, -4);

	return l;
}

MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;
	int opcode = 0;

	if (cmethod->klass == mono_class_try_get_math_class ()) {
		if (strcmp (cmethod->name, "Tan") == 0) {
			opcode = OP_TAN;
		} else if (strcmp (cmethod->name, "Atan") == 0) {
			opcode = OP_ATAN;
		} else if (strcmp (cmethod->name, "Sqrt") == 0) {
			opcode = OP_SQRT;
		} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
			opcode = OP_ABS;
		} else if (strcmp (cmethod->name, "Round") == 0 && fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) {
			opcode = OP_ROUND;
		}

		if (opcode && fsig->param_count == 1) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = STACK_R8;
			ins->dreg = mono_alloc_freg (cfg);
			ins->sreg1 = args [0]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);
		}

		if (cfg->opt & MONO_OPT_CMOV) {
			opcode = 0;

			if (strcmp (cmethod->name, "Min") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_IMIN;
			} else if (strcmp (cmethod->name, "Max") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_IMAX;
			}

			if (opcode && fsig->param_count == 2) {
				MONO_INST_NEW (cfg, ins, opcode);
				ins->type = STACK_I4;
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		}

#if 0
		/* OP_FREM is not IEEE compatible */
		else if (strcmp (cmethod->name, "IEEERemainder") == 0 && fsig->param_count == 2) {
			MONO_INST_NEW (cfg, ins, OP_FREM);
			ins->inst_i0 = args [0];
			ins->inst_i1 = args [1];
		}
#endif
	}

	return ins;
}

guint32
mono_arch_get_patch_offset (guint8 *code)
{
	if ((code [0] == 0x8b) && (x86_modrm_mod (code [1]) == 0x2))
		return 2;
	else if (code [0] == 0xba)
		return 1;
	else if (code [0] == 0x68)
		/* push IMM */
		return 1;
	else if ((code [0] == 0xff) && (x86_modrm_reg (code [1]) == 0x6))
		/* push <OFFSET>(<REG>) */
		return 2;
	else if ((code [0] == 0xff) && (x86_modrm_reg (code [1]) == 0x2))
		/* call *<OFFSET>(<REG>) */
		return 2;
	else if ((code [0] == 0xdd) || (code [0] == 0xd9))
		/* fldl <ADDR> */
		return 2;
	else if ((code [0] == 0x58) && (code [1] == 0x05))
		/* pop %eax; add <OFFSET>, %eax */
		return 2;
	else if ((code [0] >= 0x58) && (code [0] <= 0x58 + X86_NREG) && (code [1] == 0x81))
		/* pop <REG>; add <OFFSET>, <REG> */
		return 3;
	else if ((code [0] >= 0xb8) && (code [0] < 0xb8 + 8))
		/* mov <REG>, imm */
		return 1;
	else if (code [0] == 0xE9)
		/* jmp eip+32b */
		return 1;
	g_assert_not_reached ();
	return -1;
}

/**
 * \return TRUE if no sw breakpoint was present (always).
 *
 * Copy \p size bytes from \p code - \p offset to the buffer \p buf. If the debugger inserted software
 * breakpoints in the original code, they are removed in the copy.
 */
gboolean
mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size)
{
	/*
	 * If method_start is non-NULL we need to perform bound checks, since we access memory
	 * at code - offset we could go before the start of the method and end up in a different
	 * page of memory that is not mapped or read incorrect data anyway. We zero-fill the bytes
	 * instead.
	 */
	if (!method_start || code - offset >= method_start) {
		memcpy (buf, code - offset, size);
	} else {
		int diff = code - method_start;
		memset (buf, 0, size);
		memcpy (buf + offset - diff, method_start, diff + size - offset);
	}
	return TRUE;
}
/*
 * mono_x86_get_this_arg_offset:
 *
 *   Return the offset of the stack location where this is passed during a virtual
 * call.
 */
guint32
mono_x86_get_this_arg_offset (MonoMethodSignature *sig)
{
	return 0;
}

gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
	host_mgreg_t esp = regs [X86_ESP];
	gpointer res;
	int offset;

	offset = 0;

	/*
	 * The stack looks like:
	 * <other args>
	 * <this=delegate>
	 */
	res = ((MonoObject**)esp) [0];
	return res;
}

#define MAX_ARCH_DELEGATE_PARAMS 10

static gpointer
get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, guint32 param_count)
{
	guint8 *code, *start;
	int code_reserve = 64;
	GSList *unwind_ops;

	unwind_ops = mono_arch_get_cie_program ();

	/*
	 * The stack contains:
	 * <delegate>
	 * <return addr>
	 */

	if (has_target) {
		start = code = mono_global_codeman_reserve (code_reserve);

		/* Replace the this argument with the target */
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
		x86_mov_reg_membase (code, X86_ECX, X86_EAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 4);
		x86_mov_membase_reg (code, X86_ESP, 4, X86_ECX, 4);
		x86_jump_membase (code, X86_EAX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
	} else {
		int i = 0;
		/* 8 for mov_reg and jump, plus 8 for each parameter */
		code_reserve = 8 + (param_count * 8);
		/*
		 * The stack contains:
		 * <args in reverse order>
		 * <delegate>
		 * <return addr>
		 *
		 * and we need:
		 * <args in reverse order>
		 * <return addr>
		 *
		 * without unbalancing the stack.
		 * So move each arg up a spot in the stack (overwriting un-needed 'this' arg)
		 * and leaving original spot of first arg as placeholder in stack so
		 * when callee pops stack everything works.
		 */

		start = code = mono_global_codeman_reserve (code_reserve);

		/* store delegate for access to method_ptr */
		x86_mov_reg_membase (code, X86_ECX, X86_ESP, 4, 4);

		/* move args up */
		for (i = 0; i < param_count; ++i) {
			x86_mov_reg_membase (code, X86_EAX, X86_ESP, (i+2)*4, 4);
			x86_mov_membase_reg (code, X86_ESP, (i+1)*4, X86_EAX, 4);
		}

		x86_jump_membase (code, X86_ECX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
	}

	g_assertf ((code - start) <= code_reserve, "%d %d", (int)(code - start), code_reserve);

	if (has_target) {
		*info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
	} else {
		char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
		*info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops);
		g_free (name);
	}

	if (mono_jit_map_is_enabled ()) {
		char *buff;
		if (has_target)
			buff = (char*)"delegate_invoke_has_target";
		else
			buff = g_strdup_printf ("delegate_invoke_no_target_%d", param_count);
		mono_emit_jit_tramp (start, code - start, buff);
		if (!has_target)
			g_free (buff);
	}
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));

	return start;
}
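/*
 * Note the deliberate stack hole in the no-target thunk above: each argument
 * is copied one slot toward the return address, overwriting the delegate's
 * "this" at [esp+4], and the original spot of the first arg stays behind as a
 * placeholder so the callee still pops exactly what the caller pushed.
 */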
#define MAX_VIRTUAL_DELEGATE_OFFSET 32

static gpointer
get_delegate_virtual_invoke_impl (MonoTrampInfo **info, gboolean load_imt_reg, int offset)
{
	guint8 *code, *start;
	int size = 24;
	char *tramp_name;
	GSList *unwind_ops;

	if (offset / (int)sizeof (target_mgreg_t) > MAX_VIRTUAL_DELEGATE_OFFSET)
		return NULL;

	/*
	 * The stack contains:
	 * <delegate>
	 * <return addr>
	 */
	start = code = mono_global_codeman_reserve (size);

	unwind_ops = mono_arch_get_cie_program ();

	/* Replace the this argument with the target */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
	x86_mov_reg_membase (code, X86_ECX, X86_EAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 4);
	x86_mov_membase_reg (code, X86_ESP, 4, X86_ECX, 4);

	if (load_imt_reg) {
		/* Load the IMT reg */
		x86_mov_reg_membase (code, MONO_ARCH_IMT_REG, X86_EAX, MONO_STRUCT_OFFSET (MonoDelegate, method), 4);
	}

	/* Load the vtable */
	x86_mov_reg_membase (code, X86_EAX, X86_ECX, MONO_STRUCT_OFFSET (MonoObject, vtable), 4);
	x86_jump_membase (code, X86_EAX, offset);

	g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size);

	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));

	tramp_name = mono_get_delegate_virtual_invoke_impl_name (load_imt_reg, offset);
	*info = mono_tramp_info_create (tramp_name, start, code - start, NULL, unwind_ops);
	g_free (tramp_name);

	return start;
}

GSList*
mono_arch_get_delegate_invoke_impls (void)
{
	GSList *res = NULL;
	MonoTrampInfo *info;
	int i;

	get_delegate_invoke_impl (&info, TRUE, 0);
	res = g_slist_prepend (res, info);

	for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
		get_delegate_invoke_impl (&info, FALSE, i);
		res = g_slist_prepend (res, info);
	}

	for (i = 0; i <= MAX_VIRTUAL_DELEGATE_OFFSET; ++i) {
		get_delegate_virtual_invoke_impl (&info, TRUE, - i * TARGET_SIZEOF_VOID_P);
		res = g_slist_prepend (res, info);

		get_delegate_virtual_invoke_impl (&info, FALSE, i * TARGET_SIZEOF_VOID_P);
		res = g_slist_prepend (res, info);
	}

	return res;
}

gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
	guint8 *code, *start;

	if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
		return NULL;

	/* FIXME: Support more cases */
	if (MONO_TYPE_ISSTRUCT (sig->ret))
		return NULL;

	/*
	 * The stack contains:
	 * <delegate>
	 * <return addr>
	 */

	if (has_target) {
		static guint8* cached = NULL;
		if (cached)
			return cached;

		if (mono_ee_features.use_aot_trampolines) {
			start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
		} else {
			MonoTrampInfo *info;
			start = (guint8*)get_delegate_invoke_impl (&info, TRUE, 0);
			mono_tramp_info_register (info, NULL);
		}

		mono_memory_barrier ();

		cached = start;
	} else {
		static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
		int i = 0;

		for (i = 0; i < sig->param_count; ++i)
			if (!mono_is_regsize_var (sig->params [i]))
				return NULL;

		code = cache [sig->param_count];
		if (code)
			return code;

		if (mono_ee_features.use_aot_trampolines) {
			char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
			start = (guint8*)mono_aot_get_trampoline (name);
			g_free (name);
		} else {
			MonoTrampInfo *info;
			start = (guint8*)get_delegate_invoke_impl (&info, FALSE, sig->param_count);
			mono_tramp_info_register (info, NULL);
		}

		mono_memory_barrier ();

		cache [sig->param_count] = start;
	}

	return start;
}

gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
	MonoTrampInfo *info;
	gpointer code;

	code = get_delegate_virtual_invoke_impl (&info, load_imt_reg, offset);
	if (code)
		mono_tramp_info_register (info, NULL);
	return code;
}
host_mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
	switch (reg) {
	case X86_EAX: return ctx->eax;
	case X86_EBX: return ctx->ebx;
	case X86_ECX: return ctx->ecx;
	case X86_EDX: return ctx->edx;
	case X86_ESP: return ctx->esp;
	case X86_EBP: return ctx->ebp;
	case X86_ESI: return ctx->esi;
	case X86_EDI: return ctx->edi;
	default:
		g_assert_not_reached ();
		return 0;
	}
}

host_mgreg_t*
mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg)
{
	switch (reg) {
	case X86_EAX: return &ctx->eax;
	case X86_EBX: return &ctx->ebx;
	case X86_ECX: return &ctx->ecx;
	case X86_EDX: return &ctx->edx;
	case X86_ESP: return &ctx->esp;
	case X86_EBP: return &ctx->ebp;
	case X86_ESI: return &ctx->esi;
	case X86_EDI: return &ctx->edi;
	default:
		g_assert_not_reached ();
		return 0;
	}
}

void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
{
	switch (reg) {
	case X86_EAX:
		ctx->eax = val;
		break;
	case X86_EBX:
		ctx->ebx = val;
		break;
	case X86_ECX:
		ctx->ecx = val;
		break;
	case X86_EDX:
		ctx->edx = val;
		break;
	case X86_ESP:
		ctx->esp = val;
		break;
	case X86_EBP:
		ctx->ebp = val;
		break;
	case X86_ESI:
		ctx->esi = val;
		break;
	case X86_EDI:
		ctx->edi = val;
		break;
	default:
		g_assert_not_reached ();
	}
}

#ifdef MONO_ARCH_SIMD_INTRINSICS

static MonoInst*
get_float_to_x_spill_area (MonoCompile *cfg)
{
	if (!cfg->fconv_to_r8_x_var) {
		cfg->fconv_to_r8_x_var = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL);
		cfg->fconv_to_r8_x_var->flags |= MONO_INST_VOLATILE; /*FIXME, use the don't regalloc flag*/
	}
	return cfg->fconv_to_r8_x_var;
}

/*
 * Convert all fconv opts that MONO_OPT_SSE2 would get wrong.
 */
void
mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
{
	MonoInst *fconv;
	int dreg, src_opcode;

	if (!(cfg->opt & MONO_OPT_SSE2) || !(cfg->opt & MONO_OPT_SIMD) || COMPILE_LLVM (cfg))
		return;

	switch (src_opcode = ins->opcode) {
	case OP_FCONV_TO_I1:
	case OP_FCONV_TO_U1:
	case OP_FCONV_TO_I2:
	case OP_FCONV_TO_U2:
	case OP_FCONV_TO_I4:
	case OP_FCONV_TO_I:
		break;
	default:
		return;
	}

	/* dreg is the IREG and sreg1 is the FREG */
	MONO_INST_NEW (cfg, fconv, OP_FCONV_TO_R8_X);
	fconv->klass = NULL; /*FIXME, what can I use here as the Mono.Simd lib might not be loaded yet*/
	fconv->sreg1 = ins->sreg1;
	fconv->dreg = mono_alloc_ireg (cfg);
	fconv->type = STACK_VTYPE;
	fconv->backend.spill_var = get_float_to_x_spill_area (cfg);

	mono_bblock_insert_before_ins (cfg->cbb, ins, fconv);

	dreg = ins->dreg;
	NULLIFY_INS (ins);
	ins->opcode = OP_XCONV_R8_TO_I4;

	ins->klass = mono_defaults.int32_class;
	ins->sreg1 = fconv->dreg;
	ins->dreg = dreg;
	ins->type = STACK_I4;
	ins->backend.source_opcode = src_opcode;
}

#endif /* #ifdef MONO_ARCH_SIMD_INTRINSICS */
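/*
 * Rationale for the decomposition above (what "would get wrong" refers to, as
 * far as can be told from the opcodes involved): CIL conv.i* from floating
 * point truncates toward zero, which CVTTSD2SI does directly, while the x87
 * store-integer path honors the current rounding mode, so going through SSE
 * avoids both the wrong rounding and x87 control-word juggling.
 */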
void
mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
{
	MonoInst *ins;
	int vreg;

	if (long_ins->opcode == OP_LNEG) {
		ins = long_ins;
		MONO_EMIT_NEW_UNALU (cfg, OP_INEG, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADC_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), 0);
		MONO_EMIT_NEW_UNALU (cfg, OP_INEG, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->dreg));
		NULLIFY_INS (ins);
		return;
	}

#ifdef MONO_ARCH_SIMD_INTRINSICS

	if (!(cfg->opt & MONO_OPT_SIMD))
		return;

	/*TODO move this to simd-intrinsic.c once we support sse 4.1 dword extractors since we need the runtime caps info */
	switch (long_ins->opcode) {
	case OP_EXTRACT_I8:
		vreg = long_ins->sreg1;

		if (long_ins->inst_c0) {
			MONO_INST_NEW (cfg, ins, OP_PSHUFLED);
			ins->klass = long_ins->klass;
			ins->sreg1 = long_ins->sreg1;
			ins->inst_c0 = 2;
			ins->type = STACK_VTYPE;
			ins->dreg = vreg = alloc_ireg (cfg);
			MONO_ADD_INS (cfg->cbb, ins);
		}

		MONO_INST_NEW (cfg, ins, OP_EXTRACT_I4);
		ins->klass = mono_defaults.int32_class;
		ins->sreg1 = vreg;
		ins->type = STACK_I4;
		ins->dreg = MONO_LVREG_LS (long_ins->dreg);
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_INST_NEW (cfg, ins, OP_PSHUFLED);
		ins->klass = long_ins->klass;
		ins->sreg1 = long_ins->sreg1;
		ins->inst_c0 = long_ins->inst_c0 ? 3 : 1;
		ins->type = STACK_VTYPE;
		ins->dreg = vreg = alloc_ireg (cfg);
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_INST_NEW (cfg, ins, OP_EXTRACT_I4);
		ins->klass = mono_defaults.int32_class;
		ins->sreg1 = vreg;
		ins->type = STACK_I4;
		ins->dreg = MONO_LVREG_MS (long_ins->dreg);
		MONO_ADD_INS (cfg->cbb, ins);

		long_ins->opcode = OP_NOP;
		break;
	case OP_INSERTX_I8_SLOW:
		MONO_INST_NEW (cfg, ins, OP_INSERTX_I4_SLOW);
		ins->dreg = long_ins->dreg;
		ins->sreg1 = long_ins->dreg;
		ins->sreg2 = MONO_LVREG_LS (long_ins->sreg2);
		ins->inst_c0 = long_ins->inst_c0 * 2;
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_INST_NEW (cfg, ins, OP_INSERTX_I4_SLOW);
		ins->dreg = long_ins->dreg;
		ins->sreg1 = long_ins->dreg;
		ins->sreg2 = MONO_LVREG_MS (long_ins->sreg2);
		ins->inst_c0 = long_ins->inst_c0 * 2 + 1;
		MONO_ADD_INS (cfg->cbb, ins);

		long_ins->opcode = OP_NOP;
		break;
	case OP_EXPAND_I8:
		MONO_INST_NEW (cfg, ins, OP_ICONV_TO_X);
		ins->dreg = long_ins->dreg;
		ins->sreg1 = MONO_LVREG_LS (long_ins->sreg1);
		ins->klass = long_ins->klass;
		ins->type = STACK_VTYPE;
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_INST_NEW (cfg, ins, OP_INSERTX_I4_SLOW);
		ins->dreg = long_ins->dreg;
		ins->sreg1 = long_ins->dreg;
		ins->sreg2 = MONO_LVREG_MS (long_ins->sreg1);
		ins->inst_c0 = 1;
		ins->klass = long_ins->klass;
		ins->type = STACK_VTYPE;
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_INST_NEW (cfg, ins, OP_PSHUFLED);
		ins->dreg = long_ins->dreg;
		ins->sreg1 = long_ins->dreg;
		ins->inst_c0 = 0x44; /*Magic number for swizzling (X,Y,X,Y)*/
		ins->klass = long_ins->klass;
		ins->type = STACK_VTYPE;
		MONO_ADD_INS (cfg->cbb, ins);

		long_ins->opcode = OP_NOP;
		break;
	}
#endif /* MONO_ARCH_SIMD_INTRINSICS */
}

/*
 * mono_aot_emit_load_got_addr:
 *
 *   Emit code to load the got address.
 * On x86, the result is placed into EBX.
 */
guint8*
mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji)
{
	x86_call_imm (code, 0);
	/*
	 * The patch needs to point to the pop, since the GOT offset needs
	 * to be added to that address.
	 */
	if (cfg)
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GOT_OFFSET, NULL);
	else
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL);
	x86_pop_reg (code, MONO_ARCH_GOT_REG);
	x86_alu_reg_imm (code, X86_ADD, MONO_ARCH_GOT_REG, 0xf0f0f0f0);

	if (cfg)
		set_code_cursor (cfg, code);
	return code;
}

/*
 * mono_arch_emit_load_aotconst:
 *
 *   Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
 * TARGET from the mscorlib GOT in full-aot code.
 * On x86, the GOT address is assumed to be in EBX, and the result is placed into
 * EAX.
 */
guint8*
mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, MonoJumpInfoType tramp_type, gconstpointer target)
{
	/* Load the mscorlib got address */
	x86_mov_reg_membase (code, X86_EAX, MONO_ARCH_GOT_REG, sizeof (target_mgreg_t), 4);
	*ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
	/* arch_emit_got_access () patches this */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0xf0f0f0f0, 4);
	return code;
}

/* Can't put this into mini-x86.h */
gpointer
mono_x86_get_signal_exception_trampoline (MonoTrampInfo **info, gboolean aot);

GSList *
mono_arch_get_trampolines (gboolean aot)
{
	MonoTrampInfo *info;
	GSList *tramps = NULL;

	mono_x86_get_signal_exception_trampoline (&info, aot);
	tramps = g_slist_append (tramps, info);

	return tramps;
}
/* Soft Debug support */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED

/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 */
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip + OP_SEQ_POINT_BP_OFFSET;

	g_assert (code [0] == 0x90);
	x86_call_membase (code, X86_ECX, 0);
}

/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP.
 */
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip + OP_SEQ_POINT_BP_OFFSET;
	int i;

	for (i = 0; i < 2; ++i)
		x86_nop (code);
}

/*
 * mono_arch_start_single_stepping:
 *
 *   Start single stepping.
 */
void
mono_arch_start_single_stepping (void)
{
	ss_trampoline = mini_get_single_step_trampoline ();
}

/*
 * mono_arch_stop_single_stepping:
 *
 *   Stop single stepping.
 */
void
mono_arch_stop_single_stepping (void)
{
	ss_trampoline = NULL;
}

/*
 * mono_arch_is_single_step_event:
 *
 *   Return whether the machine state in SIGCTX corresponds to a single
 * step event.
 */
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
	/* We use soft breakpoints */
	return FALSE;
}

gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
	/* We use soft breakpoints */
	return FALSE;
}

#define BREAKPOINT_SIZE 2

/*
 * mono_arch_skip_breakpoint:
 *
 *   See mini-amd64.c for docs.
 */
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
	g_assert_not_reached ();
}

/*
 * mono_arch_skip_single_step:
 *
 *   See mini-amd64.c for docs.
 */
void
mono_arch_skip_single_step (MonoContext *ctx)
{
	g_assert_not_reached ();
}

/*
 * mono_arch_get_seq_point_info:
 *
 *   See mini-amd64.c for docs.
 */
SeqPointInfo*
mono_arch_get_seq_point_info (guint8 *code)
{
	NOT_IMPLEMENTED;
	return NULL;
}

#endif

gboolean
mono_arch_opcode_supported (int opcode)
{
	switch (opcode) {
	case OP_ATOMIC_ADD_I4:
	case OP_ATOMIC_EXCHANGE_I4:
	case OP_ATOMIC_CAS_I4:
	case OP_ATOMIC_LOAD_I1:
	case OP_ATOMIC_LOAD_I2:
	case OP_ATOMIC_LOAD_I4:
	case OP_ATOMIC_LOAD_U1:
	case OP_ATOMIC_LOAD_U2:
	case OP_ATOMIC_LOAD_U4:
	case OP_ATOMIC_LOAD_R4:
	case OP_ATOMIC_LOAD_R8:
	case OP_ATOMIC_STORE_I1:
	case OP_ATOMIC_STORE_I2:
	case OP_ATOMIC_STORE_I4:
	case OP_ATOMIC_STORE_U1:
	case OP_ATOMIC_STORE_U2:
	case OP_ATOMIC_STORE_U4:
	case OP_ATOMIC_STORE_R4:
	case OP_ATOMIC_STORE_R8:
		return TRUE;
	default:
		return FALSE;
	}
}

CallInfo*
mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
	return get_call_info (mp, sig);
}

gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
	gpointer target = NULL;
	switch (jit_icall_id) {
#undef MONO_AOT_ICALL
#define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
	MONO_AOT_ICALL (mono_x86_start_gsharedvt_call)
	MONO_AOT_ICALL (mono_x86_throw_corlib_exception)
	MONO_AOT_ICALL (mono_x86_throw_exception)
	}
	return target;
}
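/*
 * mono_arch_load_function maps the x86-specific JIT icall IDs to their C
 * implementations for AOT: each MONO_AOT_ICALL (x) line expands to
 * "case MONO_JIT_ICALL_<x>: target = (gpointer)x; break;", so registering a
 * new arch icall is essentially one more line in this list.
 */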
/** * \file * x86 backend for the Mono code generator * * Authors: * Paolo Molaro ([email protected]) * Dietmar Maurer ([email protected]) * Patrik Torstensson * * Copyright 2003 Ximian, Inc. * Copyright 2003-2011 Novell Inc. * Copyright 2011 Xamarin Inc. * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include "mini.h" #include <string.h> #include <math.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <mono/metadata/abi-details.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/threads.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/gc-internals.h> #include <mono/metadata/tokentype.h> #include <mono/utils/mono-math.h> #include <mono/utils/mono-counters.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-memory-model.h> #include <mono/utils/mono-hwcap.h> #include <mono/utils/mono-threads.h> #include <mono/utils/unlocked.h> #include "mini-x86.h" #include "cpu-x86.h" #include "ir-emit.h" #include "mini-gc.h" #include "aot-runtime.h" #include "mini-runtime.h" #ifndef TARGET_WIN32 #ifdef MONO_XEN_OPT static gboolean optimize_for_xen = TRUE; #else #define optimize_for_xen 0 #endif #endif static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math") /* The single step trampoline */ static gpointer ss_trampoline; /* The breakpoint trampoline */ static gpointer bp_trampoline; #define ARGS_OFFSET 8 #ifdef TARGET_WIN32 /* Under windows, the default pinvoke calling convention is stdcall */ #define CALLCONV_IS_STDCALL(sig) ((sig)->pinvoke && ((sig)->call_convention == MONO_CALL_STDCALL || (sig)->call_convention == MONO_CALL_DEFAULT || (sig)->call_convention == MONO_CALL_THISCALL)) #else #define CALLCONV_IS_STDCALL(sig) ((sig)->pinvoke && ((sig)->call_convention == MONO_CALL_STDCALL || (sig)->call_convention == MONO_CALL_THISCALL)) #endif #define X86_IS_CALLEE_SAVED_REG(reg) (((reg) == X86_EBX) || ((reg) == X86_EDI) || ((reg) == X86_ESI)) #define OP_SEQ_POINT_BP_OFFSET 7 const char* mono_arch_regname (int reg) { switch (reg) { case X86_EAX: return "%eax"; case X86_EBX: return "%ebx"; case X86_ECX: return "%ecx"; case X86_EDX: return "%edx"; case X86_ESP: return "%esp"; case X86_EBP: return "%ebp"; case X86_EDI: return "%edi"; case X86_ESI: return "%esi"; } return "unknown"; } const char* mono_arch_fregname (int reg) { switch (reg) { case 0: return "%fr0"; case 1: return "%fr1"; case 2: return "%fr2"; case 3: return "%fr3"; case 4: return "%fr4"; case 5: return "%fr5"; case 6: return "%fr6"; case 7: return "%fr7"; default: return "unknown"; } } const char * mono_arch_xregname (int reg) { switch (reg) { case 0: return "%xmm0"; case 1: return "%xmm1"; case 2: return "%xmm2"; case 3: return "%xmm3"; case 4: return "%xmm4"; case 5: return "%xmm5"; case 6: return "%xmm6"; case 7: return "%xmm7"; default: return "unknown"; } } void mono_x86_patch (unsigned char* code, gpointer target) { mono_x86_patch_inline (code, target); } #define FLOAT_PARAM_REGS 0 static const guint32 thiscall_param_regs [] = { X86_ECX, X86_NREG }; static const guint32 *callconv_param_regs(MonoMethodSignature *sig) { if (!sig->pinvoke) return NULL; switch (sig->call_convention) { case MONO_CALL_THISCALL: return thiscall_param_regs; default: return NULL; } } #if defined(TARGET_WIN32) || defined(__APPLE__) || defined(__FreeBSD__) #define SMALL_STRUCTS_IN_REGS static X86_Reg_No return_regs [] = { X86_EAX, X86_EDX }; #endif static void inline 
add_general (guint32 *gr, const guint32 *param_regs, guint32 *stack_size, ArgInfo *ainfo) { ainfo->offset = *stack_size; if (!param_regs || param_regs [*gr] == X86_NREG) { ainfo->storage = ArgOnStack; ainfo->nslots = 1; (*stack_size) += sizeof (target_mgreg_t); } else { ainfo->storage = ArgInIReg; ainfo->reg = param_regs [*gr]; (*gr) ++; } } static void inline add_general_pair (guint32 *gr, const guint32 *param_regs , guint32 *stack_size, ArgInfo *ainfo) { ainfo->offset = *stack_size; g_assert(!param_regs || param_regs[*gr] == X86_NREG); ainfo->storage = ArgOnStack; (*stack_size) += sizeof (target_mgreg_t) * 2; ainfo->nslots = 2; } static void inline add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double) { ainfo->offset = *stack_size; if (*gr >= FLOAT_PARAM_REGS) { ainfo->storage = ArgOnStack; (*stack_size) += is_double ? 8 : 4; ainfo->nslots = is_double ? 2 : 1; } else { /* A double register */ if (is_double) ainfo->storage = ArgInDoubleSSEReg; else ainfo->storage = ArgInFloatSSEReg; ainfo->reg = *gr; (*gr) += 1; } } static void add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type, gboolean is_return, guint32 *gr, const guint32 *param_regs, guint32 *fr, guint32 *stack_size) { guint32 size; MonoClass *klass; klass = mono_class_from_mono_type_internal (type); size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, sig->pinvoke && !sig->marshalling_disabled); #if defined(TARGET_WIN32) /* * Standard C and C++ doesn't allow empty structs, empty structs will always have a size of 1 byte. * GCC have an extension to allow empty structs, https://gcc.gnu.org/onlinedocs/gcc/Empty-Structures.html. * This cause a little dilemma since runtime build using none GCC compiler will not be compatible with * GCC build C libraries and the other way around. On platforms where empty structs has size of 1 byte * it must be represented in call and cannot be dropped. */ if (size == 0 && MONO_TYPE_ISSTRUCT (type) && sig->pinvoke) { /* Empty structs (1 byte size) needs to be represented in a stack slot */ ainfo->pass_empty_struct = TRUE; size = 1; } #endif #ifdef SMALL_STRUCTS_IN_REGS if (sig->pinvoke && is_return) { MonoMarshalType *info; info = mono_marshal_load_type_info (klass); g_assert (info); ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone; /* Ignore empty struct return value, if used. */ if (info->num_fields == 0 && ainfo->pass_empty_struct) { ainfo->storage = ArgValuetypeInReg; return; } /* * Windows x86 ABI for returning structs of size 4 or 8 bytes (regardless of type) dictates that * values are passed in EDX:EAX register pairs, https://msdn.microsoft.com/en-us/library/984x0h58.aspx. * This is different compared to for example float or double return types (not in struct) that will be returned * in ST(0), https://msdn.microsoft.com/en-us/library/ha59cbfz.aspx. * * Apples OSX x86 ABI for returning structs of size 4 or 8 bytes uses a slightly different approach. * If a struct includes only one scalar value, it will be handled with the same rules as scalar values. * This means that structs with one float or double will be returned in ST(0). For more details, * https://developer.apple.com/library/mac/documentation/DeveloperTools/Conceptual/LowLevelABI/130-IA-32_Function_Calling_Conventions/IA32.html. 
*/ #if !defined(TARGET_WIN32) /* Special case structs with only a float member */ if (info->num_fields == 1) { int ftype = mini_get_underlying_type (info->fields [0].field->type)->type; if ((info->native_size == 8) && (ftype == MONO_TYPE_R8)) { ainfo->storage = ArgValuetypeInReg; ainfo->pair_storage [0] = ArgOnDoubleFpStack; return; } if ((info->native_size == 4) && (ftype == MONO_TYPE_R4)) { ainfo->storage = ArgValuetypeInReg; ainfo->pair_storage [0] = ArgOnFloatFpStack; return; } } #endif if ((info->native_size == 1) || (info->native_size == 2) || (info->native_size == 4) || (info->native_size == 8)) { ainfo->storage = ArgValuetypeInReg; ainfo->pair_storage [0] = ArgInIReg; ainfo->pair_regs [0] = return_regs [0]; if (info->native_size > 4) { ainfo->pair_storage [1] = ArgInIReg; ainfo->pair_regs [1] = return_regs [1]; } return; } } #endif if (param_regs && param_regs [*gr] != X86_NREG && !is_return) { g_assert (size <= 4); ainfo->storage = ArgValuetypeInReg; ainfo->reg = param_regs [*gr]; (*gr)++; return; } ainfo->offset = *stack_size; ainfo->storage = ArgOnStack; *stack_size += ALIGN_TO (size, sizeof (target_mgreg_t)); ainfo->nslots = ALIGN_TO (size, sizeof (target_mgreg_t)) / sizeof (target_mgreg_t); } /* * get_call_info: * * Obtain information about a call according to the calling convention. * For x86 ELF, see the "System V Application Binary Interface Intel386 * Architecture Processor Supplment, Fourth Edition" document for more * information. * For x86 win32, see https://msdn.microsoft.com/en-us/library/984x0h58.aspx. */ static CallInfo* get_call_info_internal (CallInfo *cinfo, MonoMethodSignature *sig) { guint32 i, gr, fr, pstart; const guint32 *param_regs; MonoType *ret_type; int n = sig->hasthis + sig->param_count; guint32 stack_size = 0; gboolean is_pinvoke = sig->pinvoke; gr = 0; fr = 0; cinfo->nargs = n; param_regs = callconv_param_regs(sig); /* return value */ { ret_type = mini_get_underlying_type (sig->ret); switch (ret_type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = X86_EAX; break; case MONO_TYPE_U8: case MONO_TYPE_I8: cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = X86_EAX; cinfo->ret.is_pair = TRUE; break; case MONO_TYPE_R4: cinfo->ret.storage = ArgOnFloatFpStack; break; case MONO_TYPE_R8: cinfo->ret.storage = ArgOnDoubleFpStack; break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ret_type)) { cinfo->ret.storage = ArgInIReg; cinfo->ret.reg = X86_EAX; break; } if (mini_is_gsharedvt_type (ret_type)) { cinfo->ret.storage = ArgOnStack; cinfo->vtype_retaddr = TRUE; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: { guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0; add_valuetype (sig, &cinfo->ret, ret_type, TRUE, &tmp_gr, NULL, &tmp_fr, &tmp_stacksize); if (cinfo->ret.storage == ArgOnStack) { cinfo->vtype_retaddr = TRUE; /* The caller passes the address where the value is stored */ } break; } case MONO_TYPE_VAR: case MONO_TYPE_MVAR: g_assert (mini_is_gsharedvt_type (ret_type)); cinfo->ret.storage = ArgOnStack; cinfo->vtype_retaddr = TRUE; break; case MONO_TYPE_VOID: cinfo->ret.storage = ArgNone; break; default: g_error ("Can't handle as return value 0x%x", ret_type->type); } } pstart = 0; /* * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after * the first argument, 
allowing 'this' to be always passed in the first arg reg. * Also do this if the first argument is a reference type, since virtual calls * are sometimes made using calli without sig->hasthis set, like in the delegate * invoke wrappers. */ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) { if (sig->hasthis) { add_general (&gr, param_regs, &stack_size, cinfo->args + 0); } else { add_general (&gr, param_regs, &stack_size, &cinfo->args [sig->hasthis + 0]); pstart = 1; } cinfo->vret_arg_offset = stack_size; add_general (&gr, NULL, &stack_size, &cinfo->ret); cinfo->vret_arg_index = 1; } else { /* this */ if (sig->hasthis) add_general (&gr, param_regs, &stack_size, cinfo->args + 0); if (cinfo->vtype_retaddr) add_general (&gr, NULL, &stack_size, &cinfo->ret); } if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) { fr = FLOAT_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, param_regs, &stack_size, &cinfo->sig_cookie); } for (i = pstart; i < sig->param_count; ++i) { ArgInfo *ainfo = &cinfo->args [sig->hasthis + i]; MonoType *ptype; if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) { /* We allways pass the sig cookie on the stack for simplicity */ /* * Prevent implicit arguments + the sig cookie from being passed * in registers. */ fr = FLOAT_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ add_general (&gr, param_regs, &stack_size, &cinfo->sig_cookie); } if (m_type_is_byref (sig->params [i])) { add_general (&gr, param_regs, &stack_size, ainfo); continue; } ptype = mini_get_underlying_type (sig->params [i]); switch (ptype->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: add_general (&gr, param_regs, &stack_size, ainfo); break; case MONO_TYPE_I2: case MONO_TYPE_U2: add_general (&gr, param_regs, &stack_size, ainfo); break; case MONO_TYPE_I4: case MONO_TYPE_U4: add_general (&gr, param_regs, &stack_size, ainfo); break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_OBJECT: add_general (&gr, param_regs, &stack_size, ainfo); break; case MONO_TYPE_GENERICINST: if (!mono_type_generic_inst_is_valuetype (ptype)) { add_general (&gr, param_regs, &stack_size, ainfo); break; } if (mini_is_gsharedvt_type (ptype)) { /* gsharedvt arguments are passed by ref */ add_general (&gr, param_regs, &stack_size, ainfo); g_assert (ainfo->storage == ArgOnStack); ainfo->storage = ArgGSharedVt; break; } /* Fall through */ case MONO_TYPE_VALUETYPE: case MONO_TYPE_TYPEDBYREF: add_valuetype (sig, ainfo, ptype, FALSE, &gr, param_regs, &fr, &stack_size); break; case MONO_TYPE_U8: case MONO_TYPE_I8: add_general_pair (&gr, param_regs, &stack_size, ainfo); break; case MONO_TYPE_R4: add_float (&fr, &stack_size, ainfo, FALSE); break; case MONO_TYPE_R8: add_float (&fr, &stack_size, ainfo, TRUE); break; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: /* gsharedvt arguments are passed by ref */ g_assert (mini_is_gsharedvt_type (ptype)); add_general (&gr, param_regs, &stack_size, ainfo); g_assert (ainfo->storage == ArgOnStack); ainfo->storage = ArgGSharedVt; break; default: g_error ("unexpected type 0x%x", ptype->type); g_assert_not_reached (); } } if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) { fr = FLOAT_PARAM_REGS; /* Emit the signature cookie just before the implicit arguments */ 
add_general (&gr, param_regs, &stack_size, &cinfo->sig_cookie); } if (cinfo->vtype_retaddr) { /* if the function returns a struct on stack, the called method already does a ret $0x4 */ cinfo->callee_stack_pop = 4; } else if (CALLCONV_IS_STDCALL (sig)) { /* Have to compensate for the stack space popped by the native callee */ cinfo->callee_stack_pop = stack_size; } if (mono_do_x86_stack_align && (stack_size % MONO_ARCH_FRAME_ALIGNMENT) != 0) { cinfo->need_stack_align = TRUE; cinfo->stack_align_amount = MONO_ARCH_FRAME_ALIGNMENT - (stack_size % MONO_ARCH_FRAME_ALIGNMENT); stack_size += cinfo->stack_align_amount; } cinfo->stack_usage = stack_size; cinfo->reg_usage = gr; cinfo->freg_usage = fr; return cinfo; } static CallInfo* get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { int n = sig->hasthis + sig->param_count; CallInfo *cinfo; if (mp) cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n)); else cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n)); return get_call_info_internal (cinfo, sig); } static gboolean storage_in_ireg (ArgStorage storage) { return (storage == ArgInIReg || storage == ArgValuetypeInReg); } static int arg_need_temp (ArgInfo *ainfo) { /* * We always fetch the double value from the fpstack. In that case, we * need to have a separate tmp that is the double value casted to float */ if (ainfo->storage == ArgOnFloatFpStack) return sizeof (float); return 0; } static gpointer arg_get_storage (CallContext *ccontext, ArgInfo *ainfo) { switch (ainfo->storage) { case ArgOnStack: return ccontext->stack + ainfo->offset; case ArgOnDoubleFpStack: return &ccontext->fret; case ArgInIReg: /* If pair, the storage is for EDX:EAX */ return &ccontext->eax; default: g_error ("Arg storage type not yet supported"); } } static void arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest) { g_assert (ainfo->storage == ArgOnFloatFpStack); *(float*) dest = (float)ccontext->fret; } void mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { CallInfo *cinfo = get_call_info (NULL, sig); const MonoEECallbacks *interp_cb = mini_get_interp_callbacks (); gpointer storage; ArgInfo *ainfo; memset (ccontext, 0, sizeof (CallContext)); ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT); if (ccontext->stack_size) ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size); if (sig->ret->type != MONO_TYPE_VOID) { ainfo = &cinfo->ret; if (ainfo->storage == ArgOnStack) { /* This is a value type return. 
The pointer to vt storage is pushed as first argument */ g_assert (ainfo->offset == 0); g_assert (ainfo->nslots == 1); storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1); *(host_mgreg_t*)ccontext->stack = (host_mgreg_t)storage; } } g_assert (!sig->hasthis); for (int i = 0; i < sig->param_count; i++) { ainfo = &cinfo->args [i]; storage = arg_get_storage (ccontext, ainfo); interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage); } g_free (cinfo); } void mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig) { const MonoEECallbacks *interp_cb; CallInfo *cinfo; ArgInfo *ainfo; gpointer storage; /* No return value */ if (sig->ret->type == MONO_TYPE_VOID) return; interp_cb = mini_get_interp_callbacks (); cinfo = get_call_info (NULL, sig); ainfo = &cinfo->ret; /* Check if return value was stored directly at address passed in reg */ if (cinfo->ret.storage != ArgOnStack) { int temp_size = arg_need_temp (ainfo); if (temp_size) { storage = alloca (temp_size); arg_get_val (ccontext, ainfo, storage); } else { storage = arg_get_storage (ccontext, ainfo); } interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage); } g_free (cinfo); } /* * mono_arch_get_argument_info: * @csig: a method signature * @param_count: the number of parameters to consider * @arg_info: an array to store the result infos * * Gathers information on parameters such as size, alignment and * padding. arg_info should be large enough to hold param_count + 1 entries. * * Returns the size of the argument area on the stack. * This should be signal safe, since it is called from * mono_arch_unwind_frame (). * FIXME: The metadata calls might not be signal safe. */ int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) { int len, k, args_size = 0; int size, pad; guint32 align; int offset = 8; CallInfo *cinfo; int prev_stackarg; int num_regs; /* Avoid g_malloc as it is not signal safe */ len = sizeof (CallInfo) + (sizeof (ArgInfo) * (csig->param_count + 1)); cinfo = (CallInfo*)g_alloca (len); memset (cinfo, 0, len); cinfo = get_call_info_internal (cinfo, csig); arg_info [0].offset = offset; if (cinfo->vtype_retaddr && cinfo->vret_arg_index == 0) { args_size += sizeof (target_mgreg_t); offset += 4; } if (csig->hasthis && !storage_in_ireg (cinfo->args [0].storage)) { args_size += sizeof (target_mgreg_t); offset += 4; } if (cinfo->vtype_retaddr && cinfo->vret_arg_index == 1 && csig->hasthis) { /* Emitted after this */ args_size += sizeof (target_mgreg_t); offset += 4; } arg_info [0].size = args_size; prev_stackarg = 0; for (k = 0; k < param_count; k++) { size = mini_type_stack_size_full (csig->params [k], &align, csig->pinvoke && !csig->marshalling_disabled); if (storage_in_ireg (cinfo->args [csig->hasthis + k].storage)) { /* not in stack, we'll give it an offset at the end */ arg_info [k + 1].pad = 0; arg_info [k + 1].size = size; } else { /* ignore alignment for now */ align = 1; args_size += pad = (align - (args_size & (align - 1))) & (align - 1); arg_info [prev_stackarg].pad = pad; args_size += size; arg_info [k + 1].pad = 0; arg_info [k + 1].size = size; offset += pad; arg_info [k + 1].offset = offset; offset += size; prev_stackarg = k + 1; } if (k == 0 && cinfo->vtype_retaddr && cinfo->vret_arg_index == 1 && !csig->hasthis) { /* Emitted after the first arg */ args_size += sizeof (target_mgreg_t); offset += 4; } } if (mono_do_x86_stack_align && !CALLCONV_IS_STDCALL
(csig)) align = MONO_ARCH_FRAME_ALIGNMENT; else align = 4; args_size += pad = (align - (args_size & (align - 1))) & (align - 1); arg_info [k].pad = pad; /* Add offsets for any reg parameters */ num_regs = 0; if (csig->hasthis && storage_in_ireg (cinfo->args [0].storage)) arg_info [0].offset = args_size + 4 * num_regs++; for (k=0; k < param_count; k++) { if (storage_in_ireg (cinfo->args[csig->hasthis + k].storage)) { arg_info [k + 1].offset = args_size + 4 * num_regs++; } } return args_size; } #ifndef DISABLE_JIT gboolean mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_) { g_assert (caller_sig); g_assert (callee_sig); // Direct AOT calls usually go through the PLT/GOT. // Unless we can determine here if is_direct_callable will return TRUE? // But the PLT/GOT is addressed with nonvolatile ebx, which // gets restored before the jump. // See https://github.com/mono/mono/commit/f5373adc8a89d4b0d1d549fdd6d9adc3ded4b400 // See https://github.com/mono/mono/issues/11265 if (!virtual_ && cfg->compile_aot && !cfg->full_aot) return FALSE; CallInfo *caller_info = get_call_info (NULL, caller_sig); CallInfo *callee_info = get_call_info (NULL, callee_sig); /* * Tailcalls with more callee stack usage than the caller cannot be supported, since * the extra stack space would be left on the stack after the tailcall. */ gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage) && IS_SUPPORTED_TAILCALL (caller_info->ret.storage == callee_info->ret.storage); if (!res && !mono_tailcall_print_enabled ()) goto exit; // Limit stack_usage to 1G. res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (1 << 30)); res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (1 << 30)); exit: g_free (caller_info); g_free (callee_info); return res; } #endif /* * Initialize the cpu to execute managed code. */ void mono_arch_cpu_init (void) { /* spec compliance requires running with double precision */ #ifndef _MSC_VER guint16 fpcw; __asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw)); fpcw &= ~X86_FPCW_PRECC_MASK; fpcw |= X86_FPCW_PREC_DOUBLE; __asm__ __volatile__ ("fldcw %0\n": : "m" (fpcw)); __asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw)); #else _control87 (_PC_53, MCW_PC); #endif } /* * Initialize architecture specific code. */ void mono_arch_init (void) { if (!mono_aot_only) bp_trampoline = mini_get_breakpoint_trampoline (); } /* * Cleanup architecture specific code. */ void mono_arch_cleanup (void) { } /* * This function returns the optimizations supported on this cpu. 
*/ guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask) { guint32 opts = 0; *exclude_mask = 0; if (mono_hwcap_x86_has_cmov) { opts |= MONO_OPT_CMOV; if (mono_hwcap_x86_has_fcmov) opts |= MONO_OPT_FCMOV; else *exclude_mask |= MONO_OPT_FCMOV; } else { *exclude_mask |= MONO_OPT_CMOV; } if (mono_hwcap_x86_has_sse2) opts |= MONO_OPT_SSE2; else *exclude_mask |= MONO_OPT_SSE2; #ifdef MONO_ARCH_SIMD_INTRINSICS /*SIMD intrinsics require at least SSE2.*/ if (!mono_hwcap_x86_has_sse2) *exclude_mask |= MONO_OPT_SIMD; #endif return opts; } MonoCPUFeatures mono_arch_get_cpu_features (void) { guint64 features = MONO_CPU_INITED; if (mono_hwcap_x86_has_sse1) features |= MONO_CPU_X86_SSE; if (mono_hwcap_x86_has_sse2) features |= MONO_CPU_X86_SSE2; if (mono_hwcap_x86_has_sse3) features |= MONO_CPU_X86_SSE3; if (mono_hwcap_x86_has_ssse3) features |= MONO_CPU_X86_SSSE3; if (mono_hwcap_x86_has_sse41) features |= MONO_CPU_X86_SSE41; if (mono_hwcap_x86_has_sse42) features |= MONO_CPU_X86_SSE42; return (MonoCPUFeatures)features; } /* * Determine whether the trap whose info is in SIGINFO is caused by * integer overflow. */ gboolean mono_arch_is_int_overflow (void *sigctx, void *info) { MonoContext ctx; guint8* ip; mono_sigctx_to_monoctx (sigctx, &ctx); ip = (guint8*)ctx.eip; if ((ip [0] == 0xf7) && (x86_modrm_mod (ip [1]) == 0x3) && (x86_modrm_reg (ip [1]) == 0x7)) { gint32 reg; /* idiv REG */ switch (x86_modrm_rm (ip [1])) { case X86_EAX: reg = ctx.eax; break; case X86_ECX: reg = ctx.ecx; break; case X86_EDX: reg = ctx.edx; break; case X86_EBX: reg = ctx.ebx; break; case X86_ESI: reg = ctx.esi; break; case X86_EDI: reg = ctx.edi; break; default: g_assert_not_reached (); reg = -1; } if (reg == -1) return TRUE; } return FALSE; } GList * mono_arch_get_allocatable_int_vars (MonoCompile *cfg) { GList *vars = NULL; int i; for (i = 0; i < cfg->num_varinfo; i++) { MonoInst *ins = cfg->varinfo [i]; MonoMethodVar *vmv = MONO_VARINFO (cfg, i); /* unused vars */ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos) continue; if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG)) continue; /* we don't allocate I1 to registers because there is no simple way to sign extend * 8bit quantities in caller saved registers on x86 */ if (mono_is_regsize_var (ins->inst_vtype) && (ins->inst_vtype->type != MONO_TYPE_I1)) { g_assert (MONO_VARINFO (cfg, i)->reg == -1); g_assert (i == vmv->idx); vars = g_list_prepend (vars, vmv); } } vars = mono_varlist_sort (cfg, vars, 0); return vars; } GList * mono_arch_get_global_int_regs (MonoCompile *cfg) { GList *regs = NULL; /* we can use 3 registers for global allocation */ regs = g_list_prepend (regs, (gpointer)X86_EBX); regs = g_list_prepend (regs, (gpointer)X86_ESI); regs = g_list_prepend (regs, (gpointer)X86_EDI); return regs; } /* * mono_arch_regalloc_cost: * * Return the cost, in number of memory references, of the action of * allocating the variable VMV into a register during global register * allocation. */ guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv) { MonoInst *ins = cfg->varinfo [vmv->idx]; if (cfg->method->save_lmf) /* The register is already saved */ return (ins->opcode == OP_ARG) ? 1 : 0; else /* push+pop+possible load if it is an argument */ return (ins->opcode == OP_ARG) ?
3 : 2; } static void set_needs_stack_frame (MonoCompile *cfg, gboolean flag) { static int inited = FALSE; static int count = 0; if (cfg->arch.need_stack_frame_inited) { g_assert (cfg->arch.need_stack_frame == flag); return; } cfg->arch.need_stack_frame = flag; cfg->arch.need_stack_frame_inited = TRUE; if (flag) return; if (!inited) { mono_counters_register ("Could eliminate stack frame", MONO_COUNTER_INT|MONO_COUNTER_JIT, &count); inited = TRUE; } ++count; //g_print ("will eliminate %s.%s.%s\n", cfg->method->klass->name_space, cfg->method->klass->name, cfg->method->name); } static gboolean needs_stack_frame (MonoCompile *cfg) { MonoMethodSignature *sig; MonoMethodHeader *header; gboolean result = FALSE; #if defined (__APPLE__) /*OSX requires stack frame code to have the correct alignment. */ return TRUE; #endif if (cfg->arch.need_stack_frame_inited) return cfg->arch.need_stack_frame; header = cfg->header; sig = mono_method_signature_internal (cfg->method); if (cfg->disable_omit_fp) result = TRUE; else if (cfg->flags & MONO_CFG_HAS_ALLOCA) result = TRUE; else if (cfg->method->save_lmf) result = TRUE; else if (cfg->stack_offset) result = TRUE; else if (cfg->param_area) result = TRUE; else if (cfg->flags & (MONO_CFG_HAS_CALLS | MONO_CFG_HAS_ALLOCA | MONO_CFG_HAS_TAILCALL)) result = TRUE; else if (header->num_clauses) result = TRUE; else if (sig->param_count + sig->hasthis) result = TRUE; else if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) result = TRUE; set_needs_stack_frame (cfg, result); return cfg->arch.need_stack_frame; } /* * Set var information according to the calling convention. X86 version. * The locals var stuff should most likely be split in another method. */ void mono_arch_allocate_vars (MonoCompile *cfg) { MonoMethodSignature *sig; MonoInst *inst; guint32 locals_stack_size, locals_stack_align; int i, offset; gint32 *offsets; CallInfo *cinfo; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; cfg->frame_reg = X86_EBP; offset = 0; if (cfg->has_atomic_add_i4 || cfg->has_atomic_exchange_i4) { /* The opcode implementations use callee-saved regs as scratch regs by pushing and pop-ing them, but that is not async safe */ cfg->used_int_regs |= (1 << X86_EBX) | (1 << X86_EDI) | (1 << X86_ESI); } /* Reserve space to save LMF and caller saved registers */ if (cfg->method->save_lmf) { /* The LMF var is allocated normally */ } else { if (cfg->used_int_regs & (1 << X86_EBX)) { offset += 4; } if (cfg->used_int_regs & (1 << X86_EDI)) { offset += 4; } if (cfg->used_int_regs & (1 << X86_ESI)) { offset += 4; } } switch (cinfo->ret.storage) { case ArgValuetypeInReg: /* Allocate a local to hold the result, the epilog will copy it to the correct place */ offset += 8; cfg->ret->opcode = OP_REGOFFSET; cfg->ret->inst_basereg = X86_EBP; cfg->ret->inst_offset = - offset; break; default: break; } /* Allocate a local for any register arguments that need them. 
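 * (The prolog is then expected to spill the incoming register into this
 * slot, so the argument also has a stack home, e.g. for GC maps and the
 * debugger.)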
*/ for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = &cinfo->args [i]; inst = cfg->args [i]; if (inst->opcode != OP_REGVAR && storage_in_ireg (ainfo->storage)) { offset += 4; cfg->args[i]->opcode = OP_REGOFFSET; cfg->args[i]->inst_basereg = X86_EBP; cfg->args[i]->inst_offset = - offset; } } /* Allocate locals */ offsets = mono_allocate_stack_slots (cfg, TRUE, &locals_stack_size, &locals_stack_align); if (locals_stack_size > MONO_ARCH_MAX_FRAME_SIZE) { char *mname = mono_method_full_name (cfg->method, TRUE); mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s stack is too big.", mname)); g_free (mname); return; } if (locals_stack_align) { int prev_offset = offset; offset += (locals_stack_align - 1); offset &= ~(locals_stack_align - 1); while (prev_offset < offset) { prev_offset += 4; mini_gc_set_slot_type_from_fp (cfg, - prev_offset, SLOT_NOREF); } } cfg->locals_min_stack_offset = - (offset + locals_stack_size); cfg->locals_max_stack_offset = - offset; /* * EBP is at alignment 8 % MONO_ARCH_FRAME_ALIGNMENT, so if we * have locals larger than 8 bytes we need to make sure that * they have the appropriate offset. */ if (MONO_ARCH_FRAME_ALIGNMENT > 8 && locals_stack_align > 8) { int extra_size = MONO_ARCH_FRAME_ALIGNMENT - sizeof (target_mgreg_t) * 2; offset += extra_size; locals_stack_size += extra_size; } for (i = cfg->locals_start; i < cfg->num_varinfo; i++) { if (offsets [i] != -1) { MonoInst *inst = cfg->varinfo [i]; inst->opcode = OP_REGOFFSET; inst->inst_basereg = X86_EBP; inst->inst_offset = - (offset + offsets [i]); //printf ("allocated local %d to ", i); mono_print_tree_nl (inst); } } offset += locals_stack_size; /* * Allocate arguments+return value */ switch (cinfo->ret.storage) { case ArgOnStack: if (cfg->vret_addr) { /* * In the new IR, the cfg->vret_addr variable represents the * vtype return value. */ cfg->vret_addr->opcode = OP_REGOFFSET; cfg->vret_addr->inst_basereg = cfg->frame_reg; cfg->vret_addr->inst_offset = cinfo->ret.offset + ARGS_OFFSET; if (G_UNLIKELY (cfg->verbose_level > 1)) { printf ("vret_addr ="); mono_print_ins (cfg->vret_addr); } } else { cfg->ret->opcode = OP_REGOFFSET; cfg->ret->inst_basereg = X86_EBP; cfg->ret->inst_offset = cinfo->ret.offset + ARGS_OFFSET; } break; case ArgValuetypeInReg: break; case ArgInIReg: cfg->ret->opcode = OP_REGVAR; cfg->ret->inst_c0 = cinfo->ret.reg; cfg->ret->dreg = cinfo->ret.reg; break; case ArgNone: case ArgOnFloatFpStack: case ArgOnDoubleFpStack: break; default: g_assert_not_reached (); } if (sig->call_convention == MONO_CALL_VARARG) { g_assert (cinfo->sig_cookie.storage == ArgOnStack); cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET; } for (i = 0; i < sig->param_count + sig->hasthis; ++i) { ArgInfo *ainfo = &cinfo->args [i]; inst = cfg->args [i]; if (inst->opcode != OP_REGVAR) { if (storage_in_ireg (ainfo->storage)) { /* We already allocated locals for register arguments. 
*/ } else { inst->opcode = OP_REGOFFSET; inst->inst_basereg = X86_EBP; inst->inst_offset = ainfo->offset + ARGS_OFFSET; } } } cfg->stack_offset = offset; } void mono_arch_create_vars (MonoCompile *cfg) { MonoType *sig_ret; MonoMethodSignature *sig; CallInfo *cinfo; sig = mono_method_signature_internal (cfg->method); if (!cfg->arch.cinfo) cfg->arch.cinfo = get_call_info (cfg->mempool, sig); cinfo = cfg->arch.cinfo; sig_ret = mini_get_underlying_type (sig->ret); if (cinfo->ret.storage == ArgValuetypeInReg) cfg->ret_var_is_local = TRUE; if ((cinfo->ret.storage != ArgValuetypeInReg) && (MONO_TYPE_ISSTRUCT (sig_ret) || mini_is_gsharedvt_variable_type (sig_ret))) { cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG); } if (cfg->gen_sdb_seq_points) { MonoInst *ins; ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.ss_tramp_var = ins; ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL); ins->flags |= MONO_INST_VOLATILE; cfg->arch.bp_tramp_var = ins; } if (cfg->method->save_lmf) { cfg->create_lmf_var = TRUE; cfg->lmf_ir = TRUE; } cfg->arch_eh_jit_info = 1; } /* * It is expensive to adjust esp for each individual fp argument pushed on the stack * so we try to do it just once when we have multiple fp arguments in a row. * We don't use this mechanism generally because for int arguments the generated code * is slightly bigger and new generation cpus optimize away the dependency chains * created by push instructions on the esp value. * fp_arg_setup is the first argument in the execution sequence where the esp register * is modified. */ static G_GNUC_UNUSED int collect_fp_stack_space (MonoMethodSignature *sig, int start_arg, int *fp_arg_setup) { int fp_space = 0; MonoType *t; for (; start_arg < sig->param_count; ++start_arg) { t = mini_get_underlying_type (sig->params [start_arg]); if (!m_type_is_byref (t) && t->type == MONO_TYPE_R8) { fp_space += sizeof (double); *fp_arg_setup = start_arg; } else { break; } } return fp_space; } static void emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo) { MonoMethodSignature *tmp_sig; int sig_reg; /* * mono_ArgIterator_Setup assumes the signature cookie is * passed first and all the arguments which were before it are * passed on the stack after the signature. So compensate by * passing a different signature. 
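 * E.g. for a call foo (fixed, __arglist (s, d)) with sentinelpos 1, the
 * duplicated signature reachable through the cookie only describes
 * { s, d }, exactly what the dup/memcpy below builds.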
*/ tmp_sig = mono_metadata_signature_dup (call->signature); tmp_sig->param_count -= call->signature->sentinelpos; tmp_sig->sentinelpos = 0; memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*)); if (cfg->compile_aot) { sig_reg = mono_alloc_ireg (cfg); MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, cinfo->sig_cookie.offset, sig_reg); } else { MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, X86_ESP, cinfo->sig_cookie.offset, (gsize)tmp_sig); } } #ifdef ENABLE_LLVM LLVMCallInfo* mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig) { int i, n; CallInfo *cinfo; ArgInfo *ainfo; LLVMCallInfo *linfo; MonoType *t, *sig_ret; n = sig->param_count + sig->hasthis; cinfo = get_call_info (cfg->mempool, sig); sig_ret = sig->ret; linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n)); /* * LLVM always uses the native ABI while we use our own ABI, the * only difference is the handling of vtypes: * - we only pass/receive them in registers in some cases, and only * in 1 or 2 integer registers. */ if (cinfo->ret.storage == ArgValuetypeInReg) { if (sig->pinvoke) { cfg->exception_message = g_strdup ("pinvoke + vtypes"); cfg->disable_llvm = TRUE; return linfo; } cfg->exception_message = g_strdup ("vtype ret in call"); cfg->disable_llvm = TRUE; /* linfo->ret.storage = LLVMArgVtypeInReg; for (j = 0; j < 2; ++j) linfo->ret.pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, cinfo->ret.pair_storage [j]); */ } if (mini_type_is_vtype (sig_ret) && cinfo->ret.storage == ArgInIReg) { /* Vtype returned using a hidden argument */ linfo->ret.storage = LLVMArgVtypeRetAddr; linfo->vret_arg_index = cinfo->vret_arg_index; } if (mini_type_is_vtype (sig_ret) && cinfo->ret.storage != ArgInIReg) { // FIXME: cfg->exception_message = g_strdup ("vtype ret in call"); cfg->disable_llvm = TRUE; } for (i = 0; i < n; ++i) { ainfo = cinfo->args + i; if (i >= sig->hasthis) t = sig->params [i - sig->hasthis]; else t = mono_get_int_type (); linfo->args [i].storage = LLVMArgNone; switch (ainfo->storage) { case ArgInIReg: linfo->args [i].storage = LLVMArgNormal; break; case ArgInDoubleSSEReg: case ArgInFloatSSEReg: linfo->args [i].storage = LLVMArgNormal; break; case ArgOnStack: if (mini_type_is_vtype (t)) { if (mono_class_value_size (mono_class_from_mono_type_internal (t), NULL) == 0) /* LLVM seems to allocate argument space for empty structures too */ linfo->args [i].storage = LLVMArgNone; else linfo->args [i].storage = LLVMArgVtypeByVal; } else { linfo->args [i].storage = LLVMArgNormal; } break; case ArgValuetypeInReg: if (sig->pinvoke) { cfg->exception_message = g_strdup ("pinvoke + vtypes"); cfg->disable_llvm = TRUE; return linfo; } cfg->exception_message = g_strdup ("vtype arg"); cfg->disable_llvm = TRUE; /* linfo->args [i].storage = LLVMArgVtypeInReg; for (j = 0; j < 2; ++j) linfo->args [i].pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, ainfo->pair_storage [j]); */ break; case ArgGSharedVt: linfo->args [i].storage = LLVMArgGSharedVt; break; default: cfg->exception_message = g_strdup ("ainfo->storage"); cfg->disable_llvm = TRUE; break; } } return linfo; } #endif static void emit_gc_param_slot_def (MonoCompile *cfg, int sp_offset, MonoType *t) { if (cfg->compute_gc_maps) { MonoInst *def; /* Needs checking if the feature will be enabled again */ g_assert_not_reached (); /* On x86, the offsets are from the sp value 
before the start of the call sequence */ if (t == NULL) t = mono_get_int_type (); EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, sp_offset, t); } } void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call) { MonoType *sig_ret; MonoInst *arg, *in; MonoMethodSignature *sig; int i, j, n; CallInfo *cinfo; int sentinelpos = 0, sp_offset = 0; sig = call->signature; n = sig->param_count + sig->hasthis; sig_ret = mini_get_underlying_type (sig->ret); cinfo = get_call_info (cfg->mempool, sig); call->call_info = cinfo; if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) sentinelpos = sig->sentinelpos + (sig->hasthis ? 1 : 0); if (sig_ret && MONO_TYPE_ISSTRUCT (sig_ret)) { if (cinfo->ret.storage == ArgValuetypeInReg && cinfo->ret.pair_storage[0] != ArgNone ) { /* * Tell the JIT to use a more efficient calling convention: call using * OP_CALL, compute the result location after the call, and save the * result there. */ call->vret_in_reg = TRUE; #if defined (__APPLE__) if (cinfo->ret.pair_storage [0] == ArgOnDoubleFpStack || cinfo->ret.pair_storage [0] == ArgOnFloatFpStack) call->vret_in_reg_fp = TRUE; #endif if (call->vret_var) NULLIFY_INS (call->vret_var); } } // FIXME: Emit EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF everywhere /* Handle the case where there are no implicit arguments */ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sentinelpos)) { emit_sig_cookie (cfg, call, cinfo); sp_offset = cinfo->sig_cookie.offset; emit_gc_param_slot_def (cfg, sp_offset, NULL); } /* Arguments are pushed in the reverse order */ for (i = n - 1; i >= 0; i --) { ArgInfo *ainfo = cinfo->args + i; MonoType *orig_type, *t; int argsize; if (cinfo->vtype_retaddr && cinfo->vret_arg_index == 1 && i == 0) { MonoInst *vtarg; /* Push the vret arg before the first argument */ MONO_INST_NEW (cfg, vtarg, OP_STORE_MEMBASE_REG); vtarg->type = STACK_MP; vtarg->inst_destbasereg = X86_ESP; vtarg->sreg1 = call->vret_var->dreg; vtarg->inst_offset = cinfo->ret.offset; MONO_ADD_INS (cfg->cbb, vtarg); emit_gc_param_slot_def (cfg, cinfo->ret.offset, NULL); } if (i >= sig->hasthis) t = sig->params [i - sig->hasthis]; else t = mono_get_int_type (); orig_type = t; t = mini_get_underlying_type (t); MONO_INST_NEW (cfg, arg, OP_X86_PUSH); in = call->args [i]; arg->cil_code = in->cil_code; arg->sreg1 = in->dreg; arg->type = in->type; g_assert (in->dreg != -1); if (ainfo->storage == ArgGSharedVt) { arg->opcode = OP_OUTARG_VT; arg->sreg1 = in->dreg; arg->klass = in->klass; arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo)); sp_offset += 4; MONO_ADD_INS (cfg->cbb, arg); } else if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(t))) { guint32 align; guint32 size; g_assert (in->klass); if (t->type == MONO_TYPE_TYPEDBYREF) { size = MONO_ABI_SIZEOF (MonoTypedRef); align = sizeof (target_mgreg_t); } else { size = mini_type_stack_size_full (m_class_get_byval_arg (in->klass), &align, sig->pinvoke && !sig->marshalling_disabled); } if (size > 0 || ainfo->pass_empty_struct) { arg->opcode = OP_OUTARG_VT; arg->sreg1 = in->dreg; arg->klass = in->klass; arg->backend.size = size; arg->inst_p0 = call; arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo)); memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo)); MONO_ADD_INS (cfg->cbb, arg); if (ainfo->storage != ArgValuetypeInReg) { emit_gc_param_slot_def (cfg, ainfo->offset, orig_type); } } } else { switch (ainfo->storage) { case ArgOnStack: if (!m_type_is_byref (t)) { if (t->type == MONO_TYPE_R4) { 
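/* R4 occupies a single 4-byte slot (argsize 4); R8 and I8 below take two. */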
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, X86_ESP, ainfo->offset, in->dreg); argsize = 4; } else if (t->type == MONO_TYPE_R8) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, X86_ESP, ainfo->offset, in->dreg); argsize = 8; } else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_U8) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, ainfo->offset + 4, MONO_LVREG_MS (in->dreg)); MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, ainfo->offset, MONO_LVREG_LS (in->dreg)); argsize = 4; } else { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, ainfo->offset, in->dreg); argsize = 4; } } else { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, ainfo->offset, in->dreg); argsize = 4; } break; case ArgInIReg: arg->opcode = OP_MOVE; arg->dreg = ainfo->reg; MONO_ADD_INS (cfg->cbb, arg); argsize = 0; break; default: g_assert_not_reached (); } if (cfg->compute_gc_maps) { if (argsize == 4) { /* FIXME: The == STACK_OBJ check might be fragile ? */ if (sig->hasthis && i == 0 && call->args [i]->type == STACK_OBJ) { /* this */ if (call->need_unbox_trampoline) /* The unbox trampoline transforms this into a managed pointer */ emit_gc_param_slot_def (cfg, ainfo->offset, mono_class_get_byref_type (mono_defaults.int_class)); else emit_gc_param_slot_def (cfg, ainfo->offset, mono_get_object_type ()); } else { emit_gc_param_slot_def (cfg, ainfo->offset, orig_type); } } else { /* i8/r8 */ for (j = 0; j < argsize; j += 4) emit_gc_param_slot_def (cfg, ainfo->offset + j, NULL); } } } if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sentinelpos)) { /* Emit the signature cookie just before the implicit arguments */ emit_sig_cookie (cfg, call, cinfo); emit_gc_param_slot_def (cfg, cinfo->sig_cookie.offset, NULL); } } if (sig_ret && (MONO_TYPE_ISSTRUCT (sig_ret) || cinfo->vtype_retaddr)) { MonoInst *vtarg; if (cinfo->ret.storage == ArgValuetypeInReg) { /* Already done */ } else if (cinfo->ret.storage == ArgInIReg) { NOT_IMPLEMENTED; /* The return address is passed in a register */ MONO_INST_NEW (cfg, vtarg, OP_MOVE); vtarg->sreg1 = call->inst.dreg; vtarg->dreg = mono_alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, vtarg); mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE); } else if (cinfo->vtype_retaddr && cinfo->vret_arg_index == 0) { MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, cinfo->ret.offset, call->vret_var->dreg); emit_gc_param_slot_def (cfg, cinfo->ret.offset, NULL); } } call->stack_usage = cinfo->stack_usage; call->stack_align_amount = cinfo->stack_align_amount; } void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src) { MonoCallInst *call = (MonoCallInst*)ins->inst_p0; ArgInfo *ainfo = (ArgInfo*)ins->inst_p1; int size = ins->backend.size; if (ainfo->storage == ArgValuetypeInReg) { int dreg = mono_alloc_ireg (cfg); switch (size) { case 1: MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, 0); break; case 2: MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, 0); break; case 4: MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 0); break; case 3: /* FIXME */ default: g_assert_not_reached (); } mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, FALSE); } else { if (cfg->gsharedvt && mini_is_gsharedvt_klass (ins->klass)) { /* Pass by addr */ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, ainfo->offset, src->dreg); } else if (size <= 4) { int dreg = mono_alloc_ireg (cfg); if 
(ainfo->pass_empty_struct) { //Pass empty struct value as 0 on platforms representing empty structs as 1 byte. MONO_EMIT_NEW_ICONST (cfg, dreg, 0); } else { MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 0); } MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, ainfo->offset, dreg); } else if (size <= 20) { mini_emit_memcpy (cfg, X86_ESP, ainfo->offset, src->dreg, 0, size, 4); } else { // FIXME: Code growth mini_emit_memcpy (cfg, X86_ESP, ainfo->offset, src->dreg, 0, size, 4); } } } void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val) { MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret); if (!m_type_is_byref (ret)) { if (ret->type == MONO_TYPE_R4) { if (COMPILE_LLVM (cfg)) MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg); /* Nothing to do */ return; } else if (ret->type == MONO_TYPE_R8) { if (COMPILE_LLVM (cfg)) MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg); /* Nothing to do */ return; } else if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) { if (COMPILE_LLVM (cfg)) MONO_EMIT_NEW_UNALU (cfg, OP_LMOVE, cfg->ret->dreg, val->dreg); else { MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, X86_EAX, MONO_LVREG_LS (val->dreg)); MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, X86_EDX, MONO_LVREG_MS (val->dreg)); } return; } } MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg); } #define EMIT_COND_BRANCH(ins,cond,sign) \ if (ins->inst_true_bb->native_offset) { \ x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \ } else { \ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \ if ((cfg->opt & MONO_OPT_BRANCH) && \ x86_is_imm8 (ins->inst_true_bb->max_offset - cpos)) \ x86_branch8 (code, cond, 0, sign); \ else \ x86_branch32 (code, cond, 0, sign); \ } /* * Emit an exception if condition is fail and * if possible do a directly branch to target */ #define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \ do { \ MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \ if (tins == NULL) { \ mono_add_patch_info (cfg, code - cfg->native_code, \ MONO_PATCH_INFO_EXC, exc_name); \ x86_branch32 (code, cond, 0, signed); \ } else { \ EMIT_COND_BRANCH (tins, cond, signed); \ } \ } while (0); #define EMIT_FPCOMPARE(code) do { \ x86_fcompp (code); \ x86_fnstsw (code); \ } while (0); static guint8* x86_align_and_patch (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data) { gboolean needs_paddings = TRUE; guint32 pad_size; MonoJumpInfo *jinfo = NULL; if (cfg->abs_patches) { jinfo = (MonoJumpInfo*)g_hash_table_lookup (cfg->abs_patches, data); if (jinfo && (jinfo->type == MONO_PATCH_INFO_JIT_ICALL_ADDR || jinfo->type == MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR)) needs_paddings = FALSE; } if (cfg->compile_aot) needs_paddings = FALSE; /*The address must be 4 bytes aligned to avoid spanning multiple cache lines. This is required for code patching to be safe on SMP machines. 
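E.g. with (code + 1) at offset ...e or ...f, up to three pad bytes are emitted first so the 32-bit call displacement starts 4-byte aligned.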
*/ pad_size = (guint32)(code + 1 - cfg->native_code) & 0x3; if (needs_paddings && pad_size) x86_padding (code, 4 - pad_size); mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data); return code; } static guint8* emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data) { code = x86_align_and_patch (cfg, code, patch_type, data); x86_call_code (code, 0); return code; } #define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_IADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_ISBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB_IMM))) /* * mono_peephole_pass_1: * * Perform peephole opts which should/can be performed before local regalloc */ void mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *n; MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) { MonoInst *last_ins = mono_inst_prev (ins, FILTER_IL_SEQ_POINT); switch (ins->opcode) { case OP_IADD_IMM: case OP_ADD_IMM: if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS)) { /* * X86_LEA is like ADD, but doesn't have the * sreg1==dreg restriction. */ ins->opcode = OP_X86_LEA_MEMBASE; ins->inst_basereg = ins->sreg1; } else if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1)) ins->opcode = OP_X86_INC_REG; break; case OP_SUB_IMM: case OP_ISUB_IMM: if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS)) { ins->opcode = OP_X86_LEA_MEMBASE; ins->inst_basereg = ins->sreg1; ins->inst_imm = -ins->inst_imm; } else if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1)) ins->opcode = OP_X86_DEC_REG; break; case OP_COMPARE_IMM: case OP_ICOMPARE_IMM: /* OP_COMPARE_IMM (reg, 0) * --> * OP_X86_TEST_NULL (reg) */ if (!ins->inst_imm) ins->opcode = OP_X86_TEST_NULL; break; case OP_X86_COMPARE_MEMBASE_IMM: /* * OP_STORE_MEMBASE_REG reg, offset(basereg) * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm * --> * OP_STORE_MEMBASE_REG reg, offset(basereg) * OP_COMPARE_IMM reg, imm * * Note: if imm = 0 then OP_COMPARE_IMM is replaced with OP_X86_TEST_NULL */ if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { ins->opcode = OP_COMPARE_IMM; ins->sreg1 = last_ins->sreg1; /* check if we can remove cmp reg,0 with test null */ if (!ins->inst_imm) ins->opcode = OP_X86_TEST_NULL; } break; case OP_X86_PUSH_MEMBASE: if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG || last_ins->opcode == OP_STORE_MEMBASE_REG) && ins->inst_basereg == last_ins->inst_destbasereg && ins->inst_offset == last_ins->inst_offset) { ins->opcode = OP_X86_PUSH; ins->sreg1 = last_ins->sreg1; } break; } mono_peephole_ins (bb, ins); } } void mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *n; MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) { switch (ins->opcode) { case OP_ICONST: /* reg = 0 -> XOR (reg, reg) */ /* XOR sets cflags on x86, so we can't do it always */ if (ins->inst_c0 == 0 && (!ins->next || (ins->next && INST_IGNORES_CFLAGS (ins->next->opcode)))) { MonoInst *ins2; ins->opcode = OP_IXOR; ins->sreg1 = ins->dreg; ins->sreg2 = ins->dreg; /* * Convert succeeding STORE_MEMBASE_IMM 0 ins to STORE_MEMBASE_REG * since it takes 3 bytes instead of 7.
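 * E.g. "mov %eax,0x8(%ebp)" (89 45 08) replaces "movl $0,0x8(%ebp)"
 * (c7 45 08 00 00 00 00) once the xor has cleared %eax.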
*/ for (ins2 = mono_inst_next (ins, FILTER_IL_SEQ_POINT); ins2; ins2 = ins2->next) { if ((ins2->opcode == OP_STORE_MEMBASE_IMM) && (ins2->inst_imm == 0)) { ins2->opcode = OP_STORE_MEMBASE_REG; ins2->sreg1 = ins->dreg; } else if ((ins2->opcode == OP_STOREI4_MEMBASE_IMM) && (ins2->inst_imm == 0)) { ins2->opcode = OP_STOREI4_MEMBASE_REG; ins2->sreg1 = ins->dreg; } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM)) { /* Continue iteration */ } else break; } } break; case OP_IADD_IMM: case OP_ADD_IMM: if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1)) ins->opcode = OP_X86_INC_REG; break; case OP_ISUB_IMM: case OP_SUB_IMM: if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1)) ins->opcode = OP_X86_DEC_REG; break; } mono_peephole_ins (bb, ins); } } #define NEW_INS(cfg,ins,dest,op) do { \ MONO_INST_NEW ((cfg), (dest), (op)); \ (dest)->cil_code = (ins)->cil_code; \ mono_bblock_insert_before_ins (bb, ins, (dest)); \ } while (0) /* * mono_arch_lowering_pass: * * Converts complex opcodes into simpler ones so that each IR instruction * corresponds to one machine instruction. */ void mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins, *next; /* * FIXME: Need to add more instructions, but the current machine * description can't model some parts of the composite instructions like * cdq. */ MONO_BB_FOR_EACH_INS_SAFE (bb, next, ins) { switch (ins->opcode) { case OP_IREM_IMM: case OP_IDIV_IMM: case OP_IDIV_UN_IMM: case OP_IREM_UN_IMM: /* * Keep the cases where we could generate optimized code, otherwise convert * to the non-imm variant. */ if ((ins->opcode == OP_IREM_IMM) && mono_is_power_of_two (ins->inst_imm) >= 0) break; mono_decompose_op_imm (cfg, bb, ins); break; #ifdef MONO_ARCH_SIMD_INTRINSICS case OP_EXPAND_I1: { MonoInst *temp; int temp_reg1 = mono_alloc_ireg (cfg); int temp_reg2 = mono_alloc_ireg (cfg); int original_reg = ins->sreg1; NEW_INS (cfg, ins, temp, OP_ICONV_TO_U1); temp->sreg1 = original_reg; temp->dreg = temp_reg1; NEW_INS (cfg, ins, temp, OP_SHL_IMM); temp->sreg1 = temp_reg1; temp->dreg = temp_reg2; temp->inst_imm = 8; NEW_INS (cfg, ins, temp, OP_IOR); temp->sreg1 = temp->dreg = temp_reg2; temp->sreg2 = temp_reg1; ins->opcode = OP_EXPAND_I2; ins->sreg1 = temp_reg2; } break; #endif default: break; } } bb->max_vreg = cfg->next_vreg; } static const int branch_cc_table [] = { X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT, X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT, X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC }; /* Maps CMP_... constants to X86_CC_...
constants */ static const int cc_table [] = { X86_CC_EQ, X86_CC_NE, X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT, X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT }; static const int cc_signed_table [] = { TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE }; static unsigned char* emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int size, gboolean is_signed) { #define XMM_TEMP_REG 0 /*This SSE2 optimization must not be done with OPT_SIMD in place as it clobbers xmm0.*/ /*The xmm pass decomposes OP_FCONV_ ops anyway.*/ if (cfg->opt & MONO_OPT_SSE2 && size < 8 && !(cfg->opt & MONO_OPT_SIMD)) { /* optimize by assigning a local var for this use so we avoid * the stack manipulations */ x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8); x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE); x86_movsd_reg_membase (code, XMM_TEMP_REG, X86_ESP, 0); x86_cvttsd2si (code, dreg, XMM_TEMP_REG); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8); if (size == 1) x86_widen_reg (code, dreg, dreg, is_signed, FALSE); else if (size == 2) x86_widen_reg (code, dreg, dreg, is_signed, TRUE); return code; } x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4); x86_fnstcw_membase(code, X86_ESP, 0); x86_mov_reg_membase (code, dreg, X86_ESP, 0, 2); x86_alu_reg_imm (code, X86_OR, dreg, 0xc00); x86_mov_membase_reg (code, X86_ESP, 2, dreg, 2); x86_fldcw_membase (code, X86_ESP, 2); if (size == 8) { x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8); x86_fist_pop_membase (code, X86_ESP, 0, TRUE); x86_pop_reg (code, dreg); /* FIXME: need the high register * x86_pop_reg (code, dreg_high); */ } else { x86_push_reg (code, X86_EAX); // SP = SP - 4 x86_fist_pop_membase (code, X86_ESP, 0, FALSE); x86_pop_reg (code, dreg); } x86_fldcw_membase (code, X86_ESP, 0); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4); if (size == 1) x86_widen_reg (code, dreg, dreg, is_signed, FALSE); else if (size == 2) x86_widen_reg (code, dreg, dreg, is_signed, TRUE); return code; } static unsigned char* mono_emit_stack_alloc (MonoCompile *cfg, guchar *code, MonoInst* tree) { int sreg = tree->sreg1; int need_touch = FALSE; #if defined (TARGET_WIN32) || defined (MONO_ARCH_SIGSEGV_ON_ALTSTACK) need_touch = TRUE; #endif if (need_touch) { guint8* br[5]; /* * Under Windows: * If requested stack size is larger than one page, * perform stack-touch operation */ /* * Generate stack probe code. * Under Windows, it is necessary to allocate one page at a time, * "touching" stack after each successful sub-allocation. This is * because of the way stack growth is implemented - there is a * guard page before the lowest stack page that is currently committed. * Stack normally grows sequentially so OS traps access to the * guard page and commits more pages when needed. */ x86_test_reg_imm (code, sreg, ~0xFFF); br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE); br[2] = code; /* loop */ x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000); x86_test_membase_reg (code, X86_ESP, 0, X86_ESP); /* * By the end of the loop, sreg is smaller than 0x1000, so the init routine * that follows only initializes the last part of the area.
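 * E.g. for a 0x2800 byte request the loop touches the pages at esp-0x1000
 * and esp-0x2000, leaving the final 0x800 bytes for the plain sub below.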
*/ /* Same as the init code below with size==0x1000 */ if (tree->flags & MONO_INST_INIT) { x86_push_reg (code, X86_EAX); x86_push_reg (code, X86_ECX); x86_push_reg (code, X86_EDI); x86_mov_reg_imm (code, X86_ECX, (0x1000 >> 2)); x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX); if (cfg->param_area) x86_lea_membase (code, X86_EDI, X86_ESP, 12 + ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT)); else x86_lea_membase (code, X86_EDI, X86_ESP, 12); x86_cld (code); x86_prefix (code, X86_REP_PREFIX); x86_stosl (code); x86_pop_reg (code, X86_EDI); x86_pop_reg (code, X86_ECX); x86_pop_reg (code, X86_EAX); } x86_alu_reg_imm (code, X86_SUB, sreg, 0x1000); x86_alu_reg_imm (code, X86_CMP, sreg, 0x1000); br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE); x86_patch (br[3], br[2]); x86_test_reg_reg (code, sreg, sreg); br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE); x86_alu_reg_reg (code, X86_SUB, X86_ESP, sreg); br[1] = code; x86_jump8 (code, 0); x86_patch (br[0], code); x86_alu_reg_reg (code, X86_SUB, X86_ESP, sreg); x86_patch (br[1], code); x86_patch (br[4], code); } else x86_alu_reg_reg (code, X86_SUB, X86_ESP, tree->sreg1); if (tree->flags & MONO_INST_INIT) { int offset = 0; if (tree->dreg != X86_EAX && sreg != X86_EAX) { x86_push_reg (code, X86_EAX); offset += 4; } if (tree->dreg != X86_ECX && sreg != X86_ECX) { x86_push_reg (code, X86_ECX); offset += 4; } if (tree->dreg != X86_EDI && sreg != X86_EDI) { x86_push_reg (code, X86_EDI); offset += 4; } x86_shift_reg_imm (code, X86_SHR, sreg, 2); x86_mov_reg_reg (code, X86_ECX, sreg); x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX); if (cfg->param_area) x86_lea_membase (code, X86_EDI, X86_ESP, offset + ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT)); else x86_lea_membase (code, X86_EDI, X86_ESP, offset); x86_cld (code); x86_prefix (code, X86_REP_PREFIX); x86_stosl (code); if (tree->dreg != X86_EDI && sreg != X86_EDI) x86_pop_reg (code, X86_EDI); if (tree->dreg != X86_ECX && sreg != X86_ECX) x86_pop_reg (code, X86_ECX); if (tree->dreg != X86_EAX && sreg != X86_EAX) x86_pop_reg (code, X86_EAX); } return code; } static guint8* emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code) { /* Move return value to the target register */ switch (ins->opcode) { case OP_CALL: case OP_CALL_REG: case OP_CALL_MEMBASE: x86_mov_reg_reg (code, ins->dreg, X86_EAX); break; default: break; } return code; } #ifdef TARGET_MACH static int tls_gs_offset; #endif gboolean mono_arch_have_fast_tls (void) { #ifdef TARGET_MACH static gboolean have_fast_tls = FALSE; static gboolean inited = FALSE; guint32 *ins; if (mini_debug_options.use_fallback_tls) return FALSE; if (inited) return have_fast_tls; ins = (guint32*)pthread_getspecific; /* * We're looking for these two instructions: * * mov 0x4(%esp),%eax * mov %gs:[offset](,%eax,4),%eax */ have_fast_tls = ins [0] == 0x0424448b && ins [1] == 0x85048b65; tls_gs_offset = ins [2]; inited = TRUE; return have_fast_tls; #elif defined(TARGET_ANDROID) return FALSE; #else if (mini_debug_options.use_fallback_tls) return FALSE; return TRUE; #endif } static guint8* mono_x86_emit_tls_get (guint8* code, int dreg, int tls_offset) { #if defined (TARGET_MACH) x86_prefix (code, X86_GS_PREFIX); x86_mov_reg_mem (code, dreg, tls_gs_offset + (tls_offset * 4), 4); #elif defined (TARGET_WIN32) /* * See the Under the Hood article in the May 1996 issue of Microsoft Systems * Journal and/or a disassembly of the TlsGet () function. 
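 * In short (the layout used below): %fs:0x18 yields the TEB, slots 0-63
 * live in the inline array at TEB+0xe10 (3600), and larger indexes go
 * through the TlsExpansionSlots pointer at TEB+0xf94.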
*/ x86_prefix (code, X86_FS_PREFIX); x86_mov_reg_mem (code, dreg, 0x18, 4); if (tls_offset < 64) { x86_mov_reg_membase (code, dreg, dreg, 3600 + (tls_offset * 4), 4); } else { guint8 *buf [16]; g_assert (tls_offset < 0x440); /* Load TEB->TlsExpansionSlots */ x86_mov_reg_membase (code, dreg, dreg, 0xf94, 4); x86_test_reg_reg (code, dreg, dreg); buf [0] = code; x86_branch (code, X86_CC_EQ, code, TRUE); x86_mov_reg_membase (code, dreg, dreg, (tls_offset * 4) - 0x100, 4); x86_patch (buf [0], code); } #else if (optimize_for_xen) { x86_prefix (code, X86_GS_PREFIX); x86_mov_reg_mem (code, dreg, 0, 4); x86_mov_reg_membase (code, dreg, dreg, tls_offset, 4); } else { x86_prefix (code, X86_GS_PREFIX); x86_mov_reg_mem (code, dreg, tls_offset, 4); } #endif return code; } static guint8* mono_x86_emit_tls_set (guint8* code, int sreg, int tls_offset) { #if defined (TARGET_MACH) x86_prefix (code, X86_GS_PREFIX); x86_mov_mem_reg (code, tls_gs_offset + (tls_offset * 4), sreg, 4); #elif defined (TARGET_WIN32) g_assert_not_reached (); #else x86_prefix (code, X86_GS_PREFIX); x86_mov_mem_reg (code, tls_offset, sreg, 4); #endif return code; } /* * emit_setup_lmf: * * Emit code to initialize an LMF structure at LMF_OFFSET. */ static guint8* emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset) { /* save all caller saved regs */ x86_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebx), X86_EBX, sizeof (target_mgreg_t)); mono_emit_unwind_op_offset (cfg, code, X86_EBX, - cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebx)); x86_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, edi), X86_EDI, sizeof (target_mgreg_t)); mono_emit_unwind_op_offset (cfg, code, X86_EDI, - cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, edi)); x86_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, esi), X86_ESI, sizeof (target_mgreg_t)); mono_emit_unwind_op_offset (cfg, code, X86_ESI, - cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, esi)); x86_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebp), X86_EBP, sizeof (target_mgreg_t)); /* save the current IP */ if (cfg->compile_aot) { /* This pushes the current ip */ x86_call_imm (code, 0); x86_pop_reg (code, X86_EAX); } else { mono_add_patch_info (cfg, code + 1 - cfg->native_code, MONO_PATCH_INFO_IP, NULL); x86_mov_reg_imm (code, X86_EAX, 0); } x86_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, eip), X86_EAX, sizeof (target_mgreg_t)); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, eip), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebp), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, esi), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, edi), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebx), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, esp), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, method), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr), SLOT_NOREF); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + 
MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF); return code; } #ifdef TARGET_WIN32 #define TEB_LAST_ERROR_OFFSET 0x34 static guint8* emit_get_last_error (guint8* code, int dreg) { /* Threads last error value is located in TEB_LAST_ERROR_OFFSET. */ x86_prefix (code, X86_FS_PREFIX); x86_mov_reg_mem (code, dreg, TEB_LAST_ERROR_OFFSET, sizeof (guint32)); return code; } #else static guint8* emit_get_last_error (guint8* code, int dreg) { g_assert_not_reached (); } #endif /* benchmark and set based on cpu */ #define LOOP_ALIGNMENT 8 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting) #ifndef DISABLE_JIT void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) { MonoInst *ins; MonoCallInst *call; guint8 *code = cfg->native_code + cfg->code_len; if (cfg->opt & MONO_OPT_LOOP) { int pad, align = LOOP_ALIGNMENT; /* set alignment depending on cpu */ if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) { pad = align - pad; /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/ x86_padding (code, pad); cfg->code_len += pad; bb->native_offset = cfg->code_len; } } if (cfg->verbose_level > 2) g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset); int cpos = bb->max_offset; set_code_cursor (cfg, code); mono_debug_open_block (cfg, bb, code - cfg->native_code); if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) x86_breakpoint (code); MONO_BB_FOR_EACH_INS (bb, ins) { const guint offset = code - cfg->native_code; set_code_cursor (cfg, code); int max_len = ins_get_size (ins->opcode); code = realloc_code (cfg, max_len); if (cfg->debug_info) mono_debug_record_line_number (cfg, ins, offset); switch (ins->opcode) { case OP_BIGMUL: x86_mul_reg (code, ins->sreg2, TRUE); break; case OP_BIGMUL_UN: x86_mul_reg (code, ins->sreg2, FALSE); break; case OP_X86_SETEQ_MEMBASE: case OP_X86_SETNE_MEMBASE: x86_set_membase (code, ins->opcode == OP_X86_SETEQ_MEMBASE ? 
X86_CC_EQ : X86_CC_NE, ins->inst_basereg, ins->inst_offset, TRUE); break; case OP_STOREI1_MEMBASE_IMM: x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1); break; case OP_STOREI2_MEMBASE_IMM: x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2); break; case OP_STORE_MEMBASE_IMM: case OP_STOREI4_MEMBASE_IMM: x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4); break; case OP_STOREI1_MEMBASE_REG: x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1); break; case OP_STOREI2_MEMBASE_REG: x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2); break; case OP_STORE_MEMBASE_REG: case OP_STOREI4_MEMBASE_REG: x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4); break; case OP_LOADU4_MEM: x86_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4); break; case OP_LOAD_MEM: case OP_LOADI4_MEM: /* These are created by the cprop pass so they use inst_imm as the source */ x86_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4); break; case OP_LOADU1_MEM: x86_widen_mem (code, ins->dreg, ins->inst_imm, FALSE, FALSE); break; case OP_LOADU2_MEM: x86_widen_mem (code, ins->dreg, ins->inst_imm, FALSE, TRUE); break; case OP_LOAD_MEMBASE: case OP_LOADI4_MEMBASE: case OP_LOADU4_MEMBASE: x86_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4); break; case OP_LOADU1_MEMBASE: x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE); break; case OP_LOADI1_MEMBASE: x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE); break; case OP_LOADU2_MEMBASE: x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE); break; case OP_LOADI2_MEMBASE: x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE); break; case OP_ICONV_TO_I1: case OP_SEXT_I1: x86_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE); break; case OP_ICONV_TO_I2: case OP_SEXT_I2: x86_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE); break; case OP_ICONV_TO_U1: x86_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE); break; case OP_ICONV_TO_U2: x86_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE); break; case OP_COMPARE: case OP_ICOMPARE: x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2); break; case OP_COMPARE_IMM: case OP_ICOMPARE_IMM: x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm); break; case OP_X86_COMPARE_MEMBASE_REG: x86_alu_membase_reg (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2); break; case OP_X86_COMPARE_MEMBASE_IMM: x86_alu_membase_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm); break; case OP_X86_COMPARE_MEMBASE8_IMM: x86_alu_membase8_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm); break; case OP_X86_COMPARE_REG_MEMBASE: x86_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_X86_COMPARE_MEM_IMM: x86_alu_mem_imm (code, X86_CMP, ins->inst_offset, ins->inst_imm); break; case OP_X86_TEST_NULL: x86_test_reg_reg (code, ins->sreg1, ins->sreg1); break; case OP_X86_ADD_MEMBASE_IMM: x86_alu_membase_imm (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm); break; case OP_X86_ADD_REG_MEMBASE: x86_alu_reg_membase (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_X86_SUB_MEMBASE_IMM: x86_alu_membase_imm (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm); break; 
case OP_X86_SUB_REG_MEMBASE: x86_alu_reg_membase (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_X86_AND_MEMBASE_IMM: x86_alu_membase_imm (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm); break; case OP_X86_OR_MEMBASE_IMM: x86_alu_membase_imm (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm); break; case OP_X86_XOR_MEMBASE_IMM: x86_alu_membase_imm (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm); break; case OP_X86_ADD_MEMBASE_REG: x86_alu_membase_reg (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2); break; case OP_X86_SUB_MEMBASE_REG: x86_alu_membase_reg (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2); break; case OP_X86_AND_MEMBASE_REG: x86_alu_membase_reg (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2); break; case OP_X86_OR_MEMBASE_REG: x86_alu_membase_reg (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->sreg2); break; case OP_X86_XOR_MEMBASE_REG: x86_alu_membase_reg (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2); break; case OP_X86_INC_MEMBASE: x86_inc_membase (code, ins->inst_basereg, ins->inst_offset); break; case OP_X86_INC_REG: x86_inc_reg (code, ins->dreg); break; case OP_X86_DEC_MEMBASE: x86_dec_membase (code, ins->inst_basereg, ins->inst_offset); break; case OP_X86_DEC_REG: x86_dec_reg (code, ins->dreg); break; case OP_X86_MUL_REG_MEMBASE: x86_imul_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_X86_AND_REG_MEMBASE: x86_alu_reg_membase (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_X86_OR_REG_MEMBASE: x86_alu_reg_membase (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_X86_XOR_REG_MEMBASE: x86_alu_reg_membase (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset); break; case OP_BREAK: x86_breakpoint (code); break; case OP_RELAXED_NOP: x86_prefix (code, X86_REP_PREFIX); x86_nop (code); break; case OP_HARD_NOP: x86_nop (code); break; case OP_NOP: case OP_DUMMY_USE: case OP_DUMMY_ICONST: case OP_DUMMY_R8CONST: case OP_DUMMY_R4CONST: case OP_NOT_REACHED: case OP_NOT_NULL: break; case OP_IL_SEQ_POINT: mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); break; case OP_SEQ_POINT: { int i; if (cfg->compile_aot) NOT_IMPLEMENTED; /* Have to use ecx as a temp reg since this can occur after OP_SETRET */ /* * We do this _before_ the breakpoint, so single stepping after * a breakpoint is hit will step to the next IL offset. */ if (ins->flags & MONO_INST_SINGLE_STEP_LOC) { MonoInst *var = cfg->arch.ss_tramp_var; guint8 *br [1]; g_assert (var); g_assert (var->opcode == OP_REGOFFSET); /* Load ss_tramp_var */ /* This is equal to &ss_trampoline */ x86_mov_reg_membase (code, X86_ECX, var->inst_basereg, var->inst_offset, sizeof (target_mgreg_t)); x86_mov_reg_membase (code, X86_ECX, X86_ECX, 0, sizeof (target_mgreg_t)); x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0); br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE); x86_call_reg (code, X86_ECX); x86_patch (br [0], code); } /* * Many parts of sdb depend on the ip after the single step trampoline call to be equal to the seq point offset. * This means we have to put the loading of bp_tramp_var after the offset. 
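 * The bp_tramp_var load below is padded up to OP_SEQ_POINT_BP_OFFSET bytes
 * so the breakpoint nops sit at a fixed offset from the seq point address.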
*/ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code); MonoInst *var = cfg->arch.bp_tramp_var; g_assert (var); g_assert (var->opcode == OP_REGOFFSET); /* Load the address of the bp trampoline */ /* This needs to be constant size */ guint8 *start = code; x86_mov_reg_membase (code, X86_ECX, var->inst_basereg, var->inst_offset, 4); if (code < start + OP_SEQ_POINT_BP_OFFSET) { int size = start + OP_SEQ_POINT_BP_OFFSET - code; x86_padding (code, size); } /* * A placeholder for a possible breakpoint inserted by * mono_arch_set_breakpoint (). */ for (i = 0; i < 2; ++i) x86_nop (code); /* * Add an additional nop so skipping the bp doesn't cause the ip to point * to another IL offset. */ x86_nop (code); break; } case OP_ADDCC: case OP_IADDCC: case OP_IADD: x86_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2); break; case OP_ADC: case OP_IADC: x86_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2); break; case OP_ADDCC_IMM: case OP_ADD_IMM: case OP_IADD_IMM: x86_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm); break; case OP_ADC_IMM: case OP_IADC_IMM: x86_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm); break; case OP_SUBCC: case OP_ISUBCC: case OP_ISUB: x86_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2); break; case OP_SBB: case OP_ISBB: x86_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2); break; case OP_SUBCC_IMM: case OP_SUB_IMM: case OP_ISUB_IMM: x86_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm); break; case OP_SBB_IMM: case OP_ISBB_IMM: x86_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm); break; case OP_IAND: x86_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2); break; case OP_AND_IMM: case OP_IAND_IMM: x86_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm); break; case OP_IDIV: case OP_IREM: /* * The code is the same for div/rem, the allocator will allocate dreg * to EAX/EDX as appropriate. */ if (ins->sreg2 == X86_EDX) { /* cdq clobbers this */ x86_push_reg (code, ins->sreg2); x86_cdq (code); x86_div_membase (code, X86_ESP, 0, TRUE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4); } else { x86_cdq (code); x86_div_reg (code, ins->sreg2, TRUE); } break; case OP_IDIV_UN: case OP_IREM_UN: if (ins->sreg2 == X86_EDX) { x86_push_reg (code, ins->sreg2); x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX); x86_div_membase (code, X86_ESP, 0, FALSE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4); } else { x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX); x86_div_reg (code, ins->sreg2, FALSE); } break; case OP_DIV_IMM: x86_mov_reg_imm (code, ins->sreg2, ins->inst_imm); x86_cdq (code); x86_div_reg (code, ins->sreg2, TRUE); break; case OP_IREM_IMM: { int power = mono_is_power_of_two (ins->inst_imm); g_assert (ins->sreg1 == X86_EAX); g_assert (ins->dreg == X86_EAX); g_assert (power >= 0); if (power == 1) { /* Based on http://compilers.iecc.com/comparch/article/93-04-079 */ x86_cdq (code); x86_alu_reg_imm (code, X86_AND, X86_EAX, 1); /* * If the dividend is >= 0, this does nothing. If it is negative, it * transforms %eax=0 into %eax=0, and %eax=1 into %eax=-1.
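 * (After cdq, %edx is 0 for a non-negative dividend and -1 for a negative
 * one, so the xor/sub pair below conditionally negates the masked bit:
 * e.g. -3 % 2: %eax = -3 & 1 = 1, then (1 ^ -1) - (-1) = -1.)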
*/ x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EDX); x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX); } else if (power == 0) { x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); } else { /* Based on gcc code */ /* Add compensation for negative dividents */ x86_cdq (code); x86_shift_reg_imm (code, X86_SHR, X86_EDX, 32 - power); x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_EDX); /* Compute remainder */ x86_alu_reg_imm (code, X86_AND, X86_EAX, (1 << power) - 1); /* Remove compensation */ x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX); } break; } case OP_IOR: x86_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2); break; case OP_OR_IMM: case OP_IOR_IMM: x86_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm); break; case OP_IXOR: x86_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2); break; case OP_XOR_IMM: case OP_IXOR_IMM: x86_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm); break; case OP_ISHL: g_assert (ins->sreg2 == X86_ECX); x86_shift_reg (code, X86_SHL, ins->dreg); break; case OP_ISHR: g_assert (ins->sreg2 == X86_ECX); x86_shift_reg (code, X86_SAR, ins->dreg); break; case OP_SHR_IMM: case OP_ISHR_IMM: x86_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm); break; case OP_SHR_UN_IMM: case OP_ISHR_UN_IMM: x86_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm); break; case OP_ISHR_UN: g_assert (ins->sreg2 == X86_ECX); x86_shift_reg (code, X86_SHR, ins->dreg); break; case OP_SHL_IMM: case OP_ISHL_IMM: x86_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm); break; case OP_LSHL: { guint8 *jump_to_end; /* handle shifts below 32 bits */ x86_shld_reg (code, ins->backend.reg3, ins->sreg1); x86_shift_reg (code, X86_SHL, ins->sreg1); x86_test_reg_imm (code, X86_ECX, 32); jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE); /* handle shift over 32 bit */ x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1); x86_clear_reg (code, ins->sreg1); x86_patch (jump_to_end, code); } break; case OP_LSHR: { guint8 *jump_to_end; /* handle shifts below 32 bits */ x86_shrd_reg (code, ins->sreg1, ins->backend.reg3); x86_shift_reg (code, X86_SAR, ins->backend.reg3); x86_test_reg_imm (code, X86_ECX, 32); jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE); /* handle shifts over 31 bits */ x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3); x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 31); x86_patch (jump_to_end, code); } break; case OP_LSHR_UN: { guint8 *jump_to_end; /* handle shifts below 32 bits */ x86_shrd_reg (code, ins->sreg1, ins->backend.reg3); x86_shift_reg (code, X86_SHR, ins->backend.reg3); x86_test_reg_imm (code, X86_ECX, 32); jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE); /* handle shifts over 31 bits */ x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3); x86_clear_reg (code, ins->backend.reg3); x86_patch (jump_to_end, code); } break; case OP_LSHL_IMM: if (ins->inst_imm >= 32) { x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1); x86_clear_reg (code, ins->sreg1); x86_shift_reg_imm (code, X86_SHL, ins->backend.reg3, ins->inst_imm - 32); } else { x86_shld_reg_imm (code, ins->backend.reg3, ins->sreg1, ins->inst_imm); x86_shift_reg_imm (code, X86_SHL, ins->sreg1, ins->inst_imm); } break; case OP_LSHR_IMM: if (ins->inst_imm >= 32) { x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3); x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 0x1f); x86_shift_reg_imm (code, X86_SAR, ins->sreg1, ins->inst_imm - 32); } else { x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm); x86_shift_reg_imm (code, X86_SAR, 
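		/*
		 * Two notes on the arithmetic above (illustrative sketch, assuming
		 * imm == 1 << power). OP_IREM_IMM is the classic branch-free
		 * power-of-two remainder for C's truncated division; in C terms the
		 * emitted cdq/shr/add/and/sub computes
		 *
		 *   sign = x >> 31;                                  // cdq: 0 or -1
		 *   bias = (guint32)sign >> (32 - power);            // shr: 0 or (1 << power) - 1
		 *   rem  = ((x + bias) & ((1 << power) - 1)) - bias;
		 *
		 * e.g. x = -5, power = 2: bias = 3, (-5 + 3) & 3 = 2, 2 - 3 = -1 == -5 % 4.
		 * The 64-bit shifts need the explicit 'test ecx, 32' because the
		 * hardware masks variable shift counts to 5 bits, so counts >= 32
		 * must be fixed up by hand (move the low word into the high word,
		 * then clear or sign-fill the vacated word).
		 */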
ins->backend.reg3, ins->inst_imm); } break; case OP_LSHR_UN_IMM: if (ins->inst_imm >= 32) { x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3); x86_clear_reg (code, ins->backend.reg3); x86_shift_reg_imm (code, X86_SHR, ins->sreg1, ins->inst_imm - 32); } else { x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm); x86_shift_reg_imm (code, X86_SHR, ins->backend.reg3, ins->inst_imm); } break; case OP_INOT: x86_not_reg (code, ins->sreg1); break; case OP_INEG: x86_neg_reg (code, ins->sreg1); break; case OP_IMUL: x86_imul_reg_reg (code, ins->sreg1, ins->sreg2); break; case OP_MUL_IMM: case OP_IMUL_IMM: switch (ins->inst_imm) { case 2: /* MOV r1, r2 */ /* ADD r1, r1 */ x86_mov_reg_reg (code, ins->dreg, ins->sreg1); x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg); break; case 3: /* LEA r1, [r2 + r2*2] */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1); break; case 5: /* LEA r1, [r2 + r2*4] */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); break; case 6: /* LEA r1, [r2 + r2*2] */ /* ADD r1, r1 */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1); x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg); break; case 9: /* LEA r1, [r2 + r2*8] */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 3); break; case 10: /* LEA r1, [r2 + r2*4] */ /* ADD r1, r1 */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg); break; case 12: /* LEA r1, [r2 + r2*2] */ /* SHL r1, 2 */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1); x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2); break; case 25: /* LEA r1, [r2 + r2*4] */ /* LEA r1, [r1 + r1*4] */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2); break; case 100: /* LEA r1, [r2 + r2*4] */ /* SHL r1, 2 */ /* LEA r1, [r1 + r1*4] */ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2); x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2); x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2); break; default: x86_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm); break; } break; case OP_IMUL_OVF: x86_imul_reg_reg (code, ins->sreg1, ins->sreg2); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, ins->inst_exc_name); break; case OP_IMUL_OVF_UN: { /* the mul operation and the exception check should most likely be split */ int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE; /*g_assert (ins->sreg2 == X86_EAX); g_assert (ins->dreg == X86_EAX);*/ if (ins->sreg2 == X86_EAX) { non_eax_reg = ins->sreg1; } else if (ins->sreg1 == X86_EAX) { non_eax_reg = ins->sreg2; } else { /* no need to save since we're going to store to it anyway */ if (ins->dreg != X86_EAX) { saved_eax = TRUE; x86_push_reg (code, X86_EAX); } x86_mov_reg_reg (code, X86_EAX, ins->sreg1); non_eax_reg = ins->sreg2; } if (ins->dreg == X86_EDX) { if (!saved_eax) { saved_eax = TRUE; x86_push_reg (code, X86_EAX); } } else { saved_edx = TRUE; x86_push_reg (code, X86_EDX); } x86_mul_reg (code, non_eax_reg, FALSE); /* save before the check since pop and mov don't change the flags */ x86_mov_reg_reg (code, ins->dreg, X86_EAX); if (saved_edx) x86_pop_reg (code, X86_EDX); if (saved_eax) x86_pop_reg (code, X86_EAX); EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, ins->inst_exc_name); break; } case OP_ICONST: x86_mov_reg_imm (code, ins->dreg, ins->inst_c0); break; case OP_AOTCONST: g_assert_not_reached (); mono_add_patch_info (cfg, offset, 
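	/*
	 * For reference, the OP_MUL_IMM table above strength-reduces small constant
	 * multiplies into LEA/ADD/SHL sequences, which are cheaper than imul on most
	 * cores. LEA can compute base + index * {1, 2, 4, 8} in one instruction, so
	 * for example:
	 *
	 *   x * 9  ->  lea r, [x + x*8]
	 *   x * 10 ->  lea r, [x + x*4]; add r, r
	 *
	 * Constants not in the table fall back to imul reg, reg, imm.
	 */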
(MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0); x86_mov_reg_imm (code, ins->dreg, 0); break; case OP_JUMP_TABLE: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0); x86_mov_reg_imm (code, ins->dreg, 0); break; case OP_LOAD_GOTADDR: g_assert (ins->dreg == MONO_ARCH_GOT_REG); code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL); break; case OP_GOT_ENTRY: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_right->inst_i1, ins->inst_right->inst_p0); x86_mov_reg_membase (code, ins->dreg, ins->inst_basereg, 0xf0f0f0f0, 4); break; case OP_X86_PUSH_GOT_ENTRY: mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_right->inst_i1, ins->inst_right->inst_p0); x86_push_membase (code, ins->inst_basereg, 0xf0f0f0f0); break; case OP_MOVE: x86_mov_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_TAILCALL_PARAMETER: // This opcode helps compute sizes, i.e. // of the subsequent OP_TAILCALL, but contributes no code. g_assert (ins->next); break; case OP_TAILCALL: case OP_TAILCALL_MEMBASE: case OP_TAILCALL_REG: { call = (MonoCallInst*)ins; int pos = 0, i; gboolean const tailcall_membase = ins->opcode == OP_TAILCALL_MEMBASE; gboolean const tailcall_reg = (ins->opcode == OP_TAILCALL_REG); int const sreg1 = ins->sreg1; gboolean const sreg1_ecx = sreg1 == X86_ECX; gboolean const tailcall_membase_ecx = tailcall_membase && sreg1_ecx; gboolean const tailcall_membase_not_ecx = tailcall_membase && !sreg1_ecx; max_len += (call->stack_usage - call->stack_align_amount) / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER); code = realloc_code (cfg, max_len); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; g_assert (!cfg->method->save_lmf); // Ecx is volatile, not used for parameters, or rgctx/imt (edx). // It is also not used for return value, though that does not matter. // Ecx is preserved across the tailcall formation. // // Eax could also be used here at the cost of a push/pop moving the parameters. // Edx must be preserved as it is rgctx/imt. // // If ecx happens to be the base of the tailcall_membase, then // just end with jmp [ecx+offset] -- one instruction. // if ecx is not the base, then move ecx, [reg+offset] and later jmp [ecx] -- two instructions. 
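	/*
	 * Rough sketch of the instruction stream the tailcall code below produces
	 * for the membase flavor (register choice per the comments above; the
	 * offsets shown are illustrative only):
	 *
	 *   mov ecx, [sreg1 + inst_offset]   ; load target, unless ecx is the base
	 *   mov esi/edi/ebx, [ebp - ...]     ; restore callee-saved registers
	 *   mov eax, [esp + i]               ; copy outgoing args over our own
	 *   mov [ebp + 8 + i], eax           ;   incoming argument area
	 *   leave                            ; restore esp/ebp
	 *   jmp ecx                          ; or jmp [ecx + inst_offset]
	 */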
if (tailcall_reg) { g_assert (sreg1 > -1); x86_mov_reg_reg (code, X86_ECX, sreg1); } else if (tailcall_membase_not_ecx) { g_assert (sreg1 > -1); x86_mov_reg_membase (code, X86_ECX, sreg1, ins->inst_offset, 4); } /* restore callee saved registers */ for (i = 0; i < X86_NREG; ++i) if (X86_IS_CALLEE_SAVED_REG (i) && cfg->used_int_regs & (1 << i)) pos -= 4; if (cfg->used_int_regs & (1 << X86_ESI)) { x86_mov_reg_membase (code, X86_ESI, X86_EBP, pos, 4); pos += 4; } if (cfg->used_int_regs & (1 << X86_EDI)) { x86_mov_reg_membase (code, X86_EDI, X86_EBP, pos, 4); pos += 4; } if (cfg->used_int_regs & (1 << X86_EBX)) { x86_mov_reg_membase (code, X86_EBX, X86_EBP, pos, 4); pos += 4; } /* Copy arguments on the stack to our argument area */ // FIXME use rep mov for constant code size, before nonvolatiles // restored, first saving esi, edi into volatiles for (i = 0; i < call->stack_usage - call->stack_align_amount; i += 4) { x86_mov_reg_membase (code, X86_EAX, X86_ESP, i, 4); x86_mov_membase_reg (code, X86_EBP, 8 + i, X86_EAX, 4); } /* restore ESP/EBP */ x86_leave (code); if (tailcall_membase_ecx) { x86_jump_membase (code, X86_ECX, ins->inst_offset); } else if (tailcall_reg || tailcall_membase_not_ecx) { x86_jump_reg (code, X86_ECX); } else { // FIXME Patch data instead of code. code = x86_align_and_patch (cfg, code, MONO_PATCH_INFO_METHOD_JUMP, call->method); x86_jump32 (code, 0); } ins->flags |= MONO_INST_GC_CALLSITE; break; } case OP_CHECK_THIS: /* ensure ins->sreg1 is not NULL * note that cmp DWORD PTR [eax], eax is one byte shorter than * cmp DWORD PTR [eax], 0 */ x86_alu_membase_reg (code, X86_CMP, ins->sreg1, 0, ins->sreg1); break; case OP_ARGLIST: { int hreg = ins->sreg1 == X86_EAX? X86_ECX: X86_EAX; x86_push_reg (code, hreg); x86_lea_membase (code, hreg, X86_EBP, cfg->sig_cookie); x86_mov_membase_reg (code, ins->sreg1, 0, hreg, 4); x86_pop_reg (code, hreg); break; } case OP_FCALL: case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: case OP_CALL: case OP_FCALL_REG: case OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: case OP_FCALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: { CallInfo *cinfo; call = (MonoCallInst*)ins; cinfo = call->call_info; switch (ins->opcode) { case OP_FCALL: case OP_LCALL: case OP_VCALL: case OP_VCALL2: case OP_VOIDCALL: case OP_CALL: { const MonoJumpInfoTarget patch = mono_call_to_patch (call); code = emit_call (cfg, code, patch.type, patch.target); break; } case OP_FCALL_REG: case OP_LCALL_REG: case OP_VCALL_REG: case OP_VCALL2_REG: case OP_VOIDCALL_REG: case OP_CALL_REG: x86_call_reg (code, ins->sreg1); break; case OP_FCALL_MEMBASE: case OP_LCALL_MEMBASE: case OP_VCALL_MEMBASE: case OP_VCALL2_MEMBASE: case OP_VOIDCALL_MEMBASE: case OP_CALL_MEMBASE: x86_call_membase (code, ins->sreg1, ins->inst_offset); break; default: g_assert_not_reached (); break; } ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; if (cinfo->callee_stack_pop) { /* Have to compensate for the stack space popped by the callee */ x86_alu_reg_imm (code, X86_SUB, X86_ESP, cinfo->callee_stack_pop); } code = emit_move_return_value (cfg, ins, code); break; } case OP_X86_LEA: x86_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount); break; case OP_X86_LEA_MEMBASE: x86_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm); break; case OP_X86_XCHG: x86_xchg_reg_reg (code, ins->sreg1, 
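	/*
	 * The OP_CHECK_THIS size note above can be made concrete: the null check
	 * relies only on the fault raised by the memory operand, not on the compare
	 * result, so the shortest encoding wins. For the eax case the bytes are
	 *
	 *   cmp [eax], eax       -> 39 00      (2 bytes)
	 *   cmp dword [eax], 0   -> 83 38 00   (3 bytes)
	 *
	 * Other base registers encode analogously through the ModRM byte.
	 */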
ins->sreg2, 4); break; case OP_LOCALLOC: /* keep alignment */ x86_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_LOCALLOC_ALIGNMENT - 1); x86_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1)); code = mono_emit_stack_alloc (cfg, code, ins); x86_mov_reg_reg (code, ins->dreg, X86_ESP); if (cfg->param_area) x86_alu_reg_imm (code, X86_ADD, ins->dreg, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT)); break; case OP_LOCALLOC_IMM: { guint32 size = ins->inst_imm; size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1); if (ins->flags & MONO_INST_INIT) { /* FIXME: Optimize this */ x86_mov_reg_imm (code, ins->dreg, size); ins->sreg1 = ins->dreg; code = mono_emit_stack_alloc (cfg, code, ins); x86_mov_reg_reg (code, ins->dreg, X86_ESP); } else { x86_alu_reg_imm (code, X86_SUB, X86_ESP, size); x86_mov_reg_reg (code, ins->dreg, X86_ESP); } if (cfg->param_area) x86_alu_reg_imm (code, X86_ADD, ins->dreg, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT)); break; } case OP_THROW: { x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4); x86_push_reg (code, ins->sreg1); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception)); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; break; } case OP_RETHROW: { x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4); x86_push_reg (code, ins->sreg1); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception)); ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; break; } case OP_CALL_HANDLER: x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4); mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb); x86_call_imm (code, 0); for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev) mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb); x86_alu_reg_imm (code, X86_ADD, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4); break; case OP_START_HANDLER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); x86_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, X86_ESP, 4); if (cfg->param_area) x86_alu_reg_imm (code, X86_SUB, X86_ESP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT)); break; } case OP_ENDFINALLY: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); x86_mov_reg_membase (code, X86_ESP, spvar->inst_basereg, spvar->inst_offset, 4); x86_ret (code); break; } case OP_ENDFILTER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); x86_mov_reg_membase (code, X86_ESP, spvar->inst_basereg, spvar->inst_offset, 4); /* The local allocator will put the result into EAX */ x86_ret (code); break; } case OP_GET_EX_OBJ: x86_mov_reg_reg (code, ins->dreg, X86_EAX); break; case OP_LABEL: ins->inst_c0 = code - cfg->native_code; break; case OP_BR: if (ins->inst_target_bb->native_offset) { x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset); } else { mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb); if ((cfg->opt & MONO_OPT_BRANCH) && x86_is_imm8 (ins->inst_target_bb->max_offset - cpos)) x86_jump8 (code, 0); else x86_jump32 (code, 0); } break; case OP_BR_REG: x86_jump_reg (code, ins->sreg1); break; case OP_ICNEQ: case OP_ICGE: case OP_ICLE: case OP_ICGE_UN: case OP_ICLE_UN: case OP_CEQ: case 
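	/*
	 * The size rounding in the OP_LOCALLOC cases above is the usual
	 * round-up-to-alignment identity for power-of-two alignments:
	 *
	 *   aligned = (size + align - 1) & ~(align - 1)
	 *
	 * e.g. with align = 16, sizes 1..16 round to 16 and size 17 rounds to 32.
	 * The ADD/AND pair on sreg1 is exactly this identity with
	 * align = MONO_ARCH_LOCALLOC_ALIGNMENT.
	 */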
OP_CLT: case OP_CLT_UN: case OP_CGT: case OP_CGT_UN: case OP_CNE: case OP_ICEQ: case OP_ICLT: case OP_ICLT_UN: case OP_ICGT: case OP_ICGT_UN: x86_set_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, cc_signed_table [mono_opcode_to_cond (ins->opcode)]); x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); break; case OP_COND_EXC_EQ: case OP_COND_EXC_NE_UN: case OP_COND_EXC_LT: case OP_COND_EXC_LT_UN: case OP_COND_EXC_GT: case OP_COND_EXC_GT_UN: case OP_COND_EXC_GE: case OP_COND_EXC_GE_UN: case OP_COND_EXC_LE: case OP_COND_EXC_LE_UN: case OP_COND_EXC_IEQ: case OP_COND_EXC_INE_UN: case OP_COND_EXC_ILT: case OP_COND_EXC_ILT_UN: case OP_COND_EXC_IGT: case OP_COND_EXC_IGT_UN: case OP_COND_EXC_IGE: case OP_COND_EXC_IGE_UN: case OP_COND_EXC_ILE: case OP_COND_EXC_ILE_UN: EMIT_COND_SYSTEM_EXCEPTION (cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], (const char*)ins->inst_p1); break; case OP_COND_EXC_OV: case OP_COND_EXC_NO: case OP_COND_EXC_C: case OP_COND_EXC_NC: EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], (ins->opcode < OP_COND_EXC_NE_UN), (const char*)ins->inst_p1); break; case OP_COND_EXC_IOV: case OP_COND_EXC_INO: case OP_COND_EXC_IC: case OP_COND_EXC_INC: EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_IEQ], (ins->opcode < OP_COND_EXC_INE_UN), (const char*)ins->inst_p1); break; case OP_IBEQ: case OP_IBNE_UN: case OP_IBLT: case OP_IBLT_UN: case OP_IBGT: case OP_IBGT_UN: case OP_IBGE: case OP_IBGE_UN: case OP_IBLE: case OP_IBLE_UN: EMIT_COND_BRANCH (ins, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)]); break; case OP_CMOV_IEQ: case OP_CMOV_IGE: case OP_CMOV_IGT: case OP_CMOV_ILE: case OP_CMOV_ILT: case OP_CMOV_INE_UN: case OP_CMOV_IGE_UN: case OP_CMOV_IGT_UN: case OP_CMOV_ILE_UN: case OP_CMOV_ILT_UN: g_assert (ins->dreg == ins->sreg1); x86_cmov_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, ins->sreg2); break; /* floating point opcodes */ case OP_R8CONST: { double d = *(double *)ins->inst_p0; if ((d == 0.0) && (mono_signbit (d) == 0)) { x86_fldz (code); } else if (d == 1.0) { x86_fld1 (code); } else { if (cfg->compile_aot) { guint32 *val = (guint32*)&d; x86_push_imm (code, val [1]); x86_push_imm (code, val [0]); x86_fld_membase (code, X86_ESP, 0, TRUE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8); } else { mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_R8, ins->inst_p0); x86_fld (code, NULL, TRUE); } } break; } case OP_R4CONST: { float f = *(float *)ins->inst_p0; if ((f == 0.0) && (mono_signbit (f) == 0)) { x86_fldz (code); } else if (f == 1.0) { x86_fld1 (code); } else { if (cfg->compile_aot) { guint32 val = *(guint32*)&f; x86_push_imm (code, val); x86_fld_membase (code, X86_ESP, 0, FALSE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4); } else { mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_R4, ins->inst_p0); x86_fld (code, NULL, FALSE); } } break; } case OP_STORER8_MEMBASE_REG: x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, TRUE, TRUE); break; case OP_LOADR8_MEMBASE: x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE); break; case OP_STORER4_MEMBASE_REG: x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, FALSE, TRUE); break; case OP_LOADR4_MEMBASE: x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, FALSE); break; case OP_ICONV_TO_R4: x86_push_reg (code, 
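	/*
	 * OP_R8CONST/OP_R4CONST above special-case 0.0 and 1.0 because the x87 has
	 * dedicated short loads (fldz/fld1). The mono_signbit () guard matters:
	 * -0.0 == 0.0 compares true in C, but fldz would load +0.0 and silently
	 * flip the sign bit, so negative zero has to take the generic constant path.
	 */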
ins->sreg1); x86_fild_membase (code, X86_ESP, 0, FALSE); /* Change precision */ x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE); x86_fld_membase (code, X86_ESP, 0, FALSE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4); break; case OP_ICONV_TO_R8: x86_push_reg (code, ins->sreg1); x86_fild_membase (code, X86_ESP, 0, FALSE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4); break; case OP_ICONV_TO_R_UN: x86_push_imm (code, 0); x86_push_reg (code, ins->sreg1); x86_fild_membase (code, X86_ESP, 0, TRUE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8); break; case OP_X86_FP_LOAD_I8: x86_fild_membase (code, ins->inst_basereg, ins->inst_offset, TRUE); break; case OP_X86_FP_LOAD_I4: x86_fild_membase (code, ins->inst_basereg, ins->inst_offset, FALSE); break; case OP_FCONV_TO_R4: /* Change precision */ x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4); x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE); x86_fld_membase (code, X86_ESP, 0, FALSE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4); break; case OP_FCONV_TO_I1: code = emit_float_to_int (cfg, code, ins->dreg, 1, TRUE); break; case OP_FCONV_TO_U1: code = emit_float_to_int (cfg, code, ins->dreg, 1, FALSE); break; case OP_FCONV_TO_I2: code = emit_float_to_int (cfg, code, ins->dreg, 2, TRUE); break; case OP_FCONV_TO_U2: code = emit_float_to_int (cfg, code, ins->dreg, 2, FALSE); break; case OP_FCONV_TO_I4: code = emit_float_to_int (cfg, code, ins->dreg, 4, TRUE); break; case OP_FCONV_TO_I8: x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4); x86_fnstcw_membase(code, X86_ESP, 0); x86_mov_reg_membase (code, ins->dreg, X86_ESP, 0, 2); x86_alu_reg_imm (code, X86_OR, ins->dreg, 0xc00); x86_mov_membase_reg (code, X86_ESP, 2, ins->dreg, 2); x86_fldcw_membase (code, X86_ESP, 2); x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8); x86_fist_pop_membase (code, X86_ESP, 0, TRUE); x86_pop_reg (code, ins->dreg); x86_pop_reg (code, ins->backend.reg3); x86_fldcw_membase (code, X86_ESP, 0); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4); break; case OP_LCONV_TO_R8_2: x86_push_reg (code, ins->sreg2); x86_push_reg (code, ins->sreg1); x86_fild_membase (code, X86_ESP, 0, TRUE); /* Change precision */ x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE); x86_fld_membase (code, X86_ESP, 0, TRUE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8); break; case OP_LCONV_TO_R4_2: x86_push_reg (code, ins->sreg2); x86_push_reg (code, ins->sreg1); x86_fild_membase (code, X86_ESP, 0, TRUE); /* Change precision */ x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE); x86_fld_membase (code, X86_ESP, 0, FALSE); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8); break; case OP_LCONV_TO_R_UN_2: { static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 }; guint8 *br; /* load 64bit integer to FP stack */ x86_push_reg (code, ins->sreg2); x86_push_reg (code, ins->sreg1); x86_fild_membase (code, X86_ESP, 0, TRUE); /* test if lreg is negative */ x86_test_reg_reg (code, ins->sreg2, ins->sreg2); br = code; x86_branch8 (code, X86_CC_GEZ, 0, TRUE); /* add correction constant mn */ if (cfg->compile_aot) { x86_push_imm (code, (((guint32)mn [9]) << 24) | ((guint32)mn [8] << 16) | ((guint32)mn [7] << 8) | ((guint32)mn [6])); x86_push_imm (code, (((guint32)mn [5]) << 24) | ((guint32)mn [4] << 16) | ((guint32)mn [3] << 8) | ((guint32)mn [2])); x86_push_imm (code, (((guint32)mn [1]) << 24) | ((guint32)mn [0] << 16)); x86_fld80_membase (code, X86_ESP, 2); x86_alu_reg_imm (code, X86_ADD, X86_ESP, 12); } else { x86_fld80_mem (code, (gsize)&mn); } x86_fp_op_reg (code, X86_FADD, 1, TRUE); x86_patch (br, code); /* Change precision */ 
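	/*
	 * The ten-byte constant mn used above is the 80-bit extended-precision
	 * value 2^64 (exponent word 0x403f = 16383 + 64, significand
	 * 0x8000000000000000). fild interprets the 64-bit source as signed, so for
	 * inputs with the sign bit set the loaded value is off by exactly 2^64:
	 *
	 *   (double)(guint64)x == (double)(gint64)x + 18446744073709551616.0
	 *
	 * which is why the branch adds mn only when sreg2 tests negative.
	 */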
		x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
		x86_fld_membase (code, X86_ESP, 0, TRUE);
		x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
		break;
	}
	case OP_LCONV_TO_OVF_I:
	case OP_LCONV_TO_OVF_I4_2: {
		guint8 *br [3], *label [1];
		MonoInst *tins;

		/*
		 * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
		 */
		x86_test_reg_reg (code, ins->sreg1, ins->sreg1);

		/* If the low word top bit is set, see if we are negative */
		br [0] = code;
		x86_branch8 (code, X86_CC_LT, 0, TRUE);
		/* We are not negative (no top bit set), check that our top word is zero */
		x86_test_reg_reg (code, ins->sreg2, ins->sreg2);
		br [1] = code;
		x86_branch8 (code, X86_CC_EQ, 0, TRUE);
		label [0] = code;

		/* throw exception */
		tins = mono_branch_optimize_exception_target (cfg, bb, "OverflowException");
		if (tins) {
			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, tins->inst_true_bb);
			if ((cfg->opt & MONO_OPT_BRANCH) && x86_is_imm8 (tins->inst_true_bb->max_offset - cpos))
				x86_jump8 (code, 0);
			else
				x86_jump32 (code, 0);
		} else {
			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
			x86_jump32 (code, 0);
		}

		x86_patch (br [0], code);
		/* our top bit is set, check that top word is 0xffffffff */
		x86_alu_reg_imm (code, X86_CMP, ins->sreg2, 0xffffffff);

		x86_patch (br [1], code);

		/* nope, emit exception */
		br [2] = code;
		x86_branch8 (code, X86_CC_NE, 0, TRUE);
		x86_patch (br [2], label [0]);

		x86_mov_reg_reg (code, ins->dreg, ins->sreg1);
		break;
	}
	case OP_FMOVE:
		/* Not needed on the fp stack */
		break;
	case OP_MOVE_F_TO_I4:
		x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE, TRUE);
		x86_mov_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, 4);
		break;
	case OP_MOVE_I4_TO_F:
		x86_mov_membase_reg (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1, 4);
		x86_fld_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE);
		break;
	case OP_FADD:
		x86_fp_op_reg (code, X86_FADD, 1, TRUE);
		break;
	case OP_FSUB:
		x86_fp_op_reg (code, X86_FSUB, 1, TRUE);
		break;
	case OP_FMUL:
		x86_fp_op_reg (code, X86_FMUL, 1, TRUE);
		break;
	case OP_FDIV:
		x86_fp_op_reg (code, X86_FDIV, 1, TRUE);
		break;
	case OP_FNEG:
		x86_fchs (code);
		break;
	case OP_ABS:
		x86_fabs (code);
		break;
	case OP_TAN: {
		/*
		 * it really doesn't make sense to inline all this code,
		 * it's here just to show that things may not be as simple
		 * as they appear.
		 */
		guchar *check_pos, *end_tan, *pop_jump;
		x86_push_reg (code, X86_EAX);
		x86_fptan (code);
		x86_fnstsw (code);
		x86_test_reg_imm (code, X86_EAX, X86_FP_C2);
		check_pos = code;
		x86_branch8 (code, X86_CC_NE, 0, FALSE);
		x86_fstp (code, 0); /* pop the 1.0 */
		end_tan = code;
		x86_jump8 (code, 0);
		x86_fldpi (code);
		x86_fp_op (code, X86_FADD, 0);
		x86_fxch (code, 1);
		x86_fprem1 (code);
		x86_fstsw (code);
		x86_test_reg_imm (code, X86_EAX, X86_FP_C2);
		pop_jump = code;
		x86_branch8 (code, X86_CC_NE, 0, FALSE);
		x86_fstp (code, 1);
		x86_fptan (code);
		x86_patch (pop_jump, code);
		x86_fstp (code, 0); /* pop the 1.0 */
		x86_patch (check_pos, code);
		x86_patch (end_tan, code);
		x86_fldz (code);
		x86_fp_op_reg (code, X86_FADD, 1, TRUE);
		x86_pop_reg (code, X86_EAX);
		break;
	}
	case OP_ATAN:
		x86_fld1 (code);
		x86_fpatan (code);
		x86_fldz (code);
		x86_fp_op_reg (code, X86_FADD, 1, TRUE);
		break;
	case OP_SQRT:
		x86_fsqrt (code);
		break;
	case OP_ROUND:
		x86_frndint (code);
		break;
	case OP_IMIN:
		g_assert (cfg->opt & MONO_OPT_CMOV);
		g_assert (ins->dreg == ins->sreg1);
		x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
		x86_cmov_reg (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2);
		break;
	case OP_IMIN_UN:
		g_assert (cfg->opt & MONO_OPT_CMOV);
		g_assert (ins->dreg == ins->sreg1);
		x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
		x86_cmov_reg (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2);
		break;
	case OP_IMAX:
		g_assert (cfg->opt & MONO_OPT_CMOV);
		g_assert (ins->dreg == ins->sreg1);
		x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
		x86_cmov_reg (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2);
		break;
	case OP_IMAX_UN:
		g_assert (cfg->opt & MONO_OPT_CMOV);
		g_assert (ins->dreg == ins->sreg1);
		x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
		x86_cmov_reg (code, X86_CC_LT, FALSE, ins->dreg, ins->sreg2);
		break;
	case OP_X86_FPOP:
		x86_fstp (code, 0);
		break;
	case OP_X86_FXCH:
		x86_fxch (code, ins->inst_imm);
		break;
	case OP_FREM: {
		guint8 *l1, *l2;

		x86_push_reg (code, X86_EAX);
		/* we need to exchange ST(0) with ST(1) */
		x86_fxch (code, 1);

		/* this requires a loop, because fprem sometimes
		 * returns a partial remainder */
		l1 = code;
		/* looks like MS is using fprem instead of the IEEE compatible fprem1 */
		/* x86_fprem1 (code); */
		x86_fprem (code);
		x86_fnstsw (code);
		x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_C2);
		l2 = code;
		x86_branch8 (code, X86_CC_NE, 0, FALSE);
		x86_patch (l2, l1);

		/* pop result */
		x86_fstp (code, 1);

		x86_pop_reg (code, X86_EAX);
		break;
	}
	case OP_FCOMPARE:
		if (cfg->opt & MONO_OPT_FCMOV) {
			x86_fcomip (code, 1);
			x86_fstp (code, 0);
			break;
		}
		/* this overwrites EAX */
		EMIT_FPCOMPARE(code);
		x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
		break;
	case OP_FCEQ:
	case OP_FCNEQ:
		if (cfg->opt & MONO_OPT_FCMOV) {
			/* zeroing the register at the start results in
			 * shorter and faster code (we can also remove the widening op)
			 */
			guchar *unordered_check;
			x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
			x86_fcomip (code, 1);
			x86_fstp (code, 0);
			unordered_check = code;
			x86_branch8 (code, X86_CC_P, 0, FALSE);
			if (ins->opcode == OP_FCEQ) {
				x86_set_reg (code, X86_CC_EQ, ins->dreg, FALSE);
				x86_patch (unordered_check, code);
			} else {
				guchar *jump_to_end;
				x86_set_reg (code, X86_CC_NE, ins->dreg, FALSE);
				jump_to_end = code;
				x86_jump8 (code, 0);
				x86_patch (unordered_check, code);
				x86_inc_reg (code, ins->dreg);
				x86_patch (jump_to_end, code);
			}
			break;
		}
		if (ins->dreg != X86_EAX)
			x86_push_reg (code, X86_EAX);

		EMIT_FPCOMPARE(code);
		x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
		x86_alu_reg_imm (code, X86_CMP,
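	/*
	 * Why OP_FREM above loops: fprem reduces the exponent difference by at
	 * most 63 bits per iteration and sets the C2 status flag when only a
	 * partial remainder was produced. The emitted loop is therefore, in effect,
	 *
	 *   do { fprem; fnstsw ax; } while (ax & X86_FP_C2);
	 *
	 * (pseudo-assembly; the real code branches back through x86_patch).
	 */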
X86_EAX, 0x4000); x86_set_reg (code, ins->opcode == OP_FCEQ ? X86_CC_EQ : X86_CC_NE, ins->dreg, TRUE); x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); if (ins->dreg != X86_EAX) x86_pop_reg (code, X86_EAX); break; case OP_FCLT: case OP_FCLT_UN: if (cfg->opt & MONO_OPT_FCMOV) { /* zeroing the register at the start results in * shorter and faster code (we can also remove the widening op) */ x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); x86_fcomip (code, 1); x86_fstp (code, 0); if (ins->opcode == OP_FCLT_UN) { guchar *unordered_check = code; guchar *jump_to_end; x86_branch8 (code, X86_CC_P, 0, FALSE); x86_set_reg (code, X86_CC_GT, ins->dreg, FALSE); jump_to_end = code; x86_jump8 (code, 0); x86_patch (unordered_check, code); x86_inc_reg (code, ins->dreg); x86_patch (jump_to_end, code); } else { x86_set_reg (code, X86_CC_GT, ins->dreg, FALSE); } break; } if (ins->dreg != X86_EAX) x86_push_reg (code, X86_EAX); EMIT_FPCOMPARE(code); x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK); if (ins->opcode == OP_FCLT_UN) { guchar *is_not_zero_check, *end_jump; is_not_zero_check = code; x86_branch8 (code, X86_CC_NZ, 0, TRUE); end_jump = code; x86_jump8 (code, 0); x86_patch (is_not_zero_check, code); x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK); x86_patch (end_jump, code); } x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE); x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); if (ins->dreg != X86_EAX) x86_pop_reg (code, X86_EAX); break; case OP_FCLE: { guchar *unordered_check; guchar *jump_to_end; if (cfg->opt & MONO_OPT_FCMOV) { /* zeroing the register at the start results in * shorter and faster code (we can also remove the widening op) */ x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); x86_fcomip (code, 1); x86_fstp (code, 0); unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); x86_set_reg (code, X86_CC_NB, ins->dreg, FALSE); x86_patch (unordered_check, code); break; } if (ins->dreg != X86_EAX) x86_push_reg (code, X86_EAX); EMIT_FPCOMPARE(code); x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK); x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4500); unordered_check = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE); x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0); x86_set_reg (code, X86_CC_NE, ins->dreg, TRUE); x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); jump_to_end = code; x86_jump8 (code, 0); x86_patch (unordered_check, code); x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); x86_patch (jump_to_end, code); if (ins->dreg != X86_EAX) x86_pop_reg (code, X86_EAX); break; } case OP_FCGT: case OP_FCGT_UN: if (cfg->opt & MONO_OPT_FCMOV) { /* zeroing the register at the start results in * shorter and faster code (we can also remove the widening op) */ guchar *unordered_check; x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); x86_fcomip (code, 1); x86_fstp (code, 0); if (ins->opcode == OP_FCGT) { unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); x86_set_reg (code, X86_CC_LT, ins->dreg, FALSE); x86_patch (unordered_check, code); } else { x86_set_reg (code, X86_CC_LT, ins->dreg, FALSE); } break; } if (ins->dreg != X86_EAX) x86_push_reg (code, X86_EAX); EMIT_FPCOMPARE(code); x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK); x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0); if (ins->opcode == OP_FCGT_UN) { guchar *is_not_zero_check, *end_jump; is_not_zero_check = code; x86_branch8 (code, X86_CC_NZ, 0, TRUE); end_jump = code; x86_jump8 (code, 0); x86_patch (is_not_zero_check, code); x86_alu_reg_imm (code, 
X86_CMP, X86_EAX, X86_FP_CC_MASK); x86_patch (end_jump, code); } x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE); x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); if (ins->dreg != X86_EAX) x86_pop_reg (code, X86_EAX); break; case OP_FCGE: { guchar *unordered_check; guchar *jump_to_end; if (cfg->opt & MONO_OPT_FCMOV) { /* zeroing the register at the start results in * shorter and faster code (we can also remove the widening op) */ x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); x86_fcomip (code, 1); x86_fstp (code, 0); unordered_check = code; x86_branch8 (code, X86_CC_P, 0, FALSE); x86_set_reg (code, X86_CC_NA, ins->dreg, FALSE); x86_patch (unordered_check, code); break; } if (ins->dreg != X86_EAX) x86_push_reg (code, X86_EAX); EMIT_FPCOMPARE(code); x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK); x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4500); unordered_check = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE); x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0); x86_set_reg (code, X86_CC_GE, ins->dreg, TRUE); x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); jump_to_end = code; x86_jump8 (code, 0); x86_patch (unordered_check, code); x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg); x86_patch (jump_to_end, code); if (ins->dreg != X86_EAX) x86_pop_reg (code, X86_EAX); break; } case OP_FBEQ: if (cfg->opt & MONO_OPT_FCMOV) { guchar *jump = code; x86_branch8 (code, X86_CC_P, 0, TRUE); EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE); x86_patch (jump, code); break; } x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4000); EMIT_COND_BRANCH (ins, X86_CC_EQ, TRUE); break; case OP_FBNE_UN: /* Branch if C013 != 100 */ if (cfg->opt & MONO_OPT_FCMOV) { /* branch if !ZF or (PF|CF) */ EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE); EMIT_COND_BRANCH (ins, X86_CC_P, FALSE); EMIT_COND_BRANCH (ins, X86_CC_B, FALSE); break; } x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C3); EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE); break; case OP_FBLT: if (cfg->opt & MONO_OPT_FCMOV) { EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE); break; } EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE); break; case OP_FBLT_UN: if (cfg->opt & MONO_OPT_FCMOV) { EMIT_COND_BRANCH (ins, X86_CC_P, FALSE); EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE); break; } if (ins->opcode == OP_FBLT_UN) { guchar *is_not_zero_check, *end_jump; is_not_zero_check = code; x86_branch8 (code, X86_CC_NZ, 0, TRUE); end_jump = code; x86_jump8 (code, 0); x86_patch (is_not_zero_check, code); x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK); x86_patch (end_jump, code); } EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE); break; case OP_FBGT: case OP_FBGT_UN: if (cfg->opt & MONO_OPT_FCMOV) { if (ins->opcode == OP_FBGT) { guchar *br1; /* skip branch if C1=1 */ br1 = code; x86_branch8 (code, X86_CC_P, 0, FALSE); /* branch if (C0 | C3) = 1 */ EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE); x86_patch (br1, code); } else { EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE); } break; } x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0); if (ins->opcode == OP_FBGT_UN) { guchar *is_not_zero_check, *end_jump; is_not_zero_check = code; x86_branch8 (code, X86_CC_NZ, 0, TRUE); end_jump = code; x86_jump8 (code, 0); x86_patch (is_not_zero_check, code); x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK); x86_patch (end_jump, code); } EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE); break; case OP_FBGE: /* Branch if C013 == 100 or 001 */ if (cfg->opt & MONO_OPT_FCMOV) { guchar *br1; /* skip branch if C1=1 */ br1 = code; x86_branch8 (code, X86_CC_P, 0, FALSE); /* branch if (C0 | C3) = 1 */ 
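	/*
	 * Flag mapping used by the floating-point compares and branches here:
	 * after fcomip the x87 condition codes C3/C2/C0 land in ZF/PF/CF, and an
	 * unordered compare (NaN) sets all three, so PF doubles as the
	 * "unordered" test:
	 *
	 *   st0 >  src  ->  ZF=0 PF=0 CF=0
	 *   st0 <  src  ->  ZF=0 PF=0 CF=1
	 *   st0 == src  ->  ZF=1 PF=0 CF=0
	 *   unordered   ->  ZF=1 PF=1 CF=1
	 *
	 * The operand order is reversed relative to the IR (sreg2 ends up in st0),
	 * which is why e.g. OP_FBLT branches on X86_CC_GT.
	 */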
			EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
			x86_patch (br1, code);
			break;
		}
		x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
		EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
		x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C3);
		EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
		break;
	case OP_FBGE_UN:
		/* Branch if C013 == 000 */
		if (cfg->opt & MONO_OPT_FCMOV) {
			EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
			break;
		}
		EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
		break;
	case OP_FBLE:
		/* Branch if C013=000 or 100 */
		if (cfg->opt & MONO_OPT_FCMOV) {
			guchar *br1;

			/* skip branch if C1=1 */
			br1 = code;
			x86_branch8 (code, X86_CC_P, 0, FALSE);
			/* branch if C0=0 */
			EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
			x86_patch (br1, code);
			break;
		}
		x86_alu_reg_imm (code, X86_AND, X86_EAX, (X86_FP_C0|X86_FP_C1));
		x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0);
		EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
		break;
	case OP_FBLE_UN:
		/* Branch if C013 != 001 */
		if (cfg->opt & MONO_OPT_FCMOV) {
			EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
			EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
			break;
		}
		x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
		EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
		break;
	case OP_CKFINITE: {
		guchar *br1;
		x86_push_reg (code, X86_EAX);
		x86_fxam (code);
		x86_fnstsw (code);
		x86_alu_reg_imm (code, X86_AND, X86_EAX, 0x4100);
		x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
		x86_pop_reg (code, X86_EAX);

		/* Have to clean up the fp stack before throwing the exception */
		br1 = code;
		x86_branch8 (code, X86_CC_NE, 0, FALSE);

		x86_fstp (code, 0);
		EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");

		x86_patch (br1, code);
		break;
	}
	case OP_TLS_GET: {
		code = mono_x86_emit_tls_get (code, ins->dreg, ins->inst_offset);
		break;
	}
	case OP_TLS_SET: {
		code = mono_x86_emit_tls_set (code, ins->sreg1, ins->inst_offset);
		break;
	}
	case OP_MEMORY_BARRIER: {
		if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) {
			x86_prefix (code, X86_LOCK_PREFIX);
			x86_alu_membase_imm (code, X86_ADD, X86_ESP, 0, 0);
		}
		break;
	}
	case OP_ATOMIC_ADD_I4: {
		int dreg = ins->dreg;

		g_assert (cfg->has_atomic_add_i4);

		/* hack: limit in regalloc, dreg != sreg1 && dreg != sreg2 */
		if (ins->sreg2 == dreg) {
			if (dreg == X86_EBX) {
				dreg = X86_EDI;
				if (ins->inst_basereg == X86_EDI)
					dreg = X86_ESI;
			} else {
				dreg = X86_EBX;
				if (ins->inst_basereg == X86_EBX)
					dreg = X86_EDI;
			}
		} else if (ins->inst_basereg == dreg) {
			if (dreg == X86_EBX) {
				dreg = X86_EDI;
				if (ins->sreg2 == X86_EDI)
					dreg = X86_ESI;
			} else {
				dreg = X86_EBX;
				if (ins->sreg2 == X86_EBX)
					dreg = X86_EDI;
			}
		}

		if (dreg != ins->dreg) {
			x86_push_reg (code, dreg);
		}

		x86_mov_reg_reg (code, dreg, ins->sreg2);
		x86_prefix (code, X86_LOCK_PREFIX);
		x86_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, 4);
		/* dreg contains the old value, add with sreg2 value */
		x86_alu_reg_reg (code, X86_ADD, dreg, ins->sreg2);

		if (ins->dreg != dreg) {
			x86_mov_reg_reg (code, ins->dreg, dreg);
			x86_pop_reg (code, dreg);
		}
		break;
	}
	case OP_ATOMIC_EXCHANGE_I4: {
		guchar *br[2];
		int sreg2 = ins->sreg2;
		int breg = ins->inst_basereg;

		g_assert (cfg->has_atomic_exchange_i4);

		/* cmpxchg uses eax as comparand, need to make sure we can use it
		 * hack to overcome limits in x86 reg allocator
		 * (req: dreg == eax and sreg2 != eax and breg != eax)
		 */
		g_assert (ins->dreg == X86_EAX);

		/* We need the EAX reg for the cmpxchg */
		if (ins->sreg2 == X86_EAX) {
			sreg2 = (breg == X86_EDX) ? X86_EBX : X86_EDX;
			x86_push_reg (code, sreg2);
			x86_mov_reg_reg (code, sreg2, X86_EAX);
		}

		if (breg == X86_EAX) {
			breg = (sreg2 == X86_ESI) ?
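	/*
	 * Atomic notes, for reference: lock xadd in OP_ATOMIC_ADD_I4 above leaves
	 * the *old* value in dreg, hence the extra ADD to return old + sreg2 as
	 * the IR requires. OP_ATOMIC_EXCHANGE_I4 is built from a compare-and-swap
	 * retry loop, roughly:
	 *
	 *   retry: mov eax, [breg + offset]
	 *          lock cmpxchg [breg + offset], sreg2   ; succeeds iff still == eax
	 *          jne retry
	 *
	 * so on exit EAX holds the previous memory value, matching the
	 * dreg == EAX constraint asserted above.
	 */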
X86_EDI : X86_ESI; x86_push_reg (code, breg); x86_mov_reg_reg (code, breg, X86_EAX); } x86_mov_reg_membase (code, X86_EAX, breg, ins->inst_offset, 4); br [0] = code; x86_prefix (code, X86_LOCK_PREFIX); x86_cmpxchg_membase_reg (code, breg, ins->inst_offset, sreg2); br [1] = code; x86_branch8 (code, X86_CC_NE, -1, FALSE); x86_patch (br [1], br [0]); if (breg != ins->inst_basereg) x86_pop_reg (code, breg); if (ins->sreg2 != sreg2) x86_pop_reg (code, sreg2); break; } case OP_ATOMIC_CAS_I4: { g_assert (ins->dreg == X86_EAX); g_assert (ins->sreg3 == X86_EAX); g_assert (ins->sreg1 != X86_EAX); g_assert (ins->sreg1 != ins->sreg2); x86_prefix (code, X86_LOCK_PREFIX); x86_cmpxchg_membase_reg (code, ins->sreg1, ins->inst_offset, ins->sreg2); break; } case OP_ATOMIC_LOAD_I1: { x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE); break; } case OP_ATOMIC_LOAD_U1: { x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE); break; } case OP_ATOMIC_LOAD_I2: { x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE); break; } case OP_ATOMIC_LOAD_U2: { x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE); break; } case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_LOAD_U4: { x86_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4); break; } case OP_ATOMIC_LOAD_R4: case OP_ATOMIC_LOAD_R8: { x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, ins->opcode == OP_ATOMIC_LOAD_R8); break; } case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_U1: case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_U2: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U4: { int size; switch (ins->opcode) { case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_U1: size = 1; break; case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_U2: size = 2; break; case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U4: size = 4; break; } x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, size); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) x86_mfence (code); break; } case OP_ATOMIC_STORE_R4: case OP_ATOMIC_STORE_R8: { x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, ins->opcode == OP_ATOMIC_STORE_R8, TRUE); if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) x86_mfence (code); break; } case OP_CARD_TABLE_WBARRIER: { int ptr = ins->sreg1; int value = ins->sreg2; guchar *br = NULL; int nursery_shift, card_table_shift; gpointer card_table_mask; size_t nursery_size; gulong card_table = (gsize)mono_gc_get_card_table (&card_table_shift, &card_table_mask); gulong nursery_start = (gsize)mono_gc_get_nursery (&nursery_shift, &nursery_size); gboolean card_table_nursery_check = mono_gc_card_table_nursery_check (); /* * We need one register we can clobber, we choose EDX and make sreg1 * fixed EAX to work around limitations in the local register allocator. * sreg2 might get allocated to EDX, but that is not a problem since * we use it before clobbering EDX. 
*/ g_assert (ins->sreg1 == X86_EAX); /* * This is the code we produce: * * edx = value * edx >>= nursery_shift * cmp edx, (nursery_start >> nursery_shift) * jne done * edx = ptr * edx >>= card_table_shift * card_table[edx] = 1 * done: */ if (card_table_nursery_check) { if (value != X86_EDX) x86_mov_reg_reg (code, X86_EDX, value); x86_shift_reg_imm (code, X86_SHR, X86_EDX, nursery_shift); x86_alu_reg_imm (code, X86_CMP, X86_EDX, nursery_start >> nursery_shift); br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE); } x86_mov_reg_reg (code, X86_EDX, ptr); x86_shift_reg_imm (code, X86_SHR, X86_EDX, card_table_shift); if (card_table_mask) x86_alu_reg_imm (code, X86_AND, X86_EDX, (gsize)card_table_mask); x86_mov_membase_imm (code, X86_EDX, card_table, 1, 1); if (card_table_nursery_check) x86_patch (br, code); break; } #ifdef MONO_ARCH_SIMD_INTRINSICS case OP_ADDPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_ADD, ins->sreg1, ins->sreg2); break; case OP_DIVPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_DIV, ins->sreg1, ins->sreg2); break; case OP_MULPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_MUL, ins->sreg1, ins->sreg2); break; case OP_SUBPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_SUB, ins->sreg1, ins->sreg2); break; case OP_MAXPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_MAX, ins->sreg1, ins->sreg2); break; case OP_MINPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_MIN, ins->sreg1, ins->sreg2); break; case OP_COMPPS: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7); x86_sse_alu_ps_reg_reg_imm (code, X86_SSE_COMP, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ANDPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_AND, ins->sreg1, ins->sreg2); break; case OP_ANDNPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_ANDN, ins->sreg1, ins->sreg2); break; case OP_ORPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_OR, ins->sreg1, ins->sreg2); break; case OP_XORPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_XOR, ins->sreg1, ins->sreg2); break; case OP_SQRTPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_SQRT, ins->dreg, ins->sreg1); break; case OP_RSQRTPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_RSQRT, ins->dreg, ins->sreg1); break; case OP_RCPPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_RCP, ins->dreg, ins->sreg1); break; case OP_ADDSUBPS: x86_sse_alu_sd_reg_reg (code, X86_SSE_ADDSUB, ins->sreg1, ins->sreg2); break; case OP_HADDPS: x86_sse_alu_sd_reg_reg (code, X86_SSE_HADD, ins->sreg1, ins->sreg2); break; case OP_HSUBPS: x86_sse_alu_sd_reg_reg (code, X86_SSE_HSUB, ins->sreg1, ins->sreg2); break; case OP_DUPPS_HIGH: x86_sse_alu_ss_reg_reg (code, X86_SSE_MOVSHDUP, ins->dreg, ins->sreg1); break; case OP_DUPPS_LOW: x86_sse_alu_ss_reg_reg (code, X86_SSE_MOVSLDUP, ins->dreg, ins->sreg1); break; case OP_PSHUFLEW_HIGH: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); x86_pshufw_reg_reg (code, ins->dreg, ins->sreg1, ins->inst_c0, 1); break; case OP_PSHUFLEW_LOW: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); x86_pshufw_reg_reg (code, ins->dreg, ins->sreg1, ins->inst_c0, 0); break; case OP_PSHUFLED: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_SHUFPS: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF); x86_sse_alu_reg_reg_imm8 (code, X86_SSE_SHUFP, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_SHUFPD: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0x3); x86_sse_alu_pd_reg_reg_imm8 (code, X86_SSE_SHUFP, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ADDPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_ADD, ins->sreg1, ins->sreg2); break; case 
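	/*
	 * Card-marking arithmetic in OP_CARD_TABLE_WBARRIER above, for reference:
	 * with 512-byte cards (card_table_shift == 9, the usual SGen
	 * configuration) the store dirties
	 *
	 *   card_table [(guint32)ptr >> 9] = 1;
	 *
	 * and the nursery filter compares value >> nursery_shift against
	 * nursery_start >> nursery_shift, i.e. one aligned-block equality test
	 * instead of a two-sided range check.
	 */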
OP_DIVPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_DIV, ins->sreg1, ins->sreg2); break; case OP_MULPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_MUL, ins->sreg1, ins->sreg2); break; case OP_SUBPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_SUB, ins->sreg1, ins->sreg2); break; case OP_MAXPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_MAX, ins->sreg1, ins->sreg2); break; case OP_MINPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_MIN, ins->sreg1, ins->sreg2); break; case OP_COMPPD: g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7); x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_COMP, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_ANDPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_AND, ins->sreg1, ins->sreg2); break; case OP_ANDNPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_ANDN, ins->sreg1, ins->sreg2); break; case OP_ORPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_OR, ins->sreg1, ins->sreg2); break; case OP_XORPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_XOR, ins->sreg1, ins->sreg2); break; case OP_SQRTPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_SQRT, ins->dreg, ins->sreg1); break; case OP_ADDSUBPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_ADDSUB, ins->sreg1, ins->sreg2); break; case OP_HADDPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_HADD, ins->sreg1, ins->sreg2); break; case OP_HSUBPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_HSUB, ins->sreg1, ins->sreg2); break; case OP_DUPPD: x86_sse_alu_sd_reg_reg (code, X86_SSE_MOVDDUP, ins->dreg, ins->sreg1); break; case OP_EXTRACT_MASK: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMOVMSKB, ins->dreg, ins->sreg1); break; case OP_PAND: x86_sse_alu_pd_reg_reg (code, X86_SSE_PAND, ins->sreg1, ins->sreg2); break; case OP_POR: x86_sse_alu_pd_reg_reg (code, X86_SSE_POR, ins->sreg1, ins->sreg2); break; case OP_PXOR: x86_sse_alu_pd_reg_reg (code, X86_SSE_PXOR, ins->sreg1, ins->sreg2); break; case OP_PADDB: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDB, ins->sreg1, ins->sreg2); break; case OP_PADDW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDW, ins->sreg1, ins->sreg2); break; case OP_PADDD: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDD, ins->sreg1, ins->sreg2); break; case OP_PADDQ: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDQ, ins->sreg1, ins->sreg2); break; case OP_PSUBB: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBB, ins->sreg1, ins->sreg2); break; case OP_PSUBW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBW, ins->sreg1, ins->sreg2); break; case OP_PSUBD: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBD, ins->sreg1, ins->sreg2); break; case OP_PSUBQ: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBQ, ins->sreg1, ins->sreg2); break; case OP_PMAXB_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMAXUB, ins->sreg1, ins->sreg2); break; case OP_PMAXW_UN: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXUW, ins->sreg1, ins->sreg2); break; case OP_PMAXD_UN: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXUD, ins->sreg1, ins->sreg2); break; case OP_PMAXB: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXSB, ins->sreg1, ins->sreg2); break; case OP_PMAXW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMAXSW, ins->sreg1, ins->sreg2); break; case OP_PMAXD: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXSD, ins->sreg1, ins->sreg2); break; case OP_PAVGB_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PAVGB, ins->sreg1, ins->sreg2); break; case OP_PAVGW_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PAVGW, ins->sreg1, ins->sreg2); break; case OP_PMINB_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMINUB, ins->sreg1, ins->sreg2); break; case OP_PMINW_UN: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINUW, ins->sreg1, ins->sreg2); break; case OP_PMIND_UN: x86_sse_alu_sse41_reg_reg (code, 
X86_SSE_PMINUD, ins->sreg1, ins->sreg2); break; case OP_PMINB: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINSB, ins->sreg1, ins->sreg2); break; case OP_PMINW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMINSW, ins->sreg1, ins->sreg2); break; case OP_PMIND: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINSD, ins->sreg1, ins->sreg2); break; case OP_PCMPEQB: x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQB, ins->sreg1, ins->sreg2); break; case OP_PCMPEQW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQW, ins->sreg1, ins->sreg2); break; case OP_PCMPEQD: x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQD, ins->sreg1, ins->sreg2); break; case OP_PCMPEQQ: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PCMPEQQ, ins->sreg1, ins->sreg2); break; case OP_PCMPGTB: x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTB, ins->sreg1, ins->sreg2); break; case OP_PCMPGTW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTW, ins->sreg1, ins->sreg2); break; case OP_PCMPGTD: x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTD, ins->sreg1, ins->sreg2); break; case OP_PCMPGTQ: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PCMPGTQ, ins->sreg1, ins->sreg2); break; case OP_PSUM_ABS_DIFF: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSADBW, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWB: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLBW, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLWD, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWD: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLDQ, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWQ: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLQDQ, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_UNPCKL, ins->sreg1, ins->sreg2); break; case OP_UNPACK_LOWPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_UNPCKL, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHB: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHBW, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHWD, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHD: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHDQ, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHQ: x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHQDQ, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHPS: x86_sse_alu_ps_reg_reg (code, X86_SSE_UNPCKH, ins->sreg1, ins->sreg2); break; case OP_UNPACK_HIGHPD: x86_sse_alu_pd_reg_reg (code, X86_SSE_UNPCKH, ins->sreg1, ins->sreg2); break; case OP_PACKW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKSSWB, ins->sreg1, ins->sreg2); break; case OP_PACKD: x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKSSDW, ins->sreg1, ins->sreg2); break; case OP_PACKW_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKUSWB, ins->sreg1, ins->sreg2); break; case OP_PACKD_UN: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PACKUSDW, ins->sreg1, ins->sreg2); break; case OP_PADDB_SAT_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDUSB, ins->sreg1, ins->sreg2); break; case OP_PSUBB_SAT_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBUSB, ins->sreg1, ins->sreg2); break; case OP_PADDW_SAT_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDUSW, ins->sreg1, ins->sreg2); break; case OP_PSUBW_SAT_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBUSW, ins->sreg1, ins->sreg2); break; case OP_PADDB_SAT: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDSB, ins->sreg1, ins->sreg2); break; case OP_PSUBB_SAT: x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBSB, ins->sreg1, ins->sreg2); break; case OP_PADDW_SAT: x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDSW, ins->sreg1, ins->sreg2); break; case OP_PSUBW_SAT: 
x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBSW, ins->sreg1, ins->sreg2); break; case OP_PMULW: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULLW, ins->sreg1, ins->sreg2); break; case OP_PMULD: x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMULLD, ins->sreg1, ins->sreg2); break; case OP_PMULQ: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULUDQ, ins->sreg1, ins->sreg2); break; case OP_PMULW_HIGH_UN: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULHUW, ins->sreg1, ins->sreg2); break; case OP_PMULW_HIGH: x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULHW, ins->sreg1, ins->sreg2); break; case OP_PSHRW: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SHR, ins->dreg, ins->inst_imm); break; case OP_PSHRW_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSRLW_REG, ins->dreg, ins->sreg2); break; case OP_PSARW: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SAR, ins->dreg, ins->inst_imm); break; case OP_PSARW_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSRAW_REG, ins->dreg, ins->sreg2); break; case OP_PSHLW: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SHL, ins->dreg, ins->inst_imm); break; case OP_PSHLW_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSLLW_REG, ins->dreg, ins->sreg2); break; case OP_PSHRD: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SHR, ins->dreg, ins->inst_imm); break; case OP_PSHRD_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSRLD_REG, ins->dreg, ins->sreg2); break; case OP_PSARD: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SAR, ins->dreg, ins->inst_imm); break; case OP_PSARD_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSRAD_REG, ins->dreg, ins->sreg2); break; case OP_PSHLD: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SHL, ins->dreg, ins->inst_imm); break; case OP_PSHLD_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSLLD_REG, ins->dreg, ins->sreg2); break; case OP_PSHRQ: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTQ, X86_SSE_SHR, ins->dreg, ins->inst_imm); break; case OP_PSHRQ_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSRLQ_REG, ins->dreg, ins->sreg2); break; case OP_PSHLQ: x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTQ, X86_SSE_SHL, ins->dreg, ins->inst_imm); break; case OP_PSHLQ_REG: x86_sse_shift_reg_reg (code, X86_SSE_PSLLQ_REG, ins->dreg, ins->sreg2); break; case OP_ICONV_TO_X: x86_movd_xreg_reg (code, ins->dreg, ins->sreg1); break; case OP_EXTRACT_I4: x86_movd_reg_xreg (code, ins->dreg, ins->sreg1); break; case OP_EXTRACT_I1: x86_movd_reg_xreg (code, ins->dreg, ins->sreg1); if (ins->inst_c0) x86_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_c0 * 8); x86_widen_reg (code, ins->dreg, ins->dreg, ins->inst_c1 == MONO_TYPE_I1, FALSE); break; case OP_EXTRACT_I2: x86_movd_reg_xreg (code, ins->dreg, ins->sreg1); if (ins->inst_c0) x86_shift_reg_imm (code, X86_SHR, ins->dreg, 16); x86_widen_reg (code, ins->dreg, ins->dreg, ins->inst_c1 == MONO_TYPE_I2, TRUE); break; case OP_EXTRACT_R8: if (ins->inst_c0) x86_sse_alu_pd_membase_reg (code, X86_SSE_MOVHPD_MEMBASE_REG, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1); else x86_sse_alu_sd_membase_reg (code, X86_SSE_MOVSD_MEMBASE_REG, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1); x86_fld_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE); break; case OP_INSERT_I2: x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->sreg1, ins->sreg2, ins->inst_c0); break; case OP_EXTRACTX_U2: x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PEXTRW, ins->dreg, ins->sreg1, ins->inst_c0); break; case OP_INSERTX_U1_SLOW: /*sreg1 is the 
extracted ireg (scratch) /sreg2 is the to be inserted ireg (scratch) /dreg is the xreg to receive the value*/ /*clear the bits from the extracted word*/ x86_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_c0 & 1 ? 0x00FF : 0xFF00); /*shift the value to insert if needed*/ if (ins->inst_c0 & 1) x86_shift_reg_imm (code, X86_SHL, ins->sreg2, 8); /*join them together*/ x86_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2); x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, ins->inst_c0 / 2); break; case OP_INSERTX_I4_SLOW: x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg2, ins->inst_c0 * 2); x86_shift_reg_imm (code, X86_SHR, ins->sreg2, 16); x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg2, ins->inst_c0 * 2 + 1); break; case OP_INSERTX_R4_SLOW: x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE, TRUE); /*TODO if inst_c0 == 0 use movss*/ x86_sse_alu_pd_reg_membase_imm (code, X86_SSE_PINSRW, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset + 0, ins->inst_c0 * 2); x86_sse_alu_pd_reg_membase_imm (code, X86_SSE_PINSRW, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset + 2, ins->inst_c0 * 2 + 1); break; case OP_INSERTX_R8_SLOW: x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE); if (cfg->verbose_level) printf ("CONVERTING a OP_INSERTX_R8_SLOW %d offset %x\n", ins->inst_c0, offset); if (ins->inst_c0) x86_sse_alu_pd_reg_membase (code, X86_SSE_MOVHPD_REG_MEMBASE, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset); else x86_movsd_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset); break; case OP_STOREX_MEMBASE_REG: case OP_STOREX_MEMBASE: x86_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1); break; case OP_LOADX_MEMBASE: x86_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_LOADX_ALIGNED_MEMBASE: x86_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_STOREX_ALIGNED_MEMBASE_REG: x86_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1); break; case OP_STOREX_NTA_MEMBASE_REG: x86_sse_alu_reg_membase (code, X86_SSE_MOVNTPS, ins->dreg, ins->sreg1, ins->inst_offset); break; case OP_PREFETCH_MEMBASE: x86_sse_alu_reg_membase (code, X86_SSE_PREFETCH, ins->backend.arg_info, ins->sreg1, ins->inst_offset); break; case OP_XMOVE: /*FIXME the peephole pass should have killed this*/ if (ins->dreg != ins->sreg1) x86_movaps_reg_reg (code, ins->dreg, ins->sreg1); break; case OP_XZERO: x86_sse_alu_pd_reg_reg (code, X86_SSE_PXOR, ins->dreg, ins->dreg); break; case OP_XONES: x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQB, ins->dreg, ins->dreg); break; case OP_FCONV_TO_R8_X: x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE); x86_movsd_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset); break; case OP_XCONV_R8_TO_I4: x86_cvttsd2si (code, ins->dreg, ins->sreg1); switch (ins->backend.source_opcode) { case OP_FCONV_TO_I1: x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE); break; case OP_FCONV_TO_U1: x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE); break; case OP_FCONV_TO_I2: x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE); break; case OP_FCONV_TO_U2: x86_widen_reg 
(code, ins->dreg, ins->dreg, FALSE, TRUE); break; } break; case OP_EXPAND_I2: x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, 0); x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, 1); x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0); break; case OP_EXPAND_I4: x86_movd_xreg_reg (code, ins->dreg, ins->sreg1); x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0); break; case OP_EXPAND_R4: x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE, TRUE); x86_movd_xreg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset); x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0); break; case OP_EXPAND_R8: x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE); x86_movsd_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset); x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0x44); break; case OP_CVTDQ2PD: x86_sse_alu_ss_reg_reg (code, X86_SSE_CVTDQ2PD, ins->dreg, ins->sreg1); break; case OP_CVTDQ2PS: x86_sse_alu_ps_reg_reg (code, X86_SSE_CVTDQ2PS, ins->dreg, ins->sreg1); break; case OP_CVTPD2DQ: x86_sse_alu_sd_reg_reg (code, X86_SSE_CVTPD2DQ, ins->dreg, ins->sreg1); break; case OP_CVTPD2PS: x86_sse_alu_pd_reg_reg (code, X86_SSE_CVTPD2PS, ins->dreg, ins->sreg1); break; case OP_CVTPS2DQ: x86_sse_alu_pd_reg_reg (code, X86_SSE_CVTPS2DQ, ins->dreg, ins->sreg1); break; case OP_CVTPS2PD: x86_sse_alu_ps_reg_reg (code, X86_SSE_CVTPS2PD, ins->dreg, ins->sreg1); break; case OP_CVTTPD2DQ: x86_sse_alu_pd_reg_reg (code, X86_SSE_CVTTPD2DQ, ins->dreg, ins->sreg1); break; case OP_CVTTPS2DQ: x86_sse_alu_ss_reg_reg (code, X86_SSE_CVTTPS2DQ, ins->dreg, ins->sreg1); break; #endif case OP_LIVERANGE_START: { if (cfg->verbose_level > 1) printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code; break; } case OP_LIVERANGE_END: { if (cfg->verbose_level > 1) printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code)); MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code; break; } case OP_GC_SAFE_POINT: { guint8 *br [1]; x86_test_membase_imm (code, ins->sreg1, 0, 1); br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE); code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll)); x86_patch (br [0], code); break; } case OP_GC_LIVENESS_DEF: case OP_GC_LIVENESS_USE: case OP_GC_PARAM_SLOT_LIVENESS_DEF: ins->backend.pc_offset = code - cfg->native_code; break; case OP_GC_SPILL_SLOT_LIVENESS_DEF: ins->backend.pc_offset = code - cfg->native_code; bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins); break; case OP_GET_SP: x86_mov_reg_reg (code, ins->dreg, X86_ESP); break; case OP_SET_SP: x86_mov_reg_reg (code, X86_ESP, ins->sreg1); break; case OP_FILL_PROF_CALL_CTX: x86_mov_membase_reg (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, esp), X86_ESP, sizeof (target_mgreg_t)); x86_mov_membase_reg (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, ebp), X86_EBP, sizeof (target_mgreg_t)); x86_mov_membase_reg (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, ebx), X86_EBX, sizeof (target_mgreg_t)); x86_mov_membase_reg (code, ins->sreg1, 
MONO_STRUCT_OFFSET (MonoContext, esi), X86_ESI, sizeof (target_mgreg_t)); x86_mov_membase_reg (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, edi), X86_EDI, sizeof (target_mgreg_t)); break; case OP_GET_LAST_ERROR: code = emit_get_last_error (code, ins->dreg); break; default: g_warning ("unknown opcode %s\n", mono_inst_name (ins->opcode)); g_assert_not_reached (); } if (G_UNLIKELY ((code - cfg->native_code - offset) > max_len)) { g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)", mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset); g_assert_not_reached (); } cpos += max_len; } set_code_cursor (cfg, code); } #endif /* DISABLE_JIT */ void mono_arch_register_lowlevel_calls (void) { } void mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target) { unsigned char *ip = ji->ip.i + code; switch (ji->type) { case MONO_PATCH_INFO_IP: *((gconstpointer *)(ip)) = target; break; case MONO_PATCH_INFO_ABS: case MONO_PATCH_INFO_METHOD: case MONO_PATCH_INFO_METHOD_JUMP: case MONO_PATCH_INFO_JIT_ICALL_ID: case MONO_PATCH_INFO_BB: case MONO_PATCH_INFO_LABEL: case MONO_PATCH_INFO_RGCTX_FETCH: case MONO_PATCH_INFO_JIT_ICALL_ADDR: case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR: x86_patch (ip, (unsigned char*)target); break; case MONO_PATCH_INFO_NONE: break; case MONO_PATCH_INFO_R4: case MONO_PATCH_INFO_R8: { guint32 offset = mono_arch_get_patch_offset (ip); *((gconstpointer *)(ip + offset)) = target; break; } default: { guint32 offset = mono_arch_get_patch_offset (ip); *((gconstpointer *)(ip + offset)) = target; break; } } } static G_GNUC_UNUSED void stack_unaligned (MonoMethod *m, gpointer caller) { printf ("%s\n", mono_method_full_name (m, TRUE)); g_assert_not_reached (); } #ifndef DISABLE_JIT guint8 * mono_arch_emit_prolog (MonoCompile *cfg) { MonoMethod *method = cfg->method; MonoBasicBlock *bb; MonoMethodSignature *sig; MonoInst *inst; CallInfo *cinfo; ArgInfo *ainfo; int alloc_size, pos, max_offset, i, cfa_offset; guint8 *code; gboolean need_stack_frame; cfg->code_size = MAX (cfg->header->code_size * 4, 10240); code = cfg->native_code = g_malloc (cfg->code_size); #if 0 { guint8 *br [16]; /* Check that the stack is aligned on osx */ x86_mov_reg_reg (code, X86_EAX, X86_ESP); x86_alu_reg_imm (code, X86_AND, X86_EAX, 15); x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0xc); br [0] = code; x86_branch_disp (code, X86_CC_Z, 0, FALSE); x86_push_membase (code, X86_ESP, 0); x86_push_imm (code, cfg->method); x86_mov_reg_imm (code, X86_EAX, stack_unaligned); x86_call_reg (code, X86_EAX); x86_patch (br [0], code); } #endif /* Offset between RSP and the CFA */ cfa_offset = 0; // CFA = sp + 4 cfa_offset = 4; mono_emit_unwind_op_def_cfa (cfg, code, X86_ESP, cfa_offset); // IP saved at CFA - 4 /* There is no IP reg on x86 */ mono_emit_unwind_op_offset (cfg, code, X86_NREG, -cfa_offset); mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF); need_stack_frame = needs_stack_frame (cfg); if (need_stack_frame) { x86_push_reg (code, X86_EBP); cfa_offset += 4; mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset); mono_emit_unwind_op_offset (cfg, code, X86_EBP, - cfa_offset); x86_mov_reg_reg (code, X86_EBP, X86_ESP); mono_emit_unwind_op_def_cfa_reg (cfg, code, X86_EBP); /* These are handled automatically by the stack marking code */ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF); } else { cfg->frame_reg = X86_ESP; } cfg->stack_offset += cfg->param_area; cfg->stack_offset = ALIGN_TO (cfg->stack_offset, 
MONO_ARCH_FRAME_ALIGNMENT); alloc_size = cfg->stack_offset; pos = 0; if (!method->save_lmf) { if (cfg->used_int_regs & (1 << X86_EBX)) { x86_push_reg (code, X86_EBX); pos += 4; cfa_offset += 4; mono_emit_unwind_op_offset (cfg, code, X86_EBX, - cfa_offset); /* These are handled automatically by the stack marking code */ mini_gc_set_slot_type_from_cfa (cfg, - cfa_offset, SLOT_NOREF); } if (cfg->used_int_regs & (1 << X86_EDI)) { x86_push_reg (code, X86_EDI); pos += 4; cfa_offset += 4; mono_emit_unwind_op_offset (cfg, code, X86_EDI, - cfa_offset); mini_gc_set_slot_type_from_cfa (cfg, - cfa_offset, SLOT_NOREF); } if (cfg->used_int_regs & (1 << X86_ESI)) { x86_push_reg (code, X86_ESI); pos += 4; cfa_offset += 4; mono_emit_unwind_op_offset (cfg, code, X86_ESI, - cfa_offset); mini_gc_set_slot_type_from_cfa (cfg, - cfa_offset, SLOT_NOREF); } } alloc_size -= pos; /* the original alloc_size is already aligned: there is %ebp and retip pushed, so realign */ if (mono_do_x86_stack_align && need_stack_frame) { int tot = alloc_size + pos + 4; /* ret ip */ if (need_stack_frame) tot += 4; /* ebp */ tot &= MONO_ARCH_FRAME_ALIGNMENT - 1; if (tot) { alloc_size += MONO_ARCH_FRAME_ALIGNMENT - tot; for (i = 0; i < MONO_ARCH_FRAME_ALIGNMENT - tot; i += sizeof (target_mgreg_t)) mini_gc_set_slot_type_from_fp (cfg, - (alloc_size + pos - i), SLOT_NOREF); } } cfg->arch.sp_fp_offset = alloc_size + pos; if (alloc_size) { /* See mono_emit_stack_alloc */ #if defined (TARGET_WIN32) || defined (MONO_ARCH_SIGSEGV_ON_ALTSTACK) guint32 remaining_size = alloc_size; /*FIXME handle unbounded code expansion, we should use a loop in case of more than X interactions*/ guint32 required_code_size = ((remaining_size / 0x1000) + 1) * 8; /*8 is the max size of x86_alu_reg_imm + x86_test_membase_reg*/ set_code_cursor (cfg, code); code = realloc_code (cfg, required_code_size); while (remaining_size >= 0x1000) { x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000); x86_test_membase_reg (code, X86_ESP, 0, X86_ESP); remaining_size -= 0x1000; } if (remaining_size) x86_alu_reg_imm (code, X86_SUB, X86_ESP, remaining_size); #else x86_alu_reg_imm (code, X86_SUB, X86_ESP, alloc_size); #endif g_assert (need_stack_frame); } if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED || cfg->method->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE) { x86_alu_reg_imm (code, X86_AND, X86_ESP, -MONO_ARCH_FRAME_ALIGNMENT); } #if DEBUG_STACK_ALIGNMENT /* check the stack is aligned */ if (need_stack_frame && method->wrapper_type == MONO_WRAPPER_NONE) { x86_mov_reg_reg (code, X86_ECX, X86_ESP); x86_alu_reg_imm (code, X86_AND, X86_ECX, MONO_ARCH_FRAME_ALIGNMENT - 1); x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0); x86_branch_disp (code, X86_CC_EQ, 3, FALSE); x86_breakpoint (code); } #endif /* compute max_offset in order to use short forward jumps */ max_offset = 0; if (cfg->opt & MONO_OPT_BRANCH) { for (bb = cfg->bb_entry; bb; bb = bb->next_bb) { MonoInst *ins; bb->max_offset = max_offset; /* max alignment for loops */ if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb)) max_offset += LOOP_ALIGNMENT; MONO_BB_FOR_EACH_INS (bb, ins) { if (ins->opcode == OP_LABEL) ins->inst_c1 = max_offset; max_offset += ins_get_size (ins->opcode); } } } /* store runtime generic context */ if (cfg->rgctx_var) { g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET && cfg->rgctx_var->inst_basereg == X86_EBP); x86_mov_membase_reg (code, X86_EBP, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, 4); } if (method->save_lmf) code = emit_setup_lmf (cfg, code, cfg->lmf_var->inst_offset, 
cfa_offset); { MonoInst *ins; if (cfg->arch.ss_tramp_var) { /* Initialize ss_tramp_var */ ins = cfg->arch.ss_tramp_var; g_assert (ins->opcode == OP_REGOFFSET); g_assert (!cfg->compile_aot); x86_mov_membase_imm (code, ins->inst_basereg, ins->inst_offset, (gsize)&ss_trampoline, 4); } if (cfg->arch.bp_tramp_var) { /* Initialize bp_tramp_var */ ins = cfg->arch.bp_tramp_var; g_assert (ins->opcode == OP_REGOFFSET); g_assert (!cfg->compile_aot); x86_mov_membase_imm (code, ins->inst_basereg, ins->inst_offset, (gsize)&bp_trampoline, 4); } } /* load arguments allocated to register from the stack */ sig = mono_method_signature_internal (method); pos = 0; cinfo = cfg->arch.cinfo; for (i = 0; i < sig->param_count + sig->hasthis; ++i) { inst = cfg->args [pos]; ainfo = &cinfo->args [pos]; if (inst->opcode == OP_REGVAR) { if (storage_in_ireg (ainfo->storage)) { x86_mov_reg_reg (code, inst->dreg, ainfo->reg); } else { g_assert (need_stack_frame); x86_mov_reg_membase (code, inst->dreg, X86_EBP, ainfo->offset + ARGS_OFFSET, 4); } if (cfg->verbose_level > 2) g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg)); } else { if (storage_in_ireg (ainfo->storage)) { x86_mov_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg, 4); } } pos++; } set_code_cursor (cfg, code); return code; } #endif void mono_arch_emit_epilog (MonoCompile *cfg) { MonoMethod *method = cfg->method; MonoMethodSignature *sig = mono_method_signature_internal (method); int i, quad, pos; guint32 stack_to_pop; guint8 *code; int max_epilog_size = 16; CallInfo *cinfo; gboolean need_stack_frame = needs_stack_frame (cfg); if (cfg->method->save_lmf) max_epilog_size += 128; code = realloc_code (cfg, max_epilog_size); /* the code restoring the registers must be kept in sync with OP_TAILCALL */ pos = 0; if (method->save_lmf) { gint32 lmf_offset = cfg->lmf_var->inst_offset; /* restore caller saved regs */ if (cfg->used_int_regs & (1 << X86_EBX)) { x86_mov_reg_membase (code, X86_EBX, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebx), 4); } if (cfg->used_int_regs & (1 << X86_EDI)) { x86_mov_reg_membase (code, X86_EDI, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, edi), 4); } if (cfg->used_int_regs & (1 << X86_ESI)) { x86_mov_reg_membase (code, X86_ESI, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, esi), 4); } /* EBP is restored by LEAVE */ } else { for (i = 0; i < X86_NREG; ++i) { if ((cfg->used_int_regs & X86_CALLER_REGS & (1 << i)) && (i != X86_EBP)) { pos -= 4; } } g_assert (!pos || need_stack_frame); if (pos) { x86_lea_membase (code, X86_ESP, X86_EBP, pos); } if (cfg->used_int_regs & (1 << X86_ESI)) { x86_pop_reg (code, X86_ESI); } if (cfg->used_int_regs & (1 << X86_EDI)) { x86_pop_reg (code, X86_EDI); } if (cfg->used_int_regs & (1 << X86_EBX)) { x86_pop_reg (code, X86_EBX); } } /* Load returned vtypes into registers if needed */ cinfo = cfg->arch.cinfo; if (cinfo->ret.storage == ArgValuetypeInReg) { for (quad = 0; quad < 2; quad ++) { switch (cinfo->ret.pair_storage [quad]) { case ArgInIReg: x86_mov_reg_membase (code, cinfo->ret.pair_regs [quad], cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (target_mgreg_t)), 4); break; case ArgOnFloatFpStack: x86_fld_membase (code, cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (target_mgreg_t)), FALSE); break; case ArgOnDoubleFpStack: x86_fld_membase (code, cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (target_mgreg_t)), TRUE); break; case ArgNone: break; default: 
g_assert_not_reached (); } } } if (need_stack_frame) x86_leave (code); if (CALLCONV_IS_STDCALL (sig)) { MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, sig->param_count + 1); stack_to_pop = mono_arch_get_argument_info (sig, sig->param_count, arg_info); } else if (cinfo->callee_stack_pop) stack_to_pop = cinfo->callee_stack_pop; else stack_to_pop = 0; if (stack_to_pop) { g_assert (need_stack_frame); x86_ret_imm (code, stack_to_pop); } else { x86_ret (code); } set_code_cursor (cfg, code); } void mono_arch_emit_exceptions (MonoCompile *cfg) { MonoJumpInfo *patch_info; int nthrows, i; guint8 *code; MonoClass *exc_classes [16]; guint8 *exc_throw_start [16], *exc_throw_end [16]; guint32 code_size; int exc_count = 0; /* Compute needed space */ for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { if (patch_info->type == MONO_PATCH_INFO_EXC) exc_count++; } /* * make sure we have enough space for exceptions * 16 is the size of two push_imm instructions and a call */ if (cfg->compile_aot) code_size = exc_count * 32; else code_size = exc_count * 16; code = realloc_code (cfg, code_size); nthrows = 0; for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) { switch (patch_info->type) { case MONO_PATCH_INFO_EXC: { MonoClass *exc_class; guint8 *buf, *buf2; guint32 throw_ip; x86_patch (patch_info->ip.i + cfg->native_code, code); exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name); throw_ip = patch_info->ip.i; /* Find a throw sequence for the same exception class */ for (i = 0; i < nthrows; ++i) if (exc_classes [i] == exc_class) break; if (i < nthrows) { x86_push_imm (code, (exc_throw_end [i] - cfg->native_code) - throw_ip); x86_jump_code (code, exc_throw_start [i]); patch_info->type = MONO_PATCH_INFO_NONE; } else { guint32 size; /* Compute size of code following the push <OFFSET> */ size = 5 + 5; /*This is aligned to 16 bytes by the callee. 
This way we save a few bytes here.*/ if ((code - cfg->native_code) - throw_ip < 126 - size) { /* Use the shorter form */ buf = buf2 = code; x86_push_imm (code, 0); } else { buf = code; x86_push_imm (code, 0xf0f0f0f0); buf2 = code; } if (nthrows < 16) { exc_classes [nthrows] = exc_class; exc_throw_start [nthrows] = code; } x86_push_imm (code, m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF); patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception; patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID; patch_info->ip.i = code - cfg->native_code; x86_call_code (code, 0); x86_push_imm (buf, (code - cfg->native_code) - throw_ip); while (buf < buf2) x86_nop (buf); if (nthrows < 16) { exc_throw_end [nthrows] = code; nthrows ++; } } break; } default: /* do nothing */ break; } set_code_cursor (cfg, code); } set_code_cursor (cfg, code); } MONO_NEVER_INLINE void mono_arch_flush_icache (guint8 *code, gint size) { /* call/ret required (or likely other control transfer) */ } void mono_arch_flush_register_windows (void) { } gboolean mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm) { return TRUE; } void mono_arch_finish_init (void) { char *mono_no_tls = g_getenv ("MONO_NO_TLS"); if (!mono_no_tls) { #ifndef TARGET_WIN32 #if MONO_XEN_OPT optimize_for_xen = access ("/proc/xen", F_OK) == 0; #endif #endif } else { g_free (mono_no_tls); } } // Linear handler, the bsearch head compare is shorter //[2 + 4] x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm); //[1 + 1] x86_branch8(inst,cond,imm,is_signed) // x86_patch(ins,target) //[1 + 5] x86_jump_mem(inst,mem) #define CMP_SIZE 6 #define BR_SMALL_SIZE 2 #define BR_LARGE_SIZE 5 #define JUMP_IMM_SIZE 6 #define ENABLE_WRONG_METHOD_CHECK 0 #define DEBUG_IMT 0 static int imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target) { int i, distance = 0; for (i = start; i < target; ++i) distance += imt_entries [i]->chunk_size; return distance; } /* * LOCKING: called with the domain lock held */ gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp) { int i; int size = 0; guint8 *code, *start; GSList *unwind_ops; MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->is_equals) { if (item->check_target_idx) { if (!item->compare_done) item->chunk_size += CMP_SIZE; item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE; } else { if (fail_tramp) { item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + JUMP_IMM_SIZE * 2; } else { item->chunk_size += JUMP_IMM_SIZE; #if ENABLE_WRONG_METHOD_CHECK item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1; #endif } } } else { item->chunk_size += CMP_SIZE + BR_LARGE_SIZE; imt_entries [item->check_target_idx]->compare_done = TRUE; } size += item->chunk_size; } if (fail_tramp) { code = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, size); } else { code = mono_mem_manager_code_reserve (mem_manager, size); } start = code; unwind_ops = mono_arch_get_cie_program (); for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; item->code_target = code; if (item->is_equals) { if (item->check_target_idx) { if (!item->compare_done) x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gsize)item->key); item->jmp_code = code; x86_branch8 (code, X86_CC_NE, 0, FALSE); if (item->has_target_code) x86_jump_code (code, item->value.target_code); else x86_jump_mem (code, (gsize)&vtable->vtable 
[item->value.vtable_slot]); } else { if (fail_tramp) { x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gsize)item->key); item->jmp_code = code; x86_branch8 (code, X86_CC_NE, 0, FALSE); if (item->has_target_code) x86_jump_code (code, item->value.target_code); else x86_jump_mem (code, (gsize)&vtable->vtable [item->value.vtable_slot]); x86_patch (item->jmp_code, code); x86_jump_code (code, fail_tramp); item->jmp_code = NULL; } else { /* enable the commented code to assert on wrong method */ #if ENABLE_WRONG_METHOD_CHECK x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gsize)item->key); item->jmp_code = code; x86_branch8 (code, X86_CC_NE, 0, FALSE); #endif if (item->has_target_code) x86_jump_code (code, item->value.target_code); else x86_jump_mem (code, (gsize)&vtable->vtable [item->value.vtable_slot]); #if ENABLE_WRONG_METHOD_CHECK x86_patch (item->jmp_code, code); x86_breakpoint (code); item->jmp_code = NULL; #endif } } } else { x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gsize)item->key); item->jmp_code = code; if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx))) x86_branch8 (code, X86_CC_GE, 0, FALSE); else x86_branch32 (code, X86_CC_GE, 0, FALSE); } } /* patch the branches to get to the target items */ for (i = 0; i < count; ++i) { MonoIMTCheckItem *item = imt_entries [i]; if (item->jmp_code) { if (item->check_target_idx) { x86_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target); } } } if (!fail_tramp) UnlockedAdd (&mono_stats.imt_trampolines_size, code - start); g_assertf (code - start <= size, "%d %d", (int)(code - start), size); #if DEBUG_IMT { char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count); mono_disassemble_code (NULL, (guint8*)start, code - start, buff); g_free (buff); } #endif if (mono_jit_map_is_enabled ()) { char *buff; if (vtable) buff = g_strdup_printf ("imt_%s_%s_entries_%d", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count); else buff = g_strdup_printf ("imt_trampoline_entries_%d", count); mono_emit_jit_tramp (start, code - start, buff); g_free (buff); } MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL)); mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), mem_manager); return start; } MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code) { return (MonoMethod*) regs [MONO_ARCH_IMT_REG]; } MonoVTable* mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code) { return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG]; } GSList* mono_arch_get_cie_program (void) { GSList *l = NULL; mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, X86_ESP, 4); mono_add_unwind_op_offset (l, (guint8*)NULL, (guint8*)NULL, X86_NREG, -4); return l; } MonoInst* mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins = NULL; int opcode = 0; if (cmethod->klass == mono_class_try_get_math_class ()) { if (strcmp (cmethod->name, "Tan") == 0) { opcode = OP_TAN; } else if (strcmp (cmethod->name, "Atan") == 0) { opcode = OP_ATAN; } else if (strcmp (cmethod->name, "Sqrt") == 0) { opcode = OP_SQRT; } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) { opcode = OP_ABS; } else if (strcmp (cmethod->name, "Round") == 0 && fsig->param_count == 1 && 
fsig->params [0]->type == MONO_TYPE_R8) { opcode = OP_ROUND; } if (opcode && fsig->param_count == 1) { MONO_INST_NEW (cfg, ins, opcode); ins->type = STACK_R8; ins->dreg = mono_alloc_freg (cfg); ins->sreg1 = args [0]->dreg; MONO_ADD_INS (cfg->cbb, ins); } if (cfg->opt & MONO_OPT_CMOV) { opcode = 0; if (strcmp (cmethod->name, "Min") == 0) { if (fsig->params [0]->type == MONO_TYPE_I4) opcode = OP_IMIN; } else if (strcmp (cmethod->name, "Max") == 0) { if (fsig->params [0]->type == MONO_TYPE_I4) opcode = OP_IMAX; } if (opcode && fsig->param_count == 2) { MONO_INST_NEW (cfg, ins, opcode); ins->type = STACK_I4; ins->dreg = mono_alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->sreg2 = args [1]->dreg; MONO_ADD_INS (cfg->cbb, ins); } } #if 0 /* OP_FREM is not IEEE compatible */ else if (strcmp (cmethod->name, "IEEERemainder") == 0 && fsig->param_count == 2) { MONO_INST_NEW (cfg, ins, OP_FREM); ins->inst_i0 = args [0]; ins->inst_i1 = args [1]; } #endif } return ins; } guint32 mono_arch_get_patch_offset (guint8 *code) { if ((code [0] == 0x8b) && (x86_modrm_mod (code [1]) == 0x2)) return 2; else if (code [0] == 0xba) return 1; else if (code [0] == 0x68) /* push IMM */ return 1; else if ((code [0] == 0xff) && (x86_modrm_reg (code [1]) == 0x6)) /* push <OFFSET>(<REG>) */ return 2; else if ((code [0] == 0xff) && (x86_modrm_reg (code [1]) == 0x2)) /* call *<OFFSET>(<REG>) */ return 2; else if ((code [0] == 0xdd) || (code [0] == 0xd9)) /* fldl <ADDR> */ return 2; else if ((code [0] == 0x58) && (code [1] == 0x05)) /* pop %eax; add <OFFSET>, %eax */ return 2; else if ((code [0] >= 0x58) && (code [0] <= 0x58 + X86_NREG) && (code [1] == 0x81)) /* pop <REG>; add <OFFSET>, <REG> */ return 3; else if ((code [0] >= 0xb8) && (code [0] < 0xb8 + 8)) /* mov <REG>, imm */ return 1; else if (code [0] == 0xE9) /* jmp eip+32b */ return 1; g_assert_not_reached (); return -1; } /** * \return TRUE if no sw breakpoint was present (always). * * Copy \p size bytes from \p code - \p offset to the buffer \p buf. If the debugger inserted software * breakpoints in the original code, they are removed in the copy. */ gboolean mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size) { /* * If method_start is non-NULL we need to perform bound checks, since we access memory * at code - offset we could go before the start of the method and end up in a different * page of memory that is not mapped or read incorrect data anyway. We zero-fill the bytes * instead. */ if (!method_start || code - offset >= method_start) { memcpy (buf, code - offset, size); } else { int diff = code - method_start; memset (buf, 0, size); memcpy (buf + offset - diff, method_start, diff + size - offset); } return TRUE; } /* * mono_x86_get_this_arg_offset: * * Return the offset of the stack location where this is passed during a virtual * call. 
*/ guint32 mono_x86_get_this_arg_offset (MonoMethodSignature *sig) { return 0; } gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code) { host_mgreg_t esp = regs [X86_ESP]; gpointer res; int offset; offset = 0; /* * The stack looks like: * <other args> * <this=delegate> */ res = ((MonoObject**)esp) [0]; return res; } #define MAX_ARCH_DELEGATE_PARAMS 10 static gpointer get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, guint32 param_count) { guint8 *code, *start; int code_reserve = 64; GSList *unwind_ops; unwind_ops = mono_arch_get_cie_program (); /* * The stack contains: * <delegate> * <return addr> */ if (has_target) { start = code = mono_global_codeman_reserve (code_reserve); /* Replace the this argument with the target */ x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4); x86_mov_reg_membase (code, X86_ECX, X86_EAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 4); x86_mov_membase_reg (code, X86_ESP, 4, X86_ECX, 4); x86_jump_membase (code, X86_EAX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); } else { int i = 0; /* 8 for mov_reg and jump, plus 8 for each parameter */ code_reserve = 8 + (param_count * 8); /* * The stack contains: * <args in reverse order> * <delegate> * <return addr> * * and we need: * <args in reverse order> * <return addr> * * without unbalancing the stack. * So move each arg up a spot in the stack (overwriting un-needed 'this' arg) * and leaving original spot of first arg as placeholder in stack so * when callee pops stack everything works. */ start = code = mono_global_codeman_reserve (code_reserve); /* store delegate for access to method_ptr */ x86_mov_reg_membase (code, X86_ECX, X86_ESP, 4, 4); /* move args up */ for (i = 0; i < param_count; ++i) { x86_mov_reg_membase (code, X86_EAX, X86_ESP, (i+2)*4, 4); x86_mov_membase_reg (code, X86_ESP, (i+1)*4, X86_EAX, 4); } x86_jump_membase (code, X86_ECX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr)); } g_assertf ((code - start) <= code_reserve, "%d %d", (int)(code - start), code_reserve); if (has_target) { *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops); } else { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count); *info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops); g_free (name); } if (mono_jit_map_is_enabled ()) { char *buff; if (has_target) buff = (char*)"delegate_invoke_has_target"; else buff = g_strdup_printf ("delegate_invoke_no_target_%d", param_count); mono_emit_jit_tramp (start, code - start, buff); if (!has_target) g_free (buff); } MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); return start; } #define MAX_VIRTUAL_DELEGATE_OFFSET 32 static gpointer get_delegate_virtual_invoke_impl (MonoTrampInfo **info, gboolean load_imt_reg, int offset) { guint8 *code, *start; int size = 24; char *tramp_name; GSList *unwind_ops; if (offset / (int)sizeof (target_mgreg_t) > MAX_VIRTUAL_DELEGATE_OFFSET) return NULL; /* * The stack contains: * <delegate> * <return addr> */ start = code = mono_global_codeman_reserve (size); unwind_ops = mono_arch_get_cie_program (); /* Replace the this argument with the target */ x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4); x86_mov_reg_membase (code, X86_ECX, X86_EAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 4); x86_mov_membase_reg (code, X86_ESP, 4, X86_ECX, 4); if (load_imt_reg) { /* Load the IMT reg */ x86_mov_reg_membase (code, MONO_ARCH_IMT_REG, X86_EAX, MONO_STRUCT_OFFSET 
(MonoDelegate, method), 4); } /* Load the vtable */ x86_mov_reg_membase (code, X86_EAX, X86_ECX, MONO_STRUCT_OFFSET (MonoObject, vtable), 4); x86_jump_membase (code, X86_EAX, offset); g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size); MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL)); tramp_name = mono_get_delegate_virtual_invoke_impl_name (load_imt_reg, offset); *info = mono_tramp_info_create (tramp_name, start, code - start, NULL, unwind_ops); g_free (tramp_name); return start; } GSList* mono_arch_get_delegate_invoke_impls (void) { GSList *res = NULL; MonoTrampInfo *info; int i; get_delegate_invoke_impl (&info, TRUE, 0); res = g_slist_prepend (res, info); for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) { get_delegate_invoke_impl (&info, FALSE, i); res = g_slist_prepend (res, info); } for (i = 0; i <= MAX_VIRTUAL_DELEGATE_OFFSET; ++i) { get_delegate_virtual_invoke_impl (&info, TRUE, - i * TARGET_SIZEOF_VOID_P); res = g_slist_prepend (res, info); get_delegate_virtual_invoke_impl (&info, FALSE, i * TARGET_SIZEOF_VOID_P); res = g_slist_prepend (res, info); } return res; } gpointer mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target) { guint8 *code, *start; if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS) return NULL; /* FIXME: Support more cases */ if (MONO_TYPE_ISSTRUCT (sig->ret)) return NULL; /* * The stack contains: * <delegate> * <return addr> */ if (has_target) { static guint8* cached = NULL; if (cached) return cached; if (mono_ee_features.use_aot_trampolines) { start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target"); } else { MonoTrampInfo *info; start = (guint8*)get_delegate_invoke_impl (&info, TRUE, 0); mono_tramp_info_register (info, NULL); } mono_memory_barrier (); cached = start; } else { static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL}; int i = 0; for (i = 0; i < sig->param_count; ++i) if (!mono_is_regsize_var (sig->params [i])) return NULL; code = cache [sig->param_count]; if (code) return code; if (mono_ee_features.use_aot_trampolines) { char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count); start = (guint8*)mono_aot_get_trampoline (name); g_free (name); } else { MonoTrampInfo *info; start = (guint8*)get_delegate_invoke_impl (&info, FALSE, sig->param_count); mono_tramp_info_register (info, NULL); } mono_memory_barrier (); cache [sig->param_count] = start; } return start; } gpointer mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg) { MonoTrampInfo *info; gpointer code; code = get_delegate_virtual_invoke_impl (&info, load_imt_reg, offset); if (code) mono_tramp_info_register (info, NULL); return code; } host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg) { switch (reg) { case X86_EAX: return ctx->eax; case X86_EBX: return ctx->ebx; case X86_ECX: return ctx->ecx; case X86_EDX: return ctx->edx; case X86_ESP: return ctx->esp; case X86_EBP: return ctx->ebp; case X86_ESI: return ctx->esi; case X86_EDI: return ctx->edi; default: g_assert_not_reached (); return 0; } } host_mgreg_t* mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg) { switch (reg) { case X86_EAX: return &ctx->eax; case X86_EBX: return &ctx->ebx; case X86_ECX: return &ctx->ecx; case X86_EDX: return &ctx->edx; case X86_ESP: return &ctx->esp; case X86_EBP: return &ctx->ebp; case X86_ESI: return &ctx->esi; case X86_EDI: return &ctx->edi; default: 
g_assert_not_reached (); return 0; } } void mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val) { switch (reg) { case X86_EAX: ctx->eax = val; break; case X86_EBX: ctx->ebx = val; break; case X86_ECX: ctx->ecx = val; break; case X86_EDX: ctx->edx = val; break; case X86_ESP: ctx->esp = val; break; case X86_EBP: ctx->ebp = val; break; case X86_ESI: ctx->esi = val; break; case X86_EDI: ctx->edi = val; break; default: g_assert_not_reached (); } } #ifdef MONO_ARCH_SIMD_INTRINSICS static MonoInst* get_float_to_x_spill_area (MonoCompile *cfg) { if (!cfg->fconv_to_r8_x_var) { cfg->fconv_to_r8_x_var = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL); cfg->fconv_to_r8_x_var->flags |= MONO_INST_VOLATILE; /*FIXME, use the don't regalloc flag*/ } return cfg->fconv_to_r8_x_var; } /* * Convert all fconv opts that MONO_OPT_SSE2 would get wrong. */ void mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins) { MonoInst *fconv; int dreg, src_opcode; if (!(cfg->opt & MONO_OPT_SSE2) || !(cfg->opt & MONO_OPT_SIMD) || COMPILE_LLVM (cfg)) return; switch (src_opcode = ins->opcode) { case OP_FCONV_TO_I1: case OP_FCONV_TO_U1: case OP_FCONV_TO_I2: case OP_FCONV_TO_U2: case OP_FCONV_TO_I4: break; default: return; } /* dreg is the IREG and sreg1 is the FREG */ MONO_INST_NEW (cfg, fconv, OP_FCONV_TO_R8_X); fconv->klass = NULL; /*FIXME, what can I use here as the Mono.Simd lib might not be loaded yet*/ fconv->sreg1 = ins->sreg1; fconv->dreg = mono_alloc_ireg (cfg); fconv->type = STACK_VTYPE; fconv->backend.spill_var = get_float_to_x_spill_area (cfg); mono_bblock_insert_before_ins (cfg->cbb, ins, fconv); dreg = ins->dreg; NULLIFY_INS (ins); ins->opcode = OP_XCONV_R8_TO_I4; ins->klass = mono_defaults.int32_class; ins->sreg1 = fconv->dreg; ins->dreg = dreg; ins->type = STACK_I4; ins->backend.source_opcode = src_opcode; } #endif /* #ifdef MONO_ARCH_SIMD_INTRINSICS */ void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins) { MonoInst *ins; int vreg; if (long_ins->opcode == OP_LNEG) { ins = long_ins; MONO_EMIT_NEW_UNALU (cfg, OP_INEG, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1)); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADC_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), 0); MONO_EMIT_NEW_UNALU (cfg, OP_INEG, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->dreg)); NULLIFY_INS (ins); return; } #ifdef MONO_ARCH_SIMD_INTRINSICS if (!(cfg->opt & MONO_OPT_SIMD)) return; /*TODO move this to simd-intrinsic.c once we support sse 4.1 dword extractors since we need the runtime caps info */ switch (long_ins->opcode) { case OP_EXTRACT_I8: vreg = long_ins->sreg1; if (long_ins->inst_c0) { MONO_INST_NEW (cfg, ins, OP_PSHUFLED); ins->klass = long_ins->klass; ins->sreg1 = long_ins->sreg1; ins->inst_c0 = 2; ins->type = STACK_VTYPE; ins->dreg = vreg = alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, ins); } MONO_INST_NEW (cfg, ins, OP_EXTRACT_I4); ins->klass = mono_defaults.int32_class; ins->sreg1 = vreg; ins->type = STACK_I4; ins->dreg = MONO_LVREG_LS (long_ins->dreg); MONO_ADD_INS (cfg->cbb, ins); MONO_INST_NEW (cfg, ins, OP_PSHUFLED); ins->klass = long_ins->klass; ins->sreg1 = long_ins->sreg1; ins->inst_c0 = long_ins->inst_c0 ? 
3 : 1; ins->type = STACK_VTYPE; ins->dreg = vreg = alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, ins); MONO_INST_NEW (cfg, ins, OP_EXTRACT_I4); ins->klass = mono_defaults.int32_class; ins->sreg1 = vreg; ins->type = STACK_I4; ins->dreg = MONO_LVREG_MS (long_ins->dreg); MONO_ADD_INS (cfg->cbb, ins); long_ins->opcode = OP_NOP; break; case OP_INSERTX_I8_SLOW: MONO_INST_NEW (cfg, ins, OP_INSERTX_I4_SLOW); ins->dreg = long_ins->dreg; ins->sreg1 = long_ins->dreg; ins->sreg2 = MONO_LVREG_LS (long_ins->sreg2); ins->inst_c0 = long_ins->inst_c0 * 2; MONO_ADD_INS (cfg->cbb, ins); MONO_INST_NEW (cfg, ins, OP_INSERTX_I4_SLOW); ins->dreg = long_ins->dreg; ins->sreg1 = long_ins->dreg; ins->sreg2 = MONO_LVREG_MS (long_ins->sreg2); ins->inst_c0 = long_ins->inst_c0 * 2 + 1; MONO_ADD_INS (cfg->cbb, ins); long_ins->opcode = OP_NOP; break; case OP_EXPAND_I8: MONO_INST_NEW (cfg, ins, OP_ICONV_TO_X); ins->dreg = long_ins->dreg; ins->sreg1 = MONO_LVREG_LS (long_ins->sreg1); ins->klass = long_ins->klass; ins->type = STACK_VTYPE; MONO_ADD_INS (cfg->cbb, ins); MONO_INST_NEW (cfg, ins, OP_INSERTX_I4_SLOW); ins->dreg = long_ins->dreg; ins->sreg1 = long_ins->dreg; ins->sreg2 = MONO_LVREG_MS (long_ins->sreg1); ins->inst_c0 = 1; ins->klass = long_ins->klass; ins->type = STACK_VTYPE; MONO_ADD_INS (cfg->cbb, ins); MONO_INST_NEW (cfg, ins, OP_PSHUFLED); ins->dreg = long_ins->dreg; ins->sreg1 = long_ins->dreg; ins->inst_c0 = 0x44; /*Magic number for swizzling (X,Y,X,Y)*/ ins->klass = long_ins->klass; ins->type = STACK_VTYPE; MONO_ADD_INS (cfg->cbb, ins); long_ins->opcode = OP_NOP; break; } #endif /* MONO_ARCH_SIMD_INTRINSICS */ } /* * mono_aot_emit_load_got_addr: * * Emit code to load the got address. * On x86, the result is placed into EBX. */ guint8* mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji) { x86_call_imm (code, 0); /* * The patch needs to point to the pop, since the GOT offset needs * to be added to that address. */ if (cfg) mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GOT_OFFSET, NULL); else *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL); x86_pop_reg (code, MONO_ARCH_GOT_REG); x86_alu_reg_imm (code, X86_ADD, MONO_ARCH_GOT_REG, 0xf0f0f0f0); if (cfg) set_code_cursor (cfg, code); return code; } /* * mono_arch_emit_load_aotconst: * * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and * TARGET from the mscorlib GOT in full-aot code. * On x86, the GOT address is assumed to be in EBX, and the result is placed into * EAX. */ guint8* mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, MonoJumpInfoType tramp_type, gconstpointer target) { /* Load the mscorlib got address */ x86_mov_reg_membase (code, X86_EAX, MONO_ARCH_GOT_REG, sizeof (target_mgreg_t), 4); *ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target); /* arch_emit_got_access () patches this */ x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0xf0f0f0f0, 4); return code; } /* Can't put this into mini-x86.h */ gpointer mono_x86_get_signal_exception_trampoline (MonoTrampInfo **info, gboolean aot); GSList * mono_arch_get_trampolines (gboolean aot) { MonoTrampInfo *info; GSList *tramps = NULL; mono_x86_get_signal_exception_trampoline (&info, aot); tramps = g_slist_append (tramps, info); return tramps; } /* Soft Debug support */ #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED /* * mono_arch_set_breakpoint: * * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET. 
* The location should contain code emitted by OP_SEQ_POINT. */ void mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip + OP_SEQ_POINT_BP_OFFSET; g_assert (code [0] == 0x90); x86_call_membase (code, X86_ECX, 0); } /* * mono_arch_clear_breakpoint: * * Clear the breakpoint at IP. */ void mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip) { guint8 *code = ip + OP_SEQ_POINT_BP_OFFSET; int i; for (i = 0; i < 2; ++i) x86_nop (code); } /* * mono_arch_start_single_stepping: * * Start single stepping. */ void mono_arch_start_single_stepping (void) { ss_trampoline = mini_get_single_step_trampoline (); } /* * mono_arch_stop_single_stepping: * * Stop single stepping. */ void mono_arch_stop_single_stepping (void) { ss_trampoline = NULL; } /* * mono_arch_is_single_step_event: * * Return whenever the machine state in SIGCTX corresponds to a single * step event. */ gboolean mono_arch_is_single_step_event (void *info, void *sigctx) { /* We use soft breakpoints */ return FALSE; } gboolean mono_arch_is_breakpoint_event (void *info, void *sigctx) { /* We use soft breakpoints */ return FALSE; } #define BREAKPOINT_SIZE 2 /* * mono_arch_skip_breakpoint: * * See mini-amd64.c for docs. */ void mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji) { g_assert_not_reached (); } /* * mono_arch_skip_single_step: * * See mini-amd64.c for docs. */ void mono_arch_skip_single_step (MonoContext *ctx) { g_assert_not_reached (); } /* * mono_arch_get_seq_point_info: * * See mini-amd64.c for docs. */ SeqPointInfo* mono_arch_get_seq_point_info (guint8 *code) { NOT_IMPLEMENTED; return NULL; } #endif gboolean mono_arch_opcode_supported (int opcode) { switch (opcode) { case OP_ATOMIC_ADD_I4: case OP_ATOMIC_EXCHANGE_I4: case OP_ATOMIC_CAS_I4: case OP_ATOMIC_LOAD_I1: case OP_ATOMIC_LOAD_I2: case OP_ATOMIC_LOAD_I4: case OP_ATOMIC_LOAD_U1: case OP_ATOMIC_LOAD_U2: case OP_ATOMIC_LOAD_U4: case OP_ATOMIC_LOAD_R4: case OP_ATOMIC_LOAD_R8: case OP_ATOMIC_STORE_I1: case OP_ATOMIC_STORE_I2: case OP_ATOMIC_STORE_I4: case OP_ATOMIC_STORE_U1: case OP_ATOMIC_STORE_U2: case OP_ATOMIC_STORE_U4: case OP_ATOMIC_STORE_R4: case OP_ATOMIC_STORE_R8: return TRUE; default: return FALSE; } } CallInfo* mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) { return get_call_info (mp, sig); } gpointer mono_arch_load_function (MonoJitICallId jit_icall_id) { gpointer target = NULL; switch (jit_icall_id) { #undef MONO_AOT_ICALL #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break; MONO_AOT_ICALL (mono_x86_start_gsharedvt_call) MONO_AOT_ICALL (mono_x86_throw_corlib_exception) MONO_AOT_ICALL (mono_x86_throw_exception) } return target; }
1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/riscv/Linit_local.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Ginit_local.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Ginit_local.c" #endif
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./docs/design/coreclr/profiling/IL Rewriting Basics.md
# IL Rewriting Basics ## Intro One of the common use cases of the `ICorProfiler*` interfaces is to perform IL rewriting. Some possible reasons a profiler would want to rewrite IL: - Inspecting interesting process state - Capturing exception state - Inspecting managed objects - Inspecting function arguments/return values - Injecting method hooks at the start/end of the method that call into another managed library There are two ways to rewrite IL 1. At Module load time with `ICorProfilerInfo::SetILFunctionBody` This approach has the benefit that it is 'set it and forget it'. You can replace the IL at module load, and the runtime will treat this new IL as if the module contained that IL - you don't have to worry about any of the quirks of ReJIT. The downside is that it is unrevertable - once it is set, you cannot change your mind. 2. At any point during the process lifetime with `ICorProfilerInfo4::RequestReJIT` or `ICorProfilerInfo10::RequestReJITWithInliners`. This approach means that you can modify functions in response to changing conditions, and you can revert the modified code if you decide you are done with it. See the other entries about ReJIT in this folder for more information. ## How to rewrite IL Hopefully this section will be fleshed out in the future. Right now we have some documentation in the archives at [Creating an IL-rewriting profiler](<./davbr-blog-archive/Creating an IL-rewriting profiler.md>), but there is no start to finish tutorial on IL rewriting. ## What if multiple profilers want to rewrite IL in a given process? The `ICorProfiler*` interfaces do not provide a way to multiplex different profilers, only one profiler can be loaded at a time. The [CLR Instrumentation Engine](https://github.com/microsoft/CLRInstrumentationEngine) project was created to address this limitation. If you are concerned about profiler multiplexing, head over and check out the project. A short summary is that it provides a higher level interface than the `ICorProfiler*` interfaces, and also provides a way for multiple profilers to interact in a well defined manner.
# IL Rewriting Basics ## Intro One of the common use cases of the `ICorProfiler*` interfaces is to perform IL rewriting. Some possible reasons a profiler would want to rewrite IL: - Inspecting interesting process state - Capturing exception state - Inspecting managed objects - Inspecting function arguments/return values - Injecting method hooks at the start/end of the method that call into another managed library There are two ways to rewrite IL 1. At Module load time with `ICorProfilerInfo::SetILFunctionBody` This approach has the benefit that it is 'set it and forget it'. You can replace the IL at module load, and the runtime will treat this new IL as if the module contained that IL - you don't have to worry about any of the quirks of ReJIT. The downside is that it is unrevertable - once it is set, you cannot change your mind. 2. At any point during the process lifetime with `ICorProfilerInfo4::RequestReJIT` or `ICorProfilerInfo10::RequestReJITWithInliners`. This approach means that you can modify functions in response to changing conditions, and you can revert the modified code if you decide you are done with it. See the other entries about ReJIT in this folder for more information. ## How to rewrite IL Hopefully this section will be fleshed out in the future. Right now we have some documentation in the archives at [Creating an IL-rewriting profiler](<./davbr-blog-archive/Creating an IL-rewriting profiler.md>), but there is no start to finish tutorial on IL rewriting. ## What if multiple profilers want to rewrite IL in a given process? The `ICorProfiler*` interfaces do not provide a way to multiplex different profilers, only one profiler can be loaded at a time. The [CLR Instrumentation Engine](https://github.com/microsoft/CLRInstrumentationEngine) project was created to address this limitation. If you are concerned about profiler multiplexing, head over and check out the project. A short summary is that it provides a higher level interface than the `ICorProfiler*` interfaces, and also provides a way for multiple profilers to interact in a well defined manner.
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/mi/Gset_fpreg.c
/* libunwind - a platform-independent unwind library Copyright (C) 2004-2005 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "libunwind_i.h" int unw_set_fpreg (unw_cursor_t *cursor, int regnum, unw_fpreg_t val) { struct cursor *c = (struct cursor *) cursor; return tdep_access_fpreg (c, regnum, &val, 1); }
/* libunwind - a platform-independent unwind library Copyright (C) 2004-2005 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "libunwind_i.h" int unw_set_fpreg (unw_cursor_t *cursor, int regnum, unw_fpreg_t val) { struct cursor *c = (struct cursor *) cursor; return tdep_access_fpreg (c, regnum, &val, 1); }
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/aarch64/Ginit_remote.c
/* libunwind - a platform-independent unwind library Copyright (C) 2008 CodeSourcery This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "init.h" #include "unwind_i.h" int unw_init_remote (unw_cursor_t *cursor, unw_addr_space_t as, void *as_arg) { #ifdef UNW_LOCAL_ONLY return -UNW_EINVAL; #else /* !UNW_LOCAL_ONLY */ struct cursor *c = (struct cursor *) cursor; if (!atomic_load(&tdep_init_done)) tdep_init (); Debug (1, "(cursor=%p)\n", c); c->dwarf.as = as; if (as == unw_local_addr_space) { c->dwarf.as_arg = c; c->uc = as_arg; } else { c->dwarf.as_arg = as_arg; c->uc = 0; } return common_init (c, 0); #endif /* !UNW_LOCAL_ONLY */ }
/* libunwind - a platform-independent unwind library Copyright (C) 2008 CodeSourcery This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "init.h" #include "unwind_i.h" int unw_init_remote (unw_cursor_t *cursor, unw_addr_space_t as, void *as_arg) { #ifdef UNW_LOCAL_ONLY return -UNW_EINVAL; #else /* !UNW_LOCAL_ONLY */ struct cursor *c = (struct cursor *) cursor; if (!atomic_load(&tdep_init_done)) tdep_init (); Debug (1, "(cursor=%p)\n", c); c->dwarf.as = as; if (as == unw_local_addr_space) { c->dwarf.as_arg = c; c->uc = as_arg; } else { c->dwarf.as_arg = as_arg; c->uc = 0; } return common_init (c, 0); #endif /* !UNW_LOCAL_ONLY */ }
-1
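The unw_init_remote shown in the record above only primes a cursor against a caller-supplied address space; attaching to and reading the target is left to the accessors. Below is a minimal sketch, assuming the libunwind-ptrace helpers (_UPT_accessors, _UPT_create, _UPT_destroy) and an already-ptrace-attached target, of how a debugger-style caller typically drives this entry point; print_remote_backtrace is an illustrative name, not part of libunwind.

#include <sys/types.h>
#include <stdio.h>
#include <libunwind.h>
#include <libunwind-ptrace.h>

/* Illustrative only: walk the stack of a process we are already
   ptrace-attached to, mirroring the remote (non-local) branch of
   unw_init_remote above. */
static int
print_remote_backtrace (pid_t pid)
{
  unw_addr_space_t as = unw_create_addr_space (&_UPT_accessors, 0);
  void *upt = _UPT_create (pid);   /* per-target accessor state */
  unw_cursor_t cursor;
  unw_word_t ip;

  if (as == NULL || upt == NULL || unw_init_remote (&cursor, as, upt) < 0)
    return -1;   /* unw_init_remote fails with -UNW_EINVAL if built local-only */

  do
    {
      unw_get_reg (&cursor, UNW_REG_IP, &ip);
      printf ("ip = %#lx\n", (unsigned long) ip);
    }
  while (unw_step (&cursor) > 0);

  _UPT_destroy (upt);
  unw_destroy_addr_space (as);
  return 0;
}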
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
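The query above describes the shape of the change: the pointer-sized OP_FCONV_TO_I/OP_RCONV_TO_I opcodes disappear from the back ends because the front end now selects the concrete width. The fragment below is a conceptual sketch of such a lowering, not the PR's actual diff; it assumes Mono's MonoInst type, the TARGET_SIZEOF_VOID_P macro, and the existing OP_FCONV_TO_I4/I8 and OP_RCONV_TO_I4/I8 opcodes.

/* Conceptual sketch: rewrite a float/r4 -> native-int conversion into
 * the pointer-width-specific opcode while building the IR, so back ends
 * only ever see the I4/I8 variants. */
static void
lower_native_int_fconv (MonoInst *ins)
{
	switch (ins->opcode) {
	case OP_FCONV_TO_I:
#if TARGET_SIZEOF_VOID_P == 8
		ins->opcode = OP_FCONV_TO_I8;
#else
		ins->opcode = OP_FCONV_TO_I4;
#endif
		break;
	case OP_RCONV_TO_I:
#if TARGET_SIZEOF_VOID_P == 8
		ins->opcode = OP_RCONV_TO_I8;
#else
		ins->opcode = OP_RCONV_TO_I4;
#endif
		break;
	default:
		break;
	}
}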
./src/native/eventpipe/ep.c
#include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #if !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES) #define EP_IMPL_EP_GETTER_SETTER #include "ep.h" #include "ep-config.h" #include "ep-config-internals.h" #include "ep-event.h" #include "ep-event-payload.h" #include "ep-event-source.h" #include "ep-provider.h" #include "ep-provider-internals.h" #include "ep-session.h" #include "ep-sample-profiler.h" static bool _ep_can_start_threads = false; static ep_rt_session_id_array_t _ep_deferred_enable_session_ids = { 0 }; static ep_rt_session_id_array_t _ep_deferred_disable_session_ids = { 0 }; static EventPipeIpcStreamFactorySuspendedPortsCallback _ep_ipc_stream_factory_suspended_ports_callback = NULL; static ep_rt_execution_checkpoint_array_t _ep_rundown_execution_checkpoints = { 0 }; /* * Forward declares of all static functions. */ // _Requires_lock_held (ep) static bool enabled (void); // _Requires_lock_held (ep) static uint32_t generate_session_index (void); // _Requires_lock_held (ep) static bool is_session_id_in_collection (EventPipeSessionID id); // _Requires_lock_held (ep) static EventPipeSessionID enable ( const ep_char8_t *output_path, uint32_t circular_buffer_size_in_mb, const EventPipeProviderConfiguration *providers, uint32_t providers_len, EventPipeSessionType session_type, EventPipeSerializationFormat format, bool rundown_requested, IpcStream *stream, EventPipeProviderCallbackDataQueue *provider_callback_data_queue, EventPipeSessionSynchronousCallback sync_callback, void *callback_additional_data); static void log_process_info_event (EventPipeEventSource *event_source); // _Requires_lock_held (ep) static void disable_holding_lock ( EventPipeSessionID id, EventPipeProviderCallbackDataQueue *provider_callback_data_queue); static void disable_helper (EventPipeSessionID id); static void write_event ( EventPipeEvent *ep_event, EventPipeEventPayload *payload, const uint8_t *activity_id, const uint8_t *related_activity_id); static void write_event_2 ( ep_rt_thread_handle_t thread, EventPipeEvent *ep_event, EventPipeEventPayload *payload, const uint8_t *activity_id, const uint8_t *related_activity_id, ep_rt_thread_handle_t event_thread, EventPipeStackContents *stack); static const ep_char8_t * get_next_config_value (const ep_char8_t *data, const ep_char8_t **start, const ep_char8_t **end); static ep_char8_t * get_next_config_value_as_utf8_string (const ep_char8_t **data); static uint64_t get_next_config_value_as_uint64_t (const ep_char8_t **data); static uint32_t get_next_config_value_as_uint32_t (const ep_char8_t **data); static void enable_default_session_via_env_variables (void); static bool session_requested_sampling (EventPipeSession *session); static bool ipc_stream_factory_any_suspended_ports (void); /* * Global volatile varaibles, only to be accessed through inlined volatile access functions. */ volatile EventPipeState _ep_state = EP_STATE_NOT_INITIALIZED; volatile uint32_t _ep_number_of_sessions = 0; volatile EventPipeSession *_ep_sessions [EP_MAX_NUMBER_OF_SESSIONS] = { 0 }; volatile uint64_t _ep_allow_write = 0; /* * EventFilterDescriptor. 
*/ EventFilterDescriptor * ep_event_filter_desc_alloc ( uint64_t ptr, uint32_t size, uint32_t type) { EventFilterDescriptor *instance = ep_rt_object_alloc (EventFilterDescriptor); ep_raise_error_if_nok (instance != NULL); ep_raise_error_if_nok (ep_event_filter_desc_init (instance, ptr, size, type) != NULL); ep_on_exit: return instance; ep_on_error: ep_event_filter_desc_free (instance); instance = NULL; ep_exit_error_handler (); } EventFilterDescriptor * ep_event_filter_desc_init ( EventFilterDescriptor *event_filter_desc, uint64_t ptr, uint32_t size, uint32_t type) { EP_ASSERT (event_filter_desc != NULL); event_filter_desc->ptr = ptr; event_filter_desc->size = size; event_filter_desc->type = type; return event_filter_desc; } void ep_event_filter_desc_fini (EventFilterDescriptor * filter_desc) { ; } void ep_event_filter_desc_free (EventFilterDescriptor * filter_desc) { ep_return_void_if_nok (filter_desc != NULL); ep_event_filter_desc_fini (filter_desc); ep_rt_object_free (filter_desc); } /* * EventPipeProviderCallbackDataQueue. */ EventPipeProviderCallbackDataQueue * ep_provider_callback_data_queue_init (EventPipeProviderCallbackDataQueue *provider_callback_data_queue) { EP_ASSERT (provider_callback_data_queue != NULL); ep_rt_provider_callback_data_queue_alloc (&provider_callback_data_queue->queue); return ep_rt_provider_callback_data_queue_is_valid (&provider_callback_data_queue->queue) ? provider_callback_data_queue : NULL; } void ep_provider_callback_data_queue_fini (EventPipeProviderCallbackDataQueue *provider_callback_data_queue) { ep_return_void_if_nok (provider_callback_data_queue != NULL); ep_rt_provider_callback_data_queue_free (&provider_callback_data_queue->queue); } /* * EventPipeProviderCallbackData. */ EventPipeProviderCallbackData * ep_provider_callback_data_alloc ( const ep_char8_t *filter_data, EventPipeCallback callback_function, void *callback_data, int64_t keywords, EventPipeEventLevel provider_level, bool enabled) { EventPipeProviderCallbackData *instance = ep_rt_object_alloc (EventPipeProviderCallbackData); ep_raise_error_if_nok (instance != NULL); ep_raise_error_if_nok (ep_provider_callback_data_init ( instance, filter_data, callback_function, callback_data, keywords, provider_level, enabled) != NULL); ep_on_exit: return instance; ep_on_error: ep_provider_callback_data_free (instance); instance = NULL; ep_exit_error_handler (); } EventPipeProviderCallbackData * ep_provider_callback_data_alloc_copy (EventPipeProviderCallbackData *provider_callback_data_src) { EventPipeProviderCallbackData *instance = ep_rt_object_alloc (EventPipeProviderCallbackData); ep_raise_error_if_nok (instance != NULL); if (provider_callback_data_src) { *instance = *provider_callback_data_src; instance->filter_data = ep_rt_utf8_string_dup (provider_callback_data_src->filter_data); } ep_on_exit: return instance; ep_on_error: ep_provider_callback_data_free (instance); instance = NULL; ep_exit_error_handler (); } EventPipeProviderCallbackData * ep_provider_callback_data_alloc_move (EventPipeProviderCallbackData *provider_callback_data_src) { EventPipeProviderCallbackData *instance = ep_rt_object_alloc (EventPipeProviderCallbackData); ep_raise_error_if_nok (instance != NULL); if (provider_callback_data_src) { *instance = *provider_callback_data_src; memset (provider_callback_data_src, 0, sizeof (*provider_callback_data_src)); } ep_on_exit: return instance; ep_on_error: ep_provider_callback_data_free (instance); instance = NULL; ep_exit_error_handler (); } EventPipeProviderCallbackData * 
ep_provider_callback_data_init ( EventPipeProviderCallbackData *provider_callback_data, const ep_char8_t *filter_data, EventPipeCallback callback_function, void *callback_data, int64_t keywords, EventPipeEventLevel provider_level, bool enabled) { EP_ASSERT (provider_callback_data != NULL); provider_callback_data->filter_data = ep_rt_utf8_string_dup (filter_data); provider_callback_data->callback_function = callback_function; provider_callback_data->callback_data = callback_data; provider_callback_data->keywords = keywords; provider_callback_data->provider_level = provider_level; provider_callback_data->enabled = enabled; return provider_callback_data; } EventPipeProviderCallbackData * ep_provider_callback_data_init_copy ( EventPipeProviderCallbackData *provider_callback_data_dst, EventPipeProviderCallbackData *provider_callback_data_src) { EP_ASSERT (provider_callback_data_dst != NULL); EP_ASSERT (provider_callback_data_src != NULL); *provider_callback_data_dst = *provider_callback_data_src; provider_callback_data_dst->filter_data = ep_rt_utf8_string_dup (provider_callback_data_src->filter_data); return provider_callback_data_dst; } EventPipeProviderCallbackData * ep_provider_callback_data_init_move ( EventPipeProviderCallbackData *provider_callback_data_dst, EventPipeProviderCallbackData *provider_callback_data_src) { EP_ASSERT (provider_callback_data_dst != NULL); EP_ASSERT (provider_callback_data_src != NULL); *provider_callback_data_dst = *provider_callback_data_src; memset (provider_callback_data_src, 0, sizeof (*provider_callback_data_src)); return provider_callback_data_dst; } void ep_provider_callback_data_fini (EventPipeProviderCallbackData *provider_callback_data) { ep_return_void_if_nok (provider_callback_data != NULL); ep_rt_utf8_string_free (provider_callback_data->filter_data); } void ep_provider_callback_data_free (EventPipeProviderCallbackData *provider_callback_data) { ep_return_void_if_nok (provider_callback_data != NULL); ep_provider_callback_data_fini (provider_callback_data); ep_rt_object_free (provider_callback_data); } /* * EventPipeProviderConfiguration. */ EventPipeProviderConfiguration * ep_provider_config_init ( EventPipeProviderConfiguration *provider_config, const ep_char8_t *provider_name, uint64_t keywords, EventPipeEventLevel logging_level, const ep_char8_t *filter_data) { EP_ASSERT (provider_config != NULL); EP_ASSERT (provider_name != NULL); provider_config->provider_name = provider_name; provider_config->keywords = keywords; provider_config->logging_level = logging_level; provider_config->filter_data = filter_data; // Runtime specific rundown provider configuration. ep_rt_provider_config_init (provider_config); return provider_config; } void ep_provider_config_fini (EventPipeProviderConfiguration *provider_config) { ; } /* * EventPipeExecutionCheckpoint. */ EventPipeExecutionCheckpoint * ep_execution_checkpoint_alloc ( const ep_char8_t *name, ep_timestamp_t timestamp) { EventPipeExecutionCheckpoint *instance = ep_rt_object_alloc (EventPipeExecutionCheckpoint); ep_raise_error_if_nok (instance != NULL); instance->name = name ? ep_rt_utf8_string_dup (name) : NULL; instance->timestamp = timestamp; ep_on_exit: return instance; ep_on_error: ep_execution_checkpoint_free (instance); instance = NULL; ep_exit_error_handler (); } void ep_execution_checkpoint_free (EventPipeExecutionCheckpoint *execution_checkpoint) { ep_return_void_if_nok (execution_checkpoint != NULL); ep_rt_object_free (execution_checkpoint); } /* * EventPipe. 
*/ static bool enabled (void) { ep_requires_lock_held (); return (ep_volatile_load_eventpipe_state_without_barrier () >= EP_STATE_INITIALIZED && ep_volatile_load_number_of_sessions_without_barrier () > 0); } static uint32_t generate_session_index (void) { ep_requires_lock_held (); for (uint32_t i = 0; i < EP_MAX_NUMBER_OF_SESSIONS; ++i) if (ep_volatile_load_session_without_barrier (i) == NULL) return i; return EP_MAX_NUMBER_OF_SESSIONS; } static bool is_session_id_in_collection (EventPipeSessionID session_id) { EP_ASSERT (session_id != 0); ep_requires_lock_held (); const EventPipeSession *const session = (EventPipeSession *)session_id; for (uint32_t i = 0; i < EP_MAX_NUMBER_OF_SESSIONS; ++i) { if (ep_volatile_load_session (i) == session) { EP_ASSERT (i == ep_session_get_index (session)); return true; } } return false; } static EventPipeSessionID enable ( const ep_char8_t *output_path, uint32_t circular_buffer_size_in_mb, const EventPipeProviderConfiguration *providers, uint32_t providers_len, EventPipeSessionType session_type, EventPipeSerializationFormat format, bool rundown_requested, IpcStream *stream, EventPipeProviderCallbackDataQueue *provider_callback_data_queue, EventPipeSessionSynchronousCallback sync_callback, void *callback_additional_data) { EP_ASSERT (format < EP_SERIALIZATION_FORMAT_COUNT); EP_ASSERT (session_type == EP_SESSION_TYPE_SYNCHRONOUS || circular_buffer_size_in_mb > 0); EP_ASSERT (providers_len > 0 && providers != NULL); ep_requires_lock_held (); EventPipeSession *session = NULL; EventPipeSessionID session_id = 0; uint32_t session_index = 0; ep_raise_error_if_nok (ep_volatile_load_eventpipe_state () == EP_STATE_INITIALIZED); session_index = generate_session_index (); ep_raise_error_if_nok (session_index < EP_MAX_NUMBER_OF_SESSIONS); session = ep_session_alloc ( session_index, output_path, stream, session_type, format, rundown_requested, circular_buffer_size_in_mb, providers, providers_len, sync_callback, callback_additional_data); ep_raise_error_if_nok (session != NULL && ep_session_is_valid (session)); session_id = (EventPipeSessionID)session; // Return if the index is invalid. if (ep_session_get_index (session) >= EP_MAX_NUMBER_OF_SESSIONS) { EP_ASSERT (!"Session index was out of range."); ep_raise_error (); } if (ep_volatile_load_number_of_sessions () >= EP_MAX_NUMBER_OF_SESSIONS) { EP_ASSERT (!"max number of sessions reached."); ep_raise_error (); } // Register the SampleProfiler the very first time (if supported). ep_sample_profiler_init (provider_callback_data_queue); // Enable the EventPipe EventSource. ep_raise_error_if_nok (ep_event_source_enable (ep_event_source_get (), session)); // Save the session. if (ep_volatile_load_session_without_barrier (ep_session_get_index (session)) != NULL) { EP_ASSERT (!"Attempting to override an existing session."); ep_raise_error (); } ep_volatile_store_session (ep_session_get_index (session), session); ep_volatile_store_allow_write (ep_volatile_load_allow_write () | ep_session_get_mask (session)); ep_volatile_store_number_of_sessions (ep_volatile_load_number_of_sessions () + 1); // Enable tracing. config_enable_disable (ep_config_get (), session, provider_callback_data_queue, true); if (session_requested_sampling (session)) ep_sample_profiler_enable (); ep_on_exit: ep_requires_lock_held (); return session_id; ep_on_error: ep_session_free (session); session_id = 0; ep_exit_error_handler (); } static void log_process_info_event (EventPipeEventSource *event_source) { // Get the managed command line. 
const ep_char8_t *cmd_line = ep_rt_diagnostics_command_line_get (); // Log the process information event. ep_event_source_send_process_info (event_source, cmd_line); } static void disable_holding_lock ( EventPipeSessionID id, EventPipeProviderCallbackDataQueue *provider_callback_data_queue) { EP_ASSERT (id != 0); EP_ASSERT (ep_volatile_load_number_of_sessions () > 0); ep_requires_lock_held (); if (is_session_id_in_collection (id)) { EventPipeSession *const session = (EventPipeSession *)id; if (session_requested_sampling (session)) { // Disable the profiler. ep_sample_profiler_disable (); } // Log the process information event. log_process_info_event (ep_event_source_get ()); // Disable session tracing. config_enable_disable (ep_config_get (), session, provider_callback_data_queue, false); ep_session_disable (session); // WriteAllBuffersToFile, and remove providers. // Do rundown before fully stopping the session unless rundown wasn't requested if (ep_session_get_rundown_requested (session) && _ep_can_start_threads) { ep_session_enable_rundown (session); // Set Rundown provider. EventPipeThread *const thread = ep_thread_get_or_create (); if (thread != NULL) { ep_thread_set_as_rundown_thread (thread, session); { config_enable_disable (ep_config_get (), session, provider_callback_data_queue, true); { ep_session_execute_rundown (session, &_ep_rundown_execution_checkpoints); } config_enable_disable(ep_config_get (), session, provider_callback_data_queue, false); } ep_thread_set_as_rundown_thread (thread, NULL); } else { EP_ASSERT (!"Failed to get or create the EventPipeThread for rundown events."); } } ep_volatile_store_allow_write (ep_volatile_load_allow_write () & ~(ep_session_get_mask (session))); // Remove the session from the array before calling ep_session_suspend_write_event. This way // we can guarantee that either the event write got the pointer and will complete // the write successfully, or it gets NULL and will bail. EP_ASSERT (ep_volatile_load_session (ep_session_get_index (session)) == session); ep_volatile_store_session (ep_session_get_index (session), NULL); ep_session_suspend_write_event (session); bool ignored; ep_session_write_all_buffers_to_file (session, &ignored); // Flush the buffers to the stream/file ep_volatile_store_number_of_sessions (ep_volatile_load_number_of_sessions () - 1); // Write a final sequence point to the file now that all events have // been emitted. ep_session_write_sequence_point_unbuffered (session); ep_session_free (session); // Providers can't be deleted during tracing because they may be needed when serializing the file. config_delete_deferred_providers(ep_config_get ()); } ep_requires_lock_held (); return; } static void disable_helper (EventPipeSessionID id) { ep_requires_lock_not_held (); if (_ep_can_start_threads) ep_rt_thread_setup (); if (id == 0) return; // Don't block GC during clean-up. 
EP_GCX_PREEMP_ENTER EventPipeProviderCallbackDataQueue callback_data_queue; EventPipeProviderCallbackData provider_callback_data; EventPipeProviderCallbackDataQueue *provider_callback_data_queue = ep_provider_callback_data_queue_init (&callback_data_queue); EP_LOCK_ENTER (section1) if (ep_volatile_load_number_of_sessions () > 0) disable_holding_lock (id, provider_callback_data_queue); EP_LOCK_EXIT (section1) while (ep_provider_callback_data_queue_try_dequeue (provider_callback_data_queue, &provider_callback_data)) { ep_rt_prepare_provider_invoke_callback (&provider_callback_data); provider_invoke_callback (&provider_callback_data); ep_provider_callback_data_fini (&provider_callback_data); } ep_provider_callback_data_queue_fini (provider_callback_data_queue); #ifdef EP_CHECKED_BUILD if (ep_volatile_load_number_of_sessions () == 0) EP_ASSERT (ep_rt_providers_validate_all_disabled ()); #endif EP_GCX_PREEMP_EXIT ep_on_exit: ep_requires_lock_not_held (); return; ep_on_error: ep_exit_error_handler (); } static void write_event ( EventPipeEvent *ep_event, EventPipeEventPayload *payload, const uint8_t *activity_id, const uint8_t *related_activity_id) { EP_ASSERT (ep_event != NULL); EP_ASSERT (payload != NULL); // We can't proceed if tracing is not initialized. ep_return_void_if_nok (ep_volatile_load_eventpipe_state () >= EP_STATE_INITIALIZED); // Exit early if the event is not enabled. ep_return_void_if_nok (ep_event_is_enabled (ep_event)); // Get current thread. ep_rt_thread_handle_t thread = ep_rt_thread_get_handle (); // If the activity id isn't specified AND we are in a eventpipe thread, pull it from the current thread. // If pThread is NULL (we aren't in writing from a managed thread) then activity_id can be NULL if (activity_id == NULL && thread != NULL) activity_id = ep_thread_get_activity_id_cref (ep_thread_get_activity_id_handle ()); write_event_2 ( thread, ep_event, payload, activity_id, related_activity_id, NULL, NULL); } static void write_event_2 ( ep_rt_thread_handle_t thread, EventPipeEvent *ep_event, EventPipeEventPayload *payload, const uint8_t *activity_id, const uint8_t *related_activity_id, ep_rt_thread_handle_t event_thread, EventPipeStackContents *stack) { EP_ASSERT (ep_event != NULL); EP_ASSERT (payload != NULL); // We can't proceed if tracing is not initialized. ep_return_void_if_nok (ep_volatile_load_eventpipe_state () >= EP_STATE_INITIALIZED); EventPipeThread *const current_thread = ep_thread_get_or_create (); if (!current_thread) { EP_ASSERT (!"Failed to get or create an EventPipeThread."); return; } if (ep_thread_is_rundown_thread (current_thread)) { EventPipeSession *const rundown_session = ep_thread_get_rundown_session (current_thread); EP_ASSERT (rundown_session != NULL); EP_ASSERT (thread != NULL); uint8_t *data = ep_event_payload_get_flat_data (payload); if (thread != NULL && rundown_session != NULL && data != NULL) { ep_session_write_event ( rundown_session, thread, ep_event, payload, activity_id, related_activity_id, event_thread, stack); } } else { for (uint32_t i = 0; i < EP_MAX_NUMBER_OF_SESSIONS; ++i) { if ((ep_volatile_load_allow_write () & ((uint64_t)1 << i)) == 0) continue; // Now that we know this session is probably live we pay the perf cost of the memory barriers // Setting this flag lets a thread trying to do a concurrent disable that it is not safe to delete // session ID i. The if check above also ensures that once the session is unpublished this thread // will eventually stop ever storing ID i into the WriteInProgress flag. 
This is important to // guarantee termination of the YIELD_WHILE loop in SuspendWriteEvents. ep_thread_set_session_write_in_progress (current_thread, i); { EventPipeSession *const session = ep_volatile_load_session (i); // Disable is allowed to set s_pSessions[i] = NULL at any time and that may have occured in between // the check and the load if (session != NULL) { ep_session_write_event ( session, thread, ep_event, payload, activity_id, related_activity_id, event_thread, stack); } } // Do not reference session past this point, we are signaling Disable() that it is safe to // delete it ep_thread_set_session_write_in_progress (current_thread, UINT32_MAX); } } } static const ep_char8_t * get_next_config_value (const ep_char8_t *data, const ep_char8_t **start, const ep_char8_t **end) { EP_ASSERT (data != NULL); EP_ASSERT (start != NULL); EP_ASSERT (end != NULL); *start = data; while (*data != '\0' && *data != ':') data++; *end = data; return *data != '\0' ? ++data : NULL; } static ep_char8_t * get_next_config_value_as_utf8_string (const ep_char8_t **data) { EP_ASSERT (data != NULL); ep_char8_t *buffer = NULL; const ep_char8_t *start = NULL; const ep_char8_t *end = NULL; *data = get_next_config_value (*data, &start, &end); ptrdiff_t byte_len = end - start; if (byte_len != 0) buffer = ep_rt_utf8_string_dup_range(start, end); return buffer; } static uint64_t get_next_config_value_as_uint64_t (const ep_char8_t **data) { EP_ASSERT (data != NULL); ep_char8_t *value_as_utf8 = get_next_config_value_as_utf8_string (data); uint64_t value = UINT64_MAX; if (value_as_utf8) { value = (uint64_t)strtoull (value_as_utf8, NULL, 16); ep_rt_utf8_string_free (value_as_utf8); } return value; } static uint32_t get_next_config_value_as_uint32_t (const ep_char8_t **data) { EP_ASSERT (data != NULL); ep_char8_t *value_as_utf8 = get_next_config_value_as_utf8_string (data); uint32_t value = UINT32_MAX; if (value_as_utf8) { value = (uint32_t)strtoul (value_as_utf8, NULL, 10); ep_rt_utf8_string_free (value_as_utf8); } return value; } // // If EventPipe environment variables are specified, parse them and start a session. // static void enable_default_session_via_env_variables (void) { ep_char8_t *ep_config = NULL; ep_char8_t *ep_config_output_path = NULL; uint32_t ep_circular_mb = 0; const ep_char8_t *output_path = NULL; if (ep_rt_config_value_get_enable ()) { ep_config = ep_rt_config_value_get_config (); ep_config_output_path = ep_rt_config_value_get_output_path (); ep_char8_t pidStr[24]; ep_rt_utf8_string_snprintf(pidStr, ARRAY_SIZE (pidStr), "%u", (unsigned)ep_rt_current_process_get_id()); while (true) { if (ep_rt_utf8_string_replace(&ep_config_output_path, "{pid}", pidStr)) { // In case there is a second use of {pid} in the output path continue; } // No more instances of {pid} in the OutputPath break; } ep_circular_mb = ep_rt_config_value_get_circular_mb (); output_path = NULL; output_path = ep_config_output_path ? ep_config_output_path : "trace.nettrace"; ep_circular_mb = ep_circular_mb > 0 ? ep_circular_mb : 1; uint64_t session_id = ep_enable_2 ( output_path, ep_circular_mb, ep_config, ep_rt_config_value_get_output_streaming () ? 
EP_SESSION_TYPE_FILESTREAM : EP_SESSION_TYPE_FILE, EP_SERIALIZATION_FORMAT_NETTRACE_V4, true, NULL, NULL, NULL); if (session_id) ep_start_streaming (session_id); } ep_rt_utf8_string_free (ep_config_output_path); ep_rt_utf8_string_free (ep_config); return; } static bool session_requested_sampling (EventPipeSession *session) { EP_ASSERT (session != NULL); return ep_rt_session_provider_list_find_by_name (ep_session_provider_list_get_providers_cref (ep_session_get_providers (session)), ep_config_get_sample_profiler_provider_name_utf8 ()); } static bool ipc_stream_factory_any_suspended_ports (void) { return _ep_ipc_stream_factory_suspended_ports_callback ? _ep_ipc_stream_factory_suspended_ports_callback () : false; } #ifdef EP_CHECKED_BUILD void ep_requires_lock_held (void) { ep_rt_config_requires_lock_held (); } void ep_requires_lock_not_held (void) { ep_rt_config_requires_lock_not_held (); } #endif EventPipeSessionID ep_enable ( const ep_char8_t *output_path, uint32_t circular_buffer_size_in_mb, const EventPipeProviderConfiguration *providers, uint32_t providers_len, EventPipeSessionType session_type, EventPipeSerializationFormat format, bool rundown_requested, IpcStream *stream, EventPipeSessionSynchronousCallback sync_callback, void *callback_additional_data) { ep_return_zero_if_nok (format < EP_SERIALIZATION_FORMAT_COUNT); ep_return_zero_if_nok (session_type == EP_SESSION_TYPE_SYNCHRONOUS || circular_buffer_size_in_mb > 0); ep_return_zero_if_nok (providers_len > 0 && providers != NULL); ep_requires_lock_not_held (); // If the state or arguments are invalid, bail here. if ((session_type == EP_SESSION_TYPE_FILE || session_type == EP_SESSION_TYPE_FILESTREAM) && output_path == NULL) return 0; if (session_type == EP_SESSION_TYPE_IPCSTREAM && stream == NULL) return 0; EventPipeSessionID session_id = 0; EventPipeProviderCallbackDataQueue callback_data_queue; EventPipeProviderCallbackData provider_callback_data; EventPipeProviderCallbackDataQueue *provider_callback_data_queue = ep_provider_callback_data_queue_init (&callback_data_queue); EP_LOCK_ENTER (section1) session_id = enable ( output_path, circular_buffer_size_in_mb, providers, providers_len, session_type, format, rundown_requested, stream, provider_callback_data_queue, sync_callback, callback_additional_data); EP_LOCK_EXIT (section1) while (ep_provider_callback_data_queue_try_dequeue (provider_callback_data_queue, &provider_callback_data)) { ep_rt_prepare_provider_invoke_callback (&provider_callback_data); provider_invoke_callback (&provider_callback_data); ep_provider_callback_data_fini (&provider_callback_data); } ep_on_exit: ep_provider_callback_data_queue_fini (provider_callback_data_queue); ep_requires_lock_not_held (); return session_id; ep_on_error: session_id = 0; ep_exit_error_handler (); } EventPipeSessionID ep_enable_2 ( const ep_char8_t *output_path, uint32_t circular_buffer_size_in_mb, const ep_char8_t *providers_config, EventPipeSessionType session_type, EventPipeSerializationFormat format, bool rundown_requested, IpcStream *stream, EventPipeSessionSynchronousCallback sync_callback, void *callback_additional_data) { const ep_char8_t *providers_config_to_parse = providers_config; int32_t providers_len = 0; EventPipeProviderConfiguration *providers = NULL; int32_t current_provider = 0; uint64_t session_id = 0; // If no specific providers config is used, enable EventPipe session // with the default provider configurations. 
if (!providers_config_to_parse || *providers_config_to_parse == '\0') { providers_len = 3; providers = ep_rt_object_array_alloc (EventPipeProviderConfiguration, providers_len); ep_raise_error_if_nok (providers != NULL); ep_provider_config_init (&providers [0], ep_rt_utf8_string_dup (ep_config_get_public_provider_name_utf8 ()), 0x4c14fccbd, EP_EVENT_LEVEL_VERBOSE, NULL); ep_provider_config_init (&providers [1], ep_rt_utf8_string_dup (ep_config_get_private_provider_name_utf8 ()), 0x4002000b, EP_EVENT_LEVEL_VERBOSE, NULL); ep_provider_config_init (&providers [2], ep_rt_utf8_string_dup (ep_config_get_sample_profiler_provider_name_utf8 ()), 0x0, EP_EVENT_LEVEL_VERBOSE, NULL); } else { // Count number of providers to parse. while (*providers_config_to_parse != '\0') { providers_len += 1; while (*providers_config_to_parse != '\0' && *providers_config_to_parse != ',') providers_config_to_parse++; if (*providers_config_to_parse != '\0') providers_config_to_parse++; } providers_config_to_parse = providers_config; providers = ep_rt_object_array_alloc (EventPipeProviderConfiguration, providers_len); ep_raise_error_if_nok (providers != NULL); while (*providers_config_to_parse != '\0') { ep_char8_t *provider_name = NULL; uint64_t keyword_mask = 0; EventPipeEventLevel level = EP_EVENT_LEVEL_VERBOSE; ep_char8_t *args = NULL; if (providers_config_to_parse && *providers_config_to_parse != ',') { provider_name = get_next_config_value_as_utf8_string (&providers_config_to_parse); ep_raise_error_if_nok (provider_name != NULL); } if (providers_config_to_parse && *providers_config_to_parse != ',') keyword_mask = get_next_config_value_as_uint64_t (&providers_config_to_parse); if (providers_config_to_parse && *providers_config_to_parse != ',') level = (EventPipeEventLevel)get_next_config_value_as_uint32_t (&providers_config_to_parse); if (providers_config_to_parse && *providers_config_to_parse != ',') args = get_next_config_value_as_utf8_string (&providers_config_to_parse); ep_provider_config_init (&providers [current_provider++], provider_name, keyword_mask, level, args); if (!providers_config_to_parse) break; while (*providers_config_to_parse != '\0' && *providers_config_to_parse != ',') providers_config_to_parse++; if (*providers_config_to_parse != '\0') providers_config_to_parse++; } } session_id = ep_enable ( output_path, circular_buffer_size_in_mb, providers, providers_len, session_type, format, rundown_requested, stream, sync_callback, callback_additional_data); ep_on_exit: if (providers) { for (int32_t i = 0; i < providers_len; ++i) { ep_provider_config_fini (&providers [i]); ep_rt_utf8_string_free ((ep_char8_t *)providers [i].provider_name); ep_rt_utf8_string_free ((ep_char8_t *)providers [i].filter_data); } ep_rt_object_array_free (providers); } return session_id; ep_on_error: ep_exit_error_handler (); } void ep_disable (EventPipeSessionID id) { ep_requires_lock_not_held (); // ep_disable is called synchronously since the diagnostics server is // single threaded. HOWEVER, if the runtime was suspended during startup, // then ep_finish_init might not have executed yet. Disabling a session // needs to either happen before we resume or after initialization. We briefly take the // lock to check _ep_can_start_threads to check whether we've finished initialization. We // also check whether we are still suspended in which case we can safely disable the session // without deferral. 
EP_LOCK_ENTER (section1) if (!_ep_can_start_threads && !ipc_stream_factory_any_suspended_ports ()) { ep_rt_session_id_array_append (&_ep_deferred_disable_session_ids, id); ep_raise_error_holding_lock (section1); } EP_LOCK_EXIT (section1) disable_helper (id); ep_on_exit: ep_requires_lock_not_held (); return; ep_on_error: ep_exit_error_handler (); } EventPipeSession * ep_get_session (EventPipeSessionID session_id) { ep_requires_lock_not_held (); EP_LOCK_ENTER (section1) if (ep_volatile_load_eventpipe_state () == EP_STATE_NOT_INITIALIZED) { EP_ASSERT (!"EventPipe::GetSession invoked before EventPipe was initialized."); ep_raise_error_holding_lock (section1); } ep_raise_error_if_nok_holding_lock (is_session_id_in_collection (session_id), section1); EP_LOCK_EXIT (section1) ep_on_exit: ep_requires_lock_not_held (); return (EventPipeSession *)session_id; ep_on_error: session_id = 0; ep_exit_error_handler (); } bool ep_is_session_enabled (EventPipeSessionID session_id) { ep_return_false_if_nok (session_id != 0); return ep_volatile_load_session (ep_session_get_index ((EventPipeSession *)session_id)) != NULL; } void ep_start_streaming (EventPipeSessionID session_id) { ep_requires_lock_not_held (); EP_LOCK_ENTER (section1) ep_raise_error_if_nok_holding_lock (is_session_id_in_collection (session_id), section1); if (_ep_can_start_threads) ep_session_start_streaming ((EventPipeSession *)session_id); else ep_rt_session_id_array_append (&_ep_deferred_enable_session_ids, session_id); EP_LOCK_EXIT (section1) ep_on_exit: ep_requires_lock_not_held (); return; ep_on_error: ep_exit_error_handler (); } bool ep_enabled (void) { return (ep_volatile_load_eventpipe_state () >= EP_STATE_INITIALIZED && ep_volatile_load_number_of_sessions () > 0); } EventPipeProvider * ep_create_provider ( const ep_char8_t *provider_name, EventPipeCallback callback_func, EventPipeCallbackDataFree callback_data_free_func, void *callback_data) { ep_return_null_if_nok (provider_name != NULL); ep_requires_lock_not_held (); EventPipeProvider *provider = NULL; EventPipeProviderCallbackDataQueue data_queue; EventPipeProviderCallbackData provider_callback_data; EventPipeProviderCallbackDataQueue *provider_callback_data_queue = ep_provider_callback_data_queue_init (&data_queue); EP_LOCK_ENTER (section1) provider = config_create_provider (ep_config_get (), provider_name, callback_func, callback_data_free_func, callback_data, provider_callback_data_queue); ep_raise_error_if_nok_holding_lock (provider != NULL, section1); EP_LOCK_EXIT (section1) while (ep_provider_callback_data_queue_try_dequeue (provider_callback_data_queue, &provider_callback_data)) { ep_rt_prepare_provider_invoke_callback (&provider_callback_data); provider_invoke_callback (&provider_callback_data); ep_provider_callback_data_fini (&provider_callback_data); } ep_rt_notify_profiler_provider_created (provider); ep_on_exit: ep_provider_callback_data_queue_fini (provider_callback_data_queue); ep_requires_lock_not_held (); return provider; ep_on_error: ep_delete_provider (provider); provider = NULL; ep_exit_error_handler (); } void ep_delete_provider (EventPipeProvider *provider) { ep_return_void_if_nok (provider != NULL); ep_requires_lock_not_held (); // Take the lock to make sure that we don't have a race // between disabling tracing and deleting a provider // where we hold a provider after tracing has been disabled. EP_LOCK_ENTER (section1) if (enabled ()) { // Save the provider until the end of the tracing session. 
ep_provider_set_delete_deferred (provider, true); } else { config_delete_provider (ep_config_get (), provider); } EP_LOCK_EXIT (section1) ep_on_exit: ep_requires_lock_not_held (); return; ep_on_error: ep_exit_error_handler (); } EventPipeProvider * ep_get_provider (const ep_char8_t *provider_name) { ep_return_null_if_nok (provider_name != NULL); ep_requires_lock_not_held (); EventPipeProvider *provider = NULL; EP_LOCK_ENTER (section1) provider = config_get_provider (ep_config_get (), provider_name); ep_raise_error_if_nok_holding_lock (provider != NULL, section1); EP_LOCK_EXIT (section1) ep_on_exit: ep_requires_lock_not_held (); return provider; ep_on_error: provider = NULL; ep_exit_error_handler (); } bool ep_add_provider_to_session ( EventPipeSessionProvider *provider, EventPipeSession *session) { ep_return_false_if_nok (provider != NULL && session != NULL); ep_requires_lock_not_held (); bool result = false; EP_LOCK_ENTER (section1) ep_raise_error_if_nok_holding_lock (ep_session_add_session_provider (session, provider), section1); EP_LOCK_EXIT (section1) result = true; ep_on_exit: ep_requires_lock_not_held (); return result; ep_on_error: EP_ASSERT (!result); ep_exit_error_handler (); } void ep_init (void) { ep_requires_lock_not_held (); ep_rt_init (); if (ep_volatile_load_eventpipe_state () != EP_STATE_NOT_INITIALIZED) { EP_ASSERT (!"EventPipe already initialized."); return; } ep_thread_init (); for (uint32_t i = 0; i < EP_MAX_NUMBER_OF_SESSIONS; ++i) ep_volatile_store_session (i, NULL); ep_config_init (ep_config_get ()); ep_event_source_init (ep_event_source_get ()); // This calls into auto-generated code to initialize the runtime specific providers // and events so that the EventPipe configuration lock isn't taken at runtime ep_rt_init_providers_and_events (); // Set the sampling rate for the sample profiler. const uint32_t default_profiler_sample_rate_in_nanoseconds = 1000000; // 1 msec. 
ep_sample_profiler_set_sampling_rate (default_profiler_sample_rate_in_nanoseconds); ep_rt_session_id_array_alloc (&_ep_deferred_enable_session_ids); ep_rt_session_id_array_alloc (&_ep_deferred_disable_session_ids); ep_rt_execution_checkpoint_array_alloc (&_ep_rundown_execution_checkpoints); EP_LOCK_ENTER (section1) ep_volatile_store_eventpipe_state (EP_STATE_INITIALIZED); EP_LOCK_EXIT (section1) enable_default_session_via_env_variables (); ep_on_exit: ep_requires_lock_not_held (); return; ep_on_error: ep_exit_error_handler (); } void ep_finish_init (void) { ep_requires_lock_not_held (); ep_rt_init_finish (); // Enable streaming for any deferred sessions EP_LOCK_ENTER (section1) _ep_can_start_threads = true; if (ep_volatile_load_eventpipe_state () == EP_STATE_INITIALIZED) { ep_rt_session_id_array_iterator_t deferred_session_ids_iterator = ep_rt_session_id_array_iterator_begin (&_ep_deferred_enable_session_ids); while (!ep_rt_session_id_array_iterator_end (&_ep_deferred_enable_session_ids, &deferred_session_ids_iterator)) { EventPipeSessionID session_id = ep_rt_session_id_array_iterator_value (&deferred_session_ids_iterator); if (is_session_id_in_collection (session_id)) ep_session_start_streaming ((EventPipeSession *)session_id); ep_rt_session_id_array_iterator_next (&deferred_session_ids_iterator); } ep_rt_session_id_array_clear (&_ep_deferred_enable_session_ids); } ep_sample_profiler_can_start_sampling (); EP_LOCK_EXIT (section1) // release lock in case someone tried to disable while we held it // _ep_deferred_disable_session_ids is now safe to access without the // lock since we've set _ep_can_start_threads to true inside the lock. Anyone // who was waiting on that lock will see that state and not mutate the defer list if (ep_volatile_load_eventpipe_state () == EP_STATE_INITIALIZED) { ep_rt_session_id_array_iterator_t deferred_disable_session_ids_iterator = ep_rt_session_id_array_iterator_begin (&_ep_deferred_disable_session_ids); while (!ep_rt_session_id_array_iterator_end (&_ep_deferred_disable_session_ids, &deferred_disable_session_ids_iterator)) { EventPipeSessionID session_id = ep_rt_session_id_array_iterator_value (&deferred_disable_session_ids_iterator); disable_helper (session_id); ep_rt_session_id_array_iterator_next (&deferred_disable_session_ids_iterator); } ep_rt_session_id_array_clear (&_ep_deferred_disable_session_ids); } ep_on_exit: ep_requires_lock_not_held (); return; ep_on_error: ep_exit_error_handler (); } void ep_shutdown (void) { ep_requires_lock_not_held (); ep_return_void_if_nok (ep_volatile_load_eventpipe_state () != EP_STATE_SHUTTING_DOWN); ep_return_void_if_nok (!ep_rt_process_detach ()); ep_return_void_if_nok (ep_volatile_load_eventpipe_state () == EP_STATE_INITIALIZED); EP_LOCK_ENTER (section1) ep_volatile_store_eventpipe_state (EP_STATE_SHUTTING_DOWN); EP_LOCK_EXIT (section1) for (uint32_t i = 0; i < EP_MAX_NUMBER_OF_SESSIONS; ++i) { EventPipeSession *session = ep_volatile_load_session (i); if (session) ep_disable ((EventPipeSessionID)session); } ep_rt_execution_checkpoint_array_iterator_t checkpoint_iterator; EventPipeExecutionCheckpoint *checkpoint; checkpoint_iterator = ep_rt_execution_checkpoint_array_iterator_begin (&_ep_rundown_execution_checkpoints); while (!ep_rt_execution_checkpoint_array_iterator_end (&_ep_rundown_execution_checkpoints, &checkpoint_iterator)) { checkpoint = ep_rt_execution_checkpoint_array_iterator_value (&checkpoint_iterator); if (checkpoint) ep_rt_utf8_string_free (checkpoint->name); ep_rt_execution_checkpoint_array_iterator_next 
(&checkpoint_iterator); } ep_rt_execution_checkpoint_array_free (&_ep_rundown_execution_checkpoints); ep_rt_session_id_array_free (&_ep_deferred_enable_session_ids); ep_rt_session_id_array_free (&_ep_deferred_disable_session_ids); ep_thread_fini (); // dotnet/coreclr: issue 24850: EventPipe shutdown race conditions // Deallocating providers/events here might cause AV if a WriteEvent // was to occur. Thus, we are not doing this cleanup. /*EP_LOCK_ENTER (section1) ep_sample_profiler_shutdown (); EP_LOCK_EXIT (section1)*/ // // Remove EventPipeEventSource first since it tries to use the data structures that we remove below. // // We need to do this after disabling sessions since those try to write to EventPipeEventSource. // ep_event_source_fini (ep_event_source_get ()); // ep_config_shutdown (ep_config_get ()); ep_on_exit: ep_requires_lock_not_held (); ep_rt_shutdown (); return; ep_on_error: ep_exit_error_handler (); } EventPipeEventMetadataEvent * ep_build_event_metadata_event ( EventPipeEventInstance *event_instance, uint32_t metadata_id) { ep_return_null_if_nok (event_instance != NULL); return ep_config_build_event_metadata_event (ep_config_get (), event_instance, metadata_id); } void ep_write_event ( EventPipeEvent *ep_event, uint8_t *data, uint32_t data_len, const uint8_t *activity_id, const uint8_t *related_activity_id) { ep_return_void_if_nok (ep_event != NULL); EventPipeEventPayload payload; EventPipeEventPayload *event_payload = ep_event_payload_init (&payload, data, data_len); write_event (ep_event, event_payload, activity_id, related_activity_id); ep_event_payload_fini (event_payload); } void ep_write_event_2 ( EventPipeEvent *ep_event, EventData *event_data, uint32_t event_data_len, const uint8_t *activity_id, const uint8_t *related_activity_id) { ep_return_void_if_nok (ep_event != NULL); EventPipeEventPayload payload; EventPipeEventPayload *event_payload = ep_event_payload_init_2 (&payload, event_data, event_data_len); write_event (ep_event, event_payload, activity_id, related_activity_id); ep_event_payload_fini (event_payload); } void ep_write_sample_profile_event ( ep_rt_thread_handle_t sampling_thread, EventPipeEvent *ep_event, ep_rt_thread_handle_t target_thread, EventPipeStackContents *stack, uint8_t *event_data, uint32_t event_data_len) { ep_return_void_if_nok (ep_event != NULL); EventPipeEventPayload payload; EventPipeEventPayload *event_payload = ep_event_payload_init (&payload, event_data, event_data_len); write_event_2 ( sampling_thread, ep_event, event_payload, NULL, NULL, target_thread, stack); ep_event_payload_fini (event_payload); } EventPipeEventInstance * ep_get_next_event (EventPipeSessionID session_id) { ep_requires_lock_not_held (); // Only fetch the next event if a tracing session exists. // The buffer manager is not disposed until the process is shutdown. EventPipeSession *const session = ep_get_session (session_id); return session ? ep_session_get_next_event (session) : NULL; } EventPipeWaitHandle ep_get_wait_handle (EventPipeSessionID session_id) { EventPipeSession *const session = ep_get_session (session_id); return session ? 
ep_rt_wait_event_get_wait_handle (ep_session_get_wait_event (session)) : 0; } bool ep_add_rundown_execution_checkpoint ( const ep_char8_t *name, ep_timestamp_t timestamp) { ep_requires_lock_not_held (); bool result = false; EventPipeExecutionCheckpoint *exec_checkpoint = ep_execution_checkpoint_alloc (name, timestamp); ep_raise_error_if_nok (exec_checkpoint != NULL); EP_LOCK_ENTER (section1) ep_raise_error_if_nok_holding_lock (ep_rt_execution_checkpoint_array_append (&_ep_rundown_execution_checkpoints, exec_checkpoint), section1); exec_checkpoint = NULL; EP_LOCK_EXIT (section1) result = true; ep_on_exit: ep_requires_lock_not_held (); return result; ep_on_error: ep_execution_checkpoint_free (exec_checkpoint); EP_ASSERT (result == false); ep_exit_error_handler (); } /* * EventPipeProviderCallbackDataQueue. */ bool ep_provider_callback_data_queue_enqueue ( EventPipeProviderCallbackDataQueue *provider_callback_data_queue, EventPipeProviderCallbackData *provider_callback_data) { EP_ASSERT (provider_callback_data_queue != NULL); EventPipeProviderCallbackData *provider_callback_data_move = ep_provider_callback_data_alloc_move (provider_callback_data); ep_raise_error_if_nok (provider_callback_data_move != NULL); ep_raise_error_if_nok (ep_rt_provider_callback_data_queue_push_tail (ep_provider_callback_data_queue_get_queue_ref (provider_callback_data_queue), provider_callback_data_move)); return true; ep_on_error: ep_provider_callback_data_free (provider_callback_data_move); return false; } bool ep_provider_callback_data_queue_try_dequeue ( EventPipeProviderCallbackDataQueue *provider_callback_data_queue, EventPipeProviderCallbackData *provider_callback_data) { EP_ASSERT (provider_callback_data_queue != NULL); ep_return_false_if_nok (!ep_rt_provider_callback_data_queue_is_empty (ep_provider_callback_data_queue_get_queue_ref (provider_callback_data_queue))); EventPipeProviderCallbackData *value = NULL; ep_raise_error_if_nok (ep_rt_provider_callback_data_queue_pop_head (ep_provider_callback_data_queue_get_queue_ref (provider_callback_data_queue), &value)); ep_provider_callback_data_init_move (provider_callback_data, value); ep_provider_callback_data_free (value); return true; ep_on_error: return false; } /* * EventPipeSystemTime. */ void ep_system_time_set ( EventPipeSystemTime *system_time, uint16_t year, uint16_t month, uint16_t day_of_week, uint16_t day, uint16_t hour, uint16_t minute, uint16_t second, uint16_t milliseconds) { EP_ASSERT (system_time != NULL); system_time->year = year; system_time->month = month; system_time->day_of_week = day_of_week; system_time->day = day; system_time->hour = hour; system_time->minute = minute; system_time->second = second; system_time->milliseconds = milliseconds; } void ep_ipc_stream_factory_callback_set (EventPipeIpcStreamFactorySuspendedPortsCallback suspended_ports_callback) { _ep_ipc_stream_factory_suspended_ports_callback = suspended_ports_callback; } #endif /* !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES) */ #endif /* ENABLE_PERFTRACING */ #ifndef EP_INCLUDE_SOURCE_FILES extern const char quiet_linker_empty_file_warning_eventpipe; const char quiet_linker_empty_file_warning_eventpipe = 0; #endif
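A note on the parsing logic in the before_content above: ep_enable_2 accepts a comma-separated provider list, each entry shaped name:keywords:level:filterargs, with keywords read as hex (strtoull, base 16) and level as decimal; enable_default_session_via_env_variables feeds it runtime-supplied values (with {pid} substituted into the output path) and then calls ep_start_streaming. Below is a minimal sketch of driving the same entry point directly: the declarations come from this file, while the provider name, keyword mask, and level value are illustrative.

/* Illustrative caller, mirroring what enable_default_session_via_env_variables
 * does with runtime-supplied configuration values. */
static void
start_default_file_session (void)
{
	/* name : keywords (hex) : level (5 assumed = verbose); filter args omitted. */
	const ep_char8_t *config = "Microsoft-Windows-DotNETRuntime:4c14fccbd:5";

	EventPipeSessionID session_id = ep_enable_2 (
		"trace.nettrace",                     /* output_path */
		64,                                   /* circular_buffer_size_in_mb */
		config,                               /* parsed by get_next_config_value_* */
		EP_SESSION_TYPE_FILE,
		EP_SERIALIZATION_FORMAT_NETTRACE_V4,
		true,                                 /* rundown_requested */
		NULL,                                 /* stream (unused for file sessions) */
		NULL,                                 /* sync_callback */
		NULL);                                /* callback_additional_data */

	if (session_id)
		ep_start_streaming (session_id);
}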
#include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #if !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES) #define EP_IMPL_EP_GETTER_SETTER #include "ep.h" #include "ep-config.h" #include "ep-config-internals.h" #include "ep-event.h" #include "ep-event-payload.h" #include "ep-event-source.h" #include "ep-provider.h" #include "ep-provider-internals.h" #include "ep-session.h" #include "ep-sample-profiler.h" static bool _ep_can_start_threads = false; static ep_rt_session_id_array_t _ep_deferred_enable_session_ids = { 0 }; static ep_rt_session_id_array_t _ep_deferred_disable_session_ids = { 0 }; static EventPipeIpcStreamFactorySuspendedPortsCallback _ep_ipc_stream_factory_suspended_ports_callback = NULL; static ep_rt_execution_checkpoint_array_t _ep_rundown_execution_checkpoints = { 0 }; /* * Forward declares of all static functions. */ // _Requires_lock_held (ep) static bool enabled (void); // _Requires_lock_held (ep) static uint32_t generate_session_index (void); // _Requires_lock_held (ep) static bool is_session_id_in_collection (EventPipeSessionID id); // _Requires_lock_held (ep) static EventPipeSessionID enable ( const ep_char8_t *output_path, uint32_t circular_buffer_size_in_mb, const EventPipeProviderConfiguration *providers, uint32_t providers_len, EventPipeSessionType session_type, EventPipeSerializationFormat format, bool rundown_requested, IpcStream *stream, EventPipeProviderCallbackDataQueue *provider_callback_data_queue, EventPipeSessionSynchronousCallback sync_callback, void *callback_additional_data); static void log_process_info_event (EventPipeEventSource *event_source); // _Requires_lock_held (ep) static void disable_holding_lock ( EventPipeSessionID id, EventPipeProviderCallbackDataQueue *provider_callback_data_queue); static void disable_helper (EventPipeSessionID id); static void write_event ( EventPipeEvent *ep_event, EventPipeEventPayload *payload, const uint8_t *activity_id, const uint8_t *related_activity_id); static void write_event_2 ( ep_rt_thread_handle_t thread, EventPipeEvent *ep_event, EventPipeEventPayload *payload, const uint8_t *activity_id, const uint8_t *related_activity_id, ep_rt_thread_handle_t event_thread, EventPipeStackContents *stack); static const ep_char8_t * get_next_config_value (const ep_char8_t *data, const ep_char8_t **start, const ep_char8_t **end); static ep_char8_t * get_next_config_value_as_utf8_string (const ep_char8_t **data); static uint64_t get_next_config_value_as_uint64_t (const ep_char8_t **data); static uint32_t get_next_config_value_as_uint32_t (const ep_char8_t **data); static void enable_default_session_via_env_variables (void); static bool session_requested_sampling (EventPipeSession *session); static bool ipc_stream_factory_any_suspended_ports (void); /* * Global volatile varaibles, only to be accessed through inlined volatile access functions. */ volatile EventPipeState _ep_state = EP_STATE_NOT_INITIALIZED; volatile uint32_t _ep_number_of_sessions = 0; volatile EventPipeSession *_ep_sessions [EP_MAX_NUMBER_OF_SESSIONS] = { 0 }; volatile uint64_t _ep_allow_write = 0; /* * EventFilterDescriptor. 
*/ EventFilterDescriptor * ep_event_filter_desc_alloc ( uint64_t ptr, uint32_t size, uint32_t type) { EventFilterDescriptor *instance = ep_rt_object_alloc (EventFilterDescriptor); ep_raise_error_if_nok (instance != NULL); ep_raise_error_if_nok (ep_event_filter_desc_init (instance, ptr, size, type) != NULL); ep_on_exit: return instance; ep_on_error: ep_event_filter_desc_free (instance); instance = NULL; ep_exit_error_handler (); } EventFilterDescriptor * ep_event_filter_desc_init ( EventFilterDescriptor *event_filter_desc, uint64_t ptr, uint32_t size, uint32_t type) { EP_ASSERT (event_filter_desc != NULL); event_filter_desc->ptr = ptr; event_filter_desc->size = size; event_filter_desc->type = type; return event_filter_desc; } void ep_event_filter_desc_fini (EventFilterDescriptor * filter_desc) { ; } void ep_event_filter_desc_free (EventFilterDescriptor * filter_desc) { ep_return_void_if_nok (filter_desc != NULL); ep_event_filter_desc_fini (filter_desc); ep_rt_object_free (filter_desc); } /* * EventPipeProviderCallbackDataQueue. */ EventPipeProviderCallbackDataQueue * ep_provider_callback_data_queue_init (EventPipeProviderCallbackDataQueue *provider_callback_data_queue) { EP_ASSERT (provider_callback_data_queue != NULL); ep_rt_provider_callback_data_queue_alloc (&provider_callback_data_queue->queue); return ep_rt_provider_callback_data_queue_is_valid (&provider_callback_data_queue->queue) ? provider_callback_data_queue : NULL; } void ep_provider_callback_data_queue_fini (EventPipeProviderCallbackDataQueue *provider_callback_data_queue) { ep_return_void_if_nok (provider_callback_data_queue != NULL); ep_rt_provider_callback_data_queue_free (&provider_callback_data_queue->queue); } /* * EventPipeProviderCallbackData. */ EventPipeProviderCallbackData * ep_provider_callback_data_alloc ( const ep_char8_t *filter_data, EventPipeCallback callback_function, void *callback_data, int64_t keywords, EventPipeEventLevel provider_level, bool enabled) { EventPipeProviderCallbackData *instance = ep_rt_object_alloc (EventPipeProviderCallbackData); ep_raise_error_if_nok (instance != NULL); ep_raise_error_if_nok (ep_provider_callback_data_init ( instance, filter_data, callback_function, callback_data, keywords, provider_level, enabled) != NULL); ep_on_exit: return instance; ep_on_error: ep_provider_callback_data_free (instance); instance = NULL; ep_exit_error_handler (); } EventPipeProviderCallbackData * ep_provider_callback_data_alloc_copy (EventPipeProviderCallbackData *provider_callback_data_src) { EventPipeProviderCallbackData *instance = ep_rt_object_alloc (EventPipeProviderCallbackData); ep_raise_error_if_nok (instance != NULL); if (provider_callback_data_src) { *instance = *provider_callback_data_src; instance->filter_data = ep_rt_utf8_string_dup (provider_callback_data_src->filter_data); } ep_on_exit: return instance; ep_on_error: ep_provider_callback_data_free (instance); instance = NULL; ep_exit_error_handler (); } EventPipeProviderCallbackData * ep_provider_callback_data_alloc_move (EventPipeProviderCallbackData *provider_callback_data_src) { EventPipeProviderCallbackData *instance = ep_rt_object_alloc (EventPipeProviderCallbackData); ep_raise_error_if_nok (instance != NULL); if (provider_callback_data_src) { *instance = *provider_callback_data_src; memset (provider_callback_data_src, 0, sizeof (*provider_callback_data_src)); } ep_on_exit: return instance; ep_on_error: ep_provider_callback_data_free (instance); instance = NULL; ep_exit_error_handler (); } EventPipeProviderCallbackData * 
ep_provider_callback_data_init ( EventPipeProviderCallbackData *provider_callback_data, const ep_char8_t *filter_data, EventPipeCallback callback_function, void *callback_data, int64_t keywords, EventPipeEventLevel provider_level, bool enabled) { EP_ASSERT (provider_callback_data != NULL); provider_callback_data->filter_data = ep_rt_utf8_string_dup (filter_data); provider_callback_data->callback_function = callback_function; provider_callback_data->callback_data = callback_data; provider_callback_data->keywords = keywords; provider_callback_data->provider_level = provider_level; provider_callback_data->enabled = enabled; return provider_callback_data; } EventPipeProviderCallbackData * ep_provider_callback_data_init_copy ( EventPipeProviderCallbackData *provider_callback_data_dst, EventPipeProviderCallbackData *provider_callback_data_src) { EP_ASSERT (provider_callback_data_dst != NULL); EP_ASSERT (provider_callback_data_src != NULL); *provider_callback_data_dst = *provider_callback_data_src; provider_callback_data_dst->filter_data = ep_rt_utf8_string_dup (provider_callback_data_src->filter_data); return provider_callback_data_dst; } EventPipeProviderCallbackData * ep_provider_callback_data_init_move ( EventPipeProviderCallbackData *provider_callback_data_dst, EventPipeProviderCallbackData *provider_callback_data_src) { EP_ASSERT (provider_callback_data_dst != NULL); EP_ASSERT (provider_callback_data_src != NULL); *provider_callback_data_dst = *provider_callback_data_src; memset (provider_callback_data_src, 0, sizeof (*provider_callback_data_src)); return provider_callback_data_dst; } void ep_provider_callback_data_fini (EventPipeProviderCallbackData *provider_callback_data) { ep_return_void_if_nok (provider_callback_data != NULL); ep_rt_utf8_string_free (provider_callback_data->filter_data); } void ep_provider_callback_data_free (EventPipeProviderCallbackData *provider_callback_data) { ep_return_void_if_nok (provider_callback_data != NULL); ep_provider_callback_data_fini (provider_callback_data); ep_rt_object_free (provider_callback_data); } /* * EventPipeProviderConfiguration. */ EventPipeProviderConfiguration * ep_provider_config_init ( EventPipeProviderConfiguration *provider_config, const ep_char8_t *provider_name, uint64_t keywords, EventPipeEventLevel logging_level, const ep_char8_t *filter_data) { EP_ASSERT (provider_config != NULL); EP_ASSERT (provider_name != NULL); provider_config->provider_name = provider_name; provider_config->keywords = keywords; provider_config->logging_level = logging_level; provider_config->filter_data = filter_data; // Runtime specific rundown provider configuration. ep_rt_provider_config_init (provider_config); return provider_config; } void ep_provider_config_fini (EventPipeProviderConfiguration *provider_config) { ; } /* * EventPipeExecutionCheckpoint. */ EventPipeExecutionCheckpoint * ep_execution_checkpoint_alloc ( const ep_char8_t *name, ep_timestamp_t timestamp) { EventPipeExecutionCheckpoint *instance = ep_rt_object_alloc (EventPipeExecutionCheckpoint); ep_raise_error_if_nok (instance != NULL); instance->name = name ? ep_rt_utf8_string_dup (name) : NULL; instance->timestamp = timestamp; ep_on_exit: return instance; ep_on_error: ep_execution_checkpoint_free (instance); instance = NULL; ep_exit_error_handler (); } void ep_execution_checkpoint_free (EventPipeExecutionCheckpoint *execution_checkpoint) { ep_return_void_if_nok (execution_checkpoint != NULL); ep_rt_object_free (execution_checkpoint); } /* * EventPipe. 
*/ static bool enabled (void) { ep_requires_lock_held (); return (ep_volatile_load_eventpipe_state_without_barrier () >= EP_STATE_INITIALIZED && ep_volatile_load_number_of_sessions_without_barrier () > 0); } static uint32_t generate_session_index (void) { ep_requires_lock_held (); for (uint32_t i = 0; i < EP_MAX_NUMBER_OF_SESSIONS; ++i) if (ep_volatile_load_session_without_barrier (i) == NULL) return i; return EP_MAX_NUMBER_OF_SESSIONS; } static bool is_session_id_in_collection (EventPipeSessionID session_id) { EP_ASSERT (session_id != 0); ep_requires_lock_held (); const EventPipeSession *const session = (EventPipeSession *)session_id; for (uint32_t i = 0; i < EP_MAX_NUMBER_OF_SESSIONS; ++i) { if (ep_volatile_load_session (i) == session) { EP_ASSERT (i == ep_session_get_index (session)); return true; } } return false; } static EventPipeSessionID enable ( const ep_char8_t *output_path, uint32_t circular_buffer_size_in_mb, const EventPipeProviderConfiguration *providers, uint32_t providers_len, EventPipeSessionType session_type, EventPipeSerializationFormat format, bool rundown_requested, IpcStream *stream, EventPipeProviderCallbackDataQueue *provider_callback_data_queue, EventPipeSessionSynchronousCallback sync_callback, void *callback_additional_data) { EP_ASSERT (format < EP_SERIALIZATION_FORMAT_COUNT); EP_ASSERT (session_type == EP_SESSION_TYPE_SYNCHRONOUS || circular_buffer_size_in_mb > 0); EP_ASSERT (providers_len > 0 && providers != NULL); ep_requires_lock_held (); EventPipeSession *session = NULL; EventPipeSessionID session_id = 0; uint32_t session_index = 0; ep_raise_error_if_nok (ep_volatile_load_eventpipe_state () == EP_STATE_INITIALIZED); session_index = generate_session_index (); ep_raise_error_if_nok (session_index < EP_MAX_NUMBER_OF_SESSIONS); session = ep_session_alloc ( session_index, output_path, stream, session_type, format, rundown_requested, circular_buffer_size_in_mb, providers, providers_len, sync_callback, callback_additional_data); ep_raise_error_if_nok (session != NULL && ep_session_is_valid (session)); session_id = (EventPipeSessionID)session; // Return if the index is invalid. if (ep_session_get_index (session) >= EP_MAX_NUMBER_OF_SESSIONS) { EP_ASSERT (!"Session index was out of range."); ep_raise_error (); } if (ep_volatile_load_number_of_sessions () >= EP_MAX_NUMBER_OF_SESSIONS) { EP_ASSERT (!"max number of sessions reached."); ep_raise_error (); } // Register the SampleProfiler the very first time (if supported). ep_sample_profiler_init (provider_callback_data_queue); // Enable the EventPipe EventSource. ep_raise_error_if_nok (ep_event_source_enable (ep_event_source_get (), session)); // Save the session. if (ep_volatile_load_session_without_barrier (ep_session_get_index (session)) != NULL) { EP_ASSERT (!"Attempting to override an existing session."); ep_raise_error (); } ep_volatile_store_session (ep_session_get_index (session), session); ep_volatile_store_allow_write (ep_volatile_load_allow_write () | ep_session_get_mask (session)); ep_volatile_store_number_of_sessions (ep_volatile_load_number_of_sessions () + 1); // Enable tracing. config_enable_disable (ep_config_get (), session, provider_callback_data_queue, true); if (session_requested_sampling (session)) ep_sample_profiler_enable (); ep_on_exit: ep_requires_lock_held (); return session_id; ep_on_error: ep_session_free (session); session_id = 0; ep_exit_error_handler (); } static void log_process_info_event (EventPipeEventSource *event_source) { // Get the managed command line. 
const ep_char8_t *cmd_line = ep_rt_diagnostics_command_line_get (); // Log the process information event. ep_event_source_send_process_info (event_source, cmd_line); } static void disable_holding_lock ( EventPipeSessionID id, EventPipeProviderCallbackDataQueue *provider_callback_data_queue) { EP_ASSERT (id != 0); EP_ASSERT (ep_volatile_load_number_of_sessions () > 0); ep_requires_lock_held (); if (is_session_id_in_collection (id)) { EventPipeSession *const session = (EventPipeSession *)id; if (session_requested_sampling (session)) { // Disable the profiler. ep_sample_profiler_disable (); } // Log the process information event. log_process_info_event (ep_event_source_get ()); // Disable session tracing. config_enable_disable (ep_config_get (), session, provider_callback_data_queue, false); ep_session_disable (session); // WriteAllBuffersToFile, and remove providers. // Do rundown before fully stopping the session unless rundown wasn't requested if (ep_session_get_rundown_requested (session) && _ep_can_start_threads) { ep_session_enable_rundown (session); // Set Rundown provider. EventPipeThread *const thread = ep_thread_get_or_create (); if (thread != NULL) { ep_thread_set_as_rundown_thread (thread, session); { config_enable_disable (ep_config_get (), session, provider_callback_data_queue, true); { ep_session_execute_rundown (session, &_ep_rundown_execution_checkpoints); } config_enable_disable(ep_config_get (), session, provider_callback_data_queue, false); } ep_thread_set_as_rundown_thread (thread, NULL); } else { EP_ASSERT (!"Failed to get or create the EventPipeThread for rundown events."); } } ep_volatile_store_allow_write (ep_volatile_load_allow_write () & ~(ep_session_get_mask (session))); // Remove the session from the array before calling ep_session_suspend_write_event. This way // we can guarantee that either the event write got the pointer and will complete // the write successfully, or it gets NULL and will bail. EP_ASSERT (ep_volatile_load_session (ep_session_get_index (session)) == session); ep_volatile_store_session (ep_session_get_index (session), NULL); ep_session_suspend_write_event (session); bool ignored; ep_session_write_all_buffers_to_file (session, &ignored); // Flush the buffers to the stream/file ep_volatile_store_number_of_sessions (ep_volatile_load_number_of_sessions () - 1); // Write a final sequence point to the file now that all events have // been emitted. ep_session_write_sequence_point_unbuffered (session); ep_session_free (session); // Providers can't be deleted during tracing because they may be needed when serializing the file. config_delete_deferred_providers(ep_config_get ()); } ep_requires_lock_held (); return; } static void disable_helper (EventPipeSessionID id) { ep_requires_lock_not_held (); if (_ep_can_start_threads) ep_rt_thread_setup (); if (id == 0) return; // Don't block GC during clean-up. 
EP_GCX_PREEMP_ENTER EventPipeProviderCallbackDataQueue callback_data_queue; EventPipeProviderCallbackData provider_callback_data; EventPipeProviderCallbackDataQueue *provider_callback_data_queue = ep_provider_callback_data_queue_init (&callback_data_queue); EP_LOCK_ENTER (section1) if (ep_volatile_load_number_of_sessions () > 0) disable_holding_lock (id, provider_callback_data_queue); EP_LOCK_EXIT (section1) while (ep_provider_callback_data_queue_try_dequeue (provider_callback_data_queue, &provider_callback_data)) { ep_rt_prepare_provider_invoke_callback (&provider_callback_data); provider_invoke_callback (&provider_callback_data); ep_provider_callback_data_fini (&provider_callback_data); } ep_provider_callback_data_queue_fini (provider_callback_data_queue); #ifdef EP_CHECKED_BUILD if (ep_volatile_load_number_of_sessions () == 0) EP_ASSERT (ep_rt_providers_validate_all_disabled ()); #endif EP_GCX_PREEMP_EXIT ep_on_exit: ep_requires_lock_not_held (); return; ep_on_error: ep_exit_error_handler (); } static void write_event ( EventPipeEvent *ep_event, EventPipeEventPayload *payload, const uint8_t *activity_id, const uint8_t *related_activity_id) { EP_ASSERT (ep_event != NULL); EP_ASSERT (payload != NULL); // We can't proceed if tracing is not initialized. ep_return_void_if_nok (ep_volatile_load_eventpipe_state () >= EP_STATE_INITIALIZED); // Exit early if the event is not enabled. ep_return_void_if_nok (ep_event_is_enabled (ep_event)); // Get current thread. ep_rt_thread_handle_t thread = ep_rt_thread_get_handle (); // If the activity id isn't specified AND we are in an eventpipe thread, pull it from the current thread. // If pThread is NULL (we aren't writing from a managed thread) then activity_id can be NULL if (activity_id == NULL && thread != NULL) activity_id = ep_thread_get_activity_id_cref (ep_thread_get_activity_id_handle ()); write_event_2 ( thread, ep_event, payload, activity_id, related_activity_id, NULL, NULL); } static void write_event_2 ( ep_rt_thread_handle_t thread, EventPipeEvent *ep_event, EventPipeEventPayload *payload, const uint8_t *activity_id, const uint8_t *related_activity_id, ep_rt_thread_handle_t event_thread, EventPipeStackContents *stack) { EP_ASSERT (ep_event != NULL); EP_ASSERT (payload != NULL); // We can't proceed if tracing is not initialized. ep_return_void_if_nok (ep_volatile_load_eventpipe_state () >= EP_STATE_INITIALIZED); EventPipeThread *const current_thread = ep_thread_get_or_create (); if (!current_thread) { EP_ASSERT (!"Failed to get or create an EventPipeThread."); return; } if (ep_thread_is_rundown_thread (current_thread)) { EventPipeSession *const rundown_session = ep_thread_get_rundown_session (current_thread); EP_ASSERT (rundown_session != NULL); EP_ASSERT (thread != NULL); uint8_t *data = ep_event_payload_get_flat_data (payload); if (thread != NULL && rundown_session != NULL && data != NULL) { ep_session_write_event ( rundown_session, thread, ep_event, payload, activity_id, related_activity_id, event_thread, stack); } } else { for (uint32_t i = 0; i < EP_MAX_NUMBER_OF_SESSIONS; ++i) { if ((ep_volatile_load_allow_write () & ((uint64_t)1 << i)) == 0) continue; // Now that we know this session is probably live we pay the perf cost of the memory barriers // Setting this flag lets a thread trying to do a concurrent disable know that it is not safe to delete // session ID i. The if check above also ensures that once the session is unpublished this thread // will eventually stop ever storing ID i into the WriteInProgress flag.
This is important to // guarantee termination of the YIELD_WHILE loop in SuspendWriteEvents. ep_thread_set_session_write_in_progress (current_thread, i); { EventPipeSession *const session = ep_volatile_load_session (i); // Disable is allowed to set s_pSessions[i] = NULL at any time and that may have occurred in between // the check and the load if (session != NULL) { ep_session_write_event ( session, thread, ep_event, payload, activity_id, related_activity_id, event_thread, stack); } } // Do not reference session past this point, we are signaling Disable() that it is safe to // delete it ep_thread_set_session_write_in_progress (current_thread, UINT32_MAX); } } } static const ep_char8_t * get_next_config_value (const ep_char8_t *data, const ep_char8_t **start, const ep_char8_t **end) { EP_ASSERT (data != NULL); EP_ASSERT (start != NULL); EP_ASSERT (end != NULL); *start = data; while (*data != '\0' && *data != ':') data++; *end = data; return *data != '\0' ? ++data : NULL; } static ep_char8_t * get_next_config_value_as_utf8_string (const ep_char8_t **data) { EP_ASSERT (data != NULL); ep_char8_t *buffer = NULL; const ep_char8_t *start = NULL; const ep_char8_t *end = NULL; *data = get_next_config_value (*data, &start, &end); ptrdiff_t byte_len = end - start; if (byte_len != 0) buffer = ep_rt_utf8_string_dup_range(start, end); return buffer; } static uint64_t get_next_config_value_as_uint64_t (const ep_char8_t **data) { EP_ASSERT (data != NULL); ep_char8_t *value_as_utf8 = get_next_config_value_as_utf8_string (data); uint64_t value = UINT64_MAX; if (value_as_utf8) { value = (uint64_t)strtoull (value_as_utf8, NULL, 16); ep_rt_utf8_string_free (value_as_utf8); } return value; } static uint32_t get_next_config_value_as_uint32_t (const ep_char8_t **data) { EP_ASSERT (data != NULL); ep_char8_t *value_as_utf8 = get_next_config_value_as_utf8_string (data); uint32_t value = UINT32_MAX; if (value_as_utf8) { value = (uint32_t)strtoul (value_as_utf8, NULL, 10); ep_rt_utf8_string_free (value_as_utf8); } return value; } // // If EventPipe environment variables are specified, parse them and start a session. // static void enable_default_session_via_env_variables (void) { ep_char8_t *ep_config = NULL; ep_char8_t *ep_config_output_path = NULL; uint32_t ep_circular_mb = 0; const ep_char8_t *output_path = NULL; if (ep_rt_config_value_get_enable ()) { ep_config = ep_rt_config_value_get_config (); ep_config_output_path = ep_rt_config_value_get_output_path (); ep_char8_t pidStr[24]; ep_rt_utf8_string_snprintf(pidStr, ARRAY_SIZE (pidStr), "%u", (unsigned)ep_rt_current_process_get_id()); while (true) { if (ep_rt_utf8_string_replace(&ep_config_output_path, "{pid}", pidStr)) { // In case there is a second use of {pid} in the output path continue; } // No more instances of {pid} in the OutputPath break; } ep_circular_mb = ep_rt_config_value_get_circular_mb (); output_path = NULL; output_path = ep_config_output_path ? ep_config_output_path : "trace.nettrace"; ep_circular_mb = ep_circular_mb > 0 ? ep_circular_mb : 1; uint64_t session_id = ep_enable_2 ( output_path, ep_circular_mb, ep_config, ep_rt_config_value_get_output_streaming () ?
EP_SESSION_TYPE_FILESTREAM : EP_SESSION_TYPE_FILE, EP_SERIALIZATION_FORMAT_NETTRACE_V4, true, NULL, NULL, NULL); if (session_id) ep_start_streaming (session_id); } ep_rt_utf8_string_free (ep_config_output_path); ep_rt_utf8_string_free (ep_config); return; } static bool session_requested_sampling (EventPipeSession *session) { EP_ASSERT (session != NULL); return ep_rt_session_provider_list_find_by_name (ep_session_provider_list_get_providers_cref (ep_session_get_providers (session)), ep_config_get_sample_profiler_provider_name_utf8 ()); } static bool ipc_stream_factory_any_suspended_ports (void) { return _ep_ipc_stream_factory_suspended_ports_callback ? _ep_ipc_stream_factory_suspended_ports_callback () : false; } #ifdef EP_CHECKED_BUILD void ep_requires_lock_held (void) { ep_rt_config_requires_lock_held (); } void ep_requires_lock_not_held (void) { ep_rt_config_requires_lock_not_held (); } #endif EventPipeSessionID ep_enable ( const ep_char8_t *output_path, uint32_t circular_buffer_size_in_mb, const EventPipeProviderConfiguration *providers, uint32_t providers_len, EventPipeSessionType session_type, EventPipeSerializationFormat format, bool rundown_requested, IpcStream *stream, EventPipeSessionSynchronousCallback sync_callback, void *callback_additional_data) { ep_return_zero_if_nok (format < EP_SERIALIZATION_FORMAT_COUNT); ep_return_zero_if_nok (session_type == EP_SESSION_TYPE_SYNCHRONOUS || circular_buffer_size_in_mb > 0); ep_return_zero_if_nok (providers_len > 0 && providers != NULL); ep_requires_lock_not_held (); // If the state or arguments are invalid, bail here. if ((session_type == EP_SESSION_TYPE_FILE || session_type == EP_SESSION_TYPE_FILESTREAM) && output_path == NULL) return 0; if (session_type == EP_SESSION_TYPE_IPCSTREAM && stream == NULL) return 0; EventPipeSessionID session_id = 0; EventPipeProviderCallbackDataQueue callback_data_queue; EventPipeProviderCallbackData provider_callback_data; EventPipeProviderCallbackDataQueue *provider_callback_data_queue = ep_provider_callback_data_queue_init (&callback_data_queue); EP_LOCK_ENTER (section1) session_id = enable ( output_path, circular_buffer_size_in_mb, providers, providers_len, session_type, format, rundown_requested, stream, provider_callback_data_queue, sync_callback, callback_additional_data); EP_LOCK_EXIT (section1) while (ep_provider_callback_data_queue_try_dequeue (provider_callback_data_queue, &provider_callback_data)) { ep_rt_prepare_provider_invoke_callback (&provider_callback_data); provider_invoke_callback (&provider_callback_data); ep_provider_callback_data_fini (&provider_callback_data); } ep_on_exit: ep_provider_callback_data_queue_fini (provider_callback_data_queue); ep_requires_lock_not_held (); return session_id; ep_on_error: session_id = 0; ep_exit_error_handler (); } EventPipeSessionID ep_enable_2 ( const ep_char8_t *output_path, uint32_t circular_buffer_size_in_mb, const ep_char8_t *providers_config, EventPipeSessionType session_type, EventPipeSerializationFormat format, bool rundown_requested, IpcStream *stream, EventPipeSessionSynchronousCallback sync_callback, void *callback_additional_data) { const ep_char8_t *providers_config_to_parse = providers_config; int32_t providers_len = 0; EventPipeProviderConfiguration *providers = NULL; int32_t current_provider = 0; uint64_t session_id = 0; // If no specific providers config is used, enable EventPipe session // with the default provider configurations. 
if (!providers_config_to_parse || *providers_config_to_parse == '\0') { providers_len = 3; providers = ep_rt_object_array_alloc (EventPipeProviderConfiguration, providers_len); ep_raise_error_if_nok (providers != NULL); ep_provider_config_init (&providers [0], ep_rt_utf8_string_dup (ep_config_get_public_provider_name_utf8 ()), 0x4c14fccbd, EP_EVENT_LEVEL_VERBOSE, NULL); ep_provider_config_init (&providers [1], ep_rt_utf8_string_dup (ep_config_get_private_provider_name_utf8 ()), 0x4002000b, EP_EVENT_LEVEL_VERBOSE, NULL); ep_provider_config_init (&providers [2], ep_rt_utf8_string_dup (ep_config_get_sample_profiler_provider_name_utf8 ()), 0x0, EP_EVENT_LEVEL_VERBOSE, NULL); } else { // Count number of providers to parse. while (*providers_config_to_parse != '\0') { providers_len += 1; while (*providers_config_to_parse != '\0' && *providers_config_to_parse != ',') providers_config_to_parse++; if (*providers_config_to_parse != '\0') providers_config_to_parse++; } providers_config_to_parse = providers_config; providers = ep_rt_object_array_alloc (EventPipeProviderConfiguration, providers_len); ep_raise_error_if_nok (providers != NULL); while (*providers_config_to_parse != '\0') { ep_char8_t *provider_name = NULL; uint64_t keyword_mask = 0; EventPipeEventLevel level = EP_EVENT_LEVEL_VERBOSE; ep_char8_t *args = NULL; if (providers_config_to_parse && *providers_config_to_parse != ',') { provider_name = get_next_config_value_as_utf8_string (&providers_config_to_parse); ep_raise_error_if_nok (provider_name != NULL); } if (providers_config_to_parse && *providers_config_to_parse != ',') keyword_mask = get_next_config_value_as_uint64_t (&providers_config_to_parse); if (providers_config_to_parse && *providers_config_to_parse != ',') level = (EventPipeEventLevel)get_next_config_value_as_uint32_t (&providers_config_to_parse); if (providers_config_to_parse && *providers_config_to_parse != ',') args = get_next_config_value_as_utf8_string (&providers_config_to_parse); ep_provider_config_init (&providers [current_provider++], provider_name, keyword_mask, level, args); if (!providers_config_to_parse) break; while (*providers_config_to_parse != '\0' && *providers_config_to_parse != ',') providers_config_to_parse++; if (*providers_config_to_parse != '\0') providers_config_to_parse++; } } session_id = ep_enable ( output_path, circular_buffer_size_in_mb, providers, providers_len, session_type, format, rundown_requested, stream, sync_callback, callback_additional_data); ep_on_exit: if (providers) { for (int32_t i = 0; i < providers_len; ++i) { ep_provider_config_fini (&providers [i]); ep_rt_utf8_string_free ((ep_char8_t *)providers [i].provider_name); ep_rt_utf8_string_free ((ep_char8_t *)providers [i].filter_data); } ep_rt_object_array_free (providers); } return session_id; ep_on_error: ep_exit_error_handler (); } void ep_disable (EventPipeSessionID id) { ep_requires_lock_not_held (); // ep_disable is called synchronously since the diagnostics server is // single threaded. HOWEVER, if the runtime was suspended during startup, // then ep_finish_init might not have executed yet. Disabling a session // needs to either happen before we resume or after initialization. We briefly take the // lock to check _ep_can_start_threads to check whether we've finished initialization. We // also check whether we are still suspended in which case we can safely disable the session // without deferral. 
EP_LOCK_ENTER (section1) if (!_ep_can_start_threads && !ipc_stream_factory_any_suspended_ports ()) { ep_rt_session_id_array_append (&_ep_deferred_disable_session_ids, id); ep_raise_error_holding_lock (section1); } EP_LOCK_EXIT (section1) disable_helper (id); ep_on_exit: ep_requires_lock_not_held (); return; ep_on_error: ep_exit_error_handler (); } EventPipeSession * ep_get_session (EventPipeSessionID session_id) { ep_requires_lock_not_held (); EP_LOCK_ENTER (section1) if (ep_volatile_load_eventpipe_state () == EP_STATE_NOT_INITIALIZED) { EP_ASSERT (!"EventPipe::GetSession invoked before EventPipe was initialized."); ep_raise_error_holding_lock (section1); } ep_raise_error_if_nok_holding_lock (is_session_id_in_collection (session_id), section1); EP_LOCK_EXIT (section1) ep_on_exit: ep_requires_lock_not_held (); return (EventPipeSession *)session_id; ep_on_error: session_id = 0; ep_exit_error_handler (); } bool ep_is_session_enabled (EventPipeSessionID session_id) { ep_return_false_if_nok (session_id != 0); return ep_volatile_load_session (ep_session_get_index ((EventPipeSession *)session_id)) != NULL; } void ep_start_streaming (EventPipeSessionID session_id) { ep_requires_lock_not_held (); EP_LOCK_ENTER (section1) ep_raise_error_if_nok_holding_lock (is_session_id_in_collection (session_id), section1); if (_ep_can_start_threads) ep_session_start_streaming ((EventPipeSession *)session_id); else ep_rt_session_id_array_append (&_ep_deferred_enable_session_ids, session_id); EP_LOCK_EXIT (section1) ep_on_exit: ep_requires_lock_not_held (); return; ep_on_error: ep_exit_error_handler (); } bool ep_enabled (void) { return (ep_volatile_load_eventpipe_state () >= EP_STATE_INITIALIZED && ep_volatile_load_number_of_sessions () > 0); } EventPipeProvider * ep_create_provider ( const ep_char8_t *provider_name, EventPipeCallback callback_func, EventPipeCallbackDataFree callback_data_free_func, void *callback_data) { ep_return_null_if_nok (provider_name != NULL); ep_requires_lock_not_held (); EventPipeProvider *provider = NULL; EventPipeProviderCallbackDataQueue data_queue; EventPipeProviderCallbackData provider_callback_data; EventPipeProviderCallbackDataQueue *provider_callback_data_queue = ep_provider_callback_data_queue_init (&data_queue); EP_LOCK_ENTER (section1) provider = config_create_provider (ep_config_get (), provider_name, callback_func, callback_data_free_func, callback_data, provider_callback_data_queue); ep_raise_error_if_nok_holding_lock (provider != NULL, section1); EP_LOCK_EXIT (section1) while (ep_provider_callback_data_queue_try_dequeue (provider_callback_data_queue, &provider_callback_data)) { ep_rt_prepare_provider_invoke_callback (&provider_callback_data); provider_invoke_callback (&provider_callback_data); ep_provider_callback_data_fini (&provider_callback_data); } ep_rt_notify_profiler_provider_created (provider); ep_on_exit: ep_provider_callback_data_queue_fini (provider_callback_data_queue); ep_requires_lock_not_held (); return provider; ep_on_error: ep_delete_provider (provider); provider = NULL; ep_exit_error_handler (); } void ep_delete_provider (EventPipeProvider *provider) { ep_return_void_if_nok (provider != NULL); ep_requires_lock_not_held (); // Take the lock to make sure that we don't have a race // between disabling tracing and deleting a provider // where we hold a provider after tracing has been disabled. EP_LOCK_ENTER (section1) if (enabled ()) { // Save the provider until the end of the tracing session. 
ep_provider_set_delete_deferred (provider, true); } else { config_delete_provider (ep_config_get (), provider); } EP_LOCK_EXIT (section1) ep_on_exit: ep_requires_lock_not_held (); return; ep_on_error: ep_exit_error_handler (); } EventPipeProvider * ep_get_provider (const ep_char8_t *provider_name) { ep_return_null_if_nok (provider_name != NULL); ep_requires_lock_not_held (); EventPipeProvider *provider = NULL; EP_LOCK_ENTER (section1) provider = config_get_provider (ep_config_get (), provider_name); ep_raise_error_if_nok_holding_lock (provider != NULL, section1); EP_LOCK_EXIT (section1) ep_on_exit: ep_requires_lock_not_held (); return provider; ep_on_error: provider = NULL; ep_exit_error_handler (); } bool ep_add_provider_to_session ( EventPipeSessionProvider *provider, EventPipeSession *session) { ep_return_false_if_nok (provider != NULL && session != NULL); ep_requires_lock_not_held (); bool result = false; EP_LOCK_ENTER (section1) ep_raise_error_if_nok_holding_lock (ep_session_add_session_provider (session, provider), section1); EP_LOCK_EXIT (section1) result = true; ep_on_exit: ep_requires_lock_not_held (); return result; ep_on_error: EP_ASSERT (!result); ep_exit_error_handler (); } void ep_init (void) { ep_requires_lock_not_held (); ep_rt_init (); if (ep_volatile_load_eventpipe_state () != EP_STATE_NOT_INITIALIZED) { EP_ASSERT (!"EventPipe already initialized."); return; } ep_thread_init (); for (uint32_t i = 0; i < EP_MAX_NUMBER_OF_SESSIONS; ++i) ep_volatile_store_session (i, NULL); ep_config_init (ep_config_get ()); ep_event_source_init (ep_event_source_get ()); // This calls into auto-generated code to initialize the runtime specific providers // and events so that the EventPipe configuration lock isn't taken at runtime ep_rt_init_providers_and_events (); // Set the sampling rate for the sample profiler. const uint32_t default_profiler_sample_rate_in_nanoseconds = 1000000; // 1 msec. 
ep_sample_profiler_set_sampling_rate (default_profiler_sample_rate_in_nanoseconds); ep_rt_session_id_array_alloc (&_ep_deferred_enable_session_ids); ep_rt_session_id_array_alloc (&_ep_deferred_disable_session_ids); ep_rt_execution_checkpoint_array_alloc (&_ep_rundown_execution_checkpoints); EP_LOCK_ENTER (section1) ep_volatile_store_eventpipe_state (EP_STATE_INITIALIZED); EP_LOCK_EXIT (section1) enable_default_session_via_env_variables (); ep_on_exit: ep_requires_lock_not_held (); return; ep_on_error: ep_exit_error_handler (); } void ep_finish_init (void) { ep_requires_lock_not_held (); ep_rt_init_finish (); // Enable streaming for any deferred sessions EP_LOCK_ENTER (section1) _ep_can_start_threads = true; if (ep_volatile_load_eventpipe_state () == EP_STATE_INITIALIZED) { ep_rt_session_id_array_iterator_t deferred_session_ids_iterator = ep_rt_session_id_array_iterator_begin (&_ep_deferred_enable_session_ids); while (!ep_rt_session_id_array_iterator_end (&_ep_deferred_enable_session_ids, &deferred_session_ids_iterator)) { EventPipeSessionID session_id = ep_rt_session_id_array_iterator_value (&deferred_session_ids_iterator); if (is_session_id_in_collection (session_id)) ep_session_start_streaming ((EventPipeSession *)session_id); ep_rt_session_id_array_iterator_next (&deferred_session_ids_iterator); } ep_rt_session_id_array_clear (&_ep_deferred_enable_session_ids); } ep_sample_profiler_can_start_sampling (); EP_LOCK_EXIT (section1) // release lock in case someone tried to disable while we held it // _ep_deferred_disable_session_ids is now safe to access without the // lock since we've set _ep_can_start_threads to true inside the lock. Anyone // who was waiting on that lock will see that state and not mutate the defer list if (ep_volatile_load_eventpipe_state () == EP_STATE_INITIALIZED) { ep_rt_session_id_array_iterator_t deferred_disable_session_ids_iterator = ep_rt_session_id_array_iterator_begin (&_ep_deferred_disable_session_ids); while (!ep_rt_session_id_array_iterator_end (&_ep_deferred_disable_session_ids, &deferred_disable_session_ids_iterator)) { EventPipeSessionID session_id = ep_rt_session_id_array_iterator_value (&deferred_disable_session_ids_iterator); disable_helper (session_id); ep_rt_session_id_array_iterator_next (&deferred_disable_session_ids_iterator); } ep_rt_session_id_array_clear (&_ep_deferred_disable_session_ids); } ep_on_exit: ep_requires_lock_not_held (); return; ep_on_error: ep_exit_error_handler (); } void ep_shutdown (void) { ep_requires_lock_not_held (); ep_return_void_if_nok (ep_volatile_load_eventpipe_state () != EP_STATE_SHUTTING_DOWN); ep_return_void_if_nok (!ep_rt_process_detach ()); ep_return_void_if_nok (ep_volatile_load_eventpipe_state () == EP_STATE_INITIALIZED); EP_LOCK_ENTER (section1) ep_volatile_store_eventpipe_state (EP_STATE_SHUTTING_DOWN); EP_LOCK_EXIT (section1) for (uint32_t i = 0; i < EP_MAX_NUMBER_OF_SESSIONS; ++i) { EventPipeSession *session = ep_volatile_load_session (i); if (session) ep_disable ((EventPipeSessionID)session); } ep_rt_execution_checkpoint_array_iterator_t checkpoint_iterator; EventPipeExecutionCheckpoint *checkpoint; checkpoint_iterator = ep_rt_execution_checkpoint_array_iterator_begin (&_ep_rundown_execution_checkpoints); while (!ep_rt_execution_checkpoint_array_iterator_end (&_ep_rundown_execution_checkpoints, &checkpoint_iterator)) { checkpoint = ep_rt_execution_checkpoint_array_iterator_value (&checkpoint_iterator); if (checkpoint) ep_rt_utf8_string_free (checkpoint->name); ep_rt_execution_checkpoint_array_iterator_next 
(&checkpoint_iterator); } ep_rt_execution_checkpoint_array_free (&_ep_rundown_execution_checkpoints); ep_rt_session_id_array_free (&_ep_deferred_enable_session_ids); ep_rt_session_id_array_free (&_ep_deferred_disable_session_ids); ep_thread_fini (); // dotnet/coreclr: issue 24850: EventPipe shutdown race conditions // Deallocating providers/events here might cause AV if a WriteEvent // was to occur. Thus, we are not doing this cleanup. /*EP_LOCK_ENTER (section1) ep_sample_profiler_shutdown (); EP_LOCK_EXIT (section1)*/ // // Remove EventPipeEventSource first since it tries to use the data structures that we remove below. // // We need to do this after disabling sessions since those try to write to EventPipeEventSource. // ep_event_source_fini (ep_event_source_get ()); // ep_config_shutdown (ep_config_get ()); ep_on_exit: ep_requires_lock_not_held (); ep_rt_shutdown (); return; ep_on_error: ep_exit_error_handler (); } EventPipeEventMetadataEvent * ep_build_event_metadata_event ( EventPipeEventInstance *event_instance, uint32_t metadata_id) { ep_return_null_if_nok (event_instance != NULL); return ep_config_build_event_metadata_event (ep_config_get (), event_instance, metadata_id); } void ep_write_event ( EventPipeEvent *ep_event, uint8_t *data, uint32_t data_len, const uint8_t *activity_id, const uint8_t *related_activity_id) { ep_return_void_if_nok (ep_event != NULL); EventPipeEventPayload payload; EventPipeEventPayload *event_payload = ep_event_payload_init (&payload, data, data_len); write_event (ep_event, event_payload, activity_id, related_activity_id); ep_event_payload_fini (event_payload); } void ep_write_event_2 ( EventPipeEvent *ep_event, EventData *event_data, uint32_t event_data_len, const uint8_t *activity_id, const uint8_t *related_activity_id) { ep_return_void_if_nok (ep_event != NULL); EventPipeEventPayload payload; EventPipeEventPayload *event_payload = ep_event_payload_init_2 (&payload, event_data, event_data_len); write_event (ep_event, event_payload, activity_id, related_activity_id); ep_event_payload_fini (event_payload); } void ep_write_sample_profile_event ( ep_rt_thread_handle_t sampling_thread, EventPipeEvent *ep_event, ep_rt_thread_handle_t target_thread, EventPipeStackContents *stack, uint8_t *event_data, uint32_t event_data_len) { ep_return_void_if_nok (ep_event != NULL); EventPipeEventPayload payload; EventPipeEventPayload *event_payload = ep_event_payload_init (&payload, event_data, event_data_len); write_event_2 ( sampling_thread, ep_event, event_payload, NULL, NULL, target_thread, stack); ep_event_payload_fini (event_payload); } EventPipeEventInstance * ep_get_next_event (EventPipeSessionID session_id) { ep_requires_lock_not_held (); // Only fetch the next event if a tracing session exists. // The buffer manager is not disposed until the process is shutdown. EventPipeSession *const session = ep_get_session (session_id); return session ? ep_session_get_next_event (session) : NULL; } EventPipeWaitHandle ep_get_wait_handle (EventPipeSessionID session_id) { EventPipeSession *const session = ep_get_session (session_id); return session ? 
ep_rt_wait_event_get_wait_handle (ep_session_get_wait_event (session)) : 0; } bool ep_add_rundown_execution_checkpoint ( const ep_char8_t *name, ep_timestamp_t timestamp) { ep_requires_lock_not_held (); bool result = false; EventPipeExecutionCheckpoint *exec_checkpoint = ep_execution_checkpoint_alloc (name, timestamp); ep_raise_error_if_nok (exec_checkpoint != NULL); EP_LOCK_ENTER (section1) ep_raise_error_if_nok_holding_lock (ep_rt_execution_checkpoint_array_append (&_ep_rundown_execution_checkpoints, exec_checkpoint), section1); exec_checkpoint = NULL; EP_LOCK_EXIT (section1) result = true; ep_on_exit: ep_requires_lock_not_held (); return result; ep_on_error: ep_execution_checkpoint_free (exec_checkpoint); EP_ASSERT (result == false); ep_exit_error_handler (); } /* * EventPipeProviderCallbackDataQueue. */ bool ep_provider_callback_data_queue_enqueue ( EventPipeProviderCallbackDataQueue *provider_callback_data_queue, EventPipeProviderCallbackData *provider_callback_data) { EP_ASSERT (provider_callback_data_queue != NULL); EventPipeProviderCallbackData *provider_callback_data_move = ep_provider_callback_data_alloc_move (provider_callback_data); ep_raise_error_if_nok (provider_callback_data_move != NULL); ep_raise_error_if_nok (ep_rt_provider_callback_data_queue_push_tail (ep_provider_callback_data_queue_get_queue_ref (provider_callback_data_queue), provider_callback_data_move)); return true; ep_on_error: ep_provider_callback_data_free (provider_callback_data_move); return false; } bool ep_provider_callback_data_queue_try_dequeue ( EventPipeProviderCallbackDataQueue *provider_callback_data_queue, EventPipeProviderCallbackData *provider_callback_data) { EP_ASSERT (provider_callback_data_queue != NULL); ep_return_false_if_nok (!ep_rt_provider_callback_data_queue_is_empty (ep_provider_callback_data_queue_get_queue_ref (provider_callback_data_queue))); EventPipeProviderCallbackData *value = NULL; ep_raise_error_if_nok (ep_rt_provider_callback_data_queue_pop_head (ep_provider_callback_data_queue_get_queue_ref (provider_callback_data_queue), &value)); ep_provider_callback_data_init_move (provider_callback_data, value); ep_provider_callback_data_free (value); return true; ep_on_error: return false; } /* * EventPipeSystemTime. */ void ep_system_time_set ( EventPipeSystemTime *system_time, uint16_t year, uint16_t month, uint16_t day_of_week, uint16_t day, uint16_t hour, uint16_t minute, uint16_t second, uint16_t milliseconds) { EP_ASSERT (system_time != NULL); system_time->year = year; system_time->month = month; system_time->day_of_week = day_of_week; system_time->day = day; system_time->hour = hour; system_time->minute = minute; system_time->second = second; system_time->milliseconds = milliseconds; } void ep_ipc_stream_factory_callback_set (EventPipeIpcStreamFactorySuspendedPortsCallback suspended_ports_callback) { _ep_ipc_stream_factory_suspended_ports_callback = suspended_ports_callback; } #endif /* !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES) */ #endif /* ENABLE_PERFTRACING */ #ifndef EP_INCLUDE_SOURCE_FILES extern const char quiet_linker_empty_file_warning_eventpipe; const char quiet_linker_empty_file_warning_eventpipe = 0; #endif
-1
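The EventPipe entry points above compose into a small session lifecycle: ep_init/ep_finish_init at startup, ep_enable_2 to create a session, ep_start_streaming to begin writing, and ep_disable/ep_shutdown to tear down. Below is a minimal host-side sketch of that flow, assuming the src/native/eventpipe headers are on the include path and initialization has already run; the provider string follows the Name:Keywords:Level:Args format parsed by get_next_config_value (keywords in hex, level as a decimal), and the provider name and level value here are illustrative rather than taken from this file.

```c
// Hypothetical host-side sketch, not part of ep.c; assumes the
// src/native/eventpipe headers are available and ep_init /
// ep_finish_init have already run during startup.
#include "ep.h"

static void
trace_for_a_while (void)
{
	// Provider string format (parsed by get_next_config_value):
	//   Name:Keywords:Level:Args, with keywords in hex (strtoull base 16)
	//   and the level as a decimal uint32_t; 5 == EP_EVENT_LEVEL_VERBOSE.
	EventPipeSessionID session_id = ep_enable_2 (
		"trace.nettrace",                              // output_path
		1,                                             // circular_buffer_size_in_mb
		"Microsoft-Windows-DotNETRuntime:4c14fccbd:5", // providers_config (illustrative)
		EP_SESSION_TYPE_FILE,
		EP_SERIALIZATION_FORMAT_NETTRACE_V4,
		true,                                          // rundown_requested
		NULL,                                          // stream (IPC sessions only)
		NULL,                                          // sync_callback
		NULL);                                         // callback_additional_data

	if (session_id) {
		ep_start_streaming (session_id); // begin writing to the file
		/* ... run the interesting part of the workload ... */
		ep_disable (session_id);         // flush buffers, emit rundown, free the session
	}
}
```

For an IPC-backed session the stream argument would carry the IpcStream and session_type would be EP_SESSION_TYPE_IPCSTREAM, mirroring the argument validation at the top of ep_enable.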
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./docs/workflow/requirements/linux-requirements.md
Requirements to build dotnet/runtime on Linux ====================== This guide will walk you through the requirements to build dotnet/runtime on Linux. Before building there is environment setup that needs to happen to pull in all the dependencies required by the build. There are two suggested ways to go about doing this. First you are able to use the Docker environments provided by https://github.com/dotnet/dotnet-buildtools-prereqs-docker, or you can set up the environment yourself. The documentation will go over both ways of building. Using Docker allows you to leverage our existing images which already have an environment set up. General instructions for building are [here](../README.md). Instructions for building CoreCLR for Linux are [here](../building/coreclr/linux-instructions.md). Docker ================== Install Docker; see https://docs.docker.com/install/. All the required build tools are included in the Docker images used to do the build, so no additional setup is required. Environment =========== These instructions are written assuming Ubuntu 16.04/18.04 LTS, since that's the distro the team uses. Pull Requests are welcome to address other environments as long as they don't break the ability to use Ubuntu 16.04/18.04 LTS. Minimum RAM required to build is 1GB. The build is known to fail on 512 MB VMs ([dotnet/runtime#4069](https://github.com/dotnet/runtime/issues/4069)). Toolchain Setup --------------- Building the repo requires CMake 3.14.5 or newer on Linux. Add Kitware's APT feed to your configuration for a newer version of CMake. See their instructions at <https://apt.kitware.com/>. You may need to add LLVM's APT feed to your configuration to obtain the required version of clang/LLVM. See their instructions at <https://apt.llvm.org/>. Install the following packages for the toolchain: - cmake - llvm-9 - clang-9 - build-essential - python - curl - git - lldb-6.0 - liblldb-6.0-dev - libunwind8 - libunwind8-dev - gettext - libicu-dev - liblttng-ust-dev - libssl-dev - libkrb5-dev - libnuma-dev (optional, enables numa support) - zlib1g-dev - ninja-build (optional, enables building native code with ninja instead of make) ``` sudo apt-get install -y cmake llvm-9 clang-9 \ build-essential python curl git lldb-6.0 liblldb-6.0-dev \ libunwind8 libunwind8-dev gettext libicu-dev liblttng-ust-dev \ libssl-dev libnuma-dev libkrb5-dev zlib1g-dev ninja-build ``` You now have all the required components. *Unsupported OSes*: In case you have Gentoo you can run the following commands: ``` emerge --ask clang dev-util/lttng-ust app-crypt/mit-krb5 ```
Requirements to build dotnet/runtime on Linux ====================== This guide will walk you through the requirements to build dotnet/runtime on Linux. Before building there is environment setup that needs to happen to pull in all the dependencies required by the build. There are two suggested ways to go about doing this. First you are able to use the Docker environments provided by https://github.com/dotnet/dotnet-buildtools-prereqs-docker, or you can set up the environment yourself. The documentation will go over both ways of building. Using Docker allows you to leverage our existing images which already have an environment set up. General instructions for building are [here](../README.md). Instructions for building CoreCLR for Linux are [here](../building/coreclr/linux-instructions.md). Docker ================== Install Docker; see https://docs.docker.com/install/. All the required build tools are included in the Docker images used to do the build, so no additional setup is required. Environment =========== These instructions are written assuming Ubuntu 16.04/18.04 LTS, since that's the distro the team uses. Pull Requests are welcome to address other environments as long as they don't break the ability to use Ubuntu 16.04/18.04 LTS. Minimum RAM required to build is 1GB. The build is known to fail on 512 MB VMs ([dotnet/runtime#4069](https://github.com/dotnet/runtime/issues/4069)). Toolchain Setup --------------- Building the repo requires CMake 3.14.5 or newer on Linux. Add Kitware's APT feed to your configuration for a newer version of CMake. See their instructions at <https://apt.kitware.com/>. You may need to add LLVM's APT feed to your configuration to obtain the required version of clang/LLVM. See their instructions at <https://apt.llvm.org/>. Install the following packages for the toolchain: - cmake - llvm-9 - clang-9 - build-essential - python - curl - git - lldb-6.0 - liblldb-6.0-dev - libunwind8 - libunwind8-dev - gettext - libicu-dev - liblttng-ust-dev - libssl-dev - libkrb5-dev - libnuma-dev (optional, enables numa support) - zlib1g-dev - ninja-build (optional, enables building native code with ninja instead of make) ``` sudo apt-get install -y cmake llvm-9 clang-9 \ build-essential python curl git lldb-6.0 liblldb-6.0-dev \ libunwind8 libunwind8-dev gettext libicu-dev liblttng-ust-dev \ libssl-dev libnuma-dev libkrb5-dev zlib1g-dev ninja-build ``` You now have all the required components. *Unsupported OSes*: In case you have Gentoo you can run the following commands: ``` emerge --ask clang dev-util/lttng-ust app-crypt/mit-krb5 ```
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/ptrace/_UPT_destroy.c
/* libunwind - a platform-independent unwind library Copyright (C) 2003 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "_UPT_internal.h" void _UPT_destroy (void *ptr) { struct UPT_info *ui = (struct UPT_info *) ptr; invalidate_edi (&ui->edi); free (ptr); }
/* libunwind - a platform-independent unwind library Copyright (C) 2003 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "_UPT_internal.h" void _UPT_destroy (void *ptr) { struct UPT_info *ui = (struct UPT_info *) ptr; invalidate_edi (&ui->edi); free (ptr); }
-1
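_UPT_destroy is the teardown half of libunwind's ptrace accessor pair. Below is a minimal remote-unwind sketch showing where it fits, assuming the documented libunwind-ptrace API (_UPT_create, _UPT_accessors, unw_init_remote) and a target process that is already stopped under PTRACE_ATTACH; error handling is abbreviated.

```c
// Remote-unwind sketch showing the _UPT_create/_UPT_destroy pairing;
// assumes the target pid is already stopped (PTRACE_ATTACH + waitpid)
// and the program links against libunwind-ptrace.
#include <sys/types.h>
#include <stdio.h>
#include <libunwind-ptrace.h>

static void
backtrace_pid (pid_t pid)
{
  unw_addr_space_t as = unw_create_addr_space (&_UPT_accessors, 0);
  void *ui = _UPT_create (pid);   /* allocates the UPT_info torn down above */
  unw_cursor_t cursor;

  if (as != NULL && ui != NULL && unw_init_remote (&cursor, as, ui) == 0)
    {
      do
        {
          unw_word_t ip;
          unw_get_reg (&cursor, UNW_REG_IP, &ip);
          printf ("ip=%#lx\n", (unsigned long) ip);
        }
      while (unw_step (&cursor) > 0);
    }

  if (ui != NULL)
    _UPT_destroy (ui);            /* invalidate_edi + free, as defined above */
  if (as != NULL)
    unw_destroy_addr_space (as);
}
```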
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/native/libs/System.Security.Cryptography.Native/pal_evp_pkey_rsa.c
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "pal_evp_pkey_rsa.h" #include "pal_utilities.h" #include <assert.h> static int HasNoPrivateKey(const RSA* rsa); EVP_PKEY* CryptoNative_EvpPKeyCreateRsa(RSA* currentKey) { assert(currentKey != NULL); ERR_clear_error(); EVP_PKEY* pkey = EVP_PKEY_new(); if (pkey == NULL) { return NULL; } if (!EVP_PKEY_set1_RSA(pkey, currentKey)) { EVP_PKEY_free(pkey); return NULL; } return pkey; } EVP_PKEY* CryptoNative_RsaGenerateKey(int keySize) { ERR_clear_error(); EVP_PKEY_CTX* ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_RSA, NULL); if (ctx == NULL) { return NULL; } EVP_PKEY* pkey = NULL; EVP_PKEY* ret = NULL; if (EVP_PKEY_keygen_init(ctx) == 1 && EVP_PKEY_CTX_set_rsa_keygen_bits(ctx, keySize) == 1 && EVP_PKEY_keygen(ctx, &pkey) == 1) { ret = pkey; pkey = NULL; } if (pkey != NULL) { EVP_PKEY_free(pkey); } EVP_PKEY_CTX_free(ctx); return ret; } static bool ConfigureEncryption(EVP_PKEY_CTX* ctx, RsaPaddingMode padding, const EVP_MD* digest) { if (padding == RsaPaddingPkcs1) { if (EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING) <= 0) { return false; } } else { assert(padding == RsaPaddingOaepOrPss); if (EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_OAEP_PADDING) <= 0) { return false; } #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wcast-qual" if (EVP_PKEY_CTX_set_rsa_oaep_md(ctx, digest) <= 0) #pragma clang diagnostic pop { return false; } } return true; } int32_t CryptoNative_RsaDecrypt(EVP_PKEY* pkey, const uint8_t* source, int32_t sourceLen, RsaPaddingMode padding, const EVP_MD* digest, uint8_t* destination, int32_t destinationLen) { assert(pkey != NULL); assert(source != NULL); assert(destination != NULL); assert(padding >= RsaPaddingPkcs1 && padding <= RsaPaddingOaepOrPss); assert(digest != NULL || padding == RsaPaddingPkcs1); ERR_clear_error(); EVP_PKEY_CTX* ctx = EVP_PKEY_CTX_new(pkey, NULL); int ret = -1; if (ctx == NULL || EVP_PKEY_decrypt_init(ctx) <= 0) { goto done; } if (!ConfigureEncryption(ctx, padding, digest)) { goto done; } // This check may no longer be needed on OpenSSL 3.0 { const RSA* rsa = EVP_PKEY_get0_RSA(pkey); if (rsa == NULL || HasNoPrivateKey(rsa)) { ERR_PUT_error(ERR_LIB_RSA, RSA_F_RSA_NULL_PRIVATE_DECRYPT, RSA_R_VALUE_MISSING, __FILE__, __LINE__); goto done; } } size_t written = Int32ToSizeT(destinationLen); if (EVP_PKEY_decrypt(ctx, destination, &written, source, Int32ToSizeT(sourceLen)) > 0) { ret = SizeTToInt32(written); } done: if (ctx != NULL) { EVP_PKEY_CTX_free(ctx); } return ret; } int32_t CryptoNative_RsaEncrypt(EVP_PKEY* pkey, const uint8_t* source, int32_t sourceLen, RsaPaddingMode padding, const EVP_MD* digest, uint8_t* destination, int32_t destinationLen) { assert(pkey != NULL); assert(destination != NULL); assert(padding >= RsaPaddingPkcs1 && padding <= RsaPaddingOaepOrPss); assert(digest != NULL || padding == RsaPaddingPkcs1); ERR_clear_error(); EVP_PKEY_CTX* ctx = EVP_PKEY_CTX_new(pkey, NULL); int ret = -1; if (ctx == NULL || EVP_PKEY_encrypt_init(ctx) <= 0) { goto done; } if (!ConfigureEncryption(ctx, padding, digest)) { goto done; } size_t written = Int32ToSizeT(destinationLen); if (EVP_PKEY_encrypt(ctx, destination, &written, source, Int32ToSizeT(sourceLen)) > 0) { ret = SizeTToInt32(written); } done: if (ctx != NULL) { EVP_PKEY_CTX_free(ctx); } return ret; } static bool ConfigureSignature(EVP_PKEY_CTX* ctx, RsaPaddingMode padding, const EVP_MD* digest) { if (padding == RsaPaddingPkcs1) { if 
(EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING) <= 0) { return false; } } else { assert(padding == RsaPaddingOaepOrPss); if (EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PSS_PADDING) <= 0 || EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, RSA_PSS_SALTLEN_DIGEST) <= 0) { return false; } } #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wcast-qual" if (EVP_PKEY_CTX_set_signature_md(ctx, digest) <= 0) #pragma clang diagnostic pop { return false; } return true; } int32_t CryptoNative_RsaSignHash(EVP_PKEY* pkey, RsaPaddingMode padding, const EVP_MD* digest, const uint8_t* hash, int32_t hashLen, uint8_t* destination, int32_t destinationLen) { assert(pkey != NULL); assert(destination != NULL); assert(padding >= RsaPaddingPkcs1 && padding <= RsaPaddingOaepOrPss); assert(digest != NULL || padding == RsaPaddingPkcs1); ERR_clear_error(); EVP_PKEY_CTX* ctx = EVP_PKEY_CTX_new(pkey, NULL); int ret = -1; if (ctx == NULL || EVP_PKEY_sign_init(ctx) <= 0) { goto done; } if (!ConfigureSignature(ctx, padding, digest)) { goto done; } // This check may no longer be needed on OpenSSL 3.0 { const RSA* rsa = EVP_PKEY_get0_RSA(pkey); if (rsa == NULL || HasNoPrivateKey(rsa)) { ERR_PUT_error(ERR_LIB_RSA, RSA_F_RSA_NULL_PRIVATE_DECRYPT, RSA_R_VALUE_MISSING, __FILE__, __LINE__); goto done; } } size_t written = Int32ToSizeT(destinationLen); if (EVP_PKEY_sign(ctx, destination, &written, hash, Int32ToSizeT(hashLen)) > 0) { ret = SizeTToInt32(written); } done: if (ctx != NULL) { EVP_PKEY_CTX_free(ctx); } return ret; } int32_t CryptoNative_RsaVerifyHash(EVP_PKEY* pkey, RsaPaddingMode padding, const EVP_MD* digest, const uint8_t* hash, int32_t hashLen, const uint8_t* signature, int32_t signatureLen) { assert(pkey != NULL); assert(signature != NULL); assert(padding >= RsaPaddingPkcs1 && padding <= RsaPaddingOaepOrPss); assert(digest != NULL || padding == RsaPaddingPkcs1); ERR_clear_error(); EVP_PKEY_CTX* ctx = EVP_PKEY_CTX_new(pkey, NULL); int ret = -1; if (ctx == NULL || EVP_PKEY_verify_init(ctx) <= 0) { goto done; } if (!ConfigureSignature(ctx, padding, digest)) { goto done; } // EVP_PKEY_verify is not consistent on whether a mis-sized hash is an error or just a mismatch. // Normalize to mismatch. if (hashLen != EVP_MD_get_size(digest)) { ret = 0; goto done; } ret = EVP_PKEY_verify(ctx, signature, Int32ToSizeT(signatureLen), hash, Int32ToSizeT(hashLen)); done: if (ctx != NULL) { EVP_PKEY_CTX_free(ctx); } return ret; } static int HasNoPrivateKey(const RSA* rsa) { if (rsa == NULL) return 1; // Shared pointer, don't free. const RSA_METHOD* meth = RSA_get_method(rsa); // The method has described itself as having the private key external to the structure. // That doesn't mean it's actually present, but we can't tell. #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wcast-qual" if (RSA_test_flags(rsa, RSA_FLAG_EXT_PKEY) || RSA_meth_get_flags((RSA_METHOD*)meth) & RSA_FLAG_EXT_PKEY) #pragma clang diagnostic pop { return 0; } // In the event that there's a middle-ground where we report failure when success is expected, // one could do something like check if the RSA_METHOD intercepts all private key operations: // // * meth->rsa_priv_enc // * meth->rsa_priv_dec // * meth->rsa_sign (in 1.0.x this is only respected if the RSA_FLAG_SIGN_VER flag is asserted) // // But, for now, leave it at the EXT_PKEY flag test. // The module is documented as accepting either d or the full set of CRT parameters (p, q, dp, dq, qInv) // So if we see d, we're good. Otherwise, if any of the rest are missing, we're public-only.
const BIGNUM* d; RSA_get0_key(rsa, NULL, NULL, &d); if (d != NULL) { return 0; } const BIGNUM* p; const BIGNUM* q; const BIGNUM* dmp1; const BIGNUM* dmq1; const BIGNUM* iqmp; RSA_get0_factors(rsa, &p, &q); RSA_get0_crt_params(rsa, &dmp1, &dmq1, &iqmp); if (p == NULL || q == NULL || dmp1 == NULL || dmq1 == NULL || iqmp == NULL) { return 1; } return 0; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "pal_evp_pkey_rsa.h" #include "pal_utilities.h" #include <assert.h> static int HasNoPrivateKey(const RSA* rsa); EVP_PKEY* CryptoNative_EvpPKeyCreateRsa(RSA* currentKey) { assert(currentKey != NULL); ERR_clear_error(); EVP_PKEY* pkey = EVP_PKEY_new(); if (pkey == NULL) { return NULL; } if (!EVP_PKEY_set1_RSA(pkey, currentKey)) { EVP_PKEY_free(pkey); return NULL; } return pkey; } EVP_PKEY* CryptoNative_RsaGenerateKey(int keySize) { ERR_clear_error(); EVP_PKEY_CTX* ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_RSA, NULL); if (ctx == NULL) { return NULL; } EVP_PKEY* pkey = NULL; EVP_PKEY* ret = NULL; if (EVP_PKEY_keygen_init(ctx) == 1 && EVP_PKEY_CTX_set_rsa_keygen_bits(ctx, keySize) == 1 && EVP_PKEY_keygen(ctx, &pkey) == 1) { ret = pkey; pkey = NULL; } if (pkey != NULL) { EVP_PKEY_free(pkey); } EVP_PKEY_CTX_free(ctx); return ret; } static bool ConfigureEncryption(EVP_PKEY_CTX* ctx, RsaPaddingMode padding, const EVP_MD* digest) { if (padding == RsaPaddingPkcs1) { if (EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING) <= 0) { return false; } } else { assert(padding == RsaPaddingOaepOrPss); if (EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_OAEP_PADDING) <= 0) { return false; } #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wcast-qual" if (EVP_PKEY_CTX_set_rsa_oaep_md(ctx, digest) <= 0) #pragma clang diagnostic pop { return false; } } return true; } int32_t CryptoNative_RsaDecrypt(EVP_PKEY* pkey, const uint8_t* source, int32_t sourceLen, RsaPaddingMode padding, const EVP_MD* digest, uint8_t* destination, int32_t destinationLen) { assert(pkey != NULL); assert(source != NULL); assert(destination != NULL); assert(padding >= RsaPaddingPkcs1 && padding <= RsaPaddingOaepOrPss); assert(digest != NULL || padding == RsaPaddingPkcs1); ERR_clear_error(); EVP_PKEY_CTX* ctx = EVP_PKEY_CTX_new(pkey, NULL); int ret = -1; if (ctx == NULL || EVP_PKEY_decrypt_init(ctx) <= 0) { goto done; } if (!ConfigureEncryption(ctx, padding, digest)) { goto done; } // This check may no longer be needed on OpenSSL 3.0 { const RSA* rsa = EVP_PKEY_get0_RSA(pkey); if (rsa == NULL || HasNoPrivateKey(rsa)) { ERR_PUT_error(ERR_LIB_RSA, RSA_F_RSA_NULL_PRIVATE_DECRYPT, RSA_R_VALUE_MISSING, __FILE__, __LINE__); goto done; } } size_t written = Int32ToSizeT(destinationLen); if (EVP_PKEY_decrypt(ctx, destination, &written, source, Int32ToSizeT(sourceLen)) > 0) { ret = SizeTToInt32(written); } done: if (ctx != NULL) { EVP_PKEY_CTX_free(ctx); } return ret; } int32_t CryptoNative_RsaEncrypt(EVP_PKEY* pkey, const uint8_t* source, int32_t sourceLen, RsaPaddingMode padding, const EVP_MD* digest, uint8_t* destination, int32_t destinationLen) { assert(pkey != NULL); assert(destination != NULL); assert(padding >= RsaPaddingPkcs1 && padding <= RsaPaddingOaepOrPss); assert(digest != NULL || padding == RsaPaddingPkcs1); ERR_clear_error(); EVP_PKEY_CTX* ctx = EVP_PKEY_CTX_new(pkey, NULL); int ret = -1; if (ctx == NULL || EVP_PKEY_encrypt_init(ctx) <= 0) { goto done; } if (!ConfigureEncryption(ctx, padding, digest)) { goto done; } size_t written = Int32ToSizeT(destinationLen); if (EVP_PKEY_encrypt(ctx, destination, &written, source, Int32ToSizeT(sourceLen)) > 0) { ret = SizeTToInt32(written); } done: if (ctx != NULL) { EVP_PKEY_CTX_free(ctx); } return ret; } static bool ConfigureSignature(EVP_PKEY_CTX* ctx, RsaPaddingMode padding, const EVP_MD* digest) { if (padding == RsaPaddingPkcs1) { if 
(EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING) <= 0) { return false; } } else { assert(padding == RsaPaddingOaepOrPss); if (EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PSS_PADDING) <= 0 || EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, RSA_PSS_SALTLEN_DIGEST) <= 0) { return false; } } #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wcast-qual" if (EVP_PKEY_CTX_set_signature_md(ctx, digest) <= 0) #pragma clang diagnostic pop { return false; } return true; } int32_t CryptoNative_RsaSignHash(EVP_PKEY* pkey, RsaPaddingMode padding, const EVP_MD* digest, const uint8_t* hash, int32_t hashLen, uint8_t* destination, int32_t destinationLen) { assert(pkey != NULL); assert(destination != NULL); assert(padding >= RsaPaddingPkcs1 && padding <= RsaPaddingOaepOrPss); assert(digest != NULL || padding == RsaPaddingPkcs1); ERR_clear_error(); EVP_PKEY_CTX* ctx = EVP_PKEY_CTX_new(pkey, NULL); int ret = -1; if (ctx == NULL || EVP_PKEY_sign_init(ctx) <= 0) { goto done; } if (!ConfigureSignature(ctx, padding, digest)) { goto done; } // This check may no longer be needed on OpenSSL 3.0 { const RSA* rsa = EVP_PKEY_get0_RSA(pkey); if (rsa == NULL || HasNoPrivateKey(rsa)) { ERR_PUT_error(ERR_LIB_RSA, RSA_F_RSA_NULL_PRIVATE_DECRYPT, RSA_R_VALUE_MISSING, __FILE__, __LINE__); goto done; } } size_t written = Int32ToSizeT(destinationLen); if (EVP_PKEY_sign(ctx, destination, &written, hash, Int32ToSizeT(hashLen)) > 0) { ret = SizeTToInt32(written); } done: if (ctx != NULL) { EVP_PKEY_CTX_free(ctx); } return ret; } int32_t CryptoNative_RsaVerifyHash(EVP_PKEY* pkey, RsaPaddingMode padding, const EVP_MD* digest, const uint8_t* hash, int32_t hashLen, const uint8_t* signature, int32_t signatureLen) { assert(pkey != NULL); assert(signature != NULL); assert(padding >= RsaPaddingPkcs1 && padding <= RsaPaddingOaepOrPss); assert(digest != NULL || padding == RsaPaddingPkcs1); ERR_clear_error(); EVP_PKEY_CTX* ctx = EVP_PKEY_CTX_new(pkey, NULL); int ret = -1; if (ctx == NULL || EVP_PKEY_verify_init(ctx) <= 0) { goto done; } if (!ConfigureSignature(ctx, padding, digest)) { goto done; } // EVP_PKEY_verify is not consistent on whether a mis-sized hash is an error or just a mismatch. // Normalize to mismatch. if (hashLen != EVP_MD_get_size(digest)) { ret = 0; goto done; } ret = EVP_PKEY_verify(ctx, signature, Int32ToSizeT(signatureLen), hash, Int32ToSizeT(hashLen)); done: if (ctx != NULL) { EVP_PKEY_CTX_free(ctx); } return ret; } static int HasNoPrivateKey(const RSA* rsa) { if (rsa == NULL) return 1; // Shared pointer, don't free. const RSA_METHOD* meth = RSA_get_method(rsa); // The method has described itself as having the private key external to the structure. // That doesn't mean it's actually present, but we can't tell. #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wcast-qual" if (RSA_test_flags(rsa, RSA_FLAG_EXT_PKEY) || RSA_meth_get_flags((RSA_METHOD*)meth) & RSA_FLAG_EXT_PKEY) #pragma clang diagnostic pop { return 0; } // In the event that there's a middle-ground where we report failure when success is expected, // one could do something like check if the RSA_METHOD intercepts all private key operations: // // * meth->rsa_priv_enc // * meth->rsa_priv_dec // * meth->rsa_sign (in 1.0.x this is only respected if the RSA_FLAG_SIGN_VER flag is asserted) // // But, for now, leave it at the EXT_PKEY flag test. // The module is documented as accepting either d or the full set of CRT parameters (p, q, dp, dq, qInv) // So if we see d, we're good. Otherwise, if any of the rest are missing, we're public-only.
const BIGNUM* d; RSA_get0_key(rsa, NULL, NULL, &d); if (d != NULL) { return 0; } const BIGNUM* p; const BIGNUM* q; const BIGNUM* dmp1; const BIGNUM* dmq1; const BIGNUM* iqmp; RSA_get0_factors(rsa, &p, &q); RSA_get0_crt_params(rsa, &dmp1, &dmq1, &iqmp); if (p == NULL || q == NULL || dmp1 == NULL || dmq1 == NULL || iqmp == NULL) { return 1; } return 0; }
-1
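A rough caller-side sketch of the shim above, signing and then verifying a digest with the exports defined in this file. It assumes the declarations live in pal_evp_pkey_rsa.h (the header the file itself includes), that `hash` already holds a 32-byte SHA-256 digest, and that a 2048-bit key yields a 256-byte signature; the helper name is hypothetical.

```c
// Hypothetical caller of the CryptoNative_* RSA exports shown above.
#include <openssl/evp.h>
#include "pal_evp_pkey_rsa.h"

static int sign_and_verify (const uint8_t hash[32])
{
    int ok = 0;
    EVP_PKEY* pkey = CryptoNative_RsaGenerateKey (2048);
    if (pkey == NULL)
        return 0;

    uint8_t sig[256]; // 2048-bit key => 256-byte signature
    int32_t sigLen = CryptoNative_RsaSignHash (
        pkey, RsaPaddingOaepOrPss /* selects PSS for signatures */, EVP_sha256 (),
        hash, 32, sig, (int32_t) sizeof (sig));

    if (sigLen > 0)
    {
        // Per EVP_PKEY_verify semantics: 1 on match, 0 on mismatch, negative on error.
        ok = CryptoNative_RsaVerifyHash (
            pkey, RsaPaddingOaepOrPss, EVP_sha256 (),
            hash, 32, sig, sigLen) == 1;
    }

    EVP_PKEY_free (pkey);
    return ok;
}
```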
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/x86_64/Lregs.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gregs.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gregs.c" #endif
-1
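Lregs.c is libunwind's standard two-variant build trick: the `L` file defines UNW_LOCAL_ONLY and re-includes the generic `G` source, so each module is compiled once for the fast local-only case and once for the general (possibly remote) case. A consumer-side sketch of what the local-only build provides, using the documented unw_* API rather than anything from this file:

```c
// Local backtrace sketch over the UNW_LOCAL_ONLY entry points; assumes
// an x86_64 build linked against libunwind.
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#include <stdio.h>

static void print_backtrace (void)
{
  unw_context_t context;
  unw_cursor_t cursor;

  unw_getcontext (&context);           // capture the current register state
  unw_init_local (&cursor, &context);  // local-only counterpart of unw_init_remote

  while (unw_step (&cursor) > 0)
    {
      unw_word_t ip, sp;
      unw_get_reg (&cursor, UNW_REG_IP, &ip);
      unw_get_reg (&cursor, UNW_REG_SP, &sp);
      printf ("ip=%016lx sp=%016lx\n", (unsigned long) ip, (unsigned long) sp);
    }
}
```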
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./docs/workflow/debugging/libraries/debugging-vscode.md
# Debugging Libraries with Visual Studio Code - Install [Visual Studio Code](https://code.visualstudio.com/) - Install the [C# Extension](https://marketplace.visualstudio.com/items?itemName=ms-dotnettools.csharp) - Open the folder containing the source you want to debug in VS Code - i.e., if you are debugging a test failure in System.Net.Sockets, open `runtime/src/libraries/System.Net.Sockets` - Open the debug window: `ctrl-shift-D` or click on the button on the left - Click the gear button at the top to create a launch configuration, select `.NET Core` from the selection dropdown - In the ".NET Core Launch (console)" `launch.json` configuration file make the following changes: - delete the `preLaunchTask` property - set `program` to the full path to `dotnet` in the artifacts/bin/testhost directory. - something like `artifacts/bin/testhost/netcoreapp-{OS}-{Configuration}-{Architecture}`, plus the full path to your dotnet/runtime directory. - set `cwd` to the test bin directory. - using the System.Net.Sockets example, it should be something like `artifacts/bin/System.Net.Sockets.Tests/netcoreapp-{OS}-{Configuration}-{Architecture}`, plus the full path to your dotnet/runtime directory. - set `args` to the command line arguments to pass to the test - something like: `[ "exec", "--runtimeconfig", "{TestProjectName}.runtimeconfig.json", "xunit.console.dll", "{TestProjectName}.dll", "-notrait", ... ]`, where TestProjectName would be `System.Net.Sockets.Tests` - to run a specific test, you can append something like: `[ "-method", "System.Net.Sockets.Tests.{ClassName}.{TestMethodName}", ...]` - Set a breakpoint and launch the debugger, inspecting variables and call stacks will now work
# Debugging Libraries with Visual Studio Code - Install [Visual Studio Code](https://code.visualstudio.com/) - Install the [C# Extension](https://marketplace.visualstudio.com/items?itemName=ms-dotnettools.csharp) - Open the folder containing the source you want to debug in VS Code - i.e., if you are debugging a test failure in System.Net.Sockets, open `runtime/src/libraries/System.Net.Sockets` - Open the debug window: `ctrl-shift-D` or click on the button on the left - Click the gear button at the top to create a launch configuration, select `.NET Core` from the selection dropdown - In the ".NET Core Launch (console)" `launch.json` configuration file make the following changes: - delete the `preLaunchTask` property - set `program` to the full path to `dotnet` in the artifacts/bin/testhost directory. - something like `artifacts/bin/testhost/netcoreapp-{OS}-{Configuration}-{Architecture}`, plus the full path to your dotnet/runtime directory. - set `cwd` to the test bin directory. - using the System.Net.Sockets example, it should be something like `artifacts/bin/System.Net.Sockets.Tests/netcoreapp-{OS}-{Configuration}-{Architecture}`, plus the full path to your dotnet/runtime directory. - set `args` to the command line arguments to pass to the test - something like: `[ "exec", "--runtimeconfig", "{TestProjectName}.runtimeconfig.json", "xunit.console.dll", "{TestProjectName}.dll", "-notrait", ... ]`, where TestProjectName would be `System.Net.Sockets.Tests` - to run a specific test, you can append something like: `[ "-method", "System.Net.Sockets.Tests.{ClassName}.{TestMethodName}", ...]` - Set a breakpoint and launch the debugger, inspecting variables and call stacks will now work
-1
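Putting the launch.json edits from that walkthrough together, a sketch of the finished configuration; the <runtime-repo> prefix, the Linux-Debug-x64 segment, and the {ClassName}.{TestMethodName} placeholder follow the document's own conventions and must be substituted for a real machine:

    {
        "name": ".NET Core Launch (console)",
        "type": "coreclr",
        "request": "launch",
        "program": "<runtime-repo>/artifacts/bin/testhost/netcoreapp-Linux-Debug-x64/dotnet",
        "cwd": "<runtime-repo>/artifacts/bin/System.Net.Sockets.Tests/netcoreapp-Linux-Debug-x64",
        "args": [
            "exec",
            "--runtimeconfig", "System.Net.Sockets.Tests.runtimeconfig.json",
            "xunit.console.dll", "System.Net.Sockets.Tests.dll",
            "-method", "System.Net.Sockets.Tests.{ClassName}.{TestMethodName}"
        ],
        "console": "internalConsole",
        "stopAtEntry": false
    }

The preLaunchTask property is deliberately absent, matching the deletion step in the walkthrough.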
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/eglib/gfile-unix.c
/* * File utility functions. * * Author: * Gonzalo Paniagua Javier ([email protected]) * * (C) 2006 Novell, Inc. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <config.h> #include <glib.h> #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> #include <errno.h> #include <fcntl.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif gboolean g_file_test (const gchar *filename, GFileTest test) { struct stat st; gboolean have_stat; if (filename == NULL || test == 0) return FALSE; have_stat = FALSE; if ((test & G_FILE_TEST_EXISTS) != 0) { if (access (filename, F_OK) == 0) return TRUE; } if ((test & G_FILE_TEST_IS_EXECUTABLE) != 0) { #if !defined(__PASE__) if (access (filename, X_OK) == 0) return TRUE; #else /* * PASE always returns true for X_OK; contrary to how AIX * behaves (but *does* correspond to how it's documented!). * This behaviour is also consistent with the ILE, so it's * probably just an upcall returning the same results. As * such, workaround it. */ if (!have_stat) have_stat = (stat (filename, &st) == 0); /* Hairy parens, but just manually try all permission bits */ if (have_stat && ( ((st.st_mode & S_IXOTH) || ((st.st_mode & S_IXUSR) && (st.st_uid == getuid())) || ((st.st_mode & S_IXGRP) && (st.st_gid == getgid()))))) return TRUE; #endif } #ifdef HAVE_LSTAT if ((test & G_FILE_TEST_IS_SYMLINK) != 0) { have_stat = (lstat (filename, &st) == 0); if (have_stat && S_ISLNK (st.st_mode)) return TRUE; } #endif if ((test & G_FILE_TEST_IS_REGULAR) != 0) { if (!have_stat) have_stat = (stat (filename, &st) == 0); if (have_stat && S_ISREG (st.st_mode)) return TRUE; } if ((test & G_FILE_TEST_IS_DIR) != 0) { if (!have_stat) have_stat = (stat (filename, &st) == 0); if (have_stat && S_ISDIR (st.st_mode)) return TRUE; } return FALSE; } gchar * g_mkdtemp (char *temp) { /* * On systems without mkdtemp, use a reimplemented version * adapted from the Win32 version of this file. AIX is an * exception because i before version 7.2 lacks mkdtemp in * libc, and GCC can "fix" system headers so that it isn't * present without redefining it. */ #if defined(HAVE_MKDTEMP) && !defined(_AIX) return mkdtemp (g_strdup (temp)); #elif defined(HOST_WASI) g_critical ("g_mkdtemp is not implemented for WASI\n"); return NULL; #else temp = mktemp (g_strdup (temp)); /* 0700 is the mode specified in specs */ if (temp && *temp && mkdir (temp, 0700) == 0) return temp; g_free (temp); return NULL; #endif }
/* * File utility functions. * * Author: * Gonzalo Paniagua Javier ([email protected]) * * (C) 2006 Novell, Inc. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <config.h> #include <glib.h> #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> #include <errno.h> #include <fcntl.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif gboolean g_file_test (const gchar *filename, GFileTest test) { struct stat st; gboolean have_stat; if (filename == NULL || test == 0) return FALSE; have_stat = FALSE; if ((test & G_FILE_TEST_EXISTS) != 0) { if (access (filename, F_OK) == 0) return TRUE; } if ((test & G_FILE_TEST_IS_EXECUTABLE) != 0) { #if !defined(__PASE__) if (access (filename, X_OK) == 0) return TRUE; #else /* * PASE always returns true for X_OK; contrary to how AIX * behaves (but *does* correspond to how it's documented!). * This behaviour is also consistent with the ILE, so it's * probably just an upcall returning the same results. As * such, workaround it. */ if (!have_stat) have_stat = (stat (filename, &st) == 0); /* Hairy parens, but just manually try all permission bits */ if (have_stat && ( ((st.st_mode & S_IXOTH) || ((st.st_mode & S_IXUSR) && (st.st_uid == getuid())) || ((st.st_mode & S_IXGRP) && (st.st_gid == getgid()))))) return TRUE; #endif } #ifdef HAVE_LSTAT if ((test & G_FILE_TEST_IS_SYMLINK) != 0) { have_stat = (lstat (filename, &st) == 0); if (have_stat && S_ISLNK (st.st_mode)) return TRUE; } #endif if ((test & G_FILE_TEST_IS_REGULAR) != 0) { if (!have_stat) have_stat = (stat (filename, &st) == 0); if (have_stat && S_ISREG (st.st_mode)) return TRUE; } if ((test & G_FILE_TEST_IS_DIR) != 0) { if (!have_stat) have_stat = (stat (filename, &st) == 0); if (have_stat && S_ISDIR (st.st_mode)) return TRUE; } return FALSE; } gchar * g_mkdtemp (char *temp) { /* * On systems without mkdtemp, use a reimplemented version * adapted from the Win32 version of this file. AIX is an * exception because i before version 7.2 lacks mkdtemp in * libc, and GCC can "fix" system headers so that it isn't * present without redefining it. */ #if defined(HAVE_MKDTEMP) && !defined(_AIX) return mkdtemp (g_strdup (temp)); #elif defined(HOST_WASI) g_critical ("g_mkdtemp is not implemented for WASI\n"); return NULL; #else temp = mktemp (g_strdup (temp)); /* 0700 is the mode specified in specs */ if (temp && *temp && mkdir (temp, 0700) == 0) return temp; g_free (temp); return NULL; #endif }
-1
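The g_mkdtemp fallback in this file is the classic two-step replacement: mktemp() picks a unique name, mkdir(..., 0700) claims it. A self-contained sketch of just that branch; the name mkdtemp_fallback is mine, and real code should keep preferring mkdtemp() where it exists, since the mktemp()+mkdir() pair is racy and most toolchains warn about mktemp():

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    static char* mkdtemp_fallback(char* tmpl) /* tmpl must end in "XXXXXX" */
    {
        char* name = mktemp(tmpl); /* fills in the XXXXXX; empty string on failure */
        if (name == NULL || *name == '\0')
            return NULL;
        /* 0700 is the mode the specification requires for the new directory */
        return (mkdir(name, 0700) == 0) ? name : NULL;
    }

    int main(void)
    {
        char buf[] = "/tmp/eglib-demo-XXXXXX";
        char* dir = mkdtemp_fallback(buf);
        printf("%s\n", dir != NULL ? dir : "failed");
        return 0;
    }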
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/native/external/brotli/enc/brotli_bit_stream.c
/* Copyright 2014 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ /* Brotli bit stream functions to support the low level format. There are no compression algorithms here, just the right ordering of bits to match the specs. */ #include "./brotli_bit_stream.h" #include <string.h> /* memcpy, memset */ #include "../common/constants.h" #include "../common/context.h" #include "../common/platform.h" #include <brotli/types.h> #include "./entropy_encode.h" #include "./entropy_encode_static.h" #include "./fast_log.h" #include "./histogram.h" #include "./memory.h" #include "./write_bits.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif #define MAX_HUFFMAN_TREE_SIZE (2 * BROTLI_NUM_COMMAND_SYMBOLS + 1) /* The maximum size of Huffman dictionary for distances assuming that NPOSTFIX = 0 and NDIRECT = 0. */ #define MAX_SIMPLE_DISTANCE_ALPHABET_SIZE \ BROTLI_DISTANCE_ALPHABET_SIZE(0, 0, BROTLI_LARGE_MAX_DISTANCE_BITS) /* MAX_SIMPLE_DISTANCE_ALPHABET_SIZE == 140 */ static BROTLI_INLINE uint32_t BlockLengthPrefixCode(uint32_t len) { uint32_t code = (len >= 177) ? (len >= 753 ? 20 : 14) : (len >= 41 ? 7 : 0); while (code < (BROTLI_NUM_BLOCK_LEN_SYMBOLS - 1) && len >= _kBrotliPrefixCodeRanges[code + 1].offset) ++code; return code; } static BROTLI_INLINE void GetBlockLengthPrefixCode(uint32_t len, size_t* code, uint32_t* n_extra, uint32_t* extra) { *code = BlockLengthPrefixCode(len); *n_extra = _kBrotliPrefixCodeRanges[*code].nbits; *extra = len - _kBrotliPrefixCodeRanges[*code].offset; } typedef struct BlockTypeCodeCalculator { size_t last_type; size_t second_last_type; } BlockTypeCodeCalculator; static void InitBlockTypeCodeCalculator(BlockTypeCodeCalculator* self) { self->last_type = 1; self->second_last_type = 0; } static BROTLI_INLINE size_t NextBlockTypeCode( BlockTypeCodeCalculator* calculator, uint8_t type) { size_t type_code = (type == calculator->last_type + 1) ? 1u : (type == calculator->second_last_type) ? 0u : type + 2u; calculator->second_last_type = calculator->last_type; calculator->last_type = type; return type_code; } /* |nibblesbits| represents the 2 bits to encode MNIBBLES (0-3) REQUIRES: length > 0 REQUIRES: length <= (1 << 24) */ static void BrotliEncodeMlen(size_t length, uint64_t* bits, size_t* numbits, uint64_t* nibblesbits) { size_t lg = (length == 1) ? 1 : Log2FloorNonZero((uint32_t)(length - 1)) + 1; size_t mnibbles = (lg < 16 ? 16 : (lg + 3)) / 4; BROTLI_DCHECK(length > 0); BROTLI_DCHECK(length <= (1 << 24)); BROTLI_DCHECK(lg <= 24); *nibblesbits = mnibbles - 4; *numbits = mnibbles * 4; *bits = length - 1; } static BROTLI_INLINE void StoreCommandExtra( const Command* cmd, size_t* storage_ix, uint8_t* storage) { uint32_t copylen_code = CommandCopyLenCode(cmd); uint16_t inscode = GetInsertLengthCode(cmd->insert_len_); uint16_t copycode = GetCopyLengthCode(copylen_code); uint32_t insnumextra = GetInsertExtra(inscode); uint64_t insextraval = cmd->insert_len_ - GetInsertBase(inscode); uint64_t copyextraval = copylen_code - GetCopyBase(copycode); uint64_t bits = (copyextraval << insnumextra) | insextraval; BrotliWriteBits( insnumextra + GetCopyExtra(copycode), bits, storage_ix, storage); } /* Data structure that stores almost everything that is needed to encode each block switch command. 
*/ typedef struct BlockSplitCode { BlockTypeCodeCalculator type_code_calculator; uint8_t type_depths[BROTLI_MAX_BLOCK_TYPE_SYMBOLS]; uint16_t type_bits[BROTLI_MAX_BLOCK_TYPE_SYMBOLS]; uint8_t length_depths[BROTLI_NUM_BLOCK_LEN_SYMBOLS]; uint16_t length_bits[BROTLI_NUM_BLOCK_LEN_SYMBOLS]; } BlockSplitCode; /* Stores a number between 0 and 255. */ static void StoreVarLenUint8(size_t n, size_t* storage_ix, uint8_t* storage) { if (n == 0) { BrotliWriteBits(1, 0, storage_ix, storage); } else { size_t nbits = Log2FloorNonZero(n); BrotliWriteBits(1, 1, storage_ix, storage); BrotliWriteBits(3, nbits, storage_ix, storage); BrotliWriteBits(nbits, n - ((size_t)1 << nbits), storage_ix, storage); } } /* Stores the compressed meta-block header. REQUIRES: length > 0 REQUIRES: length <= (1 << 24) */ static void StoreCompressedMetaBlockHeader(BROTLI_BOOL is_final_block, size_t length, size_t* storage_ix, uint8_t* storage) { uint64_t lenbits; size_t nlenbits; uint64_t nibblesbits; /* Write ISLAST bit. */ BrotliWriteBits(1, (uint64_t)is_final_block, storage_ix, storage); /* Write ISEMPTY bit. */ if (is_final_block) { BrotliWriteBits(1, 0, storage_ix, storage); } BrotliEncodeMlen(length, &lenbits, &nlenbits, &nibblesbits); BrotliWriteBits(2, nibblesbits, storage_ix, storage); BrotliWriteBits(nlenbits, lenbits, storage_ix, storage); if (!is_final_block) { /* Write ISUNCOMPRESSED bit. */ BrotliWriteBits(1, 0, storage_ix, storage); } } /* Stores the uncompressed meta-block header. REQUIRES: length > 0 REQUIRES: length <= (1 << 24) */ static void BrotliStoreUncompressedMetaBlockHeader(size_t length, size_t* storage_ix, uint8_t* storage) { uint64_t lenbits; size_t nlenbits; uint64_t nibblesbits; /* Write ISLAST bit. Uncompressed block cannot be the last one, so set to 0. */ BrotliWriteBits(1, 0, storage_ix, storage); BrotliEncodeMlen(length, &lenbits, &nlenbits, &nibblesbits); BrotliWriteBits(2, nibblesbits, storage_ix, storage); BrotliWriteBits(nlenbits, lenbits, storage_ix, storage); /* Write ISUNCOMPRESSED bit. */ BrotliWriteBits(1, 1, storage_ix, storage); } static void BrotliStoreHuffmanTreeOfHuffmanTreeToBitMask( const int num_codes, const uint8_t* code_length_bitdepth, size_t* storage_ix, uint8_t* storage) { static const uint8_t kStorageOrder[BROTLI_CODE_LENGTH_CODES] = { 1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; /* The bit lengths of the Huffman code over the code length alphabet are compressed with the following static Huffman code: Symbol Code ------ ---- 0 00 1 1110 2 110 3 01 4 10 5 1111 */ static const uint8_t kHuffmanBitLengthHuffmanCodeSymbols[6] = { 0, 7, 3, 2, 1, 15 }; static const uint8_t kHuffmanBitLengthHuffmanCodeBitLengths[6] = { 2, 4, 3, 2, 2, 4 }; size_t skip_some = 0; /* skips none. */ /* Throw away trailing zeros: */ size_t codes_to_store = BROTLI_CODE_LENGTH_CODES; if (num_codes > 1) { for (; codes_to_store > 0; --codes_to_store) { if (code_length_bitdepth[kStorageOrder[codes_to_store - 1]] != 0) { break; } } } if (code_length_bitdepth[kStorageOrder[0]] == 0 && code_length_bitdepth[kStorageOrder[1]] == 0) { skip_some = 2; /* skips two. */ if (code_length_bitdepth[kStorageOrder[2]] == 0) { skip_some = 3; /* skips three. 
*/ } } BrotliWriteBits(2, skip_some, storage_ix, storage); { size_t i; for (i = skip_some; i < codes_to_store; ++i) { size_t l = code_length_bitdepth[kStorageOrder[i]]; BrotliWriteBits(kHuffmanBitLengthHuffmanCodeBitLengths[l], kHuffmanBitLengthHuffmanCodeSymbols[l], storage_ix, storage); } } } static void BrotliStoreHuffmanTreeToBitMask( const size_t huffman_tree_size, const uint8_t* huffman_tree, const uint8_t* huffman_tree_extra_bits, const uint8_t* code_length_bitdepth, const uint16_t* code_length_bitdepth_symbols, size_t* BROTLI_RESTRICT storage_ix, uint8_t* BROTLI_RESTRICT storage) { size_t i; for (i = 0; i < huffman_tree_size; ++i) { size_t ix = huffman_tree[i]; BrotliWriteBits(code_length_bitdepth[ix], code_length_bitdepth_symbols[ix], storage_ix, storage); /* Extra bits */ switch (ix) { case BROTLI_REPEAT_PREVIOUS_CODE_LENGTH: BrotliWriteBits(2, huffman_tree_extra_bits[i], storage_ix, storage); break; case BROTLI_REPEAT_ZERO_CODE_LENGTH: BrotliWriteBits(3, huffman_tree_extra_bits[i], storage_ix, storage); break; } } } static void StoreSimpleHuffmanTree(const uint8_t* depths, size_t symbols[4], size_t num_symbols, size_t max_bits, size_t* storage_ix, uint8_t* storage) { /* value of 1 indicates a simple Huffman code */ BrotliWriteBits(2, 1, storage_ix, storage); BrotliWriteBits(2, num_symbols - 1, storage_ix, storage); /* NSYM - 1 */ { /* Sort */ size_t i; for (i = 0; i < num_symbols; i++) { size_t j; for (j = i + 1; j < num_symbols; j++) { if (depths[symbols[j]] < depths[symbols[i]]) { BROTLI_SWAP(size_t, symbols, j, i); } } } } if (num_symbols == 2) { BrotliWriteBits(max_bits, symbols[0], storage_ix, storage); BrotliWriteBits(max_bits, symbols[1], storage_ix, storage); } else if (num_symbols == 3) { BrotliWriteBits(max_bits, symbols[0], storage_ix, storage); BrotliWriteBits(max_bits, symbols[1], storage_ix, storage); BrotliWriteBits(max_bits, symbols[2], storage_ix, storage); } else { BrotliWriteBits(max_bits, symbols[0], storage_ix, storage); BrotliWriteBits(max_bits, symbols[1], storage_ix, storage); BrotliWriteBits(max_bits, symbols[2], storage_ix, storage); BrotliWriteBits(max_bits, symbols[3], storage_ix, storage); /* tree-select */ BrotliWriteBits(1, depths[symbols[0]] == 1 ? 1 : 0, storage_ix, storage); } } /* num = alphabet size depths = symbol depths */ void BrotliStoreHuffmanTree(const uint8_t* depths, size_t num, HuffmanTree* tree, size_t* storage_ix, uint8_t* storage) { /* Write the Huffman tree into the brotli-representation. The command alphabet is the largest, so this allocation will fit all alphabets. */ uint8_t huffman_tree[BROTLI_NUM_COMMAND_SYMBOLS]; uint8_t huffman_tree_extra_bits[BROTLI_NUM_COMMAND_SYMBOLS]; size_t huffman_tree_size = 0; uint8_t code_length_bitdepth[BROTLI_CODE_LENGTH_CODES] = { 0 }; uint16_t code_length_bitdepth_symbols[BROTLI_CODE_LENGTH_CODES]; uint32_t huffman_tree_histogram[BROTLI_CODE_LENGTH_CODES] = { 0 }; size_t i; int num_codes = 0; size_t code = 0; BROTLI_DCHECK(num <= BROTLI_NUM_COMMAND_SYMBOLS); BrotliWriteHuffmanTree(depths, num, &huffman_tree_size, huffman_tree, huffman_tree_extra_bits); /* Calculate the statistics of the Huffman tree in brotli-representation. */ for (i = 0; i < huffman_tree_size; ++i) { ++huffman_tree_histogram[huffman_tree[i]]; } for (i = 0; i < BROTLI_CODE_LENGTH_CODES; ++i) { if (huffman_tree_histogram[i]) { if (num_codes == 0) { code = i; num_codes = 1; } else if (num_codes == 1) { num_codes = 2; break; } } } /* Calculate another Huffman tree to use for compressing both the earlier Huffman tree with. 
*/ BrotliCreateHuffmanTree(huffman_tree_histogram, BROTLI_CODE_LENGTH_CODES, 5, tree, code_length_bitdepth); BrotliConvertBitDepthsToSymbols(code_length_bitdepth, BROTLI_CODE_LENGTH_CODES, code_length_bitdepth_symbols); /* Now, we have all the data, let's start storing it */ BrotliStoreHuffmanTreeOfHuffmanTreeToBitMask(num_codes, code_length_bitdepth, storage_ix, storage); if (num_codes == 1) { code_length_bitdepth[code] = 0; } /* Store the real Huffman tree now. */ BrotliStoreHuffmanTreeToBitMask(huffman_tree_size, huffman_tree, huffman_tree_extra_bits, code_length_bitdepth, code_length_bitdepth_symbols, storage_ix, storage); } /* Builds a Huffman tree from histogram[0:length] into depth[0:length] and bits[0:length] and stores the encoded tree to the bit stream. */ static void BuildAndStoreHuffmanTree(const uint32_t* histogram, const size_t histogram_length, const size_t alphabet_size, HuffmanTree* tree, uint8_t* depth, uint16_t* bits, size_t* storage_ix, uint8_t* storage) { size_t count = 0; size_t s4[4] = { 0 }; size_t i; size_t max_bits = 0; for (i = 0; i < histogram_length; i++) { if (histogram[i]) { if (count < 4) { s4[count] = i; } else if (count > 4) { break; } count++; } } { size_t max_bits_counter = alphabet_size - 1; while (max_bits_counter) { max_bits_counter >>= 1; ++max_bits; } } if (count <= 1) { BrotliWriteBits(4, 1, storage_ix, storage); BrotliWriteBits(max_bits, s4[0], storage_ix, storage); depth[s4[0]] = 0; bits[s4[0]] = 0; return; } memset(depth, 0, histogram_length * sizeof(depth[0])); BrotliCreateHuffmanTree(histogram, histogram_length, 15, tree, depth); BrotliConvertBitDepthsToSymbols(depth, histogram_length, bits); if (count <= 4) { StoreSimpleHuffmanTree(depth, s4, count, max_bits, storage_ix, storage); } else { BrotliStoreHuffmanTree(depth, histogram_length, tree, storage_ix, storage); } } static BROTLI_INLINE BROTLI_BOOL SortHuffmanTree( const HuffmanTree* v0, const HuffmanTree* v1) { return TO_BROTLI_BOOL(v0->total_count_ < v1->total_count_); } void BrotliBuildAndStoreHuffmanTreeFast(MemoryManager* m, const uint32_t* histogram, const size_t histogram_total, const size_t max_bits, uint8_t* depth, uint16_t* bits, size_t* storage_ix, uint8_t* storage) { size_t count = 0; size_t symbols[4] = { 0 }; size_t length = 0; size_t total = histogram_total; while (total != 0) { if (histogram[length]) { if (count < 4) { symbols[count] = length; } ++count; total -= histogram[length]; } ++length; } if (count <= 1) { BrotliWriteBits(4, 1, storage_ix, storage); BrotliWriteBits(max_bits, symbols[0], storage_ix, storage); depth[symbols[0]] = 0; bits[symbols[0]] = 0; return; } memset(depth, 0, length * sizeof(depth[0])); { const size_t max_tree_size = 2 * length + 1; HuffmanTree* tree = BROTLI_ALLOC(m, HuffmanTree, max_tree_size); uint32_t count_limit; if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(tree)) return; for (count_limit = 1; ; count_limit *= 2) { HuffmanTree* node = tree; size_t l; for (l = length; l != 0;) { --l; if (histogram[l]) { if (BROTLI_PREDICT_TRUE(histogram[l] >= count_limit)) { InitHuffmanTree(node, histogram[l], -1, (int16_t)l); } else { InitHuffmanTree(node, count_limit, -1, (int16_t)l); } ++node; } } { const int n = (int)(node - tree); HuffmanTree sentinel; int i = 0; /* Points to the next leaf node. */ int j = n + 1; /* Points to the next non-leaf node. */ int k; SortHuffmanTreeItems(tree, (size_t)n, SortHuffmanTree); /* The nodes are: [0, n): the sorted leaf nodes that we start with. [n]: we add a sentinel here. 
[n + 1, 2n): new parent nodes are added here, starting from (n+1). These are naturally in ascending order. [2n]: we add a sentinel at the end as well. There will be (2n+1) elements at the end. */ InitHuffmanTree(&sentinel, BROTLI_UINT32_MAX, -1, -1); *node++ = sentinel; *node++ = sentinel; for (k = n - 1; k > 0; --k) { int left, right; if (tree[i].total_count_ <= tree[j].total_count_) { left = i; ++i; } else { left = j; ++j; } if (tree[i].total_count_ <= tree[j].total_count_) { right = i; ++i; } else { right = j; ++j; } /* The sentinel node becomes the parent node. */ node[-1].total_count_ = tree[left].total_count_ + tree[right].total_count_; node[-1].index_left_ = (int16_t)left; node[-1].index_right_or_value_ = (int16_t)right; /* Add back the last sentinel node. */ *node++ = sentinel; } if (BrotliSetDepth(2 * n - 1, tree, depth, 14)) { /* We need to pack the Huffman tree in 14 bits. If this was not successful, add fake entities to the lowest values and retry. */ break; } } } BROTLI_FREE(m, tree); } BrotliConvertBitDepthsToSymbols(depth, length, bits); if (count <= 4) { size_t i; /* value of 1 indicates a simple Huffman code */ BrotliWriteBits(2, 1, storage_ix, storage); BrotliWriteBits(2, count - 1, storage_ix, storage); /* NSYM - 1 */ /* Sort */ for (i = 0; i < count; i++) { size_t j; for (j = i + 1; j < count; j++) { if (depth[symbols[j]] < depth[symbols[i]]) { BROTLI_SWAP(size_t, symbols, j, i); } } } if (count == 2) { BrotliWriteBits(max_bits, symbols[0], storage_ix, storage); BrotliWriteBits(max_bits, symbols[1], storage_ix, storage); } else if (count == 3) { BrotliWriteBits(max_bits, symbols[0], storage_ix, storage); BrotliWriteBits(max_bits, symbols[1], storage_ix, storage); BrotliWriteBits(max_bits, symbols[2], storage_ix, storage); } else { BrotliWriteBits(max_bits, symbols[0], storage_ix, storage); BrotliWriteBits(max_bits, symbols[1], storage_ix, storage); BrotliWriteBits(max_bits, symbols[2], storage_ix, storage); BrotliWriteBits(max_bits, symbols[3], storage_ix, storage); /* tree-select */ BrotliWriteBits(1, depth[symbols[0]] == 1 ? 1 : 0, storage_ix, storage); } } else { uint8_t previous_value = 8; size_t i; /* Complex Huffman Tree */ StoreStaticCodeLengthCode(storage_ix, storage); /* Actual RLE coding. 
*/ for (i = 0; i < length;) { const uint8_t value = depth[i]; size_t reps = 1; size_t k; for (k = i + 1; k < length && depth[k] == value; ++k) { ++reps; } i += reps; if (value == 0) { BrotliWriteBits(kZeroRepsDepth[reps], kZeroRepsBits[reps], storage_ix, storage); } else { if (previous_value != value) { BrotliWriteBits(kCodeLengthDepth[value], kCodeLengthBits[value], storage_ix, storage); --reps; } if (reps < 3) { while (reps != 0) { reps--; BrotliWriteBits(kCodeLengthDepth[value], kCodeLengthBits[value], storage_ix, storage); } } else { reps -= 3; BrotliWriteBits(kNonZeroRepsDepth[reps], kNonZeroRepsBits[reps], storage_ix, storage); } previous_value = value; } } } } static size_t IndexOf(const uint8_t* v, size_t v_size, uint8_t value) { size_t i = 0; for (; i < v_size; ++i) { if (v[i] == value) return i; } return i; } static void MoveToFront(uint8_t* v, size_t index) { uint8_t value = v[index]; size_t i; for (i = index; i != 0; --i) { v[i] = v[i - 1]; } v[0] = value; } static void MoveToFrontTransform(const uint32_t* BROTLI_RESTRICT v_in, const size_t v_size, uint32_t* v_out) { size_t i; uint8_t mtf[256]; uint32_t max_value; if (v_size == 0) { return; } max_value = v_in[0]; for (i = 1; i < v_size; ++i) { if (v_in[i] > max_value) max_value = v_in[i]; } BROTLI_DCHECK(max_value < 256u); for (i = 0; i <= max_value; ++i) { mtf[i] = (uint8_t)i; } { size_t mtf_size = max_value + 1; for (i = 0; i < v_size; ++i) { size_t index = IndexOf(mtf, mtf_size, (uint8_t)v_in[i]); BROTLI_DCHECK(index < mtf_size); v_out[i] = (uint32_t)index; MoveToFront(mtf, index); } } } /* Finds runs of zeros in v[0..in_size) and replaces them with a prefix code of the run length plus extra bits (lower 9 bits is the prefix code and the rest are the extra bits). Non-zero values in v[] are shifted by *max_length_prefix. Will not create prefix codes bigger than the initial value of *max_run_length_prefix. The prefix code of run length L is simply Log2Floor(L) and the number of extra bits is the same as the prefix code. */ static void RunLengthCodeZeros(const size_t in_size, uint32_t* BROTLI_RESTRICT v, size_t* BROTLI_RESTRICT out_size, uint32_t* BROTLI_RESTRICT max_run_length_prefix) { uint32_t max_reps = 0; size_t i; uint32_t max_prefix; for (i = 0; i < in_size;) { uint32_t reps = 0; for (; i < in_size && v[i] != 0; ++i) ; for (; i < in_size && v[i] == 0; ++i) { ++reps; } max_reps = BROTLI_MAX(uint32_t, reps, max_reps); } max_prefix = max_reps > 0 ? 
Log2FloorNonZero(max_reps) : 0; max_prefix = BROTLI_MIN(uint32_t, max_prefix, *max_run_length_prefix); *max_run_length_prefix = max_prefix; *out_size = 0; for (i = 0; i < in_size;) { BROTLI_DCHECK(*out_size <= i); if (v[i] != 0) { v[*out_size] = v[i] + *max_run_length_prefix; ++i; ++(*out_size); } else { uint32_t reps = 1; size_t k; for (k = i + 1; k < in_size && v[k] == 0; ++k) { ++reps; } i += reps; while (reps != 0) { if (reps < (2u << max_prefix)) { uint32_t run_length_prefix = Log2FloorNonZero(reps); const uint32_t extra_bits = reps - (1u << run_length_prefix); v[*out_size] = run_length_prefix + (extra_bits << 9); ++(*out_size); break; } else { const uint32_t extra_bits = (1u << max_prefix) - 1u; v[*out_size] = max_prefix + (extra_bits << 9); reps -= (2u << max_prefix) - 1u; ++(*out_size); } } } } } #define SYMBOL_BITS 9 static void EncodeContextMap(MemoryManager* m, const uint32_t* context_map, size_t context_map_size, size_t num_clusters, HuffmanTree* tree, size_t* storage_ix, uint8_t* storage) { size_t i; uint32_t* rle_symbols; uint32_t max_run_length_prefix = 6; size_t num_rle_symbols = 0; uint32_t histogram[BROTLI_MAX_CONTEXT_MAP_SYMBOLS]; static const uint32_t kSymbolMask = (1u << SYMBOL_BITS) - 1u; uint8_t depths[BROTLI_MAX_CONTEXT_MAP_SYMBOLS]; uint16_t bits[BROTLI_MAX_CONTEXT_MAP_SYMBOLS]; StoreVarLenUint8(num_clusters - 1, storage_ix, storage); if (num_clusters == 1) { return; } rle_symbols = BROTLI_ALLOC(m, uint32_t, context_map_size); if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(rle_symbols)) return; MoveToFrontTransform(context_map, context_map_size, rle_symbols); RunLengthCodeZeros(context_map_size, rle_symbols, &num_rle_symbols, &max_run_length_prefix); memset(histogram, 0, sizeof(histogram)); for (i = 0; i < num_rle_symbols; ++i) { ++histogram[rle_symbols[i] & kSymbolMask]; } { BROTLI_BOOL use_rle = TO_BROTLI_BOOL(max_run_length_prefix > 0); BrotliWriteBits(1, (uint64_t)use_rle, storage_ix, storage); if (use_rle) { BrotliWriteBits(4, max_run_length_prefix - 1, storage_ix, storage); } } BuildAndStoreHuffmanTree(histogram, num_clusters + max_run_length_prefix, num_clusters + max_run_length_prefix, tree, depths, bits, storage_ix, storage); for (i = 0; i < num_rle_symbols; ++i) { const uint32_t rle_symbol = rle_symbols[i] & kSymbolMask; const uint32_t extra_bits_val = rle_symbols[i] >> SYMBOL_BITS; BrotliWriteBits(depths[rle_symbol], bits[rle_symbol], storage_ix, storage); if (rle_symbol > 0 && rle_symbol <= max_run_length_prefix) { BrotliWriteBits(rle_symbol, extra_bits_val, storage_ix, storage); } } BrotliWriteBits(1, 1, storage_ix, storage); /* use move-to-front */ BROTLI_FREE(m, rle_symbols); } /* Stores the block switch command with index block_ix to the bit stream. */ static BROTLI_INLINE void StoreBlockSwitch(BlockSplitCode* code, const uint32_t block_len, const uint8_t block_type, BROTLI_BOOL is_first_block, size_t* storage_ix, uint8_t* storage) { size_t typecode = NextBlockTypeCode(&code->type_code_calculator, block_type); size_t lencode; uint32_t len_nextra; uint32_t len_extra; if (!is_first_block) { BrotliWriteBits(code->type_depths[typecode], code->type_bits[typecode], storage_ix, storage); } GetBlockLengthPrefixCode(block_len, &lencode, &len_nextra, &len_extra); BrotliWriteBits(code->length_depths[lencode], code->length_bits[lencode], storage_ix, storage); BrotliWriteBits(len_nextra, len_extra, storage_ix, storage); } /* Builds a BlockSplitCode data structure from the block split given by the vector of block types and block lengths and stores it to the bit stream. 
*/ static void BuildAndStoreBlockSplitCode(const uint8_t* types, const uint32_t* lengths, const size_t num_blocks, const size_t num_types, HuffmanTree* tree, BlockSplitCode* code, size_t* storage_ix, uint8_t* storage) { uint32_t type_histo[BROTLI_MAX_BLOCK_TYPE_SYMBOLS]; uint32_t length_histo[BROTLI_NUM_BLOCK_LEN_SYMBOLS]; size_t i; BlockTypeCodeCalculator type_code_calculator; memset(type_histo, 0, (num_types + 2) * sizeof(type_histo[0])); memset(length_histo, 0, sizeof(length_histo)); InitBlockTypeCodeCalculator(&type_code_calculator); for (i = 0; i < num_blocks; ++i) { size_t type_code = NextBlockTypeCode(&type_code_calculator, types[i]); if (i != 0) ++type_histo[type_code]; ++length_histo[BlockLengthPrefixCode(lengths[i])]; } StoreVarLenUint8(num_types - 1, storage_ix, storage); if (num_types > 1) { /* TODO: else? could StoreBlockSwitch occur? */ BuildAndStoreHuffmanTree(&type_histo[0], num_types + 2, num_types + 2, tree, &code->type_depths[0], &code->type_bits[0], storage_ix, storage); BuildAndStoreHuffmanTree(&length_histo[0], BROTLI_NUM_BLOCK_LEN_SYMBOLS, BROTLI_NUM_BLOCK_LEN_SYMBOLS, tree, &code->length_depths[0], &code->length_bits[0], storage_ix, storage); StoreBlockSwitch(code, lengths[0], types[0], 1, storage_ix, storage); } } /* Stores a context map where the histogram type is always the block type. */ static void StoreTrivialContextMap(size_t num_types, size_t context_bits, HuffmanTree* tree, size_t* storage_ix, uint8_t* storage) { StoreVarLenUint8(num_types - 1, storage_ix, storage); if (num_types > 1) { size_t repeat_code = context_bits - 1u; size_t repeat_bits = (1u << repeat_code) - 1u; size_t alphabet_size = num_types + repeat_code; uint32_t histogram[BROTLI_MAX_CONTEXT_MAP_SYMBOLS]; uint8_t depths[BROTLI_MAX_CONTEXT_MAP_SYMBOLS]; uint16_t bits[BROTLI_MAX_CONTEXT_MAP_SYMBOLS]; size_t i; memset(histogram, 0, alphabet_size * sizeof(histogram[0])); /* Write RLEMAX. */ BrotliWriteBits(1, 1, storage_ix, storage); BrotliWriteBits(4, repeat_code - 1, storage_ix, storage); histogram[repeat_code] = (uint32_t)num_types; histogram[0] = 1; for (i = context_bits; i < alphabet_size; ++i) { histogram[i] = 1; } BuildAndStoreHuffmanTree(histogram, alphabet_size, alphabet_size, tree, depths, bits, storage_ix, storage); for (i = 0; i < num_types; ++i) { size_t code = (i == 0 ? 0 : i + context_bits - 1); BrotliWriteBits(depths[code], bits[code], storage_ix, storage); BrotliWriteBits( depths[repeat_code], bits[repeat_code], storage_ix, storage); BrotliWriteBits(repeat_code, repeat_bits, storage_ix, storage); } /* Write IMTF (inverse-move-to-front) bit. */ BrotliWriteBits(1, 1, storage_ix, storage); } } /* Manages the encoding of one block category (literal, command or distance). */ typedef struct BlockEncoder { size_t histogram_length_; size_t num_block_types_; const uint8_t* block_types_; /* Not owned. */ const uint32_t* block_lengths_; /* Not owned. 
*/ size_t num_blocks_; BlockSplitCode block_split_code_; size_t block_ix_; size_t block_len_; size_t entropy_ix_; uint8_t* depths_; uint16_t* bits_; } BlockEncoder; static void InitBlockEncoder(BlockEncoder* self, size_t histogram_length, size_t num_block_types, const uint8_t* block_types, const uint32_t* block_lengths, const size_t num_blocks) { self->histogram_length_ = histogram_length; self->num_block_types_ = num_block_types; self->block_types_ = block_types; self->block_lengths_ = block_lengths; self->num_blocks_ = num_blocks; InitBlockTypeCodeCalculator(&self->block_split_code_.type_code_calculator); self->block_ix_ = 0; self->block_len_ = num_blocks == 0 ? 0 : block_lengths[0]; self->entropy_ix_ = 0; self->depths_ = 0; self->bits_ = 0; } static void CleanupBlockEncoder(MemoryManager* m, BlockEncoder* self) { BROTLI_FREE(m, self->depths_); BROTLI_FREE(m, self->bits_); } /* Creates entropy codes of block lengths and block types and stores them to the bit stream. */ static void BuildAndStoreBlockSwitchEntropyCodes(BlockEncoder* self, HuffmanTree* tree, size_t* storage_ix, uint8_t* storage) { BuildAndStoreBlockSplitCode(self->block_types_, self->block_lengths_, self->num_blocks_, self->num_block_types_, tree, &self->block_split_code_, storage_ix, storage); } /* Stores the next symbol with the entropy code of the current block type. Updates the block type and block length at block boundaries. */ static void StoreSymbol(BlockEncoder* self, size_t symbol, size_t* storage_ix, uint8_t* storage) { if (self->block_len_ == 0) { size_t block_ix = ++self->block_ix_; uint32_t block_len = self->block_lengths_[block_ix]; uint8_t block_type = self->block_types_[block_ix]; self->block_len_ = block_len; self->entropy_ix_ = block_type * self->histogram_length_; StoreBlockSwitch(&self->block_split_code_, block_len, block_type, 0, storage_ix, storage); } --self->block_len_; { size_t ix = self->entropy_ix_ + symbol; BrotliWriteBits(self->depths_[ix], self->bits_[ix], storage_ix, storage); } } /* Stores the next symbol with the entropy code of the current block type and context value. Updates the block type and block length at block boundaries. 
*/ static void StoreSymbolWithContext(BlockEncoder* self, size_t symbol, size_t context, const uint32_t* context_map, size_t* storage_ix, uint8_t* storage, const size_t context_bits) { if (self->block_len_ == 0) { size_t block_ix = ++self->block_ix_; uint32_t block_len = self->block_lengths_[block_ix]; uint8_t block_type = self->block_types_[block_ix]; self->block_len_ = block_len; self->entropy_ix_ = (size_t)block_type << context_bits; StoreBlockSwitch(&self->block_split_code_, block_len, block_type, 0, storage_ix, storage); } --self->block_len_; { size_t histo_ix = context_map[self->entropy_ix_ + context]; size_t ix = histo_ix * self->histogram_length_ + symbol; BrotliWriteBits(self->depths_[ix], self->bits_[ix], storage_ix, storage); } } #define FN(X) X ## Literal /* NOLINTNEXTLINE(build/include) */ #include "./block_encoder_inc.h" #undef FN #define FN(X) X ## Command /* NOLINTNEXTLINE(build/include) */ #include "./block_encoder_inc.h" #undef FN #define FN(X) X ## Distance /* NOLINTNEXTLINE(build/include) */ #include "./block_encoder_inc.h" #undef FN static void JumpToByteBoundary(size_t* storage_ix, uint8_t* storage) { *storage_ix = (*storage_ix + 7u) & ~7u; storage[*storage_ix >> 3] = 0; } void BrotliStoreMetaBlock(MemoryManager* m, const uint8_t* input, size_t start_pos, size_t length, size_t mask, uint8_t prev_byte, uint8_t prev_byte2, BROTLI_BOOL is_last, const BrotliEncoderParams* params, ContextType literal_context_mode, const Command* commands, size_t n_commands, const MetaBlockSplit* mb, size_t* storage_ix, uint8_t* storage) { size_t pos = start_pos; size_t i; uint32_t num_distance_symbols = params->dist.alphabet_size_max; uint32_t num_effective_distance_symbols = params->dist.alphabet_size_limit; HuffmanTree* tree; ContextLut literal_context_lut = BROTLI_CONTEXT_LUT(literal_context_mode); BlockEncoder literal_enc; BlockEncoder command_enc; BlockEncoder distance_enc; const BrotliDistanceParams* dist = &params->dist; BROTLI_DCHECK( num_effective_distance_symbols <= BROTLI_NUM_HISTOGRAM_DISTANCE_SYMBOLS); StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); tree = BROTLI_ALLOC(m, HuffmanTree, MAX_HUFFMAN_TREE_SIZE); if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(tree)) return; InitBlockEncoder(&literal_enc, BROTLI_NUM_LITERAL_SYMBOLS, mb->literal_split.num_types, mb->literal_split.types, mb->literal_split.lengths, mb->literal_split.num_blocks); InitBlockEncoder(&command_enc, BROTLI_NUM_COMMAND_SYMBOLS, mb->command_split.num_types, mb->command_split.types, mb->command_split.lengths, mb->command_split.num_blocks); InitBlockEncoder(&distance_enc, num_effective_distance_symbols, mb->distance_split.num_types, mb->distance_split.types, mb->distance_split.lengths, mb->distance_split.num_blocks); BuildAndStoreBlockSwitchEntropyCodes(&literal_enc, tree, storage_ix, storage); BuildAndStoreBlockSwitchEntropyCodes(&command_enc, tree, storage_ix, storage); BuildAndStoreBlockSwitchEntropyCodes( &distance_enc, tree, storage_ix, storage); BrotliWriteBits(2, dist->distance_postfix_bits, storage_ix, storage); BrotliWriteBits( 4, dist->num_direct_distance_codes >> dist->distance_postfix_bits, storage_ix, storage); for (i = 0; i < mb->literal_split.num_types; ++i) { BrotliWriteBits(2, literal_context_mode, storage_ix, storage); } if (mb->literal_context_map_size == 0) { StoreTrivialContextMap(mb->literal_histograms_size, BROTLI_LITERAL_CONTEXT_BITS, tree, storage_ix, storage); } else { EncodeContextMap(m, mb->literal_context_map, mb->literal_context_map_size, mb->literal_histograms_size, 
tree, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; } if (mb->distance_context_map_size == 0) { StoreTrivialContextMap(mb->distance_histograms_size, BROTLI_DISTANCE_CONTEXT_BITS, tree, storage_ix, storage); } else { EncodeContextMap(m, mb->distance_context_map, mb->distance_context_map_size, mb->distance_histograms_size, tree, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; } BuildAndStoreEntropyCodesLiteral(m, &literal_enc, mb->literal_histograms, mb->literal_histograms_size, BROTLI_NUM_LITERAL_SYMBOLS, tree, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; BuildAndStoreEntropyCodesCommand(m, &command_enc, mb->command_histograms, mb->command_histograms_size, BROTLI_NUM_COMMAND_SYMBOLS, tree, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; BuildAndStoreEntropyCodesDistance(m, &distance_enc, mb->distance_histograms, mb->distance_histograms_size, num_distance_symbols, tree, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; BROTLI_FREE(m, tree); for (i = 0; i < n_commands; ++i) { const Command cmd = commands[i]; size_t cmd_code = cmd.cmd_prefix_; StoreSymbol(&command_enc, cmd_code, storage_ix, storage); StoreCommandExtra(&cmd, storage_ix, storage); if (mb->literal_context_map_size == 0) { size_t j; for (j = cmd.insert_len_; j != 0; --j) { StoreSymbol(&literal_enc, input[pos & mask], storage_ix, storage); ++pos; } } else { size_t j; for (j = cmd.insert_len_; j != 0; --j) { size_t context = BROTLI_CONTEXT(prev_byte, prev_byte2, literal_context_lut); uint8_t literal = input[pos & mask]; StoreSymbolWithContext(&literal_enc, literal, context, mb->literal_context_map, storage_ix, storage, BROTLI_LITERAL_CONTEXT_BITS); prev_byte2 = prev_byte; prev_byte = literal; ++pos; } } pos += CommandCopyLen(&cmd); if (CommandCopyLen(&cmd)) { prev_byte2 = input[(pos - 2) & mask]; prev_byte = input[(pos - 1) & mask]; if (cmd.cmd_prefix_ >= 128) { size_t dist_code = cmd.dist_prefix_ & 0x3FF; uint32_t distnumextra = cmd.dist_prefix_ >> 10; uint64_t distextra = cmd.dist_extra_; if (mb->distance_context_map_size == 0) { StoreSymbol(&distance_enc, dist_code, storage_ix, storage); } else { size_t context = CommandDistanceContext(&cmd); StoreSymbolWithContext(&distance_enc, dist_code, context, mb->distance_context_map, storage_ix, storage, BROTLI_DISTANCE_CONTEXT_BITS); } BrotliWriteBits(distnumextra, distextra, storage_ix, storage); } } } CleanupBlockEncoder(m, &distance_enc); CleanupBlockEncoder(m, &command_enc); CleanupBlockEncoder(m, &literal_enc); if (is_last) { JumpToByteBoundary(storage_ix, storage); } } static void BuildHistograms(const uint8_t* input, size_t start_pos, size_t mask, const Command* commands, size_t n_commands, HistogramLiteral* lit_histo, HistogramCommand* cmd_histo, HistogramDistance* dist_histo) { size_t pos = start_pos; size_t i; for (i = 0; i < n_commands; ++i) { const Command cmd = commands[i]; size_t j; HistogramAddCommand(cmd_histo, cmd.cmd_prefix_); for (j = cmd.insert_len_; j != 0; --j) { HistogramAddLiteral(lit_histo, input[pos & mask]); ++pos; } pos += CommandCopyLen(&cmd); if (CommandCopyLen(&cmd) && cmd.cmd_prefix_ >= 128) { HistogramAddDistance(dist_histo, cmd.dist_prefix_ & 0x3FF); } } } static void StoreDataWithHuffmanCodes(const uint8_t* input, size_t start_pos, size_t mask, const Command* commands, size_t n_commands, const uint8_t* lit_depth, const uint16_t* lit_bits, const uint8_t* cmd_depth, const uint16_t* cmd_bits, const uint8_t* dist_depth, const uint16_t* dist_bits, size_t* storage_ix, uint8_t* storage) { size_t pos = start_pos; size_t i; for (i = 0; i < 
n_commands; ++i) { const Command cmd = commands[i]; const size_t cmd_code = cmd.cmd_prefix_; size_t j; BrotliWriteBits( cmd_depth[cmd_code], cmd_bits[cmd_code], storage_ix, storage); StoreCommandExtra(&cmd, storage_ix, storage); for (j = cmd.insert_len_; j != 0; --j) { const uint8_t literal = input[pos & mask]; BrotliWriteBits( lit_depth[literal], lit_bits[literal], storage_ix, storage); ++pos; } pos += CommandCopyLen(&cmd); if (CommandCopyLen(&cmd) && cmd.cmd_prefix_ >= 128) { const size_t dist_code = cmd.dist_prefix_ & 0x3FF; const uint32_t distnumextra = cmd.dist_prefix_ >> 10; const uint32_t distextra = cmd.dist_extra_; BrotliWriteBits(dist_depth[dist_code], dist_bits[dist_code], storage_ix, storage); BrotliWriteBits(distnumextra, distextra, storage_ix, storage); } } } void BrotliStoreMetaBlockTrivial(MemoryManager* m, const uint8_t* input, size_t start_pos, size_t length, size_t mask, BROTLI_BOOL is_last, const BrotliEncoderParams* params, const Command* commands, size_t n_commands, size_t* storage_ix, uint8_t* storage) { HistogramLiteral lit_histo; HistogramCommand cmd_histo; HistogramDistance dist_histo; uint8_t lit_depth[BROTLI_NUM_LITERAL_SYMBOLS]; uint16_t lit_bits[BROTLI_NUM_LITERAL_SYMBOLS]; uint8_t cmd_depth[BROTLI_NUM_COMMAND_SYMBOLS]; uint16_t cmd_bits[BROTLI_NUM_COMMAND_SYMBOLS]; uint8_t dist_depth[MAX_SIMPLE_DISTANCE_ALPHABET_SIZE]; uint16_t dist_bits[MAX_SIMPLE_DISTANCE_ALPHABET_SIZE]; HuffmanTree* tree; uint32_t num_distance_symbols = params->dist.alphabet_size_max; StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); HistogramClearLiteral(&lit_histo); HistogramClearCommand(&cmd_histo); HistogramClearDistance(&dist_histo); BuildHistograms(input, start_pos, mask, commands, n_commands, &lit_histo, &cmd_histo, &dist_histo); BrotliWriteBits(13, 0, storage_ix, storage); tree = BROTLI_ALLOC(m, HuffmanTree, MAX_HUFFMAN_TREE_SIZE); if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(tree)) return; BuildAndStoreHuffmanTree(lit_histo.data_, BROTLI_NUM_LITERAL_SYMBOLS, BROTLI_NUM_LITERAL_SYMBOLS, tree, lit_depth, lit_bits, storage_ix, storage); BuildAndStoreHuffmanTree(cmd_histo.data_, BROTLI_NUM_COMMAND_SYMBOLS, BROTLI_NUM_COMMAND_SYMBOLS, tree, cmd_depth, cmd_bits, storage_ix, storage); BuildAndStoreHuffmanTree(dist_histo.data_, MAX_SIMPLE_DISTANCE_ALPHABET_SIZE, num_distance_symbols, tree, dist_depth, dist_bits, storage_ix, storage); BROTLI_FREE(m, tree); StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, lit_depth, lit_bits, cmd_depth, cmd_bits, dist_depth, dist_bits, storage_ix, storage); if (is_last) { JumpToByteBoundary(storage_ix, storage); } } void BrotliStoreMetaBlockFast(MemoryManager* m, const uint8_t* input, size_t start_pos, size_t length, size_t mask, BROTLI_BOOL is_last, const BrotliEncoderParams* params, const Command* commands, size_t n_commands, size_t* storage_ix, uint8_t* storage) { uint32_t num_distance_symbols = params->dist.alphabet_size_max; uint32_t distance_alphabet_bits = Log2FloorNonZero(num_distance_symbols - 1) + 1; StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); BrotliWriteBits(13, 0, storage_ix, storage); if (n_commands <= 128) { uint32_t histogram[BROTLI_NUM_LITERAL_SYMBOLS] = { 0 }; size_t pos = start_pos; size_t num_literals = 0; size_t i; uint8_t lit_depth[BROTLI_NUM_LITERAL_SYMBOLS]; uint16_t lit_bits[BROTLI_NUM_LITERAL_SYMBOLS]; for (i = 0; i < n_commands; ++i) { const Command cmd = commands[i]; size_t j; for (j = cmd.insert_len_; j != 0; --j) { ++histogram[input[pos & mask]]; ++pos; } 
num_literals += cmd.insert_len_; pos += CommandCopyLen(&cmd); } BrotliBuildAndStoreHuffmanTreeFast(m, histogram, num_literals, /* max_bits = */ 8, lit_depth, lit_bits, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; StoreStaticCommandHuffmanTree(storage_ix, storage); StoreStaticDistanceHuffmanTree(storage_ix, storage); StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, lit_depth, lit_bits, kStaticCommandCodeDepth, kStaticCommandCodeBits, kStaticDistanceCodeDepth, kStaticDistanceCodeBits, storage_ix, storage); } else { HistogramLiteral lit_histo; HistogramCommand cmd_histo; HistogramDistance dist_histo; uint8_t lit_depth[BROTLI_NUM_LITERAL_SYMBOLS]; uint16_t lit_bits[BROTLI_NUM_LITERAL_SYMBOLS]; uint8_t cmd_depth[BROTLI_NUM_COMMAND_SYMBOLS]; uint16_t cmd_bits[BROTLI_NUM_COMMAND_SYMBOLS]; uint8_t dist_depth[MAX_SIMPLE_DISTANCE_ALPHABET_SIZE]; uint16_t dist_bits[MAX_SIMPLE_DISTANCE_ALPHABET_SIZE]; HistogramClearLiteral(&lit_histo); HistogramClearCommand(&cmd_histo); HistogramClearDistance(&dist_histo); BuildHistograms(input, start_pos, mask, commands, n_commands, &lit_histo, &cmd_histo, &dist_histo); BrotliBuildAndStoreHuffmanTreeFast(m, lit_histo.data_, lit_histo.total_count_, /* max_bits = */ 8, lit_depth, lit_bits, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; BrotliBuildAndStoreHuffmanTreeFast(m, cmd_histo.data_, cmd_histo.total_count_, /* max_bits = */ 10, cmd_depth, cmd_bits, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; BrotliBuildAndStoreHuffmanTreeFast(m, dist_histo.data_, dist_histo.total_count_, /* max_bits = */ distance_alphabet_bits, dist_depth, dist_bits, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, lit_depth, lit_bits, cmd_depth, cmd_bits, dist_depth, dist_bits, storage_ix, storage); } if (is_last) { JumpToByteBoundary(storage_ix, storage); } } /* This is for storing uncompressed blocks (simple raw storage of bytes-as-bytes). */ void BrotliStoreUncompressedMetaBlock(BROTLI_BOOL is_final_block, const uint8_t* BROTLI_RESTRICT input, size_t position, size_t mask, size_t len, size_t* BROTLI_RESTRICT storage_ix, uint8_t* BROTLI_RESTRICT storage) { size_t masked_pos = position & mask; BrotliStoreUncompressedMetaBlockHeader(len, storage_ix, storage); JumpToByteBoundary(storage_ix, storage); if (masked_pos + len > mask + 1) { size_t len1 = mask + 1 - masked_pos; memcpy(&storage[*storage_ix >> 3], &input[masked_pos], len1); *storage_ix += len1 << 3; len -= len1; masked_pos = 0; } memcpy(&storage[*storage_ix >> 3], &input[masked_pos], len); *storage_ix += len << 3; /* We need to clear the next 4 bytes to continue to be compatible with BrotliWriteBits. */ BrotliWriteBitsPrepareStorage(*storage_ix, storage); /* Since the uncompressed block itself may not be the final block, add an empty one after this. */ if (is_final_block) { BrotliWriteBits(1, 1, storage_ix, storage); /* islast */ BrotliWriteBits(1, 1, storage_ix, storage); /* isempty */ JumpToByteBoundary(storage_ix, storage); } } #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif
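One piece of the brotli_bit_stream.c content above that rewards a standalone look: EncodeContextMap first runs the context map through a move-to-front transform (MoveToFrontTransform, IndexOf, MoveToFront) so repeated values collapse toward index 0, which RunLengthCodeZeros then squeezes. A toy demo of the transform with a made-up 4-symbol alphabet and input, not the encoder's actual buffers:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char mtf[4] = {0, 1, 2, 3}; /* identity start, like mtf[i] = i */
        const unsigned char in[] = {2, 2, 2, 0, 0, 3};
        for (size_t i = 0; i < sizeof(in); ++i) {
            size_t idx = 0;
            while (mtf[idx] != in[i]) ++idx; /* IndexOf */
            printf("%zu ", idx);             /* emitted symbol */
            unsigned char v = mtf[idx];      /* MoveToFront: shift and reinsert */
            memmove(&mtf[1], &mtf[0], idx);
            mtf[0] = v;
        }
        printf("\n"); /* prints: 2 0 0 1 0 3 -- the repeats became zeros */
        return 0;
    }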
/* Copyright 2014 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ /* Brotli bit stream functions to support the low level format. There are no compression algorithms here, just the right ordering of bits to match the specs. */ #include "./brotli_bit_stream.h" #include <string.h> /* memcpy, memset */ #include "../common/constants.h" #include "../common/context.h" #include "../common/platform.h" #include <brotli/types.h> #include "./entropy_encode.h" #include "./entropy_encode_static.h" #include "./fast_log.h" #include "./histogram.h" #include "./memory.h" #include "./write_bits.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif #define MAX_HUFFMAN_TREE_SIZE (2 * BROTLI_NUM_COMMAND_SYMBOLS + 1) /* The maximum size of Huffman dictionary for distances assuming that NPOSTFIX = 0 and NDIRECT = 0. */ #define MAX_SIMPLE_DISTANCE_ALPHABET_SIZE \ BROTLI_DISTANCE_ALPHABET_SIZE(0, 0, BROTLI_LARGE_MAX_DISTANCE_BITS) /* MAX_SIMPLE_DISTANCE_ALPHABET_SIZE == 140 */ static BROTLI_INLINE uint32_t BlockLengthPrefixCode(uint32_t len) { uint32_t code = (len >= 177) ? (len >= 753 ? 20 : 14) : (len >= 41 ? 7 : 0); while (code < (BROTLI_NUM_BLOCK_LEN_SYMBOLS - 1) && len >= _kBrotliPrefixCodeRanges[code + 1].offset) ++code; return code; } static BROTLI_INLINE void GetBlockLengthPrefixCode(uint32_t len, size_t* code, uint32_t* n_extra, uint32_t* extra) { *code = BlockLengthPrefixCode(len); *n_extra = _kBrotliPrefixCodeRanges[*code].nbits; *extra = len - _kBrotliPrefixCodeRanges[*code].offset; } typedef struct BlockTypeCodeCalculator { size_t last_type; size_t second_last_type; } BlockTypeCodeCalculator; static void InitBlockTypeCodeCalculator(BlockTypeCodeCalculator* self) { self->last_type = 1; self->second_last_type = 0; } static BROTLI_INLINE size_t NextBlockTypeCode( BlockTypeCodeCalculator* calculator, uint8_t type) { size_t type_code = (type == calculator->last_type + 1) ? 1u : (type == calculator->second_last_type) ? 0u : type + 2u; calculator->second_last_type = calculator->last_type; calculator->last_type = type; return type_code; } /* |nibblesbits| represents the 2 bits to encode MNIBBLES (0-3) REQUIRES: length > 0 REQUIRES: length <= (1 << 24) */ static void BrotliEncodeMlen(size_t length, uint64_t* bits, size_t* numbits, uint64_t* nibblesbits) { size_t lg = (length == 1) ? 1 : Log2FloorNonZero((uint32_t)(length - 1)) + 1; size_t mnibbles = (lg < 16 ? 16 : (lg + 3)) / 4; BROTLI_DCHECK(length > 0); BROTLI_DCHECK(length <= (1 << 24)); BROTLI_DCHECK(lg <= 24); *nibblesbits = mnibbles - 4; *numbits = mnibbles * 4; *bits = length - 1; } static BROTLI_INLINE void StoreCommandExtra( const Command* cmd, size_t* storage_ix, uint8_t* storage) { uint32_t copylen_code = CommandCopyLenCode(cmd); uint16_t inscode = GetInsertLengthCode(cmd->insert_len_); uint16_t copycode = GetCopyLengthCode(copylen_code); uint32_t insnumextra = GetInsertExtra(inscode); uint64_t insextraval = cmd->insert_len_ - GetInsertBase(inscode); uint64_t copyextraval = copylen_code - GetCopyBase(copycode); uint64_t bits = (copyextraval << insnumextra) | insextraval; BrotliWriteBits( insnumextra + GetCopyExtra(copycode), bits, storage_ix, storage); } /* Data structure that stores almost everything that is needed to encode each block switch command. 
*/ typedef struct BlockSplitCode { BlockTypeCodeCalculator type_code_calculator; uint8_t type_depths[BROTLI_MAX_BLOCK_TYPE_SYMBOLS]; uint16_t type_bits[BROTLI_MAX_BLOCK_TYPE_SYMBOLS]; uint8_t length_depths[BROTLI_NUM_BLOCK_LEN_SYMBOLS]; uint16_t length_bits[BROTLI_NUM_BLOCK_LEN_SYMBOLS]; } BlockSplitCode; /* Stores a number between 0 and 255. */ static void StoreVarLenUint8(size_t n, size_t* storage_ix, uint8_t* storage) { if (n == 0) { BrotliWriteBits(1, 0, storage_ix, storage); } else { size_t nbits = Log2FloorNonZero(n); BrotliWriteBits(1, 1, storage_ix, storage); BrotliWriteBits(3, nbits, storage_ix, storage); BrotliWriteBits(nbits, n - ((size_t)1 << nbits), storage_ix, storage); } } /* Stores the compressed meta-block header. REQUIRES: length > 0 REQUIRES: length <= (1 << 24) */ static void StoreCompressedMetaBlockHeader(BROTLI_BOOL is_final_block, size_t length, size_t* storage_ix, uint8_t* storage) { uint64_t lenbits; size_t nlenbits; uint64_t nibblesbits; /* Write ISLAST bit. */ BrotliWriteBits(1, (uint64_t)is_final_block, storage_ix, storage); /* Write ISEMPTY bit. */ if (is_final_block) { BrotliWriteBits(1, 0, storage_ix, storage); } BrotliEncodeMlen(length, &lenbits, &nlenbits, &nibblesbits); BrotliWriteBits(2, nibblesbits, storage_ix, storage); BrotliWriteBits(nlenbits, lenbits, storage_ix, storage); if (!is_final_block) { /* Write ISUNCOMPRESSED bit. */ BrotliWriteBits(1, 0, storage_ix, storage); } } /* Stores the uncompressed meta-block header. REQUIRES: length > 0 REQUIRES: length <= (1 << 24) */ static void BrotliStoreUncompressedMetaBlockHeader(size_t length, size_t* storage_ix, uint8_t* storage) { uint64_t lenbits; size_t nlenbits; uint64_t nibblesbits; /* Write ISLAST bit. Uncompressed block cannot be the last one, so set to 0. */ BrotliWriteBits(1, 0, storage_ix, storage); BrotliEncodeMlen(length, &lenbits, &nlenbits, &nibblesbits); BrotliWriteBits(2, nibblesbits, storage_ix, storage); BrotliWriteBits(nlenbits, lenbits, storage_ix, storage); /* Write ISUNCOMPRESSED bit. */ BrotliWriteBits(1, 1, storage_ix, storage); } static void BrotliStoreHuffmanTreeOfHuffmanTreeToBitMask( const int num_codes, const uint8_t* code_length_bitdepth, size_t* storage_ix, uint8_t* storage) { static const uint8_t kStorageOrder[BROTLI_CODE_LENGTH_CODES] = { 1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; /* The bit lengths of the Huffman code over the code length alphabet are compressed with the following static Huffman code: Symbol Code ------ ---- 0 00 1 1110 2 110 3 01 4 10 5 1111 */ static const uint8_t kHuffmanBitLengthHuffmanCodeSymbols[6] = { 0, 7, 3, 2, 1, 15 }; static const uint8_t kHuffmanBitLengthHuffmanCodeBitLengths[6] = { 2, 4, 3, 2, 2, 4 }; size_t skip_some = 0; /* skips none. */ /* Throw away trailing zeros: */ size_t codes_to_store = BROTLI_CODE_LENGTH_CODES; if (num_codes > 1) { for (; codes_to_store > 0; --codes_to_store) { if (code_length_bitdepth[kStorageOrder[codes_to_store - 1]] != 0) { break; } } } if (code_length_bitdepth[kStorageOrder[0]] == 0 && code_length_bitdepth[kStorageOrder[1]] == 0) { skip_some = 2; /* skips two. */ if (code_length_bitdepth[kStorageOrder[2]] == 0) { skip_some = 3; /* skips three. 
*/ } } BrotliWriteBits(2, skip_some, storage_ix, storage); { size_t i; for (i = skip_some; i < codes_to_store; ++i) { size_t l = code_length_bitdepth[kStorageOrder[i]]; BrotliWriteBits(kHuffmanBitLengthHuffmanCodeBitLengths[l], kHuffmanBitLengthHuffmanCodeSymbols[l], storage_ix, storage); } } } static void BrotliStoreHuffmanTreeToBitMask( const size_t huffman_tree_size, const uint8_t* huffman_tree, const uint8_t* huffman_tree_extra_bits, const uint8_t* code_length_bitdepth, const uint16_t* code_length_bitdepth_symbols, size_t* BROTLI_RESTRICT storage_ix, uint8_t* BROTLI_RESTRICT storage) { size_t i; for (i = 0; i < huffman_tree_size; ++i) { size_t ix = huffman_tree[i]; BrotliWriteBits(code_length_bitdepth[ix], code_length_bitdepth_symbols[ix], storage_ix, storage); /* Extra bits */ switch (ix) { case BROTLI_REPEAT_PREVIOUS_CODE_LENGTH: BrotliWriteBits(2, huffman_tree_extra_bits[i], storage_ix, storage); break; case BROTLI_REPEAT_ZERO_CODE_LENGTH: BrotliWriteBits(3, huffman_tree_extra_bits[i], storage_ix, storage); break; } } } static void StoreSimpleHuffmanTree(const uint8_t* depths, size_t symbols[4], size_t num_symbols, size_t max_bits, size_t* storage_ix, uint8_t* storage) { /* value of 1 indicates a simple Huffman code */ BrotliWriteBits(2, 1, storage_ix, storage); BrotliWriteBits(2, num_symbols - 1, storage_ix, storage); /* NSYM - 1 */ { /* Sort */ size_t i; for (i = 0; i < num_symbols; i++) { size_t j; for (j = i + 1; j < num_symbols; j++) { if (depths[symbols[j]] < depths[symbols[i]]) { BROTLI_SWAP(size_t, symbols, j, i); } } } } if (num_symbols == 2) { BrotliWriteBits(max_bits, symbols[0], storage_ix, storage); BrotliWriteBits(max_bits, symbols[1], storage_ix, storage); } else if (num_symbols == 3) { BrotliWriteBits(max_bits, symbols[0], storage_ix, storage); BrotliWriteBits(max_bits, symbols[1], storage_ix, storage); BrotliWriteBits(max_bits, symbols[2], storage_ix, storage); } else { BrotliWriteBits(max_bits, symbols[0], storage_ix, storage); BrotliWriteBits(max_bits, symbols[1], storage_ix, storage); BrotliWriteBits(max_bits, symbols[2], storage_ix, storage); BrotliWriteBits(max_bits, symbols[3], storage_ix, storage); /* tree-select */ BrotliWriteBits(1, depths[symbols[0]] == 1 ? 1 : 0, storage_ix, storage); } } /* num = alphabet size depths = symbol depths */ void BrotliStoreHuffmanTree(const uint8_t* depths, size_t num, HuffmanTree* tree, size_t* storage_ix, uint8_t* storage) { /* Write the Huffman tree into the brotli-representation. The command alphabet is the largest, so this allocation will fit all alphabets. */ uint8_t huffman_tree[BROTLI_NUM_COMMAND_SYMBOLS]; uint8_t huffman_tree_extra_bits[BROTLI_NUM_COMMAND_SYMBOLS]; size_t huffman_tree_size = 0; uint8_t code_length_bitdepth[BROTLI_CODE_LENGTH_CODES] = { 0 }; uint16_t code_length_bitdepth_symbols[BROTLI_CODE_LENGTH_CODES]; uint32_t huffman_tree_histogram[BROTLI_CODE_LENGTH_CODES] = { 0 }; size_t i; int num_codes = 0; size_t code = 0; BROTLI_DCHECK(num <= BROTLI_NUM_COMMAND_SYMBOLS); BrotliWriteHuffmanTree(depths, num, &huffman_tree_size, huffman_tree, huffman_tree_extra_bits); /* Calculate the statistics of the Huffman tree in brotli-representation. */ for (i = 0; i < huffman_tree_size; ++i) { ++huffman_tree_histogram[huffman_tree[i]]; } for (i = 0; i < BROTLI_CODE_LENGTH_CODES; ++i) { if (huffman_tree_histogram[i]) { if (num_codes == 0) { code = i; num_codes = 1; } else if (num_codes == 1) { num_codes = 2; break; } } } /* Calculate another Huffman tree to use for compressing both the earlier Huffman tree with. 
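This code-length code has at most BROTLI_CODE_LENGTH_CODES symbols, and its depths are limited to 5 bits (the third argument below).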
*/ BrotliCreateHuffmanTree(huffman_tree_histogram, BROTLI_CODE_LENGTH_CODES, 5, tree, code_length_bitdepth); BrotliConvertBitDepthsToSymbols(code_length_bitdepth, BROTLI_CODE_LENGTH_CODES, code_length_bitdepth_symbols); /* Now, we have all the data, let's start storing it */ BrotliStoreHuffmanTreeOfHuffmanTreeToBitMask(num_codes, code_length_bitdepth, storage_ix, storage); if (num_codes == 1) { code_length_bitdepth[code] = 0; } /* Store the real Huffman tree now. */ BrotliStoreHuffmanTreeToBitMask(huffman_tree_size, huffman_tree, huffman_tree_extra_bits, code_length_bitdepth, code_length_bitdepth_symbols, storage_ix, storage); } /* Builds a Huffman tree from histogram[0:length] into depth[0:length] and bits[0:length] and stores the encoded tree to the bit stream. */ static void BuildAndStoreHuffmanTree(const uint32_t* histogram, const size_t histogram_length, const size_t alphabet_size, HuffmanTree* tree, uint8_t* depth, uint16_t* bits, size_t* storage_ix, uint8_t* storage) { size_t count = 0; size_t s4[4] = { 0 }; size_t i; size_t max_bits = 0; for (i = 0; i < histogram_length; i++) { if (histogram[i]) { if (count < 4) { s4[count] = i; } else if (count > 4) { break; } count++; } } { size_t max_bits_counter = alphabet_size - 1; while (max_bits_counter) { max_bits_counter >>= 1; ++max_bits; } } if (count <= 1) { BrotliWriteBits(4, 1, storage_ix, storage); BrotliWriteBits(max_bits, s4[0], storage_ix, storage); depth[s4[0]] = 0; bits[s4[0]] = 0; return; } memset(depth, 0, histogram_length * sizeof(depth[0])); BrotliCreateHuffmanTree(histogram, histogram_length, 15, tree, depth); BrotliConvertBitDepthsToSymbols(depth, histogram_length, bits); if (count <= 4) { StoreSimpleHuffmanTree(depth, s4, count, max_bits, storage_ix, storage); } else { BrotliStoreHuffmanTree(depth, histogram_length, tree, storage_ix, storage); } } static BROTLI_INLINE BROTLI_BOOL SortHuffmanTree( const HuffmanTree* v0, const HuffmanTree* v1) { return TO_BROTLI_BOOL(v0->total_count_ < v1->total_count_); } void BrotliBuildAndStoreHuffmanTreeFast(MemoryManager* m, const uint32_t* histogram, const size_t histogram_total, const size_t max_bits, uint8_t* depth, uint16_t* bits, size_t* storage_ix, uint8_t* storage) { size_t count = 0; size_t symbols[4] = { 0 }; size_t length = 0; size_t total = histogram_total; while (total != 0) { if (histogram[length]) { if (count < 4) { symbols[count] = length; } ++count; total -= histogram[length]; } ++length; } if (count <= 1) { BrotliWriteBits(4, 1, storage_ix, storage); BrotliWriteBits(max_bits, symbols[0], storage_ix, storage); depth[symbols[0]] = 0; bits[symbols[0]] = 0; return; } memset(depth, 0, length * sizeof(depth[0])); { const size_t max_tree_size = 2 * length + 1; HuffmanTree* tree = BROTLI_ALLOC(m, HuffmanTree, max_tree_size); uint32_t count_limit; if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(tree)) return; for (count_limit = 1; ; count_limit *= 2) { HuffmanTree* node = tree; size_t l; for (l = length; l != 0;) { --l; if (histogram[l]) { if (BROTLI_PREDICT_TRUE(histogram[l] >= count_limit)) { InitHuffmanTree(node, histogram[l], -1, (int16_t)l); } else { InitHuffmanTree(node, count_limit, -1, (int16_t)l); } ++node; } } { const int n = (int)(node - tree); HuffmanTree sentinel; int i = 0; /* Points to the next leaf node. */ int j = n + 1; /* Points to the next non-leaf node. */ int k; SortHuffmanTreeItems(tree, (size_t)n, SortHuffmanTree); /* The nodes are: [0, n): the sorted leaf nodes that we start with. [n]: we add a sentinel here. 
[n + 1, 2n): new parent nodes are added here, starting from (n+1). These are naturally in ascending order. [2n]: we add a sentinel at the end as well. There will be (2n+1) elements at the end. */ InitHuffmanTree(&sentinel, BROTLI_UINT32_MAX, -1, -1); *node++ = sentinel; *node++ = sentinel; for (k = n - 1; k > 0; --k) { int left, right; if (tree[i].total_count_ <= tree[j].total_count_) { left = i; ++i; } else { left = j; ++j; } if (tree[i].total_count_ <= tree[j].total_count_) { right = i; ++i; } else { right = j; ++j; } /* The sentinel node becomes the parent node. */ node[-1].total_count_ = tree[left].total_count_ + tree[right].total_count_; node[-1].index_left_ = (int16_t)left; node[-1].index_right_or_value_ = (int16_t)right; /* Add back the last sentinel node. */ *node++ = sentinel; } if (BrotliSetDepth(2 * n - 1, tree, depth, 14)) { /* We need to pack the Huffman tree in 14 bits. If this was not successful, add fake entities to the lowest values and retry. */ break; } } } BROTLI_FREE(m, tree); } BrotliConvertBitDepthsToSymbols(depth, length, bits); if (count <= 4) { size_t i; /* value of 1 indicates a simple Huffman code */ BrotliWriteBits(2, 1, storage_ix, storage); BrotliWriteBits(2, count - 1, storage_ix, storage); /* NSYM - 1 */ /* Sort */ for (i = 0; i < count; i++) { size_t j; for (j = i + 1; j < count; j++) { if (depth[symbols[j]] < depth[symbols[i]]) { BROTLI_SWAP(size_t, symbols, j, i); } } } if (count == 2) { BrotliWriteBits(max_bits, symbols[0], storage_ix, storage); BrotliWriteBits(max_bits, symbols[1], storage_ix, storage); } else if (count == 3) { BrotliWriteBits(max_bits, symbols[0], storage_ix, storage); BrotliWriteBits(max_bits, symbols[1], storage_ix, storage); BrotliWriteBits(max_bits, symbols[2], storage_ix, storage); } else { BrotliWriteBits(max_bits, symbols[0], storage_ix, storage); BrotliWriteBits(max_bits, symbols[1], storage_ix, storage); BrotliWriteBits(max_bits, symbols[2], storage_ix, storage); BrotliWriteBits(max_bits, symbols[3], storage_ix, storage); /* tree-select */ BrotliWriteBits(1, depth[symbols[0]] == 1 ? 1 : 0, storage_ix, storage); } } else { uint8_t previous_value = 8; size_t i; /* Complex Huffman Tree */ StoreStaticCodeLengthCode(storage_ix, storage); /* Actual RLE coding. 
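Zero runs are emitted with the precomputed kZeroReps* codes; for non-zero code lengths the value is written once when it changes, short repeats (fewer than 3) are written literally, and longer repeats use the kNonZeroReps* repeat codes.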
*/ for (i = 0; i < length;) { const uint8_t value = depth[i]; size_t reps = 1; size_t k; for (k = i + 1; k < length && depth[k] == value; ++k) { ++reps; } i += reps; if (value == 0) { BrotliWriteBits(kZeroRepsDepth[reps], kZeroRepsBits[reps], storage_ix, storage); } else { if (previous_value != value) { BrotliWriteBits(kCodeLengthDepth[value], kCodeLengthBits[value], storage_ix, storage); --reps; } if (reps < 3) { while (reps != 0) { reps--; BrotliWriteBits(kCodeLengthDepth[value], kCodeLengthBits[value], storage_ix, storage); } } else { reps -= 3; BrotliWriteBits(kNonZeroRepsDepth[reps], kNonZeroRepsBits[reps], storage_ix, storage); } previous_value = value; } } } } static size_t IndexOf(const uint8_t* v, size_t v_size, uint8_t value) { size_t i = 0; for (; i < v_size; ++i) { if (v[i] == value) return i; } return i; } static void MoveToFront(uint8_t* v, size_t index) { uint8_t value = v[index]; size_t i; for (i = index; i != 0; --i) { v[i] = v[i - 1]; } v[0] = value; } static void MoveToFrontTransform(const uint32_t* BROTLI_RESTRICT v_in, const size_t v_size, uint32_t* v_out) { size_t i; uint8_t mtf[256]; uint32_t max_value; if (v_size == 0) { return; } max_value = v_in[0]; for (i = 1; i < v_size; ++i) { if (v_in[i] > max_value) max_value = v_in[i]; } BROTLI_DCHECK(max_value < 256u); for (i = 0; i <= max_value; ++i) { mtf[i] = (uint8_t)i; } { size_t mtf_size = max_value + 1; for (i = 0; i < v_size; ++i) { size_t index = IndexOf(mtf, mtf_size, (uint8_t)v_in[i]); BROTLI_DCHECK(index < mtf_size); v_out[i] = (uint32_t)index; MoveToFront(mtf, index); } } } /* Finds runs of zeros in v[0..in_size) and replaces them with a prefix code of the run length plus extra bits (lower 9 bits is the prefix code and the rest are the extra bits). Non-zero values in v[] are shifted by *max_length_prefix. Will not create prefix codes bigger than the initial value of *max_run_length_prefix. The prefix code of run length L is simply Log2Floor(L) and the number of extra bits is the same as the prefix code. */ static void RunLengthCodeZeros(const size_t in_size, uint32_t* BROTLI_RESTRICT v, size_t* BROTLI_RESTRICT out_size, uint32_t* BROTLI_RESTRICT max_run_length_prefix) { uint32_t max_reps = 0; size_t i; uint32_t max_prefix; for (i = 0; i < in_size;) { uint32_t reps = 0; for (; i < in_size && v[i] != 0; ++i) ; for (; i < in_size && v[i] == 0; ++i) { ++reps; } max_reps = BROTLI_MAX(uint32_t, reps, max_reps); } max_prefix = max_reps > 0 ? 
Log2FloorNonZero(max_reps) : 0; max_prefix = BROTLI_MIN(uint32_t, max_prefix, *max_run_length_prefix); *max_run_length_prefix = max_prefix; *out_size = 0; for (i = 0; i < in_size;) { BROTLI_DCHECK(*out_size <= i); if (v[i] != 0) { v[*out_size] = v[i] + *max_run_length_prefix; ++i; ++(*out_size); } else { uint32_t reps = 1; size_t k; for (k = i + 1; k < in_size && v[k] == 0; ++k) { ++reps; } i += reps; while (reps != 0) { if (reps < (2u << max_prefix)) { uint32_t run_length_prefix = Log2FloorNonZero(reps); const uint32_t extra_bits = reps - (1u << run_length_prefix); v[*out_size] = run_length_prefix + (extra_bits << 9); ++(*out_size); break; } else { const uint32_t extra_bits = (1u << max_prefix) - 1u; v[*out_size] = max_prefix + (extra_bits << 9); reps -= (2u << max_prefix) - 1u; ++(*out_size); } } } } } #define SYMBOL_BITS 9 static void EncodeContextMap(MemoryManager* m, const uint32_t* context_map, size_t context_map_size, size_t num_clusters, HuffmanTree* tree, size_t* storage_ix, uint8_t* storage) { size_t i; uint32_t* rle_symbols; uint32_t max_run_length_prefix = 6; size_t num_rle_symbols = 0; uint32_t histogram[BROTLI_MAX_CONTEXT_MAP_SYMBOLS]; static const uint32_t kSymbolMask = (1u << SYMBOL_BITS) - 1u; uint8_t depths[BROTLI_MAX_CONTEXT_MAP_SYMBOLS]; uint16_t bits[BROTLI_MAX_CONTEXT_MAP_SYMBOLS]; StoreVarLenUint8(num_clusters - 1, storage_ix, storage); if (num_clusters == 1) { return; } rle_symbols = BROTLI_ALLOC(m, uint32_t, context_map_size); if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(rle_symbols)) return; MoveToFrontTransform(context_map, context_map_size, rle_symbols); RunLengthCodeZeros(context_map_size, rle_symbols, &num_rle_symbols, &max_run_length_prefix); memset(histogram, 0, sizeof(histogram)); for (i = 0; i < num_rle_symbols; ++i) { ++histogram[rle_symbols[i] & kSymbolMask]; } { BROTLI_BOOL use_rle = TO_BROTLI_BOOL(max_run_length_prefix > 0); BrotliWriteBits(1, (uint64_t)use_rle, storage_ix, storage); if (use_rle) { BrotliWriteBits(4, max_run_length_prefix - 1, storage_ix, storage); } } BuildAndStoreHuffmanTree(histogram, num_clusters + max_run_length_prefix, num_clusters + max_run_length_prefix, tree, depths, bits, storage_ix, storage); for (i = 0; i < num_rle_symbols; ++i) { const uint32_t rle_symbol = rle_symbols[i] & kSymbolMask; const uint32_t extra_bits_val = rle_symbols[i] >> SYMBOL_BITS; BrotliWriteBits(depths[rle_symbol], bits[rle_symbol], storage_ix, storage); if (rle_symbol > 0 && rle_symbol <= max_run_length_prefix) { BrotliWriteBits(rle_symbol, extra_bits_val, storage_ix, storage); } } BrotliWriteBits(1, 1, storage_ix, storage); /* use move-to-front */ BROTLI_FREE(m, rle_symbols); } /* Stores the block switch command with index block_ix to the bit stream. */ static BROTLI_INLINE void StoreBlockSwitch(BlockSplitCode* code, const uint32_t block_len, const uint8_t block_type, BROTLI_BOOL is_first_block, size_t* storage_ix, uint8_t* storage) { size_t typecode = NextBlockTypeCode(&code->type_code_calculator, block_type); size_t lencode; uint32_t len_nextra; uint32_t len_extra; if (!is_first_block) { BrotliWriteBits(code->type_depths[typecode], code->type_bits[typecode], storage_ix, storage); } GetBlockLengthPrefixCode(block_len, &lencode, &len_nextra, &len_extra); BrotliWriteBits(code->length_depths[lencode], code->length_bits[lencode], storage_ix, storage); BrotliWriteBits(len_nextra, len_extra, storage_ix, storage); } /* Builds a BlockSplitCode data structure from the block split given by the vector of block types and block lengths and stores it to the bit stream. 
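Two Huffman codes are built and stored: one over the block type codes (alphabet size num_types + 2, to cover the two special repeat codes) and one over the BROTLI_NUM_BLOCK_LEN_SYMBOLS block length prefix codes.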
*/ static void BuildAndStoreBlockSplitCode(const uint8_t* types, const uint32_t* lengths, const size_t num_blocks, const size_t num_types, HuffmanTree* tree, BlockSplitCode* code, size_t* storage_ix, uint8_t* storage) { uint32_t type_histo[BROTLI_MAX_BLOCK_TYPE_SYMBOLS]; uint32_t length_histo[BROTLI_NUM_BLOCK_LEN_SYMBOLS]; size_t i; BlockTypeCodeCalculator type_code_calculator; memset(type_histo, 0, (num_types + 2) * sizeof(type_histo[0])); memset(length_histo, 0, sizeof(length_histo)); InitBlockTypeCodeCalculator(&type_code_calculator); for (i = 0; i < num_blocks; ++i) { size_t type_code = NextBlockTypeCode(&type_code_calculator, types[i]); if (i != 0) ++type_histo[type_code]; ++length_histo[BlockLengthPrefixCode(lengths[i])]; } StoreVarLenUint8(num_types - 1, storage_ix, storage); if (num_types > 1) { /* TODO: else? could StoreBlockSwitch occur? */ BuildAndStoreHuffmanTree(&type_histo[0], num_types + 2, num_types + 2, tree, &code->type_depths[0], &code->type_bits[0], storage_ix, storage); BuildAndStoreHuffmanTree(&length_histo[0], BROTLI_NUM_BLOCK_LEN_SYMBOLS, BROTLI_NUM_BLOCK_LEN_SYMBOLS, tree, &code->length_depths[0], &code->length_bits[0], storage_ix, storage); StoreBlockSwitch(code, lengths[0], types[0], 1, storage_ix, storage); } } /* Stores a context map where the histogram type is always the block type. */ static void StoreTrivialContextMap(size_t num_types, size_t context_bits, HuffmanTree* tree, size_t* storage_ix, uint8_t* storage) { StoreVarLenUint8(num_types - 1, storage_ix, storage); if (num_types > 1) { size_t repeat_code = context_bits - 1u; size_t repeat_bits = (1u << repeat_code) - 1u; size_t alphabet_size = num_types + repeat_code; uint32_t histogram[BROTLI_MAX_CONTEXT_MAP_SYMBOLS]; uint8_t depths[BROTLI_MAX_CONTEXT_MAP_SYMBOLS]; uint16_t bits[BROTLI_MAX_CONTEXT_MAP_SYMBOLS]; size_t i; memset(histogram, 0, alphabet_size * sizeof(histogram[0])); /* Write RLEMAX. */ BrotliWriteBits(1, 1, storage_ix, storage); BrotliWriteBits(4, repeat_code - 1, storage_ix, storage); histogram[repeat_code] = (uint32_t)num_types; histogram[0] = 1; for (i = context_bits; i < alphabet_size; ++i) { histogram[i] = 1; } BuildAndStoreHuffmanTree(histogram, alphabet_size, alphabet_size, tree, depths, bits, storage_ix, storage); for (i = 0; i < num_types; ++i) { size_t code = (i == 0 ? 0 : i + context_bits - 1); BrotliWriteBits(depths[code], bits[code], storage_ix, storage); BrotliWriteBits( depths[repeat_code], bits[repeat_code], storage_ix, storage); BrotliWriteBits(repeat_code, repeat_bits, storage_ix, storage); } /* Write IMTF (inverse-move-to-front) bit. */ BrotliWriteBits(1, 1, storage_ix, storage); } } /* Manages the encoding of one block category (literal, command or distance). */ typedef struct BlockEncoder { size_t histogram_length_; size_t num_block_types_; const uint8_t* block_types_; /* Not owned. */ const uint32_t* block_lengths_; /* Not owned. 
*/ size_t num_blocks_; BlockSplitCode block_split_code_; size_t block_ix_; size_t block_len_; size_t entropy_ix_; uint8_t* depths_; uint16_t* bits_; } BlockEncoder; static void InitBlockEncoder(BlockEncoder* self, size_t histogram_length, size_t num_block_types, const uint8_t* block_types, const uint32_t* block_lengths, const size_t num_blocks) { self->histogram_length_ = histogram_length; self->num_block_types_ = num_block_types; self->block_types_ = block_types; self->block_lengths_ = block_lengths; self->num_blocks_ = num_blocks; InitBlockTypeCodeCalculator(&self->block_split_code_.type_code_calculator); self->block_ix_ = 0; self->block_len_ = num_blocks == 0 ? 0 : block_lengths[0]; self->entropy_ix_ = 0; self->depths_ = 0; self->bits_ = 0; } static void CleanupBlockEncoder(MemoryManager* m, BlockEncoder* self) { BROTLI_FREE(m, self->depths_); BROTLI_FREE(m, self->bits_); } /* Creates entropy codes of block lengths and block types and stores them to the bit stream. */ static void BuildAndStoreBlockSwitchEntropyCodes(BlockEncoder* self, HuffmanTree* tree, size_t* storage_ix, uint8_t* storage) { BuildAndStoreBlockSplitCode(self->block_types_, self->block_lengths_, self->num_blocks_, self->num_block_types_, tree, &self->block_split_code_, storage_ix, storage); } /* Stores the next symbol with the entropy code of the current block type. Updates the block type and block length at block boundaries. */ static void StoreSymbol(BlockEncoder* self, size_t symbol, size_t* storage_ix, uint8_t* storage) { if (self->block_len_ == 0) { size_t block_ix = ++self->block_ix_; uint32_t block_len = self->block_lengths_[block_ix]; uint8_t block_type = self->block_types_[block_ix]; self->block_len_ = block_len; self->entropy_ix_ = block_type * self->histogram_length_; StoreBlockSwitch(&self->block_split_code_, block_len, block_type, 0, storage_ix, storage); } --self->block_len_; { size_t ix = self->entropy_ix_ + symbol; BrotliWriteBits(self->depths_[ix], self->bits_[ix], storage_ix, storage); } } /* Stores the next symbol with the entropy code of the current block type and context value. Updates the block type and block length at block boundaries. 
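The histogram is selected by looking up context_map[(block_type << context_bits) + context], so several contexts may share one entropy code.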
*/ static void StoreSymbolWithContext(BlockEncoder* self, size_t symbol, size_t context, const uint32_t* context_map, size_t* storage_ix, uint8_t* storage, const size_t context_bits) { if (self->block_len_ == 0) { size_t block_ix = ++self->block_ix_; uint32_t block_len = self->block_lengths_[block_ix]; uint8_t block_type = self->block_types_[block_ix]; self->block_len_ = block_len; self->entropy_ix_ = (size_t)block_type << context_bits; StoreBlockSwitch(&self->block_split_code_, block_len, block_type, 0, storage_ix, storage); } --self->block_len_; { size_t histo_ix = context_map[self->entropy_ix_ + context]; size_t ix = histo_ix * self->histogram_length_ + symbol; BrotliWriteBits(self->depths_[ix], self->bits_[ix], storage_ix, storage); } } #define FN(X) X ## Literal /* NOLINTNEXTLINE(build/include) */ #include "./block_encoder_inc.h" #undef FN #define FN(X) X ## Command /* NOLINTNEXTLINE(build/include) */ #include "./block_encoder_inc.h" #undef FN #define FN(X) X ## Distance /* NOLINTNEXTLINE(build/include) */ #include "./block_encoder_inc.h" #undef FN static void JumpToByteBoundary(size_t* storage_ix, uint8_t* storage) { *storage_ix = (*storage_ix + 7u) & ~7u; storage[*storage_ix >> 3] = 0; } void BrotliStoreMetaBlock(MemoryManager* m, const uint8_t* input, size_t start_pos, size_t length, size_t mask, uint8_t prev_byte, uint8_t prev_byte2, BROTLI_BOOL is_last, const BrotliEncoderParams* params, ContextType literal_context_mode, const Command* commands, size_t n_commands, const MetaBlockSplit* mb, size_t* storage_ix, uint8_t* storage) { size_t pos = start_pos; size_t i; uint32_t num_distance_symbols = params->dist.alphabet_size_max; uint32_t num_effective_distance_symbols = params->dist.alphabet_size_limit; HuffmanTree* tree; ContextLut literal_context_lut = BROTLI_CONTEXT_LUT(literal_context_mode); BlockEncoder literal_enc; BlockEncoder command_enc; BlockEncoder distance_enc; const BrotliDistanceParams* dist = &params->dist; BROTLI_DCHECK( num_effective_distance_symbols <= BROTLI_NUM_HISTOGRAM_DISTANCE_SYMBOLS); StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); tree = BROTLI_ALLOC(m, HuffmanTree, MAX_HUFFMAN_TREE_SIZE); if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(tree)) return; InitBlockEncoder(&literal_enc, BROTLI_NUM_LITERAL_SYMBOLS, mb->literal_split.num_types, mb->literal_split.types, mb->literal_split.lengths, mb->literal_split.num_blocks); InitBlockEncoder(&command_enc, BROTLI_NUM_COMMAND_SYMBOLS, mb->command_split.num_types, mb->command_split.types, mb->command_split.lengths, mb->command_split.num_blocks); InitBlockEncoder(&distance_enc, num_effective_distance_symbols, mb->distance_split.num_types, mb->distance_split.types, mb->distance_split.lengths, mb->distance_split.num_blocks); BuildAndStoreBlockSwitchEntropyCodes(&literal_enc, tree, storage_ix, storage); BuildAndStoreBlockSwitchEntropyCodes(&command_enc, tree, storage_ix, storage); BuildAndStoreBlockSwitchEntropyCodes( &distance_enc, tree, storage_ix, storage); BrotliWriteBits(2, dist->distance_postfix_bits, storage_ix, storage); BrotliWriteBits( 4, dist->num_direct_distance_codes >> dist->distance_postfix_bits, storage_ix, storage); for (i = 0; i < mb->literal_split.num_types; ++i) { BrotliWriteBits(2, literal_context_mode, storage_ix, storage); } if (mb->literal_context_map_size == 0) { StoreTrivialContextMap(mb->literal_histograms_size, BROTLI_LITERAL_CONTEXT_BITS, tree, storage_ix, storage); } else { EncodeContextMap(m, mb->literal_context_map, mb->literal_context_map_size, mb->literal_histograms_size, 
tree, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; } if (mb->distance_context_map_size == 0) { StoreTrivialContextMap(mb->distance_histograms_size, BROTLI_DISTANCE_CONTEXT_BITS, tree, storage_ix, storage); } else { EncodeContextMap(m, mb->distance_context_map, mb->distance_context_map_size, mb->distance_histograms_size, tree, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; } BuildAndStoreEntropyCodesLiteral(m, &literal_enc, mb->literal_histograms, mb->literal_histograms_size, BROTLI_NUM_LITERAL_SYMBOLS, tree, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; BuildAndStoreEntropyCodesCommand(m, &command_enc, mb->command_histograms, mb->command_histograms_size, BROTLI_NUM_COMMAND_SYMBOLS, tree, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; BuildAndStoreEntropyCodesDistance(m, &distance_enc, mb->distance_histograms, mb->distance_histograms_size, num_distance_symbols, tree, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; BROTLI_FREE(m, tree); for (i = 0; i < n_commands; ++i) { const Command cmd = commands[i]; size_t cmd_code = cmd.cmd_prefix_; StoreSymbol(&command_enc, cmd_code, storage_ix, storage); StoreCommandExtra(&cmd, storage_ix, storage); if (mb->literal_context_map_size == 0) { size_t j; for (j = cmd.insert_len_; j != 0; --j) { StoreSymbol(&literal_enc, input[pos & mask], storage_ix, storage); ++pos; } } else { size_t j; for (j = cmd.insert_len_; j != 0; --j) { size_t context = BROTLI_CONTEXT(prev_byte, prev_byte2, literal_context_lut); uint8_t literal = input[pos & mask]; StoreSymbolWithContext(&literal_enc, literal, context, mb->literal_context_map, storage_ix, storage, BROTLI_LITERAL_CONTEXT_BITS); prev_byte2 = prev_byte; prev_byte = literal; ++pos; } } pos += CommandCopyLen(&cmd); if (CommandCopyLen(&cmd)) { prev_byte2 = input[(pos - 2) & mask]; prev_byte = input[(pos - 1) & mask]; if (cmd.cmd_prefix_ >= 128) { size_t dist_code = cmd.dist_prefix_ & 0x3FF; uint32_t distnumextra = cmd.dist_prefix_ >> 10; uint64_t distextra = cmd.dist_extra_; if (mb->distance_context_map_size == 0) { StoreSymbol(&distance_enc, dist_code, storage_ix, storage); } else { size_t context = CommandDistanceContext(&cmd); StoreSymbolWithContext(&distance_enc, dist_code, context, mb->distance_context_map, storage_ix, storage, BROTLI_DISTANCE_CONTEXT_BITS); } BrotliWriteBits(distnumextra, distextra, storage_ix, storage); } } } CleanupBlockEncoder(m, &distance_enc); CleanupBlockEncoder(m, &command_enc); CleanupBlockEncoder(m, &literal_enc); if (is_last) { JumpToByteBoundary(storage_ix, storage); } } static void BuildHistograms(const uint8_t* input, size_t start_pos, size_t mask, const Command* commands, size_t n_commands, HistogramLiteral* lit_histo, HistogramCommand* cmd_histo, HistogramDistance* dist_histo) { size_t pos = start_pos; size_t i; for (i = 0; i < n_commands; ++i) { const Command cmd = commands[i]; size_t j; HistogramAddCommand(cmd_histo, cmd.cmd_prefix_); for (j = cmd.insert_len_; j != 0; --j) { HistogramAddLiteral(lit_histo, input[pos & mask]); ++pos; } pos += CommandCopyLen(&cmd); if (CommandCopyLen(&cmd) && cmd.cmd_prefix_ >= 128) { HistogramAddDistance(dist_histo, cmd.dist_prefix_ & 0x3FF); } } } static void StoreDataWithHuffmanCodes(const uint8_t* input, size_t start_pos, size_t mask, const Command* commands, size_t n_commands, const uint8_t* lit_depth, const uint16_t* lit_bits, const uint8_t* cmd_depth, const uint16_t* cmd_bits, const uint8_t* dist_depth, const uint16_t* dist_bits, size_t* storage_ix, uint8_t* storage) { size_t pos = start_pos; size_t i; for (i = 0; i < 
n_commands; ++i) { const Command cmd = commands[i]; const size_t cmd_code = cmd.cmd_prefix_; size_t j; BrotliWriteBits( cmd_depth[cmd_code], cmd_bits[cmd_code], storage_ix, storage); StoreCommandExtra(&cmd, storage_ix, storage); for (j = cmd.insert_len_; j != 0; --j) { const uint8_t literal = input[pos & mask]; BrotliWriteBits( lit_depth[literal], lit_bits[literal], storage_ix, storage); ++pos; } pos += CommandCopyLen(&cmd); if (CommandCopyLen(&cmd) && cmd.cmd_prefix_ >= 128) { const size_t dist_code = cmd.dist_prefix_ & 0x3FF; const uint32_t distnumextra = cmd.dist_prefix_ >> 10; const uint32_t distextra = cmd.dist_extra_; BrotliWriteBits(dist_depth[dist_code], dist_bits[dist_code], storage_ix, storage); BrotliWriteBits(distnumextra, distextra, storage_ix, storage); } } } void BrotliStoreMetaBlockTrivial(MemoryManager* m, const uint8_t* input, size_t start_pos, size_t length, size_t mask, BROTLI_BOOL is_last, const BrotliEncoderParams* params, const Command* commands, size_t n_commands, size_t* storage_ix, uint8_t* storage) { HistogramLiteral lit_histo; HistogramCommand cmd_histo; HistogramDistance dist_histo; uint8_t lit_depth[BROTLI_NUM_LITERAL_SYMBOLS]; uint16_t lit_bits[BROTLI_NUM_LITERAL_SYMBOLS]; uint8_t cmd_depth[BROTLI_NUM_COMMAND_SYMBOLS]; uint16_t cmd_bits[BROTLI_NUM_COMMAND_SYMBOLS]; uint8_t dist_depth[MAX_SIMPLE_DISTANCE_ALPHABET_SIZE]; uint16_t dist_bits[MAX_SIMPLE_DISTANCE_ALPHABET_SIZE]; HuffmanTree* tree; uint32_t num_distance_symbols = params->dist.alphabet_size_max; StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); HistogramClearLiteral(&lit_histo); HistogramClearCommand(&cmd_histo); HistogramClearDistance(&dist_histo); BuildHistograms(input, start_pos, mask, commands, n_commands, &lit_histo, &cmd_histo, &dist_histo); BrotliWriteBits(13, 0, storage_ix, storage); tree = BROTLI_ALLOC(m, HuffmanTree, MAX_HUFFMAN_TREE_SIZE); if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(tree)) return; BuildAndStoreHuffmanTree(lit_histo.data_, BROTLI_NUM_LITERAL_SYMBOLS, BROTLI_NUM_LITERAL_SYMBOLS, tree, lit_depth, lit_bits, storage_ix, storage); BuildAndStoreHuffmanTree(cmd_histo.data_, BROTLI_NUM_COMMAND_SYMBOLS, BROTLI_NUM_COMMAND_SYMBOLS, tree, cmd_depth, cmd_bits, storage_ix, storage); BuildAndStoreHuffmanTree(dist_histo.data_, MAX_SIMPLE_DISTANCE_ALPHABET_SIZE, num_distance_symbols, tree, dist_depth, dist_bits, storage_ix, storage); BROTLI_FREE(m, tree); StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, lit_depth, lit_bits, cmd_depth, cmd_bits, dist_depth, dist_bits, storage_ix, storage); if (is_last) { JumpToByteBoundary(storage_ix, storage); } } void BrotliStoreMetaBlockFast(MemoryManager* m, const uint8_t* input, size_t start_pos, size_t length, size_t mask, BROTLI_BOOL is_last, const BrotliEncoderParams* params, const Command* commands, size_t n_commands, size_t* storage_ix, uint8_t* storage) { uint32_t num_distance_symbols = params->dist.alphabet_size_max; uint32_t distance_alphabet_bits = Log2FloorNonZero(num_distance_symbols - 1) + 1; StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); BrotliWriteBits(13, 0, storage_ix, storage); if (n_commands <= 128) { uint32_t histogram[BROTLI_NUM_LITERAL_SYMBOLS] = { 0 }; size_t pos = start_pos; size_t num_literals = 0; size_t i; uint8_t lit_depth[BROTLI_NUM_LITERAL_SYMBOLS]; uint16_t lit_bits[BROTLI_NUM_LITERAL_SYMBOLS]; for (i = 0; i < n_commands; ++i) { const Command cmd = commands[i]; size_t j; for (j = cmd.insert_len_; j != 0; --j) { ++histogram[input[pos & mask]]; ++pos; } 
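/* Tally this command's literals and skip past its copy part. */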
num_literals += cmd.insert_len_; pos += CommandCopyLen(&cmd); } BrotliBuildAndStoreHuffmanTreeFast(m, histogram, num_literals, /* max_bits = */ 8, lit_depth, lit_bits, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; StoreStaticCommandHuffmanTree(storage_ix, storage); StoreStaticDistanceHuffmanTree(storage_ix, storage); StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, lit_depth, lit_bits, kStaticCommandCodeDepth, kStaticCommandCodeBits, kStaticDistanceCodeDepth, kStaticDistanceCodeBits, storage_ix, storage); } else { HistogramLiteral lit_histo; HistogramCommand cmd_histo; HistogramDistance dist_histo; uint8_t lit_depth[BROTLI_NUM_LITERAL_SYMBOLS]; uint16_t lit_bits[BROTLI_NUM_LITERAL_SYMBOLS]; uint8_t cmd_depth[BROTLI_NUM_COMMAND_SYMBOLS]; uint16_t cmd_bits[BROTLI_NUM_COMMAND_SYMBOLS]; uint8_t dist_depth[MAX_SIMPLE_DISTANCE_ALPHABET_SIZE]; uint16_t dist_bits[MAX_SIMPLE_DISTANCE_ALPHABET_SIZE]; HistogramClearLiteral(&lit_histo); HistogramClearCommand(&cmd_histo); HistogramClearDistance(&dist_histo); BuildHistograms(input, start_pos, mask, commands, n_commands, &lit_histo, &cmd_histo, &dist_histo); BrotliBuildAndStoreHuffmanTreeFast(m, lit_histo.data_, lit_histo.total_count_, /* max_bits = */ 8, lit_depth, lit_bits, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; BrotliBuildAndStoreHuffmanTreeFast(m, cmd_histo.data_, cmd_histo.total_count_, /* max_bits = */ 10, cmd_depth, cmd_bits, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; BrotliBuildAndStoreHuffmanTreeFast(m, dist_histo.data_, dist_histo.total_count_, /* max_bits = */ distance_alphabet_bits, dist_depth, dist_bits, storage_ix, storage); if (BROTLI_IS_OOM(m)) return; StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, lit_depth, lit_bits, cmd_depth, cmd_bits, dist_depth, dist_bits, storage_ix, storage); } if (is_last) { JumpToByteBoundary(storage_ix, storage); } } /* This is for storing uncompressed blocks (simple raw storage of bytes-as-bytes). */ void BrotliStoreUncompressedMetaBlock(BROTLI_BOOL is_final_block, const uint8_t* BROTLI_RESTRICT input, size_t position, size_t mask, size_t len, size_t* BROTLI_RESTRICT storage_ix, uint8_t* BROTLI_RESTRICT storage) { size_t masked_pos = position & mask; BrotliStoreUncompressedMetaBlockHeader(len, storage_ix, storage); JumpToByteBoundary(storage_ix, storage); if (masked_pos + len > mask + 1) { size_t len1 = mask + 1 - masked_pos; memcpy(&storage[*storage_ix >> 3], &input[masked_pos], len1); *storage_ix += len1 << 3; len -= len1; masked_pos = 0; } memcpy(&storage[*storage_ix >> 3], &input[masked_pos], len); *storage_ix += len << 3; /* We need to clear the next 4 bytes to continue to be compatible with BrotliWriteBits. */ BrotliWriteBitsPrepareStorage(*storage_ix, storage); /* Since the uncompressed block itself may not be the final block, add an empty one after this. */ if (is_final_block) { BrotliWriteBits(1, 1, storage_ix, storage); /* islast */ BrotliWriteBits(1, 1, storage_ix, storage); /* isempty */ JumpToByteBoundary(storage_ix, storage); } } #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./docs/issues-pr-management.md
# Issue and Pull Request Management The purpose of this document is to help establish norms and best practices for dotnet/runtime. # Principles Here is a guiding set of principles for how to successfully combine the communities and teams which work together in dotnet/runtime. - Retain a 'one community/team' feel for dotnet/runtime - Leverage automation to label incoming/inflight work to enable accountability - `area-*` labels should align with a specific community/team for accountability - Within an `area-*` there is leeway for community/team specific practices # Details dotnet/runtime issues and pull requests are a shared resource. In alignment with the Principles, the goal is to find a set of norms and best practices which enable a successful community within dotnet/runtime, understand what is expected, act as 'one community/team', and provide leeway at the area level. Here are a few of the most salient components of working well together; the FAQ has much more detail. ## Common policies: - All incoming issues and pull requests are automatically labeled with an `area-*` label. The bot also assigns the `untriaged` label to issues (not pull requests) once they are created. - All issues and pull requests should have exactly 1 `area-*` label. - Issues are considered triaged when the `untriaged` label has been removed. There is a `needs further triage` label that can be used to tag issues that need another look later. - When issues have `area-*` labels switched, the `untriaged` label must be added. This prevents issues being lost in a `triaged` state when they have not actually been triaged by the area owner. In the future, a bot may automatically ensure this happens. - The central repository owner is accountable for triaging issues and pull requests without `area-*` labels. This occurs when automation is not able to determine the appropriate area. - Any `area-*` label that has overlap with merged technologies is appended with the src/subfolder name: eg. `area-Infrastructure` also has an `area-Infrastructure-libraries`, `area-Infrastructure-coreclr`, and `area-Infrastructure-installer`. - Labels/Milestones – all `area-*` labels are shared; if you are updating or adding labels, keep everyone in dotnet/runtime in mind. All labels and milestones are shared, so be a conscientious citizen when updating/adding. - We lock closed issues and pull requests after 30 days of inactivity. The reason for this is that it is easy for us to overlook new comments on closed issues and PRs. Instead we encourage folks to create a new issue. ## Scenarios where area owners will be asked to manage their issues and pull requests: - All issues with the `untriaged` label are considered untriaged; close to a product release, teams will be asked to triage them. If they wish to tag them for a closer look later, they can use `needs further triage`. - During a release endgame and for servicing, issues and pull requests targeting a particular release will be asked to have a milestone set. # FAQ ## What designates a 'triaged' issue? By default, all incoming issues will be labeled with an `untriaged` label. All issues with this label require action from the area owner to triage. At certain times in the release, area owners may be asked to triage their issues. Triaging an issue may be as simple as removing the `untriaged` label, but for most communities/teams this means assigning an appropriate milestone where the issue is intended to be addressed. As an aside, all incoming issues are also expected to be marked with an `area-*` label. 
Any issue that fails to receive an `area-*` label is also considered untriaged. As a best practice, as issues move from one area to another the `untriaged` label should be added to the issue to indicate that it needs to be reconsidered within the new context. ## How are milestones handled? Marking issues with milestones is necessary during release endgame and servicing. As the release enters an issue burndown, the repository owner may ask area owners to mark issues that should be considered for the current release. Pull requests for servicing should add the appropriate `major.minor.x` milestone (eg. `3.0.x`). Once a specific servicing version is determined, the specific milestone will be added to the pull request (eg. `3.0.2`). It is generally acceptable to have issues without milestones, though this is left to the area owners to decide. Said another way, not having a milestone does not mean that an issue is not triaged; see the comment above. Prefer 3-element milestone names (eg. `3.0.0`, not `3.0`) to be consistent with other repos. This helps with reporting. ## How do you request a review for an issue/pull request if only 1 `area-*` label is applied? Labeling issues with more than 1 `area-*` label has been used to bring attention to the issue or pull request from multiple teams. In order to ensure accountability, we strive to only have 1 `area-*` label per issue and pull request. In the event you need to bring the issue or pull request to multiple teams' attention, please add them for review as opposed to adding their `area-*` label. ## How do notifications work in dotnet/runtime? The default GitHub notification system is used for watching and tracking issue changes. We also have a bot that sends notifications when an `area-*` label is applied. It does not automatically notify area owners; not everyone wants these notifications. Instead, if you want these notifications, see the instructions in [area-owners.md](area-owners.md). It is not necessary to have committer rights to get them. ## How are pull requests marked with labels and milestones? Given the scope of dotnet/runtime, all pull requests are automatically assigned an `area-*` label. In addition, some pull requests may have milestones applied according to release endgame and servicing requirements. ## How do you do ongoing management for your repo? One team manager (M2) (perhaps rotating) is accountable for ensuring the following global health activities are accounted for: - Triaging incoming issues and assigning `area-*` labels to those that could not be labeled automatically - Common infrastructure tracking - Service Level Agreement tracking for responsiveness and a healthy repo - Release issue burndown Area-level owners will then manage their own pull requests and issues as they see fit. ## What other issue automation is available? There is a `needs author feedback` label you can apply to an issue when it is not actionable without more information from the author. When this label is applied, if there is no subsequent comment from the author within 14 days, a bot adds the `no recent activity` label and a note. After a further 7 days without comment from the author, the bot closes the issue or PR. If during any of this time there is a comment from anyone, `no recent activity` is cleared. If there is a comment from the author, `needs author feedback` is also cleared and `needs further triage` is applied so we see it. ## How are labels managed? There are few access controls, so in general everyone has access - be a good global citizen. 
It is possible to have too many labels. Consider whether a new label could be confusing: for example, a new `P1` label would be confusing given we already have priority labels. When in doubt, ask the team manager that is responsible. ## What is dotnet/runtime's branch policy? General guidance is to rarely create a direct branch within the repository and instead fork and create a branch. If any branch is created temporarily, it should be deleted as soon as the associated pull request is merged or closed. Any non-release branch is subject to deletion at any time. Branches are made for servicing releases and are managed centrally. Merging into these branches is monitored and managed centrally. The repositories in dotnet/runtime represent the bottom of the stack for .NET Core. As such, these repositories often lock down before the rest of .NET Core at the end of a release. The general policy is that all code within dotnet/runtime will align in lockdown dates and policies. ## What is dotnet/runtime's mirror policy? No specific policy. But please use common sense if the mirror will have any potential impact on the broader community. ## What is dotnet/runtime's project boards and ZenHub policy? The portions of ZenHub that are shared across the entire repository are the names of the pipelines (eg. the column names). When adding or editing these pipelines, it is best to communicate broadly and build consensus. ## What is dotnet/runtime's policy on Wikis? Wikis will be disabled for this repository.
# Issue and Pull Request Management The purpose of this document is to help establish norms and best practices for dotnet/runtime. # Principles Here is a guiding set of principles for how to successfully combine the communities and teams which work together in dotnet/runtime. - Retain a 'one community/team' feel for dotnet/runtime - Leverage automation to label incoming/inflight work to enable accountability - `area-*` labels should align with a specific community/team for accountability - Within an `area-*` there is leeway for community/team specific practices # Details dotnet/runtime issues and pull requests are a shared resource. In alignment with the Principles, the goal is to find a set of norms and best practices which enable a successful community within dotnet/runtime, understand what is expected, act as 'one community/team', and provide leeway at the area level. Here are a few of the most salient components of working well together; the FAQ has much more detail. ## Common policies: - All incoming issues and pull requests are automatically labeled with an `area-*` label. The bot also assigns the `untriaged` label to issues (not pull requests) once they are created. - All issues and pull requests should have exactly 1 `area-*` label. - Issues are considered triaged when the `untriaged` label has been removed. There is a `needs further triage` label that can be used to tag issues that need another look later. - When issues have `area-*` labels switched, the `untriaged` label must be added. This prevents issues being lost in a `triaged` state when they have not actually been triaged by the area owner. In the future, a bot may automatically ensure this happens. - The central repository owner is accountable for triaging issues and pull requests without `area-*` labels. This occurs when automation is not able to determine the appropriate area. - Any `area-*` label that has overlap with merged technologies is appended with the src/subfolder name: eg. `area-Infrastructure` also has an `area-Infrastructure-libraries`, `area-Infrastructure-coreclr`, and `area-Infrastructure-installer`. - Labels/Milestones – all `area-*` labels are shared; if you are updating or adding labels, keep everyone in dotnet/runtime in mind. All labels and milestones are shared, so be a conscientious citizen when updating/adding. - We lock closed issues and pull requests after 30 days of inactivity. The reason for this is that it is easy for us to overlook new comments on closed issues and PRs. Instead we encourage folks to create a new issue. ## Scenarios where area owners will be asked to manage their issues and pull requests: - All issues with the `untriaged` label are considered untriaged; close to a product release, teams will be asked to triage them. If they wish to tag them for a closer look later, they can use `needs further triage`. - During a release endgame and for servicing, issues and pull requests targeting a particular release will be asked to have a milestone set. # FAQ ## What designates a 'triaged' issue? By default, all incoming issues will be labeled with an `untriaged` label. All issues with this label require action from the area owner to triage. At certain times in the release, area owners may be asked to triage their issues. Triaging an issue may be as simple as removing the `untriaged` label, but for most communities/teams this means assigning an appropriate milestone where the issue is intended to be addressed. As an aside, all incoming issues are also expected to be marked with an `area-*` label. 
Any issue that fails to receive an `area-*` label is also considered untriaged. As a best practice, as issues move from one area to another the `untriaged` label should be added to the issue to indicate that it needs to be reconsidered within the new context. ## How are milestones handled? Marking issues with milestones is necessary during release endgame and servicing. As the release enters an issue burndown, the repository owner may ask area owners to mark issues that should be considered for the current release. Pull requests for servicing should add the appropriate `major.minor.x` milestone (eg. `3.0.x`). Once a specific servicing version is determined, the specific milestone will be added to the pull request (eg. `3.0.2`). It is generally acceptable to have issues without milestones, though this is left to the area owners to decide. Said another way, not having a milestone does not mean that an issue is not triaged; see the comment above. Prefer 3-element milestone names (eg. `3.0.0`, not `3.0`) to be consistent with other repos. This helps with reporting. ## How do you request a review for an issue/pull request if only 1 `area-*` label is applied? Labeling issues with more than 1 `area-*` label has been used to bring attention to the issue or pull request from multiple teams. In order to ensure accountability, we strive to only have 1 `area-*` label per issue and pull request. In the event you need to bring the issue or pull request to multiple teams' attention, please add them for review as opposed to adding their `area-*` label. ## How do notifications work in dotnet/runtime? The default GitHub notification system is used for watching and tracking issue changes. We also have a bot that sends notifications when an `area-*` label is applied. It does not automatically notify area owners; not everyone wants these notifications. Instead, if you want these notifications, see the instructions in [area-owners.md](area-owners.md). It is not necessary to have committer rights to get them. ## How are pull requests marked with labels and milestones? Given the scope of dotnet/runtime, all pull requests are automatically assigned an `area-*` label. In addition, some pull requests may have milestones applied according to release endgame and servicing requirements. ## How do you do ongoing management for your repo? One team manager (M2) (perhaps rotating) is accountable for ensuring the following global health activities are accounted for: - Triaging incoming issues and assigning `area-*` labels to those that could not be labeled automatically - Common infrastructure tracking - Service Level Agreement tracking for responsiveness and a healthy repo - Release issue burndown Area-level owners will then manage their own pull requests and issues as they see fit. ## What other issue automation is available? There is a `needs author feedback` label you can apply to an issue when it is not actionable without more information from the author. When this label is applied, if there is no subsequent comment from the author within 14 days, a bot adds the `no recent activity` label and a note. After a further 7 days without comment from the author, the bot closes the issue or PR. If during any of this time there is a comment from anyone, `no recent activity` is cleared. If there is a comment from the author, `needs author feedback` is also cleared and `needs further triage` is applied so we see it. ## How are labels managed? There are few access controls, so in general everyone has access - be a good global citizen. 
It is possible to have too many labels. Consider whether a new label could be confusing: for example, a new `P1` label would be confusing given we already have priority labels. When in doubt, ask the team manager that is responsible. ## What is dotnet/runtime's branch policy? General guidance is to rarely create a direct branch within the repository and instead fork and create a branch. If any branch is created temporarily, it should be deleted as soon as the associated pull request is merged or closed. Any non-release branch is subject to deletion at any time. Branches are made for servicing releases and are managed centrally. Merging into these branches is monitored and managed centrally. The repositories in dotnet/runtime represent the bottom of the stack for .NET Core. As such, these repositories often lock down before the rest of .NET Core at the end of a release. The general policy is that all code within dotnet/runtime will align in lockdown dates and policies. ## What is dotnet/runtime's mirror policy? No specific policy. But please use common sense if the mirror will have any potential impact on the broader community. ## What is dotnet/runtime's project boards and ZenHub policy? The portions of ZenHub that are shared across the entire repository are the names of the pipelines (eg. the column names). When adding or editing these pipelines, it is best to communicate broadly and build consensus. ## What is dotnet/runtime's policy on Wikis? Wikis will be disabled for this repository.
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/wasi/README.md
# Prototype WASI support This directory contains a build configuration for WASI support, plus a basic sample. This is not intended for production use, nor is it currently supported. This is a step towards possible future support. ## How it works The mechanism for executing .NET code in a WASI runtime environment is equivalent to how `dotnet.wasm` executes .NET code in a browser environment. That is, it runs the Mono interpreter to execute .NET bytecode that has been built in the normal way. It should also work with AOT, but this has not yet been attempted. ## How to build the runtime Currently this can only be built on Linux or in WSL (tested on Windows 11). Simply run `make` in this directory. It will automatically download and use [WASI SDK](https://github.com/WebAssembly/wasi-sdk). The resulting libraries are placed in `(repo_root)/artifacts/bin/mono/Wasi.Release`. ## How to build and run the sample ### 1. Obtain a WASI runtime To run an application in a WASI environment, you need to have a WASI runtime available. For example, download [wasmtime](https://github.com/bytecodealliance/wasmtime/releases) and make sure it's available on `PATH`: ``` export PATH=~/wasmtime-v0.31.0-x86_64-linux:$PATH wasmtime --version ``` Other WASI runtimes also work. Tested: [wamr](https://github.com/bytecodealliance/wasm-micro-runtime), [wasmer](https://wasmer.io/). ### 2. Obtain a suitable .NET build toolchain You also need to have a working installation of .NET 7 including the `browser-wasm` runtime pack. For example, obtain the [.NET SDK daily build](https://github.com/dotnet/installer/blob/main/README.md#installers-and-binaries) (`main` branch), and ensure the `browser-wasm` pack is installed: ``` dotnet workload install wasm-tools -s https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet7/nuget/v3/index.json ``` To make this available to the build scripts, supply environment variables. Example: ``` export DOTNET_ROOT=~/dotnet7 export BROWSER_WASM_RUNTIME_PATH=$DOTNET_ROOT/packs/Microsoft.NETCore.App.Runtime.Mono.browser-wasm/7.0.0-alpha.1.22061.11/runtimes/browser-wasm ``` You'll need to update these paths to match the location where you extracted the .NET daily SDK build and the exact version of the `browser-wasm` pack you received. ### 3. Run it Finally, you can build and run the sample: ``` cd samples/console make run ```
# Prototype WASI support This directory contains a build configuration for WASI support, plus a basic sample. This is not intended for production use, nor is it currently supported. This is a step towards possible future support. ## How it works The mechanism for executing .NET code in a WASI runtime environment is equivalent to how `dotnet.wasm` executes .NET code in a browser environment. That is, it runs the Mono interpreter to execute .NET bytecode that has been built in the normal way. It should also work with AOT, but this has not yet been attempted. ## How to build the runtime Currently this can only be built on Linux or in WSL (tested on Windows 11). Simply run `make` in this directory. It will automatically download and use [WASI SDK](https://github.com/WebAssembly/wasi-sdk). The resulting libraries are placed in `(repo_root)/artifacts/bin/mono/Wasi.Release`. ## How to build and run the sample ### 1. Obtain a WASI runtime To run an application in a WASI environment, you need to have a WASI runtime available. For example, download [wasmtime](https://github.com/bytecodealliance/wasmtime/releases) and make sure it's available on `PATH`: ``` export PATH=~/wasmtime-v0.31.0-x86_64-linux:$PATH wasmtime --version ``` Other WASI runtimes also work. Tested: [wamr](https://github.com/bytecodealliance/wasm-micro-runtime), [wasmer](https://wasmer.io/). ### 2. Obtain a suitable .NET build toolchain You also need to have a working installation of .NET 7 including the `browser-wasm` runtime pack. For example, obtain the [.NET SDK daily build](https://github.com/dotnet/installer/blob/main/README.md#installers-and-binaries) (`main` branch), and ensure the `browser-wasm` pack is installed: ``` dotnet workload install wasm-tools -s https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet7/nuget/v3/index.json ``` To make this available to the build scripts, supply environment variables. Example: ``` export DOTNET_ROOT=~/dotnet7 export BROWSER_WASM_RUNTIME_PATH=$DOTNET_ROOT/packs/Microsoft.NETCore.App.Runtime.Mono.browser-wasm/7.0.0-alpha.1.22061.11/runtimes/browser-wasm ``` You'll need to update these paths to match the location where you extracted the .NET daily SDK build and the exact version of the `browser-wasm` pack you received. ### 3. Run it Finally, you can build and run the sample: ``` cd samples/console make run ```
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/component/hot_reload-stub.c
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #include <config.h> #include <glib.h> #include "mono/component/component.h" #include "mono/component/hot_reload.h" #include "mono/metadata/components.h" #include "mono/metadata/metadata-update.h" #include "mono/utils/mono-compiler.h" #include "mono/utils/mono-error-internals.h" static bool hot_reload_stub_available (void); static void hot_reload_stub_apply_changes (int origin, MonoImage *base_image, gconstpointer dmeta, uint32_t dmeta_len, gconstpointer dil, uint32_t dil_len, gconstpointer dpdb_bytes_orig, uint32_t dpdb_length, MonoError *error); static MonoComponentHotReload * component_hot_reload_stub_init (void); static void hot_reload_stub_set_fastpath_data (MonoMetadataUpdateData *ptr); static gboolean hot_reload_stub_update_enabled (int *modifiable_assemblies_out); static gboolean hot_reload_stub_no_inline (MonoMethod *caller, MonoMethod *callee); static uint32_t hot_reload_stub_thread_expose_published (void); static uint32_t hot_reload_stub_get_thread_generation (void); static void hot_reload_stub_cleanup_on_close (MonoImage *image); static void hot_reload_stub_effective_table_slow (const MonoTableInfo **t, int idx); static void hot_reload_stub_close_except_pools_all (MonoImage *base_image); static void hot_reload_stub_close_all (MonoImage *base_image); static gpointer hot_reload_stub_get_updated_method_rva (MonoImage *base_image, uint32_t idx); static gboolean hot_reload_stub_table_bounds_check (MonoImage *base_image, int table_index, int token_index); static gboolean hot_reload_stub_delta_heap_lookup (MonoImage *base_image, MetadataHeapGetterFunc get_heap, uint32_t orig_index, MonoImage **image_out, uint32_t *index_out); static gpointer hot_reload_stub_get_updated_method_ppdb (MonoImage *base_image, uint32_t idx); static gboolean hot_reload_stub_has_modified_rows (const MonoTableInfo *table); static int hot_reload_stub_table_num_rows_slow (MonoImage *image, int table_index); static uint32_t hot_reload_stub_method_parent (MonoImage *image, uint32_t method_index); static void* hot_reload_stub_metadata_linear_search (MonoImage *base_image, MonoTableInfo *base_table, const void *key, BinarySearchComparer comparer); static uint32_t hot_reload_stub_field_parent (MonoImage *image, uint32_t field_index); static uint32_t hot_reload_stub_get_field_idx (MonoClassField *field); static MonoClassField * hot_reload_stub_get_field (MonoClass *klass, uint32_t fielddef_token); static gpointer hot_reload_stub_get_static_field_addr (MonoClassField *field); static MonoMethod * hot_reload_stub_find_method_by_name (MonoClass *klass, const char *name, int param_count, int flags, MonoError *error); static MonoComponentHotReload fn_table = { { MONO_COMPONENT_ITF_VERSION, &hot_reload_stub_available }, &hot_reload_stub_set_fastpath_data, &hot_reload_stub_update_enabled, &hot_reload_stub_no_inline, &hot_reload_stub_thread_expose_published, &hot_reload_stub_get_thread_generation, &hot_reload_stub_cleanup_on_close, &hot_reload_stub_effective_table_slow, &hot_reload_stub_apply_changes, &hot_reload_stub_close_except_pools_all, &hot_reload_stub_close_all, &hot_reload_stub_get_updated_method_rva, &hot_reload_stub_table_bounds_check, &hot_reload_stub_delta_heap_lookup, &hot_reload_stub_get_updated_method_ppdb, &hot_reload_stub_has_modified_rows, &hot_reload_stub_table_num_rows_slow, &hot_reload_stub_method_parent, &hot_reload_stub_metadata_linear_search, 
&hot_reload_stub_field_parent, &hot_reload_stub_get_field_idx, &hot_reload_stub_get_field, &hot_reload_stub_get_static_field_addr, &hot_reload_stub_find_method_by_name, }; static bool hot_reload_stub_available (void) { return false; } static MonoComponentHotReload * component_hot_reload_stub_init (void) { return &fn_table; } void hot_reload_stub_set_fastpath_data (MonoMetadataUpdateData *ptr) { } gboolean hot_reload_stub_update_enabled (int *modifiable_assemblies_out) { if (modifiable_assemblies_out) *modifiable_assemblies_out = MONO_MODIFIABLE_ASSM_NONE; return false; } static gboolean hot_reload_stub_no_inline (MonoMethod *caller, MonoMethod *callee) { return false; } static uint32_t hot_reload_stub_thread_expose_published (void) { return 0; } uint32_t hot_reload_stub_get_thread_generation (void) { return 0; } void hot_reload_stub_cleanup_on_close (MonoImage *image) { } void hot_reload_stub_effective_table_slow (const MonoTableInfo **t, int idx) { g_assert_not_reached (); } void hot_reload_stub_apply_changes (int origin, MonoImage *base_image, gconstpointer dmeta, uint32_t dmeta_len, gconstpointer dil, uint32_t dil_len, gconstpointer dpdb_bytes_orig, uint32_t dpdb_length, MonoError *error) { mono_error_set_not_supported (error, "Hot reload not supported in this runtime."); } static void hot_reload_stub_close_except_pools_all (MonoImage *base_image) { } static void hot_reload_stub_close_all (MonoImage *base_image) { } gpointer hot_reload_stub_get_updated_method_rva (MonoImage *base_image, uint32_t idx) { g_assert_not_reached (); } gboolean hot_reload_stub_table_bounds_check (MonoImage *base_image, int table_index, int token_index) { g_assert_not_reached (); } static gboolean hot_reload_stub_delta_heap_lookup (MonoImage *base_image, MetadataHeapGetterFunc get_heap, uint32_t orig_index, MonoImage **image_out, uint32_t *index_out) { g_assert_not_reached (); } static gpointer hot_reload_stub_get_updated_method_ppdb (MonoImage *base_image, uint32_t idx) { g_assert_not_reached (); } static gboolean hot_reload_stub_has_modified_rows (const MonoTableInfo *table) { return FALSE; } static int hot_reload_stub_table_num_rows_slow (MonoImage *image, int table_index) { g_assert_not_reached (); /* should always take the fast path */ } static uint32_t hot_reload_stub_method_parent (MonoImage *image, uint32_t method_index) { return 0; } static void* hot_reload_stub_metadata_linear_search (MonoImage *base_image, MonoTableInfo *base_table, const void *key, BinarySearchComparer comparer) { return NULL; } static uint32_t hot_reload_stub_field_parent (MonoImage *image, uint32_t field_index) { return 0; } static uint32_t hot_reload_stub_get_field_idx (MonoClassField *field) { return 0; } static MonoClassField * hot_reload_stub_get_field (MonoClass *klass, uint32_t fielddef_token) { return NULL; } static gpointer hot_reload_stub_get_static_field_addr (MonoClassField *field) { return NULL; } static MonoMethod * hot_reload_stub_find_method_by_name (MonoClass *klass, const char *name, int param_count, int flags, MonoError *error) { return NULL; } MONO_COMPONENT_EXPORT_ENTRYPOINT MonoComponentHotReload * mono_component_hot_reload_init (void) { return component_hot_reload_stub_init (); }
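The stub above wires every entry point to a no-op or `g_assert_not_reached` and, crucially, answers the availability probe with `false`: the first slot of `fn_table` pairs `MONO_COMPONENT_ITF_VERSION` with `hot_reload_stub_available`. A minimal sketch of how a caller can probe the component follows; the `component.available` field names are inferred from the initializer above and should be treated as an assumption, not the exact header layout.

```c
/*
 * Illustrative consumer of the component interface (assumed field
 * names). With this stub linked in, available() always returns false,
 * so hot-reload-dependent paths fall back or report "not supported".
 */
static gboolean
hot_reload_is_available (void)
{
	MonoComponentHotReload *comp = mono_component_hot_reload_init ();
	return comp->component.available ();
}
```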
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/aot-runtime-wasm.c
/** * \file * WASM AOT runtime */ #include "config.h" #include <sys/types.h> #include "mini.h" #include <mono/jit/mono-private-unstable.h> #include "interp/interp.h" #ifdef TARGET_WASM static char type_to_c (MonoType *t) { if (m_type_is_byref (t)) return 'I'; handle_enum: switch (t->type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_CHAR: case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: return 'I'; case MONO_TYPE_R4: return 'F'; case MONO_TYPE_R8: return 'D'; break; case MONO_TYPE_I8: case MONO_TYPE_U8: return 'L'; case MONO_TYPE_VOID: return 'V'; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (t->data.klass)) { t = mono_class_enum_basetype_internal (t->data.klass); goto handle_enum; } return 'I'; case MONO_TYPE_GENERICINST: if (m_class_is_valuetype (t->data.klass)) return 'S'; return 'I'; default: g_warning ("CANT TRANSLATE %s", mono_type_full_name (t)); return 'X'; } } #define FIDX(x) (x) typedef union { gint64 l; struct { gint32 lo; gint32 hi; } pair; } interp_pair; static gint64 get_long_arg (InterpMethodArguments *margs, int idx) { interp_pair p; p.pair.lo = (gint32)(gssize)margs->iargs [idx]; p.pair.hi = (gint32)(gssize)margs->iargs [idx + 1]; return p.l; } #include "wasm_m2n_invoke.g.h" static int compare_icall_tramp (const void *key, const void *elem) { return strcmp (key, *(void**)elem); } gpointer mono_wasm_get_interp_to_native_trampoline (MonoMethodSignature *sig) { char cookie [32]; int c_count; c_count = sig->param_count + sig->hasthis + 1; g_assert (c_count < sizeof (cookie)); //ensure we don't overflow the local cookie [0] = type_to_c (sig->ret); if (sig->hasthis) cookie [1] = 'I'; for (int i = 0; i < sig->param_count; ++i) { cookie [1 + sig->hasthis + i] = type_to_c (sig->params [i]); } cookie [c_count] = 0; void *p = bsearch (cookie, interp_to_native_signatures, G_N_ELEMENTS (interp_to_native_signatures), sizeof (gpointer), compare_icall_tramp); if (!p) g_error ("CANNOT HANDLE INTERP ICALL SIG %s\n", cookie); int idx = (const char**)p - (const char**)interp_to_native_signatures; return interp_to_native_invokes [idx]; } static MonoWasmGetNativeToInterpTramp get_native_to_interp_tramp_cb; MONO_API void mono_wasm_install_get_native_to_interp_tramp (MonoWasmGetNativeToInterpTramp cb) { get_native_to_interp_tramp_cb = cb; } gpointer mono_wasm_get_native_to_interp_trampoline (MonoMethod *method, gpointer extra_arg) { if (get_native_to_interp_tramp_cb) return get_native_to_interp_tramp_cb (method, extra_arg); else return NULL; } #else /* TARGET_WASM */ MONO_EMPTY_SOURCE_FILE (aot_runtime_wasm); #endif /* TARGET_WASM */
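In `mono_wasm_get_interp_to_native_trampoline` above, each managed signature is collapsed into a short "cookie" string, one character per type (`'I'` for pointers/ints/objects, `'L'` for 64-bit ints, `'F'`/`'D'` for floats/doubles, `'V'` for void, `'S'` for value types), which is then binary-searched against the generated `interp_to_native_signatures` table. The standalone demo below is illustrative only; `demo_type_to_c` and its string keys are made-up stand-ins for the real `MonoType`-based mapping.

```c
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for type_to_c above, covering only the types used
 * in this example. The real function switches on MonoType and also
 * handles byrefs, enums, value types, etc. */
static char demo_type_to_c (const char *type)
{
	if (!strcmp (type, "i8")) return 'L';
	if (!strcmp (type, "r8")) return 'D';
	if (!strcmp (type, "r4")) return 'F';
	return 'I'; /* object refs, byrefs, native ints, ... */
}

int main (void)
{
	/* Instance method `long M(double, float)`:
	 * return type + `this` + params => "LIDF" */
	char cookie [8];
	int n = 0;
	cookie [n++] = demo_type_to_c ("i8"); /* return type */
	cookie [n++] = 'I';                   /* hasthis     */
	cookie [n++] = demo_type_to_c ("r8"); /* param 0     */
	cookie [n++] = demo_type_to_c ("r4"); /* param 1     */
	cookie [n] = 0;
	printf ("%s\n", cookie);              /* prints LIDF */
	return 0;
}
```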
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/mini/simd-intrinsics.c
/** * SIMD Intrinsics support for netcore. * Only LLVM is supported as a backend. */ #include <config.h> #include <mono/utils/mono-compiler.h> #include <mono/metadata/icall-decl.h> #include "mini.h" #include "mini-runtime.h" #include "ir-emit.h" #include "llvm-intrinsics-types.h" #ifdef ENABLE_LLVM #include "mini-llvm.h" #include "mini-llvm-cpp.h" #endif #include "mono/utils/bsearch.h" #include <mono/metadata/abi-details.h> #include <mono/metadata/reflection-internals.h> #include <mono/utils/mono-hwcap.h> #if defined (MONO_ARCH_SIMD_INTRINSICS) #if defined(DISABLE_JIT) void mono_simd_intrinsics_init (void) { } #else #define MSGSTRFIELD(line) MSGSTRFIELD1(line) #define MSGSTRFIELD1(line) str##line static const struct msgstr_t { #define METHOD(name) char MSGSTRFIELD(__LINE__) [sizeof (#name)]; #define METHOD2(str,name) char MSGSTRFIELD(__LINE__) [sizeof (str)]; #include "simd-methods.h" #undef METHOD #undef METHOD2 } method_names = { #define METHOD(name) #name, #define METHOD2(str,name) str, #include "simd-methods.h" #undef METHOD #undef METHOD2 }; enum { #define METHOD(name) SN_ ## name = offsetof (struct msgstr_t, MSGSTRFIELD(__LINE__)), #define METHOD2(str,name) SN_ ## name = offsetof (struct msgstr_t, MSGSTRFIELD(__LINE__)), #include "simd-methods.h" }; #define method_name(idx) ((const char*)&method_names + (idx)) static int register_size; #define None 0 typedef struct { uint16_t id; // One of the SN_ constants uint16_t default_op; // ins->opcode uint16_t default_instc0; // ins->inst_c0 uint16_t unsigned_op; uint16_t unsigned_instc0; uint16_t floating_op; uint16_t floating_instc0; } SimdIntrinsic; static const SimdIntrinsic unsupported [] = { {SN_get_IsSupported} }; void mono_simd_intrinsics_init (void) { register_size = 16; #if 0 if ((mini_get_cpu_features () & MONO_CPU_X86_AVX) != 0) register_size = 32; #endif /* Tell the class init code the size of the System.Numerics.Register type */ mono_simd_register_size = register_size; } MonoInst* mono_emit_simd_field_load (MonoCompile *cfg, MonoClassField *field, MonoInst *addr) { return NULL; } static int simd_intrinsic_compare_by_name (const void *key, const void *value) { return strcmp ((const char*)key, method_name (*(guint16*)value)); } static int simd_intrinsic_info_compare_by_name (const void *key, const void *value) { SimdIntrinsic *info = (SimdIntrinsic*)value; return strcmp ((const char*)key, method_name (info->id)); } static int lookup_intrins (guint16 *intrinsics, int size, MonoMethod *cmethod) { const guint16 *result = (const guint16 *)mono_binary_search (cmethod->name, intrinsics, size / sizeof (guint16), sizeof (guint16), &simd_intrinsic_compare_by_name); if (result == NULL) return -1; else return (int)*result; } static SimdIntrinsic* lookup_intrins_info (SimdIntrinsic *intrinsics, int size, MonoMethod *cmethod) { #if 0 for (int i = 0; i < (size / sizeof (SimdIntrinsic)) - 1; ++i) { const char *n1 = method_name (intrinsics [i].id); const char *n2 = method_name (intrinsics [i + 1].id); int len1 = strlen (n1); int len2 = strlen (n2); for (int j = 0; j < len1 && j < len2; ++j) { if (n1 [j] > n2 [j]) { printf ("%s %s\n", n1, n2); g_assert_not_reached (); } else if (n1 [j] < n2 [j]) { break; } } } #endif return (SimdIntrinsic *)mono_binary_search (cmethod->name, intrinsics, size / sizeof (SimdIntrinsic), sizeof (SimdIntrinsic), &simd_intrinsic_info_compare_by_name); } /* * Return a simd vreg for the simd value represented by SRC. * SRC is the 'this' argument to methods. * Set INDIRECT to TRUE if the value was loaded from memory. 
*/ static int load_simd_vreg_class (MonoCompile *cfg, MonoClass *klass, MonoInst *src, gboolean *indirect) { const char *spec = INS_INFO (src->opcode); if (indirect) *indirect = FALSE; if (src->opcode == OP_XMOVE) { return src->sreg1; } else if (src->opcode == OP_LDADDR) { int res = ((MonoInst*)src->inst_p0)->dreg; return res; } else if (spec [MONO_INST_DEST] == 'x') { return src->dreg; } else if (src->type == STACK_PTR || src->type == STACK_MP) { MonoInst *ins; if (indirect) *indirect = TRUE; MONO_INST_NEW (cfg, ins, OP_LOADX_MEMBASE); ins->klass = klass; ins->sreg1 = src->dreg; ins->type = STACK_VTYPE; ins->dreg = alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, ins); return ins->dreg; } g_warning ("load_simd_vreg:: could not infer source simd (%d) vreg for op", src->type); mono_print_ins (src); g_assert_not_reached (); } static int load_simd_vreg (MonoCompile *cfg, MonoMethod *cmethod, MonoInst *src, gboolean *indirect) { return load_simd_vreg_class (cfg, cmethod->klass, src, indirect); } /* Create and emit a SIMD instruction, dreg is auto-allocated */ static MonoInst* emit_simd_ins (MonoCompile *cfg, MonoClass *klass, int opcode, int sreg1, int sreg2) { const char *spec = INS_INFO (opcode); MonoInst *ins; MONO_INST_NEW (cfg, ins, opcode); if (spec [MONO_INST_DEST] == 'x') { ins->dreg = alloc_xreg (cfg); ins->type = STACK_VTYPE; } else if (spec [MONO_INST_DEST] == 'i') { ins->dreg = alloc_ireg (cfg); ins->type = STACK_I4; } else if (spec [MONO_INST_DEST] == 'l') { ins->dreg = alloc_lreg (cfg); ins->type = STACK_I8; } else if (spec [MONO_INST_DEST] == 'f') { ins->dreg = alloc_freg (cfg); ins->type = STACK_R8; } else if (spec [MONO_INST_DEST] == 'v') { ins->dreg = alloc_dreg (cfg, STACK_VTYPE); ins->type = STACK_VTYPE; } ins->sreg1 = sreg1; ins->sreg2 = sreg2; ins->klass = klass; MONO_ADD_INS (cfg->cbb, ins); return ins; } static MonoInst* emit_simd_ins_for_sig (MonoCompile *cfg, MonoClass *klass, int opcode, int instc0, int instc1, MonoMethodSignature *fsig, MonoInst **args) { g_assert (fsig->param_count <= 3); MonoInst* ins = emit_simd_ins (cfg, klass, opcode, fsig->param_count > 0 ? args [0]->dreg : -1, fsig->param_count > 1 ? args [1]->dreg : -1); if (instc0 != -1) ins->inst_c0 = instc0; if (instc1 != -1) ins->inst_c1 = instc1; if (fsig->param_count == 3) ins->sreg3 = args [2]->dreg; return ins; } static gboolean is_hw_intrinsics_class (MonoClass *klass, const char *name, gboolean *is_64bit) { const char *class_name = m_class_get_name (klass); if ((!strcmp (class_name, "X64") || !strcmp (class_name, "Arm64")) && m_class_get_nested_in (klass)) { *is_64bit = TRUE; return !strcmp (m_class_get_name (m_class_get_nested_in (klass)), name); } else { *is_64bit = FALSE; return !strcmp (class_name, name); } } static MonoTypeEnum get_underlying_type (MonoType* type) { MonoClass* klass = mono_class_from_mono_type_internal (type); if (type->type == MONO_TYPE_PTR) // e.g. int* => MONO_TYPE_I4 return m_class_get_byval_arg (m_class_get_element_class (klass))->type; else if (type->type == MONO_TYPE_GENERICINST) // e.g. Vector128<int> => MONO_TYPE_I4 return mono_class_get_context (klass)->class_inst->type_argv [0]->type; else return type->type; } static MonoInst* emit_xcompare (MonoCompile *cfg, MonoClass *klass, MonoTypeEnum etype, MonoInst *arg1, MonoInst *arg2) { MonoInst *ins; gboolean is_fp = etype == MONO_TYPE_R4 || etype == MONO_TYPE_R8; ins = emit_simd_ins (cfg, klass, is_fp ? 
OP_XCOMPARE_FP : OP_XCOMPARE, arg1->dreg, arg2->dreg); ins->inst_c0 = CMP_EQ; ins->inst_c1 = etype; return ins; } static MonoInst* emit_xequal (MonoCompile *cfg, MonoClass *klass, MonoInst *arg1, MonoInst *arg2) { return emit_simd_ins (cfg, klass, OP_XEQUAL, arg1->dreg, arg2->dreg); } static MonoInst* emit_not_xequal (MonoCompile *cfg, MonoClass *klass, MonoInst *arg1, MonoInst *arg2) { MonoInst *ins = emit_simd_ins (cfg, klass, OP_XEQUAL, arg1->dreg, arg2->dreg); int sreg = ins->dreg; int dreg = alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sreg, 0); EMIT_NEW_UNALU (cfg, ins, OP_CEQ, dreg, -1); return ins; } static MonoInst* emit_xzero (MonoCompile *cfg, MonoClass *klass) { return emit_simd_ins (cfg, klass, OP_XZERO, -1, -1); } static gboolean is_intrinsics_vector_type (MonoType *vector_type) { if (vector_type->type != MONO_TYPE_GENERICINST) return FALSE; MonoClass *klass = mono_class_from_mono_type_internal (vector_type); const char *name = m_class_get_name (klass); return !strcmp (name, "Vector64`1") || !strcmp (name, "Vector128`1") || !strcmp (name, "Vector256`1"); } static MonoType* get_vector_t_elem_type (MonoType *vector_type) { MonoClass *klass; MonoType *etype; g_assert (vector_type->type == MONO_TYPE_GENERICINST); klass = mono_class_from_mono_type_internal (vector_type); g_assert ( !strcmp (m_class_get_name (klass), "Vector`1") || !strcmp (m_class_get_name (klass), "Vector64`1") || !strcmp (m_class_get_name (klass), "Vector128`1") || !strcmp (m_class_get_name (klass), "Vector256`1")); etype = mono_class_get_context (klass)->class_inst->type_argv [0]; return etype; } static gboolean type_is_unsigned (MonoType *type) { MonoClass *klass = mono_class_from_mono_type_internal (type); MonoType *etype = mono_class_get_context (klass)->class_inst->type_argv [0]; switch (etype->type) { case MONO_TYPE_U1: case MONO_TYPE_U2: case MONO_TYPE_U4: case MONO_TYPE_U8: case MONO_TYPE_U: return TRUE; } return FALSE; } static gboolean type_is_float (MonoType *type) { MonoClass *klass = mono_class_from_mono_type_internal (type); MonoType *etype = mono_class_get_context (klass)->class_inst->type_argv [0]; switch (etype->type) { case MONO_TYPE_R4: case MONO_TYPE_R8: return TRUE; } return FALSE; } static int type_to_expand_op (MonoType *type) { switch (type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_EXPAND_I1; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_EXPAND_I2; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_EXPAND_I4; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_EXPAND_I8; case MONO_TYPE_R4: return OP_EXPAND_R4; case MONO_TYPE_R8: return OP_EXPAND_R8; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_EXPAND_I8; #else return OP_EXPAND_I4; #endif default: g_assert_not_reached (); } } static int type_to_insert_op (MonoType *type) { switch (type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_INSERT_I1; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_INSERT_I2; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_INSERT_I4; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_INSERT_I8; case MONO_TYPE_R4: return OP_INSERT_R4; case MONO_TYPE_R8: return OP_INSERT_R8; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_INSERT_I8; #else return OP_INSERT_I4; #endif default: g_assert_not_reached (); } } typedef struct { const char *name; MonoCPUFeatures feature; const SimdIntrinsic *intrinsics; int intrinsics_size; gboolean jit_supported; } IntrinGroup; typedef MonoInst * (* EmitIntrinsicFn) ( MonoCompile *cfg, 
MonoMethodSignature *fsig, MonoInst **args, MonoClass *klass, const IntrinGroup *intrin_group, const SimdIntrinsic *info, int id, MonoTypeEnum arg0_type, gboolean is_64bit); static const IntrinGroup unsupported_intrin_group [] = { { "", 0, unsupported, sizeof (unsupported) }, }; static MonoInst * emit_hardware_intrinsics ( MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, const IntrinGroup *groups, int groups_size_bytes, EmitIntrinsicFn custom_emit) { MonoClass *klass = cmethod->klass; const IntrinGroup *intrin_group = unsupported_intrin_group; gboolean is_64bit = FALSE; int groups_size = groups_size_bytes / sizeof (groups [0]); for (int i = 0; i < groups_size; ++i) { const IntrinGroup *group = &groups [i]; if (is_hw_intrinsics_class (klass, group->name, &is_64bit)) { intrin_group = group; break; } } gboolean supported = FALSE; MonoTypeEnum arg0_type = fsig->param_count > 0 ? get_underlying_type (fsig->params [0]) : MONO_TYPE_VOID; int id = -1; uint16_t op = 0; uint16_t c0 = 0; const SimdIntrinsic *intrinsics = intrin_group->intrinsics; int intrinsics_size = intrin_group->intrinsics_size; MonoCPUFeatures feature = intrin_group->feature; const SimdIntrinsic *info = lookup_intrins_info ((SimdIntrinsic *) intrinsics, intrinsics_size, cmethod); { if (!info) goto support_probe_complete; id = info->id; // Hardware intrinsics are LLVM-only. if (!COMPILE_LLVM (cfg) && !intrin_group->jit_supported) goto support_probe_complete; if (intrin_group->intrinsics == unsupported) supported = FALSE; else if (feature) supported = (mini_get_cpu_features (cfg) & feature) != 0; else supported = TRUE; op = info->default_op; c0 = info->default_instc0; gboolean is_unsigned = FALSE; gboolean is_float = FALSE; switch (arg0_type) { case MONO_TYPE_U1: case MONO_TYPE_U2: case MONO_TYPE_U4: case MONO_TYPE_U8: case MONO_TYPE_U: is_unsigned = TRUE; break; case MONO_TYPE_R4: case MONO_TYPE_R8: is_float = TRUE; break; } if (is_unsigned && info->unsigned_op != 0) { op = info->unsigned_op; c0 = info->unsigned_instc0; } else if (is_float && info->floating_op != 0) { op = info->floating_op; c0 = info->floating_instc0; } } support_probe_complete: if (id == SN_get_IsSupported) { MonoInst *ins = NULL; EMIT_NEW_ICONST (cfg, ins, supported ? 
1 : 0); return ins; } if (!supported) { // Can't emit non-supported llvm intrinsics if (cfg->method != cmethod) { // Keep the original call so we end up in the intrinsic method return NULL; } else { // Emit an exception from the intrinsic method mono_emit_jit_icall (cfg, mono_throw_platform_not_supported, NULL); return NULL; } } if (op != 0) return emit_simd_ins_for_sig (cfg, klass, op, c0, arg0_type, fsig, args); return custom_emit (cfg, fsig, args, klass, intrin_group, info, id, arg0_type, is_64bit); } static MonoInst * emit_vector_create_elementwise ( MonoCompile *cfg, MonoMethodSignature *fsig, MonoType *vtype, MonoType *etype, MonoInst **args) { int op = type_to_insert_op (etype); MonoClass *vklass = mono_class_from_mono_type_internal (vtype); MonoInst *ins = emit_xzero (cfg, vklass); for (int i = 0; i < fsig->param_count; ++i) { ins = emit_simd_ins (cfg, vklass, op, ins->dreg, args [i]->dreg); ins->inst_c0 = i; } return ins; } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) static int type_to_xinsert_op (MonoTypeEnum type) { switch (type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_XINSERT_I1; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_XINSERT_I2; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_XINSERT_I4; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_XINSERT_I8; case MONO_TYPE_R4: return OP_XINSERT_R4; case MONO_TYPE_R8: return OP_XINSERT_R8; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_XINSERT_I8; #else return OP_XINSERT_I4; #endif default: g_assert_not_reached (); } } static int type_to_xextract_op (MonoTypeEnum type) { switch (type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_XEXTRACT_I1; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_XEXTRACT_I2; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_XEXTRACT_I4; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_XEXTRACT_I8; case MONO_TYPE_R4: return OP_XEXTRACT_R4; case MONO_TYPE_R8: return OP_XEXTRACT_R8; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_XEXTRACT_I8; #else return OP_XEXTRACT_I4; #endif default: g_assert_not_reached (); } } static int type_to_extract_op (MonoTypeEnum type) { switch (type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_EXTRACT_I1; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_EXTRACT_I2; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_EXTRACT_I4; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_EXTRACT_I8; case MONO_TYPE_R4: return OP_EXTRACT_R4; case MONO_TYPE_R8: return OP_EXTRACT_R8; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_EXTRACT_I8; #else return OP_EXTRACT_I4; #endif default: g_assert_not_reached (); } } static guint16 sri_vector_methods [] = { SN_Abs, SN_Add, SN_AndNot, SN_As, SN_AsByte, SN_AsDouble, SN_AsInt16, SN_AsInt32, SN_AsInt64, SN_AsSByte, SN_AsSingle, SN_AsUInt16, SN_AsUInt32, SN_AsUInt64, SN_BitwiseAnd, SN_BitwiseOr, SN_AsVector128, SN_AsVector2, SN_AsVector256, SN_AsVector3, SN_AsVector4, SN_Ceiling, SN_ConditionalSelect, SN_ConvertToDouble, SN_ConvertToInt32, SN_ConvertToUInt32, SN_Create, SN_CreateScalar, SN_CreateScalarUnsafe, SN_Divide, SN_Equals, SN_EqualsAll, SN_EqualsAny, SN_Floor, SN_GetElement, SN_GetLower, SN_GetUpper, SN_GreaterThan, SN_GreaterThanOrEqual, SN_LessThan, SN_LessThanOrEqual, SN_Max, SN_Min, SN_Multiply, SN_Negate, SN_OnesComplement, SN_Sqrt, SN_Subtract, SN_ToScalar, SN_ToVector128, SN_ToVector128Unsafe, SN_ToVector256, SN_ToVector256Unsafe, SN_WithElement, SN_Xor, }; /* nint and nuint haven't been enabled yet for System.Runtime.Intrinsics. 
* Remove this once support has been added. */ #define MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE(t) ((MONO_TYPE_IS_VECTOR_PRIMITIVE(t)) && ((t)->type != MONO_TYPE_I) && ((t)->type != MONO_TYPE_U)) static gboolean is_elementwise_create_overload (MonoMethodSignature *fsig, MonoType *ret_type) { uint16_t param_count = fsig->param_count; if (param_count < 1) return FALSE; MonoType *type = fsig->params [0]; if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (type)) return FALSE; if (!mono_metadata_type_equal (ret_type, type)) return FALSE; for (uint16_t i = 1; i < param_count; ++i) if (!mono_metadata_type_equal (type, fsig->params [i])) return FALSE; return TRUE; } static gboolean is_create_from_half_vectors_overload (MonoMethodSignature *fsig) { if (fsig->param_count != 2) return FALSE; if (!is_intrinsics_vector_type (fsig->params [0])) return FALSE; return mono_metadata_type_equal (fsig->params [0], fsig->params [1]); } static MonoInst* emit_sri_vector (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { if (!COMPILE_LLVM (cfg)) return NULL; MonoClass *klass = cmethod->klass; int id = lookup_intrins (sri_vector_methods, sizeof (sri_vector_methods), cmethod); if (id == -1) return NULL; if (!strcmp (m_class_get_name (cfg->method->klass), "Vector256")) return NULL; // TODO: Fix Vector256.WithUpper/WithLower MonoTypeEnum arg0_type = fsig->param_count > 0 ? get_underlying_type (fsig->params [0]) : MONO_TYPE_VOID; switch (id) { case SN_Abs: { #ifdef TARGET_ARM64 switch (arg0_type) { case MONO_TYPE_U1: case MONO_TYPE_U2: case MONO_TYPE_U4: case MONO_TYPE_U8: case MONO_TYPE_U: return NULL; } gboolean is_float = arg0_type == MONO_TYPE_R4 || arg0_type == MONO_TYPE_R8; int iid = is_float ? INTRINS_AARCH64_ADV_SIMD_FABS : INTRINS_AARCH64_ADV_SIMD_ABS; return emit_simd_ins_for_sig (cfg, klass, OP_XOP_OVR_X_X, iid, arg0_type, fsig, args); #else return NULL; #endif } case SN_Add: case SN_Max: case SN_Min: case SN_Multiply: case SN_Subtract: { int instc0 = -1; if (arg0_type == MONO_TYPE_R4 || arg0_type == MONO_TYPE_R8) { switch (id) { case SN_Add: instc0 = OP_FADD; break; case SN_Max: instc0 = OP_FMAX; break; case SN_Min: instc0 = OP_FMIN; break; case SN_Multiply: instc0 = OP_FMUL; break; case SN_Subtract: instc0 = OP_FSUB; break; default: g_assert_not_reached (); } } else { switch (id) { case SN_Add: instc0 = OP_IADD; break; case SN_Max: instc0 = OP_IMAX; break; case SN_Min: instc0 = OP_IMIN; break; case SN_Multiply: instc0 = OP_IMUL; break; case SN_Subtract: instc0 = OP_ISUB; break; default: g_assert_not_reached (); } } return emit_simd_ins_for_sig (cfg, klass, OP_XBINOP, instc0, arg0_type, fsig, args); } case SN_Divide: { if ((arg0_type != MONO_TYPE_R4) && (arg0_type != MONO_TYPE_R8)) return NULL; return emit_simd_ins_for_sig (cfg, klass, OP_XBINOP, OP_FDIV, arg0_type, fsig, args); } case SN_AndNot: #ifdef TARGET_ARM64 return emit_simd_ins_for_sig (cfg, klass, OP_ARM64_BIC, -1, arg0_type, fsig, args); #else return NULL; #endif case SN_BitwiseAnd: return emit_simd_ins_for_sig (cfg, klass, OP_XBINOP, OP_IAND, arg0_type, fsig, args); case SN_BitwiseOr: return emit_simd_ins_for_sig (cfg, klass, OP_XBINOP, OP_IOR, arg0_type, fsig, args); case SN_Xor: { if ((arg0_type == MONO_TYPE_R4) || (arg0_type == MONO_TYPE_R8)) return NULL; return emit_simd_ins_for_sig (cfg, klass, OP_XBINOP, OP_IXOR, arg0_type, fsig, args); } case SN_As: case SN_AsByte: case SN_AsDouble: case SN_AsInt16: case SN_AsInt32: case SN_AsInt64: case SN_AsSByte: case SN_AsSingle: case SN_AsUInt16: case SN_AsUInt32: case 
SN_AsUInt64: { MonoType *ret_type = get_vector_t_elem_type (fsig->ret); MonoType *arg_type = get_vector_t_elem_type (fsig->params [0]); if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (ret_type) || !MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (arg_type)) return NULL; return emit_simd_ins (cfg, klass, OP_XCAST, args [0]->dreg, -1); } case SN_Ceiling: case SN_Floor: { #ifdef TARGET_ARM64 if ((arg0_type != MONO_TYPE_R4) && (arg0_type != MONO_TYPE_R8)) return NULL; int ceil_or_floor = id == SN_Ceiling ? INTRINS_AARCH64_ADV_SIMD_FRINTP : INTRINS_AARCH64_ADV_SIMD_FRINTM; return emit_simd_ins_for_sig (cfg, klass, OP_XOP_OVR_X_X, ceil_or_floor, arg0_type, fsig, args); #else return NULL; #endif } case SN_ConditionalSelect: { #ifdef TARGET_ARM64 return emit_simd_ins_for_sig (cfg, klass, OP_ARM64_BSL, -1, arg0_type, fsig, args); #else return NULL; #endif } case SN_ConvertToDouble: { #ifdef TARGET_ARM64 if ((arg0_type != MONO_TYPE_I8) && (arg0_type != MONO_TYPE_U8)) return NULL; MonoClass *arg_class = mono_class_from_mono_type_internal (fsig->params [0]); int size = mono_class_value_size (arg_class, NULL); int op = -1; if (size == 8) op = arg0_type == MONO_TYPE_I8 ? OP_ARM64_SCVTF_SCALAR : OP_ARM64_UCVTF_SCALAR; else op = arg0_type == MONO_TYPE_I8 ? OP_ARM64_SCVTF : OP_ARM64_UCVTF; return emit_simd_ins_for_sig (cfg, klass, op, -1, arg0_type, fsig, args); #else return NULL; #endif } case SN_ConvertToInt32: case SN_ConvertToUInt32: { #ifdef TARGET_ARM64 if (arg0_type != MONO_TYPE_R4) return NULL; int op = id == SN_ConvertToInt32 ? OP_ARM64_FCVTZS : OP_ARM64_FCVTZU; return emit_simd_ins_for_sig (cfg, klass, op, -1, arg0_type, fsig, args); #else return NULL; #endif } case SN_Create: { MonoType *etype = get_vector_t_elem_type (fsig->ret); if (fsig->param_count == 1 && mono_metadata_type_equal (fsig->params [0], etype)) return emit_simd_ins (cfg, klass, type_to_expand_op (etype), args [0]->dreg, -1); else if (is_create_from_half_vectors_overload (fsig)) return emit_simd_ins (cfg, klass, OP_XCONCAT, args [0]->dreg, args [1]->dreg); else if (is_elementwise_create_overload (fsig, etype)) return emit_vector_create_elementwise (cfg, fsig, fsig->ret, etype, args); break; } case SN_CreateScalar: return emit_simd_ins_for_sig (cfg, klass, OP_CREATE_SCALAR, -1, arg0_type, fsig, args); case SN_CreateScalarUnsafe: return emit_simd_ins_for_sig (cfg, klass, OP_CREATE_SCALAR_UNSAFE, -1, arg0_type, fsig, args); case SN_Equals: case SN_EqualsAll: case SN_EqualsAny: { MonoType *arg_type = get_vector_t_elem_type (fsig->params [0]); if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (arg_type)) return NULL; switch (id) { case SN_Equals: return emit_xcompare (cfg, klass, arg0_type, args [0], args [1]); case SN_EqualsAll: return emit_xequal (cfg, klass, args [0], args [1]); case SN_EqualsAny: { MonoClass *arg_class = mono_class_from_mono_type_internal (fsig->params [0]); MonoInst *cmp_eq = emit_xcompare (cfg, arg_class, arg0_type, args [0], args [1]); MonoInst *zero = emit_xzero (cfg, arg_class); return emit_not_xequal (cfg, arg_class, cmp_eq, zero); } default: g_assert_not_reached (); } } case SN_GetElement: { MonoClass *arg_class = mono_class_from_mono_type_internal (fsig->params [0]); MonoType *etype = mono_class_get_context (arg_class)->class_inst->type_argv [0]; if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (etype)) return NULL; int size = mono_class_value_size (arg_class, NULL); int esize = mono_class_value_size (mono_class_from_mono_type_internal (etype), NULL); int elems = size / esize; MONO_EMIT_NEW_BIALU_IMM (cfg, 
OP_COMPARE_IMM, -1, args [1]->dreg, elems); MONO_EMIT_NEW_COND_EXC (cfg, GE_UN, "ArgumentOutOfRangeException"); int extract_op = type_to_xextract_op (arg0_type); return emit_simd_ins_for_sig (cfg, klass, extract_op, -1, arg0_type, fsig, args); } case SN_GetLower: case SN_GetUpper: { MonoType *arg_type = get_vector_t_elem_type (fsig->params [0]); if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (arg_type)) return NULL; int op = id == SN_GetLower ? OP_XLOWER : OP_XUPPER; return emit_simd_ins_for_sig (cfg, klass, op, 0, arg0_type, fsig, args); } case SN_GreaterThan: case SN_GreaterThanOrEqual: case SN_LessThan: case SN_LessThanOrEqual: { MonoType *arg_type = get_vector_t_elem_type (fsig->params [0]); if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (arg_type)) return NULL; gboolean is_unsigned = type_is_unsigned (fsig->params [0]); MonoInst *ins = emit_xcompare (cfg, klass, arg0_type, args [0], args [1]); switch (id) { case SN_GreaterThan: ins->inst_c0 = is_unsigned ? CMP_GT_UN : CMP_GT; break; case SN_GreaterThanOrEqual: ins->inst_c0 = is_unsigned ? CMP_GE_UN : CMP_GE; break; case SN_LessThan: ins->inst_c0 = is_unsigned ? CMP_LT_UN : CMP_LT; break; case SN_LessThanOrEqual: ins->inst_c0 = is_unsigned ? CMP_LE_UN : CMP_LE; break; default: g_assert_not_reached (); } return ins; } case SN_Negate: case SN_OnesComplement: { #ifdef TARGET_ARM64 int op = id == SN_Negate ? OP_ARM64_XNEG : OP_ARM64_MVN; return emit_simd_ins_for_sig (cfg, klass, op, -1, arg0_type, fsig, args); #else return NULL; #endif } case SN_Sqrt: { #ifdef TARGET_ARM64 if ((arg0_type != MONO_TYPE_R4) && (arg0_type != MONO_TYPE_R8)) return NULL; return emit_simd_ins_for_sig (cfg, klass, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FSQRT, arg0_type, fsig, args); #else return NULL; #endif } case SN_ToScalar: { MonoType *arg_type = get_vector_t_elem_type (fsig->params [0]); if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (arg_type)) return NULL; int extract_op = type_to_extract_op (arg0_type); return emit_simd_ins_for_sig (cfg, klass, extract_op, 0, arg0_type, fsig, args); } case SN_ToVector128: case SN_ToVector128Unsafe: { MonoType *arg_type = get_vector_t_elem_type (fsig->params [0]); if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (arg_type)) return NULL; int op = id == SN_ToVector128 ? OP_XWIDEN : OP_XWIDEN_UNSAFE; return emit_simd_ins_for_sig (cfg, klass, op, 0, arg0_type, fsig, args); } case SN_WithElement: { MonoClass *arg_class = mono_class_from_mono_type_internal (fsig->params [0]); MonoType *etype = mono_class_get_context (arg_class)->class_inst->type_argv [0]; if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (etype)) return NULL; int size = mono_class_value_size (arg_class, NULL); int esize = mono_class_value_size (mono_class_from_mono_type_internal (etype), NULL); int elems = size / esize; MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [1]->dreg, elems); MONO_EMIT_NEW_COND_EXC (cfg, GE_UN, "ArgumentOutOfRangeException"); int insert_op = type_to_xinsert_op (arg0_type); MonoInst *ins = emit_simd_ins (cfg, klass, insert_op, args [0]->dreg, args [2]->dreg); ins->sreg3 = args [1]->dreg; ins->inst_c1 = arg0_type; return ins; } case SN_WithLower: case SN_WithUpper: { MonoType *arg_type = get_vector_t_elem_type (fsig->params [0]); if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (arg_type)) return NULL; int op = id == SN_GetLower ? 
OP_XINSERT_LOWER : OP_XINSERT_UPPER; return emit_simd_ins_for_sig (cfg, klass, op, 0, arg0_type, fsig, args); } default: break; } return NULL; } static guint16 vector64_vector128_t_methods [] = { SN_Equals, SN_get_AllBitsSet, SN_get_Count, SN_get_IsSupported, SN_get_Zero, SN_op_Addition, SN_op_Equality, SN_op_Inequality, SN_op_Subtraction, }; static MonoInst* emit_vector64_vector128_t (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { int id = lookup_intrins (vector64_vector128_t_methods, sizeof (vector64_vector128_t_methods), cmethod); if (id == -1) return NULL; MonoClass *klass = cmethod->klass; MonoType *type = m_class_get_byval_arg (klass); MonoType *etype = mono_class_get_context (klass)->class_inst->type_argv [0]; int size = mono_class_value_size (klass, NULL); int esize = mono_class_value_size (mono_class_from_mono_type_internal (etype), NULL); g_assert (size > 0); g_assert (esize > 0); int len = size / esize; if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (etype)) return NULL; if (cfg->verbose_level > 1) { char *name = mono_method_full_name (cmethod, TRUE); printf (" SIMD intrinsic %s\n", name); g_free (name); } switch (id) { case SN_get_IsSupported: { MonoInst *ins = NULL; EMIT_NEW_ICONST (cfg, ins, 1); return ins; } default: break; } if (!COMPILE_LLVM (cfg)) return NULL; switch (id) { case SN_get_Count: { MonoInst *ins = NULL; if (!(fsig->param_count == 0 && fsig->ret->type == MONO_TYPE_I4)) break; EMIT_NEW_ICONST (cfg, ins, len); return ins; } case SN_get_Zero: { return emit_xzero (cfg, klass); } case SN_get_AllBitsSet: { MonoInst *ins = emit_xzero (cfg, klass); return emit_xcompare (cfg, klass, etype->type, ins, ins); } case SN_Equals: { if (fsig->param_count == 1 && fsig->ret->type == MONO_TYPE_BOOLEAN && mono_metadata_type_equal (fsig->params [0], type)) { int sreg1 = load_simd_vreg (cfg, cmethod, args [0], NULL); return emit_simd_ins (cfg, klass, OP_XEQUAL, sreg1, args [1]->dreg); } break; } case SN_op_Addition: case SN_op_Subtraction: { if (!(fsig->param_count == 2 && mono_metadata_type_equal (fsig->ret, type) && mono_metadata_type_equal (fsig->params [0], type) && mono_metadata_type_equal (fsig->params [1], type))) return NULL; MonoInst *ins = emit_simd_ins (cfg, klass, OP_XBINOP, args [0]->dreg, args [1]->dreg); ins->inst_c1 = etype->type; if (etype->type == MONO_TYPE_R4 || etype->type == MONO_TYPE_R8) ins->inst_c0 = id == SN_op_Addition ? OP_FADD : OP_FSUB; else ins->inst_c0 = id == SN_op_Addition ? 
OP_IADD : OP_ISUB; return ins; } case SN_op_Equality: case SN_op_Inequality: g_assert (fsig->param_count == 2 && fsig->ret->type == MONO_TYPE_BOOLEAN && mono_metadata_type_equal (fsig->params [0], type) && mono_metadata_type_equal (fsig->params [1], type)); switch (id) { case SN_op_Equality: return emit_xequal (cfg, klass, args [0], args [1]); case SN_op_Inequality: return emit_not_xequal (cfg, klass, args [0], args [1]); default: g_assert_not_reached (); } default: break; } return NULL; } #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) #ifdef TARGET_AMD64 static guint16 vector_methods [] = { SN_ConvertToDouble, SN_ConvertToInt32, SN_ConvertToInt64, SN_ConvertToSingle, SN_ConvertToUInt32, SN_ConvertToUInt64, SN_Narrow, SN_Widen, SN_get_IsHardwareAccelerated, }; static MonoInst* emit_sys_numerics_vector (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins; gboolean supported = FALSE; int id; MonoType *etype; id = lookup_intrins (vector_methods, sizeof (vector_methods), cmethod); if (id == -1) return NULL; //printf ("%s\n", mono_method_full_name (cmethod, 1)); #ifdef MONO_ARCH_SIMD_INTRINSICS supported = TRUE; #endif if (cfg->verbose_level > 1) { char *name = mono_method_full_name (cmethod, TRUE); printf (" SIMD intrinsic %s\n", name); g_free (name); } switch (id) { case SN_get_IsHardwareAccelerated: EMIT_NEW_ICONST (cfg, ins, supported ? 1 : 0); ins->type = STACK_I4; return ins; case SN_ConvertToInt32: etype = get_vector_t_elem_type (fsig->params [0]); g_assert (etype->type == MONO_TYPE_R4); return emit_simd_ins (cfg, mono_class_from_mono_type_internal (fsig->ret), OP_CVTPS2DQ, args [0]->dreg, -1); case SN_ConvertToSingle: etype = get_vector_t_elem_type (fsig->params [0]); g_assert (etype->type == MONO_TYPE_I4 || etype->type == MONO_TYPE_U4); // FIXME: if (etype->type == MONO_TYPE_U4) return NULL; return emit_simd_ins (cfg, mono_class_from_mono_type_internal (fsig->ret), OP_CVTDQ2PS, args [0]->dreg, -1); case SN_ConvertToDouble: case SN_ConvertToInt64: case SN_ConvertToUInt32: case SN_ConvertToUInt64: case SN_Narrow: case SN_Widen: // FIXME: break; default: break; } return NULL; } static guint16 vector_t_methods [] = { SN_ctor, SN_CopyTo, SN_Equals, SN_GreaterThan, SN_GreaterThanOrEqual, SN_LessThan, SN_LessThanOrEqual, SN_Max, SN_Min, SN_get_AllBitsSet, SN_get_Count, SN_get_Item, SN_get_One, SN_get_Zero, SN_op_Addition, SN_op_BitwiseAnd, SN_op_BitwiseOr, SN_op_Division, SN_op_Equality, SN_op_ExclusiveOr, SN_op_Explicit, SN_op_Inequality, SN_op_Multiply, SN_op_Subtraction }; static MonoInst* emit_sys_numerics_vector_t (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins; MonoType *type, *etype; MonoClass *klass; int size, len, id; gboolean is_unsigned; static const float r4_one = 1.0f; static const double r8_one = 1.0; id = lookup_intrins (vector_t_methods, sizeof (vector_t_methods), cmethod); if (id == -1) return NULL; klass = cmethod->klass; type = m_class_get_byval_arg (klass); etype = mono_class_get_context (klass)->class_inst->type_argv [0]; size = mono_class_value_size (mono_class_from_mono_type_internal (etype), NULL); g_assert (size); len = register_size / size; if (!MONO_TYPE_IS_PRIMITIVE (etype) || etype->type == MONO_TYPE_CHAR || etype->type == MONO_TYPE_BOOLEAN) return NULL; if (cfg->verbose_level > 1) { char *name = mono_method_full_name (cmethod, TRUE); printf (" SIMD intrinsic %s\n", name); g_free (name); } switch (id) { case SN_get_Count: if (!(fsig->param_count == 0 && 
	     fsig->ret->type == MONO_TYPE_I4))
			break;
		EMIT_NEW_ICONST (cfg, ins, len);
		return ins;
	case SN_get_Zero:
		g_assert (fsig->param_count == 0 && mono_metadata_type_equal (fsig->ret, type));
		return emit_xzero (cfg, klass);
	case SN_get_One: {
		g_assert (fsig->param_count == 0 && mono_metadata_type_equal (fsig->ret, type));
		MonoInst *one = NULL;
		int expand_opcode = type_to_expand_op (etype);
		MONO_INST_NEW (cfg, one, -1);
		switch (expand_opcode) {
		case OP_EXPAND_R4:
			one->opcode = OP_R4CONST;
			one->type = STACK_R4;
			one->inst_p0 = (void *) &r4_one;
			break;
		case OP_EXPAND_R8:
			one->opcode = OP_R8CONST;
			one->type = STACK_R8;
			one->inst_p0 = (void *) &r8_one;
			break;
		default:
			one->opcode = OP_ICONST;
			one->type = STACK_I4;
			one->inst_c0 = 1;
			break;
		}
		one->dreg = alloc_dreg (cfg, (MonoStackType)one->type);
		MONO_ADD_INS (cfg->cbb, one);
		return emit_simd_ins (cfg, klass, expand_opcode, one->dreg, -1);
	}
	case SN_get_AllBitsSet: {
		/* Compare a zero vector with itself */
		ins = emit_xzero (cfg, klass);
		return emit_xcompare (cfg, klass, etype->type, ins, ins);
	}
	case SN_get_Item: {
		if (!COMPILE_LLVM (cfg))
			return NULL;
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [1]->dreg, len);
		MONO_EMIT_NEW_COND_EXC (cfg, GE_UN, "ArgumentOutOfRangeException");
		MonoTypeEnum ty = etype->type;
		int opcode = type_to_xextract_op (ty);
		int src1 = load_simd_vreg (cfg, cmethod, args [0], NULL);
		MonoInst *ins = emit_simd_ins (cfg, klass, opcode, src1, args [1]->dreg);
		ins->inst_c1 = ty;
		return ins;
	}
	case SN_ctor:
		if (fsig->param_count == 1 && mono_metadata_type_equal (fsig->params [0], etype)) {
			int dreg = load_simd_vreg (cfg, cmethod, args [0], NULL);
			int opcode = type_to_expand_op (etype);
			ins = emit_simd_ins (cfg, klass, opcode, args [1]->dreg, -1);
			ins->dreg = dreg;
			return ins;
		}
		if ((fsig->param_count == 1 || fsig->param_count == 2) && (fsig->params [0]->type == MONO_TYPE_SZARRAY)) {
			MonoInst *array_ins = args [1];
			MonoInst *index_ins;
			MonoInst *ldelema_ins;
			MonoInst *var;
			int end_index_reg;
			if (args [0]->opcode != OP_LDADDR)
				return NULL;
			/* .ctor (T[]) or .ctor (T[], index) */
			if (fsig->param_count == 2) {
				index_ins = args [2];
			} else {
				EMIT_NEW_ICONST (cfg, index_ins, 0);
			}
			/* Emit bounds check for the index (index >= 0) */
			mini_emit_bounds_check_offset (cfg, array_ins->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length), index_ins->dreg, "ArgumentOutOfRangeException");
			/* Emit bounds check for the end (index + len - 1 < array length) */
			end_index_reg = alloc_ireg (cfg);
			EMIT_NEW_BIALU_IMM (cfg, ins, OP_IADD_IMM, end_index_reg, index_ins->dreg, len - 1);
			mini_emit_bounds_check_offset (cfg, array_ins->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length), end_index_reg, "ArgumentOutOfRangeException");
			/* Load the array slice into the simd reg */
			ldelema_ins = mini_emit_ldelema_1_ins (cfg, mono_class_from_mono_type_internal (etype), array_ins, index_ins, FALSE, FALSE);
			g_assert (args [0]->opcode == OP_LDADDR);
			var = (MonoInst*)args [0]->inst_p0;
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADX_MEMBASE, var->dreg, ldelema_ins->dreg, 0);
			ins->klass = cmethod->klass;
			return args [0];
		}
		break;
	case SN_CopyTo:
		if ((fsig->param_count == 1 || fsig->param_count == 2) && (fsig->params [0]->type == MONO_TYPE_SZARRAY)) {
			MonoInst *array_ins = args [1];
			MonoInst *index_ins;
			MonoInst *ldelema_ins;
			int val_vreg, end_index_reg;
			val_vreg = load_simd_vreg (cfg, cmethod, args [0], NULL);
			/* CopyTo (T[]) or CopyTo (T[], index) */
			if (fsig->param_count == 2) {
				index_ins = args [2];
			} else {
				EMIT_NEW_ICONST (cfg, index_ins, 0);
			}
			/* CopyTo () does complicated argument checks */
			mini_emit_bounds_check_offset (cfg, array_ins->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length), index_ins->dreg, "ArgumentOutOfRangeException");
			end_index_reg = alloc_ireg (cfg);
			int len_reg = alloc_ireg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP_FLAGS (cfg, OP_LOADI4_MEMBASE, len_reg, array_ins->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length), MONO_INST_INVARIANT_LOAD);
			EMIT_NEW_BIALU (cfg, ins, OP_ISUB, end_index_reg, len_reg, index_ins->dreg);
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, end_index_reg, len);
			MONO_EMIT_NEW_COND_EXC (cfg, LT, "ArgumentException");
			/* Load the array slice into the simd reg */
			ldelema_ins = mini_emit_ldelema_1_ins (cfg, mono_class_from_mono_type_internal (etype), array_ins, index_ins, FALSE, FALSE);
			EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREX_MEMBASE, ldelema_ins->dreg, 0, val_vreg);
			ins->klass = cmethod->klass;
			return ins;
		}
		break;
	case SN_Equals:
		if (fsig->param_count == 1 && fsig->ret->type == MONO_TYPE_BOOLEAN && mono_metadata_type_equal (fsig->params [0], type)) {
			int sreg1 = load_simd_vreg (cfg, cmethod, args [0], NULL);
			return emit_simd_ins (cfg, klass, OP_XEQUAL, sreg1, args [1]->dreg);
		} else if (fsig->param_count == 2 && mono_metadata_type_equal (fsig->ret, type) && mono_metadata_type_equal (fsig->params [0], type) && mono_metadata_type_equal (fsig->params [1], type)) {
			/* Per element equality */
			return emit_xcompare (cfg, klass, etype->type, args [0], args [1]);
		}
		break;
	case SN_op_Equality:
	case SN_op_Inequality:
		g_assert (fsig->param_count == 2 && fsig->ret->type == MONO_TYPE_BOOLEAN &&
			mono_metadata_type_equal (fsig->params [0], type) &&
			mono_metadata_type_equal (fsig->params [1], type));
		switch (id) {
		case SN_op_Equality: return emit_xequal (cfg, klass, args [0], args [1]);
		case SN_op_Inequality: return emit_not_xequal (cfg, klass, args [0], args [1]);
		default: g_assert_not_reached ();
		}
	case SN_GreaterThan:
	case SN_GreaterThanOrEqual:
	case SN_LessThan:
	case SN_LessThanOrEqual:
		g_assert (fsig->param_count == 2 && mono_metadata_type_equal (fsig->ret, type) && mono_metadata_type_equal (fsig->params [0], type) && mono_metadata_type_equal (fsig->params [1], type));
		is_unsigned = etype->type == MONO_TYPE_U1 || etype->type == MONO_TYPE_U2 || etype->type == MONO_TYPE_U4 || etype->type == MONO_TYPE_U8 || etype->type == MONO_TYPE_U;
		ins = emit_xcompare (cfg, klass, etype->type, args [0], args [1]);
		switch (id) {
		case SN_GreaterThan:
			ins->inst_c0 = is_unsigned ? CMP_GT_UN : CMP_GT;
			break;
		case SN_GreaterThanOrEqual:
			ins->inst_c0 = is_unsigned ? CMP_GE_UN : CMP_GE;
			break;
		case SN_LessThan:
			ins->inst_c0 = is_unsigned ? CMP_LT_UN : CMP_LT;
			break;
		case SN_LessThanOrEqual:
			ins->inst_c0 = is_unsigned ? CMP_LE_UN : CMP_LE;
			break;
		default:
			g_assert_not_reached ();
		}
		return ins;
	case SN_op_Explicit:
		return emit_simd_ins (cfg, klass, OP_XCAST, args [0]->dreg, -1);
	case SN_op_Addition:
	case SN_op_Subtraction:
	case SN_op_Division:
	case SN_op_Multiply:
	case SN_op_BitwiseAnd:
	case SN_op_BitwiseOr:
	case SN_op_ExclusiveOr:
	case SN_Max:
	case SN_Min:
		if (!(fsig->param_count == 2 && mono_metadata_type_equal (fsig->ret, type) && mono_metadata_type_equal (fsig->params [0], type) && mono_metadata_type_equal (fsig->params [1], type)))
			return NULL;
		ins = emit_simd_ins (cfg, klass, OP_XBINOP, args [0]->dreg, args [1]->dreg);
		ins->inst_c1 = etype->type;
		if (etype->type == MONO_TYPE_R4 || etype->type == MONO_TYPE_R8) {
			switch (id) {
			case SN_op_Addition: ins->inst_c0 = OP_FADD; break;
			case SN_op_Subtraction: ins->inst_c0 = OP_FSUB; break;
			case SN_op_Multiply: ins->inst_c0 = OP_FMUL; break;
			case SN_op_Division: ins->inst_c0 = OP_FDIV; break;
			case SN_Max: ins->inst_c0 = OP_FMAX; break;
			case SN_Min: ins->inst_c0 = OP_FMIN; break;
			default: NULLIFY_INS (ins); return NULL;
			}
		} else {
			switch (id) {
			case SN_op_Addition: ins->inst_c0 = OP_IADD; break;
			case SN_op_Subtraction: ins->inst_c0 = OP_ISUB; break;
			/*
			case SN_op_Division: ins->inst_c0 = OP_IDIV; break;
			case SN_op_Multiply: ins->inst_c0 = OP_IMUL; break;
			*/
			case SN_op_BitwiseAnd: ins->inst_c0 = OP_IAND; break;
			case SN_op_BitwiseOr: ins->inst_c0 = OP_IOR; break;
			case SN_op_ExclusiveOr: ins->inst_c0 = OP_IXOR; break;
			case SN_Max: ins->inst_c0 = OP_IMAX; break;
			case SN_Min: ins->inst_c0 = OP_IMIN; break;
			default: NULLIFY_INS (ins); return NULL;
			}
		}
		return ins;
	default:
		break;
	}
	return NULL;
}
#endif // TARGET_AMD64

#ifdef TARGET_ARM64

static SimdIntrinsic armbase_methods [] = {
	{SN_LeadingSignCount},
	{SN_LeadingZeroCount},
	{SN_MultiplyHigh},
	{SN_ReverseElementBits},
	{SN_get_IsSupported},
};

static SimdIntrinsic crc32_methods [] = {
	{SN_ComputeCrc32},
	{SN_ComputeCrc32C},
	{SN_get_IsSupported}
};

static SimdIntrinsic crypto_aes_methods [] = {
	{SN_Decrypt, OP_XOP_X_X_X, INTRINS_AARCH64_AESD},
	{SN_Encrypt, OP_XOP_X_X_X, INTRINS_AARCH64_AESE},
	{SN_InverseMixColumns, OP_XOP_X_X, INTRINS_AARCH64_AESIMC},
	{SN_MixColumns, OP_XOP_X_X, INTRINS_AARCH64_AESMC},
	{SN_PolynomialMultiplyWideningLower},
	{SN_PolynomialMultiplyWideningUpper},
	{SN_get_IsSupported},
};

static SimdIntrinsic sha1_methods [] = {
	{SN_FixedRotate, OP_XOP_X_X, INTRINS_AARCH64_SHA1H},
	{SN_HashUpdateChoose, OP_XOP_X_X_X_X, INTRINS_AARCH64_SHA1C},
	{SN_HashUpdateMajority, OP_XOP_X_X_X_X, INTRINS_AARCH64_SHA1M},
	{SN_HashUpdateParity, OP_XOP_X_X_X_X, INTRINS_AARCH64_SHA1P},
	{SN_ScheduleUpdate0, OP_XOP_X_X_X_X, INTRINS_AARCH64_SHA1SU0},
	{SN_ScheduleUpdate1, OP_XOP_X_X_X, INTRINS_AARCH64_SHA1SU1},
	{SN_get_IsSupported}
};

static SimdIntrinsic sha256_methods [] = {
	{SN_HashUpdate1, OP_XOP_X_X_X_X, INTRINS_AARCH64_SHA256H},
	{SN_HashUpdate2, OP_XOP_X_X_X_X, INTRINS_AARCH64_SHA256H2},
	{SN_ScheduleUpdate0, OP_XOP_X_X_X, INTRINS_AARCH64_SHA256SU0},
	{SN_ScheduleUpdate1, OP_XOP_X_X_X_X, INTRINS_AARCH64_SHA256SU1},
	{SN_get_IsSupported}
};

// This table must be kept in sorted order. ASCII } is sorted after alphanumeric
// characters, so blind use of your editor's "sort lines" facility will
// mis-order the lines.
//
// In Vim you can use `sort /.*{[0-9A-z]*/ r` to sort this table.
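/*
 * Layout note (inferred from how the generic dispatcher consumes these rows,
 * so treat it as a reading aid rather than a spec): each SimdIntrinsic entry is
 *   {id, default op, default c0, unsigned op, unsigned c0, float op, float c0},
 * with omitted trailing slots defaulting to 0/None. Entries that carry only an
 * id fall through to the hand-written dispatch in emit_arm64_intrinsics ()
 * below.
 */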
static SimdIntrinsic advsimd_methods [] = { {SN_Abs, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_ABS, None, None, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FABS}, {SN_AbsSaturate, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_SQABS}, {SN_AbsSaturateScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_SQABS}, {SN_AbsScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_ABS, None, None, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FABS}, {SN_AbsoluteCompareGreaterThan}, {SN_AbsoluteCompareGreaterThanOrEqual}, {SN_AbsoluteCompareGreaterThanOrEqualScalar}, {SN_AbsoluteCompareGreaterThanScalar}, {SN_AbsoluteCompareLessThan}, {SN_AbsoluteCompareLessThanOrEqual}, {SN_AbsoluteCompareLessThanOrEqualScalar}, {SN_AbsoluteCompareLessThanScalar}, {SN_AbsoluteDifference, OP_ARM64_SABD, None, OP_ARM64_UABD, None, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FABD}, {SN_AbsoluteDifferenceAdd, OP_ARM64_SABA, None, OP_ARM64_UABA}, {SN_AbsoluteDifferenceScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FABD_SCALAR}, {SN_AbsoluteDifferenceWideningLower, OP_ARM64_SABDL, None, OP_ARM64_UABDL}, {SN_AbsoluteDifferenceWideningLowerAndAdd, OP_ARM64_SABAL, None, OP_ARM64_UABAL}, {SN_AbsoluteDifferenceWideningUpper, OP_ARM64_SABDL2, None, OP_ARM64_UABDL2}, {SN_AbsoluteDifferenceWideningUpperAndAdd, OP_ARM64_SABAL2, None, OP_ARM64_UABAL2}, {SN_Add, OP_XBINOP, OP_IADD, None, None, OP_XBINOP, OP_FADD}, {SN_AddAcross, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_SADDV, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_UADDV}, {SN_AddAcrossWidening, OP_ARM64_SADDLV, None, OP_ARM64_UADDLV}, {SN_AddHighNarrowingLower, OP_ARM64_ADDHN}, {SN_AddHighNarrowingUpper, OP_ARM64_ADDHN2}, {SN_AddPairwise, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_ADDP, None, None, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FADDP}, {SN_AddPairwiseScalar, OP_ARM64_ADDP_SCALAR, None, None, None, OP_ARM64_FADDP_SCALAR}, {SN_AddPairwiseWidening, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_SADDLP, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_UADDLP}, {SN_AddPairwiseWideningAndAdd, OP_ARM64_SADALP, None, OP_ARM64_UADALP}, {SN_AddPairwiseWideningAndAddScalar, OP_ARM64_SADALP, None, OP_ARM64_UADALP}, {SN_AddPairwiseWideningScalar, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_SADDLP, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_UADDLP}, {SN_AddRoundedHighNarrowingLower, OP_ARM64_RADDHN}, {SN_AddRoundedHighNarrowingUpper, OP_ARM64_RADDHN2}, {SN_AddSaturate}, {SN_AddSaturateScalar}, {SN_AddScalar, OP_XBINOP_SCALAR, OP_IADD, None, None, OP_XBINOP_SCALAR, OP_FADD}, {SN_AddWideningLower, OP_ARM64_SADD, None, OP_ARM64_UADD}, {SN_AddWideningUpper, OP_ARM64_SADD2, None, OP_ARM64_UADD2}, {SN_And, OP_XBINOP_FORCEINT, XBINOP_FORCEINT_and}, {SN_BitwiseClear, OP_ARM64_BIC}, {SN_BitwiseSelect, OP_ARM64_BSL}, {SN_Ceiling, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTP}, {SN_CeilingScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTP}, {SN_CompareEqual, OP_XCOMPARE, CMP_EQ, OP_XCOMPARE, CMP_EQ, OP_XCOMPARE_FP, CMP_EQ}, {SN_CompareEqualScalar, OP_XCOMPARE_SCALAR, CMP_EQ, OP_XCOMPARE_SCALAR, CMP_EQ, OP_XCOMPARE_FP_SCALAR, CMP_EQ}, {SN_CompareGreaterThan, OP_XCOMPARE, CMP_GT, OP_XCOMPARE, CMP_GT_UN, OP_XCOMPARE_FP, CMP_GT}, {SN_CompareGreaterThanOrEqual, OP_XCOMPARE, CMP_GE, OP_XCOMPARE, CMP_GE_UN, OP_XCOMPARE_FP, CMP_GE}, {SN_CompareGreaterThanOrEqualScalar, OP_XCOMPARE_SCALAR, CMP_GE, OP_XCOMPARE_SCALAR, CMP_GE_UN, OP_XCOMPARE_FP_SCALAR, CMP_GE}, {SN_CompareGreaterThanScalar, OP_XCOMPARE_SCALAR, CMP_GT, OP_XCOMPARE_SCALAR, CMP_GT_UN, OP_XCOMPARE_FP_SCALAR, CMP_GT}, {SN_CompareLessThan, 
OP_XCOMPARE, CMP_LT, OP_XCOMPARE, CMP_LT_UN, OP_XCOMPARE_FP, CMP_LT}, {SN_CompareLessThanOrEqual, OP_XCOMPARE, CMP_LE, OP_XCOMPARE, CMP_LE_UN, OP_XCOMPARE_FP, CMP_LE}, {SN_CompareLessThanOrEqualScalar, OP_XCOMPARE_SCALAR, CMP_LE, OP_XCOMPARE_SCALAR, CMP_LE_UN, OP_XCOMPARE_FP_SCALAR, CMP_LE}, {SN_CompareLessThanScalar, OP_XCOMPARE_SCALAR, CMP_LT, OP_XCOMPARE_SCALAR, CMP_LT_UN, OP_XCOMPARE_FP_SCALAR, CMP_LT}, {SN_CompareTest, OP_ARM64_CMTST}, {SN_CompareTestScalar, OP_ARM64_CMTST}, {SN_ConvertToDouble, OP_ARM64_SCVTF, None, OP_ARM64_UCVTF, None, OP_ARM64_FCVTL}, {SN_ConvertToDoubleScalar, OP_ARM64_SCVTF_SCALAR, None, OP_ARM64_UCVTF_SCALAR}, {SN_ConvertToDoubleUpper, OP_ARM64_FCVTL2}, {SN_ConvertToInt32RoundAwayFromZero, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAS}, {SN_ConvertToInt32RoundAwayFromZeroScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAS}, {SN_ConvertToInt32RoundToEven, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNS}, {SN_ConvertToInt32RoundToEvenScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNS}, {SN_ConvertToInt32RoundToNegativeInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMS}, {SN_ConvertToInt32RoundToNegativeInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMS}, {SN_ConvertToInt32RoundToPositiveInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPS}, {SN_ConvertToInt32RoundToPositiveInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPS}, {SN_ConvertToInt32RoundToZero, OP_ARM64_FCVTZS}, {SN_ConvertToInt32RoundToZeroScalar, OP_ARM64_FCVTZS_SCALAR}, {SN_ConvertToInt64RoundAwayFromZero, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAS}, {SN_ConvertToInt64RoundAwayFromZeroScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAS}, {SN_ConvertToInt64RoundToEven, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNS}, {SN_ConvertToInt64RoundToEvenScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNS}, {SN_ConvertToInt64RoundToNegativeInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMS}, {SN_ConvertToInt64RoundToNegativeInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMS}, {SN_ConvertToInt64RoundToPositiveInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPS}, {SN_ConvertToInt64RoundToPositiveInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPS}, {SN_ConvertToInt64RoundToZero, OP_ARM64_FCVTZS}, {SN_ConvertToInt64RoundToZeroScalar, OP_ARM64_FCVTZS_SCALAR}, {SN_ConvertToSingle, OP_ARM64_SCVTF, None, OP_ARM64_UCVTF}, {SN_ConvertToSingleLower, OP_ARM64_FCVTN}, {SN_ConvertToSingleRoundToOddLower, OP_ARM64_FCVTXN}, {SN_ConvertToSingleRoundToOddUpper, OP_ARM64_FCVTXN2}, {SN_ConvertToSingleScalar, OP_ARM64_SCVTF_SCALAR, None, OP_ARM64_UCVTF_SCALAR}, {SN_ConvertToSingleUpper, OP_ARM64_FCVTN2}, {SN_ConvertToUInt32RoundAwayFromZero, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAU}, {SN_ConvertToUInt32RoundAwayFromZeroScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAU}, {SN_ConvertToUInt32RoundToEven, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNU}, {SN_ConvertToUInt32RoundToEvenScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNU}, {SN_ConvertToUInt32RoundToNegativeInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMU}, {SN_ConvertToUInt32RoundToNegativeInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMU}, {SN_ConvertToUInt32RoundToPositiveInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPU}, {SN_ConvertToUInt32RoundToPositiveInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPU}, 
{SN_ConvertToUInt32RoundToZero, OP_ARM64_FCVTZU}, {SN_ConvertToUInt32RoundToZeroScalar, OP_ARM64_FCVTZU_SCALAR}, {SN_ConvertToUInt64RoundAwayFromZero, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAU}, {SN_ConvertToUInt64RoundAwayFromZeroScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAU}, {SN_ConvertToUInt64RoundToEven, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNU}, {SN_ConvertToUInt64RoundToEvenScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNU}, {SN_ConvertToUInt64RoundToNegativeInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMU}, {SN_ConvertToUInt64RoundToNegativeInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMU}, {SN_ConvertToUInt64RoundToPositiveInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPU}, {SN_ConvertToUInt64RoundToPositiveInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPU}, {SN_ConvertToUInt64RoundToZero, OP_ARM64_FCVTZU}, {SN_ConvertToUInt64RoundToZeroScalar, OP_ARM64_FCVTZU_SCALAR}, {SN_Divide, OP_XBINOP, OP_FDIV}, {SN_DivideScalar, OP_XBINOP_SCALAR, OP_FDIV}, {SN_DuplicateSelectedScalarToVector128}, {SN_DuplicateSelectedScalarToVector64}, {SN_DuplicateToVector128}, {SN_DuplicateToVector64}, {SN_Extract}, {SN_ExtractNarrowingLower, OP_ARM64_XTN}, {SN_ExtractNarrowingSaturateLower, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_SQXTN, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_UQXTN}, {SN_ExtractNarrowingSaturateScalar, OP_ARM64_XNARROW_SCALAR, INTRINS_AARCH64_ADV_SIMD_SQXTN, OP_ARM64_XNARROW_SCALAR, INTRINS_AARCH64_ADV_SIMD_UQXTN}, {SN_ExtractNarrowingSaturateUnsignedLower, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_SQXTUN}, {SN_ExtractNarrowingSaturateUnsignedScalar, OP_ARM64_XNARROW_SCALAR, INTRINS_AARCH64_ADV_SIMD_SQXTUN}, {SN_ExtractNarrowingSaturateUnsignedUpper, OP_ARM64_SQXTUN2}, {SN_ExtractNarrowingSaturateUpper, OP_ARM64_SQXTN2, None, OP_ARM64_UQXTN2}, {SN_ExtractNarrowingUpper, OP_ARM64_XTN2}, {SN_ExtractVector128, OP_ARM64_EXT}, {SN_ExtractVector64, OP_ARM64_EXT}, {SN_Floor, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTM}, {SN_FloorScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTM}, {SN_FusedAddHalving, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SHADD, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UHADD}, {SN_FusedAddRoundedHalving, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SRHADD, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_URHADD}, {SN_FusedMultiplyAdd, OP_ARM64_FMADD}, {SN_FusedMultiplyAddByScalar, OP_ARM64_FMADD_BYSCALAR}, {SN_FusedMultiplyAddBySelectedScalar}, {SN_FusedMultiplyAddNegatedScalar, OP_ARM64_FNMADD_SCALAR}, {SN_FusedMultiplyAddScalar, OP_ARM64_FMADD_SCALAR}, {SN_FusedMultiplyAddScalarBySelectedScalar}, {SN_FusedMultiplySubtract, OP_ARM64_FMSUB}, {SN_FusedMultiplySubtractByScalar, OP_ARM64_FMSUB_BYSCALAR}, {SN_FusedMultiplySubtractBySelectedScalar}, {SN_FusedMultiplySubtractNegatedScalar, OP_ARM64_FNMSUB_SCALAR}, {SN_FusedMultiplySubtractScalar, OP_ARM64_FMSUB_SCALAR}, {SN_FusedMultiplySubtractScalarBySelectedScalar}, {SN_FusedSubtractHalving, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SHSUB, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UHSUB}, {SN_Insert}, {SN_InsertScalar}, {SN_InsertSelectedScalar}, {SN_LeadingSignCount, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_CLS}, {SN_LeadingZeroCount, OP_ARM64_CLZ}, {SN_LoadAndInsertScalar, OP_ARM64_LD1_INSERT}, {SN_LoadAndReplicateToVector128, OP_ARM64_LD1R}, {SN_LoadAndReplicateToVector64, OP_ARM64_LD1R}, {SN_LoadPairScalarVector64, OP_ARM64_LDP_SCALAR}, {SN_LoadPairScalarVector64NonTemporal, OP_ARM64_LDNP_SCALAR}, 
{SN_LoadPairVector128, OP_ARM64_LDP}, {SN_LoadPairVector128NonTemporal, OP_ARM64_LDNP}, {SN_LoadPairVector64, OP_ARM64_LDP}, {SN_LoadPairVector64NonTemporal, OP_ARM64_LDNP}, {SN_LoadVector128, OP_ARM64_LD1}, {SN_LoadVector64, OP_ARM64_LD1}, {SN_Max, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SMAX, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UMAX, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMAX}, {SN_MaxAcross, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_SMAXV, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_UMAXV, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMAXV}, {SN_MaxNumber, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMAXNM}, {SN_MaxNumberAcross, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMAXNMV}, {SN_MaxNumberPairwise, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMAXNMP}, {SN_MaxNumberPairwiseScalar, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMAXNMV}, {SN_MaxNumberScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMAXNM}, {SN_MaxPairwise, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SMAXP, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UMAXP, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMAXP}, {SN_MaxPairwiseScalar, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMAXV}, {SN_MaxScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMAX}, {SN_Min, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SMIN, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UMIN, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMIN}, {SN_MinAcross, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_SMINV, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_UMINV, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMINV}, {SN_MinNumber, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMINNM}, {SN_MinNumberAcross, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMINNMV}, {SN_MinNumberPairwise, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMINNMP}, {SN_MinNumberPairwiseScalar, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMINNMV}, {SN_MinNumberScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMINNM}, {SN_MinPairwise, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SMINP, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UMINP, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMINP}, {SN_MinPairwiseScalar, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMINV}, {SN_MinScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMIN}, {SN_Multiply, OP_XBINOP, OP_IMUL, None, None, OP_XBINOP, OP_FMUL}, {SN_MultiplyAdd, OP_ARM64_MLA}, {SN_MultiplyAddByScalar, OP_ARM64_MLA_SCALAR}, {SN_MultiplyAddBySelectedScalar}, {SN_MultiplyByScalar, OP_XBINOP_BYSCALAR, OP_IMUL, None, None, OP_XBINOP_BYSCALAR, OP_FMUL}, {SN_MultiplyBySelectedScalar}, {SN_MultiplyBySelectedScalarWideningLower}, {SN_MultiplyBySelectedScalarWideningLowerAndAdd}, {SN_MultiplyBySelectedScalarWideningLowerAndSubtract}, {SN_MultiplyBySelectedScalarWideningUpper}, {SN_MultiplyBySelectedScalarWideningUpperAndAdd}, {SN_MultiplyBySelectedScalarWideningUpperAndSubtract}, {SN_MultiplyDoublingByScalarSaturateHigh, OP_XOP_OVR_BYSCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQDMULH}, {SN_MultiplyDoublingBySelectedScalarSaturateHigh}, {SN_MultiplyDoublingSaturateHigh, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQDMULH}, {SN_MultiplyDoublingSaturateHighScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQDMULH}, {SN_MultiplyDoublingScalarBySelectedScalarSaturateHigh}, {SN_MultiplyDoublingWideningAndAddSaturateScalar, OP_ARM64_SQDMLAL_SCALAR}, {SN_MultiplyDoublingWideningAndSubtractSaturateScalar, OP_ARM64_SQDMLSL_SCALAR}, {SN_MultiplyDoublingWideningLowerAndAddSaturate, OP_ARM64_SQDMLAL}, {SN_MultiplyDoublingWideningLowerAndSubtractSaturate, 
OP_ARM64_SQDMLSL}, {SN_MultiplyDoublingWideningLowerByScalarAndAddSaturate, OP_ARM64_SQDMLAL_BYSCALAR}, {SN_MultiplyDoublingWideningLowerByScalarAndSubtractSaturate, OP_ARM64_SQDMLSL_BYSCALAR}, {SN_MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate}, {SN_MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate}, {SN_MultiplyDoublingWideningSaturateLower, OP_ARM64_SQDMULL}, {SN_MultiplyDoublingWideningSaturateLowerByScalar, OP_ARM64_SQDMULL_BYSCALAR}, {SN_MultiplyDoublingWideningSaturateLowerBySelectedScalar}, {SN_MultiplyDoublingWideningSaturateScalar, OP_ARM64_SQDMULL_SCALAR}, {SN_MultiplyDoublingWideningSaturateScalarBySelectedScalar}, {SN_MultiplyDoublingWideningSaturateUpper, OP_ARM64_SQDMULL2}, {SN_MultiplyDoublingWideningSaturateUpperByScalar, OP_ARM64_SQDMULL2_BYSCALAR}, {SN_MultiplyDoublingWideningSaturateUpperBySelectedScalar}, {SN_MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate}, {SN_MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate}, {SN_MultiplyDoublingWideningUpperAndAddSaturate, OP_ARM64_SQDMLAL2}, {SN_MultiplyDoublingWideningUpperAndSubtractSaturate, OP_ARM64_SQDMLSL2}, {SN_MultiplyDoublingWideningUpperByScalarAndAddSaturate, OP_ARM64_SQDMLAL2_BYSCALAR}, {SN_MultiplyDoublingWideningUpperByScalarAndSubtractSaturate, OP_ARM64_SQDMLSL2_BYSCALAR}, {SN_MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate}, {SN_MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate}, {SN_MultiplyExtended, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMULX}, {SN_MultiplyExtendedByScalar, OP_XOP_OVR_BYSCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMULX}, {SN_MultiplyExtendedBySelectedScalar}, {SN_MultiplyExtendedScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMULX}, {SN_MultiplyExtendedScalarBySelectedScalar}, {SN_MultiplyRoundedDoublingByScalarSaturateHigh, OP_XOP_OVR_BYSCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQRDMULH}, {SN_MultiplyRoundedDoublingBySelectedScalarSaturateHigh}, {SN_MultiplyRoundedDoublingSaturateHigh, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQRDMULH}, {SN_MultiplyRoundedDoublingSaturateHighScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQRDMULH}, {SN_MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh}, {SN_MultiplyScalar, OP_XBINOP_SCALAR, OP_FMUL}, {SN_MultiplyScalarBySelectedScalar, OP_ARM64_FMUL_SEL}, {SN_MultiplySubtract, OP_ARM64_MLS}, {SN_MultiplySubtractByScalar, OP_ARM64_MLS_SCALAR}, {SN_MultiplySubtractBySelectedScalar}, {SN_MultiplyWideningLower, OP_ARM64_SMULL, None, OP_ARM64_UMULL}, {SN_MultiplyWideningLowerAndAdd, OP_ARM64_SMLAL, None, OP_ARM64_UMLAL}, {SN_MultiplyWideningLowerAndSubtract, OP_ARM64_SMLSL, None, OP_ARM64_UMLSL}, {SN_MultiplyWideningUpper, OP_ARM64_SMULL2, None, OP_ARM64_UMULL2}, {SN_MultiplyWideningUpperAndAdd, OP_ARM64_SMLAL2, None, OP_ARM64_UMLAL2}, {SN_MultiplyWideningUpperAndSubtract, OP_ARM64_SMLSL2, None, OP_ARM64_UMLSL2}, {SN_Negate, OP_ARM64_XNEG}, {SN_NegateSaturate, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_SQNEG}, {SN_NegateSaturateScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_SQNEG}, {SN_NegateScalar, OP_ARM64_XNEG_SCALAR}, {SN_Not, OP_ARM64_MVN}, {SN_Or, OP_XBINOP_FORCEINT, XBINOP_FORCEINT_or}, {SN_OrNot, OP_XBINOP_FORCEINT, XBINOP_FORCEINT_ornot}, {SN_PolynomialMultiply, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_PMUL}, {SN_PolynomialMultiplyWideningLower, OP_ARM64_PMULL}, {SN_PolynomialMultiplyWideningUpper, OP_ARM64_PMULL2}, {SN_PopCount, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_CNT}, {SN_ReciprocalEstimate, None, None, OP_XOP_OVR_X_X, 
INTRINS_AARCH64_ADV_SIMD_URECPE, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRECPE}, {SN_ReciprocalEstimateScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRECPE}, {SN_ReciprocalExponentScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRECPX}, {SN_ReciprocalSquareRootEstimate, None, None, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_URSQRTE, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRSQRTE}, {SN_ReciprocalSquareRootEstimateScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRSQRTE}, {SN_ReciprocalSquareRootStep, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FRSQRTS}, {SN_ReciprocalSquareRootStepScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FRSQRTS}, {SN_ReciprocalStep, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FRECPS}, {SN_ReciprocalStepScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FRECPS}, {SN_ReverseElement16, OP_ARM64_REVN, 16}, {SN_ReverseElement32, OP_ARM64_REVN, 32}, {SN_ReverseElement8, OP_ARM64_REVN, 8}, {SN_ReverseElementBits, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_RBIT}, {SN_RoundAwayFromZero, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTA}, {SN_RoundAwayFromZeroScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTA}, {SN_RoundToNearest, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTN}, {SN_RoundToNearestScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTN}, {SN_RoundToNegativeInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTM}, {SN_RoundToNegativeInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTM}, {SN_RoundToPositiveInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTP}, {SN_RoundToPositiveInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTP}, {SN_RoundToZero, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTZ}, {SN_RoundToZeroScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTZ}, {SN_ShiftArithmetic, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SSHL}, {SN_ShiftArithmeticRounded, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SRSHL}, {SN_ShiftArithmeticRoundedSaturate, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQRSHL}, {SN_ShiftArithmeticRoundedSaturateScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQRSHL}, {SN_ShiftArithmeticRoundedScalar, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SRSHL}, {SN_ShiftArithmeticSaturate, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQSHL}, {SN_ShiftArithmeticSaturateScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQSHL}, {SN_ShiftArithmeticScalar, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SSHL}, {SN_ShiftLeftAndInsert, OP_ARM64_SLI}, {SN_ShiftLeftAndInsertScalar, OP_ARM64_SLI}, {SN_ShiftLeftLogical, OP_ARM64_SHL}, {SN_ShiftLeftLogicalSaturate}, {SN_ShiftLeftLogicalSaturateScalar}, {SN_ShiftLeftLogicalSaturateUnsigned, OP_ARM64_SQSHLU}, {SN_ShiftLeftLogicalSaturateUnsignedScalar, OP_ARM64_SQSHLU_SCALAR}, {SN_ShiftLeftLogicalScalar, OP_ARM64_SHL}, {SN_ShiftLeftLogicalWideningLower, OP_ARM64_SSHLL, None, OP_ARM64_USHLL}, {SN_ShiftLeftLogicalWideningUpper, OP_ARM64_SSHLL2, None, OP_ARM64_USHLL2}, {SN_ShiftLogical, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_USHL}, {SN_ShiftLogicalRounded, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_URSHL}, {SN_ShiftLogicalRoundedSaturate, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UQRSHL}, {SN_ShiftLogicalRoundedSaturateScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UQRSHL}, {SN_ShiftLogicalRoundedScalar, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_URSHL}, {SN_ShiftLogicalSaturate, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UQSHL}, {SN_ShiftLogicalSaturateScalar, 
OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UQSHL}, {SN_ShiftLogicalScalar, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_USHL}, {SN_ShiftRightAndInsert, OP_ARM64_SRI}, {SN_ShiftRightAndInsertScalar, OP_ARM64_SRI}, {SN_ShiftRightArithmetic, OP_ARM64_SSHR}, {SN_ShiftRightArithmeticAdd, OP_ARM64_SSRA}, {SN_ShiftRightArithmeticAddScalar, OP_ARM64_SSRA}, {SN_ShiftRightArithmeticNarrowingSaturateLower, OP_ARM64_XNSHIFT, INTRINS_AARCH64_ADV_SIMD_SQSHRN}, {SN_ShiftRightArithmeticNarrowingSaturateScalar, OP_ARM64_XNSHIFT_SCALAR, INTRINS_AARCH64_ADV_SIMD_SQSHRN}, {SN_ShiftRightArithmeticNarrowingSaturateUnsignedLower, OP_ARM64_XNSHIFT, INTRINS_AARCH64_ADV_SIMD_SQSHRUN}, {SN_ShiftRightArithmeticNarrowingSaturateUnsignedScalar, OP_ARM64_XNSHIFT_SCALAR, INTRINS_AARCH64_ADV_SIMD_SQSHRUN}, {SN_ShiftRightArithmeticNarrowingSaturateUnsignedUpper, OP_ARM64_XNSHIFT2, INTRINS_AARCH64_ADV_SIMD_SQSHRUN}, {SN_ShiftRightArithmeticNarrowingSaturateUpper, OP_ARM64_XNSHIFT2, INTRINS_AARCH64_ADV_SIMD_SQSHRN}, {SN_ShiftRightArithmeticRounded, OP_ARM64_SRSHR}, {SN_ShiftRightArithmeticRoundedAdd, OP_ARM64_SRSRA}, {SN_ShiftRightArithmeticRoundedAddScalar, OP_ARM64_SRSRA}, {SN_ShiftRightArithmeticRoundedNarrowingSaturateLower, OP_ARM64_XNSHIFT, INTRINS_AARCH64_ADV_SIMD_SQRSHRN}, {SN_ShiftRightArithmeticRoundedNarrowingSaturateScalar, OP_ARM64_XNSHIFT_SCALAR, INTRINS_AARCH64_ADV_SIMD_SQRSHRN}, {SN_ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower, OP_ARM64_XNSHIFT, INTRINS_AARCH64_ADV_SIMD_SQRSHRUN}, {SN_ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar, OP_ARM64_XNSHIFT_SCALAR, INTRINS_AARCH64_ADV_SIMD_SQRSHRUN}, {SN_ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper, OP_ARM64_XNSHIFT2, INTRINS_AARCH64_ADV_SIMD_SQRSHRUN}, {SN_ShiftRightArithmeticRoundedNarrowingSaturateUpper, OP_ARM64_XNSHIFT2, INTRINS_AARCH64_ADV_SIMD_SQRSHRN}, {SN_ShiftRightArithmeticRoundedScalar, OP_ARM64_SRSHR}, {SN_ShiftRightArithmeticScalar, OP_ARM64_SSHR}, {SN_ShiftRightLogical, OP_ARM64_USHR}, {SN_ShiftRightLogicalAdd, OP_ARM64_USRA}, {SN_ShiftRightLogicalAddScalar, OP_ARM64_USRA}, {SN_ShiftRightLogicalNarrowingLower, OP_ARM64_SHRN}, {SN_ShiftRightLogicalNarrowingSaturateLower, OP_ARM64_XNSHIFT, INTRINS_AARCH64_ADV_SIMD_UQSHRN}, {SN_ShiftRightLogicalNarrowingSaturateScalar, OP_ARM64_XNSHIFT_SCALAR, INTRINS_AARCH64_ADV_SIMD_UQSHRN}, {SN_ShiftRightLogicalNarrowingSaturateUpper, OP_ARM64_XNSHIFT2, INTRINS_AARCH64_ADV_SIMD_UQSHRN}, {SN_ShiftRightLogicalNarrowingUpper, OP_ARM64_SHRN2}, {SN_ShiftRightLogicalRounded, OP_ARM64_URSHR}, {SN_ShiftRightLogicalRoundedAdd, OP_ARM64_URSRA}, {SN_ShiftRightLogicalRoundedAddScalar, OP_ARM64_URSRA}, {SN_ShiftRightLogicalRoundedNarrowingLower, OP_ARM64_XNSHIFT, INTRINS_AARCH64_ADV_SIMD_RSHRN}, {SN_ShiftRightLogicalRoundedNarrowingSaturateLower, OP_ARM64_XNSHIFT, INTRINS_AARCH64_ADV_SIMD_UQRSHRN}, {SN_ShiftRightLogicalRoundedNarrowingSaturateScalar, OP_ARM64_XNSHIFT_SCALAR, INTRINS_AARCH64_ADV_SIMD_UQRSHRN}, {SN_ShiftRightLogicalRoundedNarrowingSaturateUpper, OP_ARM64_XNSHIFT2, INTRINS_AARCH64_ADV_SIMD_UQRSHRN}, {SN_ShiftRightLogicalRoundedNarrowingUpper, OP_ARM64_XNSHIFT2, INTRINS_AARCH64_ADV_SIMD_RSHRN}, {SN_ShiftRightLogicalRoundedScalar, OP_ARM64_URSHR}, {SN_ShiftRightLogicalScalar, OP_ARM64_USHR}, {SN_SignExtendWideningLower, OP_ARM64_SXTL}, {SN_SignExtendWideningUpper, OP_ARM64_SXTL2}, {SN_Sqrt, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FSQRT}, {SN_SqrtScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FSQRT}, {SN_Store, OP_ARM64_ST1}, {SN_StorePair, OP_ARM64_STP}, 
	{SN_StorePairNonTemporal, OP_ARM64_STNP},
	{SN_StorePairScalar, OP_ARM64_STP_SCALAR},
	{SN_StorePairScalarNonTemporal, OP_ARM64_STNP_SCALAR},
	{SN_StoreSelectedScalar, OP_ARM64_ST1_SCALAR},
	{SN_Subtract, OP_XBINOP, OP_ISUB, None, None, OP_XBINOP, OP_FSUB},
	{SN_SubtractHighNarrowingLower, OP_ARM64_SUBHN},
	{SN_SubtractHighNarrowingUpper, OP_ARM64_SUBHN2},
	{SN_SubtractRoundedHighNarrowingLower, OP_ARM64_RSUBHN},
	{SN_SubtractRoundedHighNarrowingUpper, OP_ARM64_RSUBHN2},
	{SN_SubtractSaturate, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQSUB, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UQSUB},
	{SN_SubtractSaturateScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQSUB, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UQSUB},
	{SN_SubtractScalar, OP_XBINOP_SCALAR, OP_ISUB, None, None, OP_XBINOP_SCALAR, OP_FSUB},
	{SN_SubtractWideningLower, OP_ARM64_SSUB, None, OP_ARM64_USUB},
	{SN_SubtractWideningUpper, OP_ARM64_SSUB2, None, OP_ARM64_USUB2},
	{SN_TransposeEven, OP_ARM64_TRN1},
	{SN_TransposeOdd, OP_ARM64_TRN2},
	{SN_UnzipEven, OP_ARM64_UZP1},
	{SN_UnzipOdd, OP_ARM64_UZP2},
	{SN_VectorTableLookup, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_TBL1},
	{SN_VectorTableLookupExtension, OP_XOP_OVR_X_X_X_X, INTRINS_AARCH64_ADV_SIMD_TBX1},
	{SN_Xor, OP_XBINOP_FORCEINT, XBINOP_FORCEINT_xor},
	{SN_ZeroExtendWideningLower, OP_ARM64_UXTL},
	{SN_ZeroExtendWideningUpper, OP_ARM64_UXTL2},
	{SN_ZipHigh, OP_ARM64_ZIP2},
	{SN_ZipLow, OP_ARM64_ZIP1},
	{SN_get_IsSupported},
};

static const SimdIntrinsic rdm_methods [] = {
	{SN_MultiplyRoundedDoublingAndAddSaturateHigh, OP_ARM64_SQRDMLAH},
	{SN_MultiplyRoundedDoublingAndAddSaturateHighScalar, OP_ARM64_SQRDMLAH_SCALAR},
	{SN_MultiplyRoundedDoublingAndSubtractSaturateHigh, OP_ARM64_SQRDMLSH},
	{SN_MultiplyRoundedDoublingAndSubtractSaturateHighScalar, OP_ARM64_SQRDMLSH_SCALAR},
	{SN_MultiplyRoundedDoublingBySelectedScalarAndAddSaturateHigh},
	{SN_MultiplyRoundedDoublingBySelectedScalarAndSubtractSaturateHigh},
	{SN_MultiplyRoundedDoublingScalarBySelectedScalarAndAddSaturateHigh},
	{SN_MultiplyRoundedDoublingScalarBySelectedScalarAndSubtractSaturateHigh},
	{SN_get_IsSupported},
};

static const SimdIntrinsic dp_methods [] = {
	{SN_DotProduct, OP_XOP_OVR_X_X_X_X, INTRINS_AARCH64_ADV_SIMD_SDOT, OP_XOP_OVR_X_X_X_X, INTRINS_AARCH64_ADV_SIMD_UDOT},
	{SN_DotProductBySelectedQuadruplet},
	{SN_get_IsSupported},
};

static const IntrinGroup supported_arm_intrinsics [] = {
	{ "AdvSimd", MONO_CPU_ARM64_NEON, advsimd_methods, sizeof (advsimd_methods) },
	{ "Aes", MONO_CPU_ARM64_CRYPTO, crypto_aes_methods, sizeof (crypto_aes_methods) },
	{ "ArmBase", MONO_CPU_ARM64_BASE, armbase_methods, sizeof (armbase_methods) },
	{ "Crc32", MONO_CPU_ARM64_CRC, crc32_methods, sizeof (crc32_methods) },
	{ "Dp", MONO_CPU_ARM64_DP, dp_methods, sizeof (dp_methods) },
	{ "Rdm", MONO_CPU_ARM64_RDM, rdm_methods, sizeof (rdm_methods) },
	{ "Sha1", MONO_CPU_ARM64_CRYPTO, sha1_methods, sizeof (sha1_methods) },
	{ "Sha256", MONO_CPU_ARM64_CRYPTO, sha256_methods, sizeof (sha256_methods) },
};

static MonoInst*
emit_arm64_intrinsics (
	MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args,
	MonoClass *klass, const IntrinGroup *intrin_group,
	const SimdIntrinsic *info, int id, MonoTypeEnum arg0_type,
	gboolean is_64bit)
{
	MonoCPUFeatures feature = intrin_group->feature;

	gboolean arg0_i32 = (arg0_type == MONO_TYPE_I4) || (arg0_type == MONO_TYPE_U4);
#if TARGET_SIZEOF_VOID_P == 4
	arg0_i32 = arg0_i32 || (arg0_type == MONO_TYPE_I) || (arg0_type == MONO_TYPE_U);
#endif

	if (feature == MONO_CPU_ARM64_BASE) {
		switch (id) {
		case SN_LeadingZeroCount:
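			/* Scalar ArmBase helpers: the 32-bit vs 64-bit opcode is picked from
			   the argument width computed above (arg0_i32 also covers native
			   int/uint on 32-bit targets). */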
			return emit_simd_ins_for_sig (cfg, klass, arg0_i32 ? OP_LZCNT32 : OP_LZCNT64, 0, arg0_type, fsig, args);
		case SN_LeadingSignCount:
			return emit_simd_ins_for_sig (cfg, klass, arg0_i32 ? OP_LSCNT32 : OP_LSCNT64, 0, arg0_type, fsig, args);
		case SN_MultiplyHigh:
			return emit_simd_ins_for_sig (cfg, klass,
				(arg0_type == MONO_TYPE_I8 ? OP_ARM64_SMULH : OP_ARM64_UMULH), 0, arg0_type, fsig, args);
		case SN_ReverseElementBits:
			return emit_simd_ins_for_sig (cfg, klass,
				(is_64bit ? OP_XOP_I8_I8 : OP_XOP_I4_I4),
				(is_64bit ? INTRINS_BITREVERSE_I64 : INTRINS_BITREVERSE_I32),
				arg0_type, fsig, args);
		default:
			g_assert_not_reached (); // if a new API is added we need to either implement it or change IsSupported to false
		}
	}

	if (feature == MONO_CPU_ARM64_CRC) {
		switch (id) {
		case SN_ComputeCrc32:
		case SN_ComputeCrc32C: {
			IntrinsicId op = (IntrinsicId)0;
			gboolean is_c = info->id == SN_ComputeCrc32C;
			switch (get_underlying_type (fsig->params [1])) {
			case MONO_TYPE_U1: op = is_c ? INTRINS_AARCH64_CRC32CB : INTRINS_AARCH64_CRC32B; break;
			case MONO_TYPE_U2: op = is_c ? INTRINS_AARCH64_CRC32CH : INTRINS_AARCH64_CRC32H; break;
			case MONO_TYPE_U4: op = is_c ? INTRINS_AARCH64_CRC32CW : INTRINS_AARCH64_CRC32W; break;
			case MONO_TYPE_U8: op = is_c ? INTRINS_AARCH64_CRC32CX : INTRINS_AARCH64_CRC32X; break;
			default: g_assert_not_reached (); break;
			}
			return emit_simd_ins_for_sig (cfg, klass, is_64bit ? OP_XOP_I4_I4_I8 : OP_XOP_I4_I4_I4, op, arg0_type, fsig, args);
		}
		default:
			g_assert_not_reached (); // if a new API is added we need to either implement it or change IsSupported to false
		}
	}

	if (feature == MONO_CPU_ARM64_NEON) {
		switch (id) {
		case SN_AbsoluteCompareGreaterThan:
		case SN_AbsoluteCompareGreaterThanOrEqual:
		case SN_AbsoluteCompareLessThan:
		case SN_AbsoluteCompareLessThanOrEqual:
		case SN_AbsoluteCompareGreaterThanScalar:
		case SN_AbsoluteCompareGreaterThanOrEqualScalar:
		case SN_AbsoluteCompareLessThanScalar:
		case SN_AbsoluteCompareLessThanOrEqualScalar: {
			gboolean reverse_args = FALSE;
			gboolean use_geq = FALSE;
			gboolean scalar = FALSE;
			MonoInst *cmp_args [] = { args [0], args [1] };
			switch (id) {
			case SN_AbsoluteCompareGreaterThanScalar: scalar = TRUE;
			case SN_AbsoluteCompareGreaterThan: break;
			case SN_AbsoluteCompareGreaterThanOrEqualScalar: scalar = TRUE;
			case SN_AbsoluteCompareGreaterThanOrEqual: use_geq = TRUE; break;
			case SN_AbsoluteCompareLessThanScalar: scalar = TRUE;
			case SN_AbsoluteCompareLessThan: reverse_args = TRUE; break;
			case SN_AbsoluteCompareLessThanOrEqualScalar: scalar = TRUE;
			case SN_AbsoluteCompareLessThanOrEqual: reverse_args = TRUE; use_geq = TRUE; break;
			}
			if (reverse_args) {
				cmp_args [0] = args [1];
				cmp_args [1] = args [0];
			}
			int iid = use_geq ? INTRINS_AARCH64_ADV_SIMD_FACGE : INTRINS_AARCH64_ADV_SIMD_FACGT;
			return emit_simd_ins_for_sig (cfg, klass, OP_ARM64_ABSCOMPARE, iid, scalar, fsig, cmp_args);
		}
		case SN_AddSaturate:
		case SN_AddSaturateScalar: {
			gboolean arg0_unsigned = type_is_unsigned (fsig->params [0]);
			gboolean arg1_unsigned = type_is_unsigned (fsig->params [1]);
			int iid = 0;
			if (arg0_unsigned && arg1_unsigned)
				iid = INTRINS_AARCH64_ADV_SIMD_UQADD;
			else if (arg0_unsigned && !arg1_unsigned)
				iid = INTRINS_AARCH64_ADV_SIMD_USQADD;
			else if (!arg0_unsigned && arg1_unsigned)
				iid = INTRINS_AARCH64_ADV_SIMD_SUQADD;
			else
				iid = INTRINS_AARCH64_ADV_SIMD_SQADD;
			int op = id == SN_AddSaturateScalar ? OP_XOP_OVR_SCALAR_X_X_X : OP_XOP_OVR_X_X_X;
			return emit_simd_ins_for_sig (cfg, klass, op, iid, arg0_type, fsig, args);
		}
		case SN_DuplicateSelectedScalarToVector128:
		case SN_DuplicateSelectedScalarToVector64:
		case SN_DuplicateToVector64:
		case SN_DuplicateToVector128: {
			MonoClass *ret_klass = mono_class_from_mono_type_internal (fsig->ret);
			MonoType *rtype = get_vector_t_elem_type (fsig->ret);
			int scalar_src_reg = args [0]->dreg;
			switch (id) {
			case SN_DuplicateSelectedScalarToVector128:
			case SN_DuplicateSelectedScalarToVector64: {
				MonoInst *ins = emit_simd_ins (cfg, ret_klass, type_to_xextract_op (rtype->type), args [0]->dreg, args [1]->dreg);
				ins->inst_c1 = arg0_type;
				scalar_src_reg = ins->dreg;
				break;
			}
			}
			return emit_simd_ins (cfg, ret_klass, type_to_expand_op (rtype), scalar_src_reg, -1);
		}
		case SN_Extract: {
			int extract_op = type_to_xextract_op (arg0_type);
			MonoInst *ins = emit_simd_ins (cfg, klass, extract_op, args [0]->dreg, args [1]->dreg);
			ins->inst_c1 = arg0_type;
			return ins;
		}
		case SN_InsertSelectedScalar:
		case SN_InsertScalar:
		case SN_Insert: {
			MonoClass *ret_klass = mono_class_from_mono_type_internal (fsig->ret);
			int insert_op = 0;
			int extract_op = 0;
			switch (arg0_type) {
			case MONO_TYPE_I1: case MONO_TYPE_U1: insert_op = OP_XINSERT_I1; extract_op = OP_EXTRACT_I1; break;
			case MONO_TYPE_I2: case MONO_TYPE_U2: insert_op = OP_XINSERT_I2; extract_op = OP_EXTRACT_I2; break;
			case MONO_TYPE_I4: case MONO_TYPE_U4: insert_op = OP_XINSERT_I4; extract_op = OP_EXTRACT_I4; break;
			case MONO_TYPE_I8: case MONO_TYPE_U8: insert_op = OP_XINSERT_I8; extract_op = OP_EXTRACT_I8; break;
			case MONO_TYPE_R4: insert_op = OP_XINSERT_R4; extract_op = OP_EXTRACT_R4; break;
			case MONO_TYPE_R8: insert_op = OP_XINSERT_R8; extract_op = OP_EXTRACT_R8; break;
			case MONO_TYPE_I: case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
				insert_op = OP_XINSERT_I8; extract_op = OP_EXTRACT_I8;
#else
				insert_op = OP_XINSERT_I4; extract_op = OP_EXTRACT_I4;
#endif
				break;
			default: g_assert_not_reached ();
			}
			int val_src_reg = args [2]->dreg;
			switch (id) {
			case SN_InsertSelectedScalar: {
				MonoInst *scalar = emit_simd_ins (cfg, klass, OP_ARM64_SELECT_SCALAR, args [2]->dreg, args [3]->dreg);
				val_src_reg = scalar->dreg;
				// fallthrough
			}
			case SN_InsertScalar: {
				MonoInst *ins = emit_simd_ins (cfg, klass, extract_op, val_src_reg, -1);
				ins->inst_c0 = 0;
				ins->inst_c1 = arg0_type;
				val_src_reg = ins->dreg;
				break;
			}
			}
			MonoInst *ins = emit_simd_ins (cfg, ret_klass, insert_op, args [0]->dreg, val_src_reg);
			ins->sreg3 = args [1]->dreg;
			ins->inst_c1 = arg0_type;
			return ins;
		}
		case SN_ShiftLeftLogicalSaturate:
		case SN_ShiftLeftLogicalSaturateScalar: {
			MonoClass *ret_klass = mono_class_from_mono_type_internal (fsig->ret);
			MonoType *etype = get_vector_t_elem_type (fsig->ret);
			gboolean is_unsigned = type_is_unsigned (fsig->ret);
			gboolean scalar = id == SN_ShiftLeftLogicalSaturateScalar;
			int s2v = scalar ? OP_CREATE_SCALAR_UNSAFE : type_to_expand_op (etype);
			int xop = scalar ? OP_XOP_OVR_SCALAR_X_X_X : OP_XOP_OVR_X_X_X;
			int iid = is_unsigned ?
INTRINS_AARCH64_ADV_SIMD_UQSHL : INTRINS_AARCH64_ADV_SIMD_SQSHL; MonoInst *shift_vector = emit_simd_ins (cfg, ret_klass, s2v, args [1]->dreg, -1); shift_vector->inst_c1 = etype->type; MonoInst *ret = emit_simd_ins (cfg, ret_klass, xop, args [0]->dreg, shift_vector->dreg); ret->inst_c0 = iid; ret->inst_c1 = etype->type; return ret; } case SN_MultiplyRoundedDoublingBySelectedScalarSaturateHigh: case SN_MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh: case SN_MultiplyDoublingScalarBySelectedScalarSaturateHigh: case SN_MultiplyDoublingWideningSaturateScalarBySelectedScalar: case SN_MultiplyExtendedBySelectedScalar: case SN_MultiplyExtendedScalarBySelectedScalar: case SN_MultiplyBySelectedScalar: case SN_MultiplyBySelectedScalarWideningLower: case SN_MultiplyBySelectedScalarWideningUpper: case SN_MultiplyDoublingBySelectedScalarSaturateHigh: case SN_MultiplyDoublingWideningSaturateLowerBySelectedScalar: case SN_MultiplyDoublingWideningSaturateUpperBySelectedScalar: { MonoClass *ret_klass = mono_class_from_mono_type_internal (fsig->ret); gboolean is_unsigned = type_is_unsigned (fsig->ret); gboolean is_float = type_is_float (fsig->ret); int opcode = 0; int c0 = 0; switch (id) { case SN_MultiplyRoundedDoublingBySelectedScalarSaturateHigh: opcode = OP_XOP_OVR_BYSCALAR_X_X_X; c0 = INTRINS_AARCH64_ADV_SIMD_SQRDMULH; break; case SN_MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh: opcode = OP_XOP_OVR_SCALAR_X_X_X; c0 = INTRINS_AARCH64_ADV_SIMD_SQRDMULH; break; case SN_MultiplyDoublingScalarBySelectedScalarSaturateHigh: opcode = OP_XOP_OVR_SCALAR_X_X_X; c0 = INTRINS_AARCH64_ADV_SIMD_SQDMULH; break; case SN_MultiplyDoublingWideningSaturateScalarBySelectedScalar: opcode = OP_ARM64_SQDMULL_SCALAR; break; case SN_MultiplyExtendedBySelectedScalar: opcode = OP_XOP_OVR_BYSCALAR_X_X_X; c0 = INTRINS_AARCH64_ADV_SIMD_FMULX; break; case SN_MultiplyExtendedScalarBySelectedScalar: opcode = OP_XOP_OVR_SCALAR_X_X_X; c0 = INTRINS_AARCH64_ADV_SIMD_FMULX; break; case SN_MultiplyBySelectedScalar: opcode = OP_XBINOP_BYSCALAR; c0 = OP_IMUL; break; case SN_MultiplyBySelectedScalarWideningLower: opcode = OP_ARM64_SMULL_SCALAR; break; case SN_MultiplyBySelectedScalarWideningUpper: opcode = OP_ARM64_SMULL2_SCALAR; break; case SN_MultiplyDoublingBySelectedScalarSaturateHigh: opcode = OP_XOP_OVR_BYSCALAR_X_X_X; c0 = INTRINS_AARCH64_ADV_SIMD_SQDMULH; break; case SN_MultiplyDoublingWideningSaturateLowerBySelectedScalar: opcode = OP_ARM64_SQDMULL_BYSCALAR; break; case SN_MultiplyDoublingWideningSaturateUpperBySelectedScalar: opcode = OP_ARM64_SQDMULL2_BYSCALAR; break; default: g_assert_not_reached(); } if (is_unsigned) switch (opcode) { case OP_ARM64_SMULL_SCALAR: opcode = OP_ARM64_UMULL_SCALAR; break; case OP_ARM64_SMULL2_SCALAR: opcode = OP_ARM64_UMULL2_SCALAR; break; } if (is_float) switch (opcode) { case OP_XBINOP_BYSCALAR: c0 = OP_FMUL; } MonoInst *scalar = emit_simd_ins (cfg, ret_klass, OP_ARM64_SELECT_SCALAR, args [1]->dreg, args [2]->dreg); MonoInst *ret = emit_simd_ins (cfg, ret_klass, opcode, args [0]->dreg, scalar->dreg); ret->inst_c0 = c0; ret->inst_c1 = arg0_type; return ret; } case SN_FusedMultiplyAddBySelectedScalar: case SN_FusedMultiplyAddScalarBySelectedScalar: case SN_FusedMultiplySubtractBySelectedScalar: case SN_FusedMultiplySubtractScalarBySelectedScalar: case SN_MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate: case SN_MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate: case SN_MultiplyAddBySelectedScalar: case SN_MultiplySubtractBySelectedScalar: case 
SN_MultiplyBySelectedScalarWideningLowerAndAdd: case SN_MultiplyBySelectedScalarWideningLowerAndSubtract: case SN_MultiplyBySelectedScalarWideningUpperAndAdd: case SN_MultiplyBySelectedScalarWideningUpperAndSubtract: case SN_MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate: case SN_MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate: case SN_MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate: case SN_MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate: { MonoClass *ret_klass = mono_class_from_mono_type_internal (fsig->ret); gboolean is_unsigned = type_is_unsigned (fsig->ret); int opcode = 0; switch (id) { case SN_FusedMultiplyAddBySelectedScalar: opcode = OP_ARM64_FMADD_BYSCALAR; break; case SN_FusedMultiplyAddScalarBySelectedScalar: opcode = OP_ARM64_FMADD_SCALAR; break; case SN_FusedMultiplySubtractBySelectedScalar: opcode = OP_ARM64_FMSUB_BYSCALAR; break; case SN_FusedMultiplySubtractScalarBySelectedScalar: opcode = OP_ARM64_FMSUB_SCALAR; break; case SN_MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate: opcode = OP_ARM64_SQDMLAL_SCALAR; break; case SN_MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate: opcode = OP_ARM64_SQDMLSL_SCALAR; break; case SN_MultiplyAddBySelectedScalar: opcode = OP_ARM64_MLA_SCALAR; break; case SN_MultiplySubtractBySelectedScalar: opcode = OP_ARM64_MLS_SCALAR; break; case SN_MultiplyBySelectedScalarWideningLowerAndAdd: opcode = OP_ARM64_SMLAL_SCALAR; break; case SN_MultiplyBySelectedScalarWideningLowerAndSubtract: opcode = OP_ARM64_SMLSL_SCALAR; break; case SN_MultiplyBySelectedScalarWideningUpperAndAdd: opcode = OP_ARM64_SMLAL2_SCALAR; break; case SN_MultiplyBySelectedScalarWideningUpperAndSubtract: opcode = OP_ARM64_SMLSL2_SCALAR; break; case SN_MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate: opcode = OP_ARM64_SQDMLAL_BYSCALAR; break; case SN_MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate: opcode = OP_ARM64_SQDMLSL_BYSCALAR; break; case SN_MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate: opcode = OP_ARM64_SQDMLAL2_BYSCALAR; break; case SN_MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate: opcode = OP_ARM64_SQDMLSL2_BYSCALAR; break; default: g_assert_not_reached(); } if (is_unsigned) switch (opcode) { case OP_ARM64_SMLAL_SCALAR: opcode = OP_ARM64_UMLAL_SCALAR; break; case OP_ARM64_SMLSL_SCALAR: opcode = OP_ARM64_UMLSL_SCALAR; break; case OP_ARM64_SMLAL2_SCALAR: opcode = OP_ARM64_UMLAL2_SCALAR; break; case OP_ARM64_SMLSL2_SCALAR: opcode = OP_ARM64_UMLSL2_SCALAR; break; } MonoInst *scalar = emit_simd_ins (cfg, ret_klass, OP_ARM64_SELECT_SCALAR, args [2]->dreg, args [3]->dreg); MonoInst *ret = emit_simd_ins (cfg, ret_klass, opcode, args [0]->dreg, args [1]->dreg); ret->sreg3 = scalar->dreg; return ret; } default: g_assert_not_reached (); } } if (feature == MONO_CPU_ARM64_CRYPTO) { switch (id) { case SN_PolynomialMultiplyWideningLower: return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_AARCH64_PMULL64, 0, fsig, args); case SN_PolynomialMultiplyWideningUpper: return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_AARCH64_PMULL64, 1, fsig, args); default: g_assert_not_reached (); } } if (feature == MONO_CPU_ARM64_RDM) { switch (id) { case SN_MultiplyRoundedDoublingBySelectedScalarAndAddSaturateHigh: case SN_MultiplyRoundedDoublingBySelectedScalarAndSubtractSaturateHigh: case SN_MultiplyRoundedDoublingScalarBySelectedScalarAndAddSaturateHigh: case 
		     SN_MultiplyRoundedDoublingScalarBySelectedScalarAndSubtractSaturateHigh: {
			MonoClass *ret_klass = mono_class_from_mono_type_internal (fsig->ret);
			int opcode = 0;
			switch (id) {
			case SN_MultiplyRoundedDoublingBySelectedScalarAndAddSaturateHigh: opcode = OP_ARM64_SQRDMLAH_BYSCALAR; break;
			case SN_MultiplyRoundedDoublingBySelectedScalarAndSubtractSaturateHigh: opcode = OP_ARM64_SQRDMLSH_BYSCALAR; break;
			case SN_MultiplyRoundedDoublingScalarBySelectedScalarAndAddSaturateHigh: opcode = OP_ARM64_SQRDMLAH_SCALAR; break;
			case SN_MultiplyRoundedDoublingScalarBySelectedScalarAndSubtractSaturateHigh: opcode = OP_ARM64_SQRDMLSH_SCALAR; break;
			}
			MonoInst *scalar = emit_simd_ins (cfg, ret_klass, OP_ARM64_SELECT_SCALAR, args [2]->dreg, args [3]->dreg);
			MonoInst *ret = emit_simd_ins (cfg, ret_klass, opcode, args [0]->dreg, args [1]->dreg);
			ret->inst_c1 = arg0_type;
			ret->sreg3 = scalar->dreg;
			return ret;
		}
		default:
			g_assert_not_reached ();
		}
	}

	if (feature == MONO_CPU_ARM64_DP) {
		switch (id) {
		case SN_DotProductBySelectedQuadruplet: {
			MonoClass *ret_klass = mono_class_from_mono_type_internal (fsig->ret);
			MonoClass *arg_klass = mono_class_from_mono_type_internal (fsig->params [1]);
			MonoClass *quad_klass = mono_class_from_mono_type_internal (fsig->params [2]);
			gboolean is_unsigned = type_is_unsigned (fsig->ret);
			int iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UDOT : INTRINS_AARCH64_ADV_SIMD_SDOT;
			MonoInst *quad = emit_simd_ins (cfg, arg_klass, OP_ARM64_SELECT_QUAD, args [2]->dreg, args [3]->dreg);
			quad->data.op [1].klass = quad_klass;
			MonoInst *ret = emit_simd_ins (cfg, ret_klass, OP_XOP_OVR_X_X_X_X, args [0]->dreg, args [1]->dreg);
			ret->sreg3 = quad->dreg;
			ret->inst_c0 = iid;
			return ret;
		}
		default:
			g_assert_not_reached ();
		}
	}

	return NULL;
}
#endif // TARGET_ARM64

#ifdef TARGET_AMD64

static SimdIntrinsic sse_methods [] = {
	{SN_Add, OP_XBINOP, OP_FADD},
	{SN_AddScalar, OP_SSE_ADDSS},
	{SN_And, OP_SSE_AND},
	{SN_AndNot, OP_SSE_ANDN},
	{SN_CompareEqual, OP_XCOMPARE_FP, CMP_EQ},
	{SN_CompareGreaterThan, OP_XCOMPARE_FP, CMP_GT},
	{SN_CompareGreaterThanOrEqual, OP_XCOMPARE_FP, CMP_GE},
	{SN_CompareLessThan, OP_XCOMPARE_FP, CMP_LT},
	{SN_CompareLessThanOrEqual, OP_XCOMPARE_FP, CMP_LE},
	{SN_CompareNotEqual, OP_XCOMPARE_FP, CMP_NE},
	{SN_CompareNotGreaterThan, OP_XCOMPARE_FP, CMP_LE_UN},
	{SN_CompareNotGreaterThanOrEqual, OP_XCOMPARE_FP, CMP_LT_UN},
	{SN_CompareNotLessThan, OP_XCOMPARE_FP, CMP_GE_UN},
	{SN_CompareNotLessThanOrEqual, OP_XCOMPARE_FP, CMP_GT_UN},
	{SN_CompareOrdered, OP_XCOMPARE_FP, CMP_ORD},
	{SN_CompareScalarEqual, OP_SSE_CMPSS, CMP_EQ},
	{SN_CompareScalarGreaterThan, OP_SSE_CMPSS, CMP_GT},
	{SN_CompareScalarGreaterThanOrEqual, OP_SSE_CMPSS, CMP_GE},
	{SN_CompareScalarLessThan, OP_SSE_CMPSS, CMP_LT},
	{SN_CompareScalarLessThanOrEqual, OP_SSE_CMPSS, CMP_LE},
	{SN_CompareScalarNotEqual, OP_SSE_CMPSS, CMP_NE},
	{SN_CompareScalarNotGreaterThan, OP_SSE_CMPSS, CMP_LE_UN},
	{SN_CompareScalarNotGreaterThanOrEqual, OP_SSE_CMPSS, CMP_LT_UN},
	{SN_CompareScalarNotLessThan, OP_SSE_CMPSS, CMP_GE_UN},
	{SN_CompareScalarNotLessThanOrEqual, OP_SSE_CMPSS, CMP_GT_UN},
	{SN_CompareScalarOrdered, OP_SSE_CMPSS, CMP_ORD},
	{SN_CompareScalarOrderedEqual, OP_SSE_COMISS, CMP_EQ},
	{SN_CompareScalarOrderedGreaterThan, OP_SSE_COMISS, CMP_GT},
	{SN_CompareScalarOrderedGreaterThanOrEqual, OP_SSE_COMISS, CMP_GE},
	{SN_CompareScalarOrderedLessThan, OP_SSE_COMISS, CMP_LT},
	{SN_CompareScalarOrderedLessThanOrEqual, OP_SSE_COMISS, CMP_LE},
	{SN_CompareScalarOrderedNotEqual, OP_SSE_COMISS, CMP_NE},
	{SN_CompareScalarUnordered, OP_SSE_CMPSS, CMP_UNORD},
{SN_CompareScalarUnorderedEqual, OP_SSE_UCOMISS, CMP_EQ}, {SN_CompareScalarUnorderedGreaterThan, OP_SSE_UCOMISS, CMP_GT}, {SN_CompareScalarUnorderedGreaterThanOrEqual, OP_SSE_UCOMISS, CMP_GE}, {SN_CompareScalarUnorderedLessThan, OP_SSE_UCOMISS, CMP_LT}, {SN_CompareScalarUnorderedLessThanOrEqual, OP_SSE_UCOMISS, CMP_LE}, {SN_CompareScalarUnorderedNotEqual, OP_SSE_UCOMISS, CMP_NE}, {SN_CompareUnordered, OP_XCOMPARE_FP, CMP_UNORD}, {SN_ConvertScalarToVector128Single}, {SN_ConvertToInt32, OP_XOP_I4_X, INTRINS_SSE_CVTSS2SI}, {SN_ConvertToInt32WithTruncation, OP_XOP_I4_X, INTRINS_SSE_CVTTSS2SI}, {SN_ConvertToInt64, OP_XOP_I8_X, INTRINS_SSE_CVTSS2SI64}, {SN_ConvertToInt64WithTruncation, OP_XOP_I8_X, INTRINS_SSE_CVTTSS2SI64}, {SN_Divide, OP_XBINOP, OP_FDIV}, {SN_DivideScalar, OP_SSE_DIVSS}, {SN_LoadAlignedVector128, OP_SSE_LOADU, 16 /* alignment */}, {SN_LoadHigh, OP_SSE_MOVHPS_LOAD}, {SN_LoadLow, OP_SSE_MOVLPS_LOAD}, {SN_LoadScalarVector128, OP_SSE_MOVSS}, {SN_LoadVector128, OP_SSE_LOADU, 1 /* alignment */}, {SN_Max, OP_XOP_X_X_X, INTRINS_SSE_MAXPS}, {SN_MaxScalar, OP_XOP_X_X_X, INTRINS_SSE_MAXSS}, {SN_Min, OP_XOP_X_X_X, INTRINS_SSE_MINPS}, {SN_MinScalar, OP_XOP_X_X_X, INTRINS_SSE_MINSS}, {SN_MoveHighToLow, OP_SSE_MOVEHL}, {SN_MoveLowToHigh, OP_SSE_MOVELH}, {SN_MoveMask, OP_SSE_MOVMSK}, {SN_MoveScalar, OP_SSE_MOVS2}, {SN_Multiply, OP_XBINOP, OP_FMUL}, {SN_MultiplyScalar, OP_SSE_MULSS}, {SN_Or, OP_SSE_OR}, {SN_Prefetch0, OP_SSE_PREFETCHT0}, {SN_Prefetch1, OP_SSE_PREFETCHT1}, {SN_Prefetch2, OP_SSE_PREFETCHT2}, {SN_PrefetchNonTemporal, OP_SSE_PREFETCHNTA}, {SN_Reciprocal, OP_XOP_X_X, INTRINS_SSE_RCP_PS}, {SN_ReciprocalScalar}, {SN_ReciprocalSqrt, OP_XOP_X_X, INTRINS_SSE_RSQRT_PS}, {SN_ReciprocalSqrtScalar}, {SN_Shuffle}, {SN_Sqrt, OP_XOP_X_X, INTRINS_SSE_SQRT_PS}, {SN_SqrtScalar}, {SN_Store, OP_SSE_STORE, 1 /* alignment */}, {SN_StoreAligned, OP_SSE_STORE, 16 /* alignment */}, {SN_StoreAlignedNonTemporal, OP_SSE_MOVNTPS, 16 /* alignment */}, {SN_StoreFence, OP_XOP, INTRINS_SSE_SFENCE}, {SN_StoreHigh, OP_SSE_MOVHPS_STORE}, {SN_StoreLow, OP_SSE_MOVLPS_STORE}, {SN_StoreScalar, OP_SSE_MOVSS_STORE}, {SN_Subtract, OP_XBINOP, OP_FSUB}, {SN_SubtractScalar, OP_SSE_SUBSS}, {SN_UnpackHigh, OP_SSE_UNPACKHI}, {SN_UnpackLow, OP_SSE_UNPACKLO}, {SN_Xor, OP_SSE_XOR}, {SN_get_IsSupported} }; static SimdIntrinsic sse2_methods [] = { {SN_Add}, {SN_AddSaturate, OP_SSE2_ADDS}, {SN_AddScalar, OP_SSE2_ADDSD}, {SN_And, OP_SSE_AND}, {SN_AndNot, OP_SSE_ANDN}, {SN_Average}, {SN_CompareEqual}, {SN_CompareGreaterThan}, {SN_CompareGreaterThanOrEqual, OP_XCOMPARE_FP, CMP_GE}, {SN_CompareLessThan}, {SN_CompareLessThanOrEqual, OP_XCOMPARE_FP, CMP_LE}, {SN_CompareNotEqual, OP_XCOMPARE_FP, CMP_NE}, {SN_CompareNotGreaterThan, OP_XCOMPARE_FP, CMP_LE_UN}, {SN_CompareNotGreaterThanOrEqual, OP_XCOMPARE_FP, CMP_LT_UN}, {SN_CompareNotLessThan, OP_XCOMPARE_FP, CMP_GE_UN}, {SN_CompareNotLessThanOrEqual, OP_XCOMPARE_FP, CMP_GT_UN}, {SN_CompareOrdered, OP_XCOMPARE_FP, CMP_ORD}, {SN_CompareScalarEqual, OP_SSE2_CMPSD, CMP_EQ}, {SN_CompareScalarGreaterThan, OP_SSE2_CMPSD, CMP_GT}, {SN_CompareScalarGreaterThanOrEqual, OP_SSE2_CMPSD, CMP_GE}, {SN_CompareScalarLessThan, OP_SSE2_CMPSD, CMP_LT}, {SN_CompareScalarLessThanOrEqual, OP_SSE2_CMPSD, CMP_LE}, {SN_CompareScalarNotEqual, OP_SSE2_CMPSD, CMP_NE}, {SN_CompareScalarNotGreaterThan, OP_SSE2_CMPSD, CMP_LE_UN}, {SN_CompareScalarNotGreaterThanOrEqual, OP_SSE2_CMPSD, CMP_LT_UN}, {SN_CompareScalarNotLessThan, OP_SSE2_CMPSD, CMP_GE_UN}, {SN_CompareScalarNotLessThanOrEqual, OP_SSE2_CMPSD, CMP_GT_UN}, 
	{SN_CompareScalarOrdered, OP_SSE2_CMPSD, CMP_ORD},
	{SN_CompareScalarOrderedEqual, OP_SSE2_COMISD, CMP_EQ},
	{SN_CompareScalarOrderedGreaterThan, OP_SSE2_COMISD, CMP_GT},
	{SN_CompareScalarOrderedGreaterThanOrEqual, OP_SSE2_COMISD, CMP_GE},
	{SN_CompareScalarOrderedLessThan, OP_SSE2_COMISD, CMP_LT},
	{SN_CompareScalarOrderedLessThanOrEqual, OP_SSE2_COMISD, CMP_LE},
	{SN_CompareScalarOrderedNotEqual, OP_SSE2_COMISD, CMP_NE},
	{SN_CompareScalarUnordered, OP_SSE2_CMPSD, CMP_UNORD},
	{SN_CompareScalarUnorderedEqual, OP_SSE2_UCOMISD, CMP_EQ},
	{SN_CompareScalarUnorderedGreaterThan, OP_SSE2_UCOMISD, CMP_GT},
	{SN_CompareScalarUnorderedGreaterThanOrEqual, OP_SSE2_UCOMISD, CMP_GE},
	{SN_CompareScalarUnorderedLessThan, OP_SSE2_UCOMISD, CMP_LT},
	{SN_CompareScalarUnorderedLessThanOrEqual, OP_SSE2_UCOMISD, CMP_LE},
	{SN_CompareScalarUnorderedNotEqual, OP_SSE2_UCOMISD, CMP_NE},
	{SN_CompareUnordered, OP_XCOMPARE_FP, CMP_UNORD},
	{SN_ConvertScalarToVector128Double},
	{SN_ConvertScalarToVector128Int32},
	{SN_ConvertScalarToVector128Int64},
	{SN_ConvertScalarToVector128Single, OP_XOP_X_X_X, INTRINS_SSE_CVTSD2SS},
	{SN_ConvertScalarToVector128UInt32},
	{SN_ConvertScalarToVector128UInt64},
	{SN_ConvertToInt32},
	{SN_ConvertToInt32WithTruncation, OP_XOP_I4_X, INTRINS_SSE_CVTTSD2SI},
	{SN_ConvertToInt64},
	{SN_ConvertToInt64WithTruncation, OP_XOP_I8_X, INTRINS_SSE_CVTTSD2SI64},
	{SN_ConvertToUInt32},
	{SN_ConvertToUInt64},
	{SN_ConvertToVector128Double},
	{SN_ConvertToVector128Int32},
	{SN_ConvertToVector128Int32WithTruncation},
	{SN_ConvertToVector128Single},
	{SN_Divide, OP_XBINOP, OP_FDIV},
	{SN_DivideScalar, OP_SSE2_DIVSD},
	{SN_Extract},
	{SN_Insert},
	{SN_LoadAlignedVector128},
	{SN_LoadFence, OP_XOP, INTRINS_SSE_LFENCE},
	{SN_LoadHigh, OP_SSE2_MOVHPD_LOAD},
	{SN_LoadLow, OP_SSE2_MOVLPD_LOAD},
	{SN_LoadScalarVector128},
	{SN_LoadVector128},
	{SN_MaskMove, OP_SSE2_MASKMOVDQU},
	{SN_Max},
	{SN_MaxScalar, OP_XOP_X_X_X, INTRINS_SSE_MAXSD},
	{SN_MemoryFence, OP_XOP, INTRINS_SSE_MFENCE},
	{SN_Min}, // FIXME:
	{SN_MinScalar, OP_XOP_X_X_X, INTRINS_SSE_MINSD},
	{SN_MoveMask, OP_SSE_MOVMSK},
	{SN_MoveScalar},
	{SN_Multiply},
	{SN_MultiplyAddAdjacent, OP_XOP_X_X_X, INTRINS_SSE_PMADDWD},
	{SN_MultiplyHigh},
	{SN_MultiplyLow, OP_PMULW},
	{SN_MultiplyScalar, OP_SSE2_MULSD},
	{SN_Or, OP_SSE_OR},
	{SN_PackSignedSaturate},
	{SN_PackUnsignedSaturate},
	{SN_ShiftLeftLogical},
	{SN_ShiftLeftLogical128BitLane},
	{SN_ShiftRightArithmetic},
	{SN_ShiftRightLogical},
	{SN_ShiftRightLogical128BitLane},
	{SN_Shuffle},
	{SN_ShuffleHigh},
	{SN_ShuffleLow},
	{SN_Sqrt, OP_XOP_X_X, INTRINS_SSE_SQRT_PD},
	{SN_SqrtScalar},
	{SN_Store, OP_SSE_STORE, 1 /* alignment */},
	{SN_StoreAligned, OP_SSE_STORE, 16 /* alignment */},
	{SN_StoreAlignedNonTemporal, OP_SSE_MOVNTPS, 16 /* alignment */},
	{SN_StoreHigh, OP_SSE2_MOVHPD_STORE},
	{SN_StoreLow, OP_SSE2_MOVLPD_STORE},
	{SN_StoreNonTemporal, OP_SSE_MOVNTPS, 1 /* alignment */},
	{SN_StoreScalar, OP_SSE_STORES},
	{SN_Subtract},
	{SN_SubtractSaturate, OP_SSE2_SUBS},
	{SN_SubtractScalar, OP_SSE2_SUBSD},
	{SN_SumAbsoluteDifferences, OP_XOP_X_X_X, INTRINS_SSE_PSADBW},
	{SN_UnpackHigh, OP_SSE_UNPACKHI},
	{SN_UnpackLow, OP_SSE_UNPACKLO},
	{SN_Xor, OP_SSE_XOR},
	{SN_get_IsSupported}
};

static SimdIntrinsic sse3_methods [] = {
	{SN_AddSubtract},
	{SN_HorizontalAdd},
	{SN_HorizontalSubtract},
	{SN_LoadAndDuplicateToVector128, OP_SSE3_MOVDDUP_MEM},
	{SN_LoadDquVector128, OP_XOP_X_I, INTRINS_SSE_LDU_DQ},
	{SN_MoveAndDuplicate, OP_SSE3_MOVDDUP},
	{SN_MoveHighAndDuplicate, OP_SSE3_MOVSHDUP},
	{SN_MoveLowAndDuplicate, OP_SSE3_MOVSLDUP},
	{SN_get_IsSupported}
};

static SimdIntrinsic ssse3_methods
[] = { {SN_Abs, OP_SSSE3_ABS}, {SN_AlignRight}, {SN_HorizontalAdd}, {SN_HorizontalAddSaturate, OP_XOP_X_X_X, INTRINS_SSE_PHADDSW}, {SN_HorizontalSubtract}, {SN_HorizontalSubtractSaturate, OP_XOP_X_X_X, INTRINS_SSE_PHSUBSW}, {SN_MultiplyAddAdjacent, OP_XOP_X_X_X, INTRINS_SSE_PMADDUBSW}, {SN_MultiplyHighRoundScale, OP_XOP_X_X_X, INTRINS_SSE_PMULHRSW}, {SN_Shuffle, OP_SSSE3_SHUFFLE}, {SN_Sign}, {SN_get_IsSupported} }; static SimdIntrinsic sse41_methods [] = { {SN_Blend}, {SN_BlendVariable}, {SN_Ceiling, OP_SSE41_ROUNDP, 10 /*round mode*/}, {SN_CeilingScalar, 0, 10 /*round mode*/}, {SN_CompareEqual, OP_XCOMPARE, CMP_EQ}, {SN_ConvertToVector128Int16, OP_SSE_CVTII, MONO_TYPE_I2}, {SN_ConvertToVector128Int32, OP_SSE_CVTII, MONO_TYPE_I4}, {SN_ConvertToVector128Int64, OP_SSE_CVTII, MONO_TYPE_I8}, {SN_DotProduct}, {SN_Extract}, {SN_Floor, OP_SSE41_ROUNDP, 9 /*round mode*/}, {SN_FloorScalar, 0, 9 /*round mode*/}, {SN_Insert}, {SN_LoadAlignedVector128NonTemporal, OP_SSE41_LOADANT}, {SN_Max, OP_XBINOP, OP_IMAX}, {SN_Min, OP_XBINOP, OP_IMIN}, {SN_MinHorizontal, OP_XOP_X_X, INTRINS_SSE_PHMINPOSUW}, {SN_MultipleSumAbsoluteDifferences}, {SN_Multiply, OP_SSE41_MUL}, {SN_MultiplyLow, OP_SSE41_MULLO}, {SN_PackUnsignedSaturate, OP_XOP_X_X_X, INTRINS_SSE_PACKUSDW}, {SN_RoundCurrentDirection, OP_SSE41_ROUNDP, 4 /*round mode*/}, {SN_RoundCurrentDirectionScalar, 0, 4 /*round mode*/}, {SN_RoundToNearestInteger, OP_SSE41_ROUNDP, 8 /*round mode*/}, {SN_RoundToNearestIntegerScalar, 0, 8 /*round mode*/}, {SN_RoundToNegativeInfinity, OP_SSE41_ROUNDP, 9 /*round mode*/}, {SN_RoundToNegativeInfinityScalar, 0, 9 /*round mode*/}, {SN_RoundToPositiveInfinity, OP_SSE41_ROUNDP, 10 /*round mode*/}, {SN_RoundToPositiveInfinityScalar, 0, 10 /*round mode*/}, {SN_RoundToZero, OP_SSE41_ROUNDP, 11 /*round mode*/}, {SN_RoundToZeroScalar, 0, 11 /*round mode*/}, {SN_TestC, OP_XOP_I4_X_X, INTRINS_SSE_TESTC}, {SN_TestNotZAndNotC, OP_XOP_I4_X_X, INTRINS_SSE_TESTNZ}, {SN_TestZ, OP_XOP_I4_X_X, INTRINS_SSE_TESTZ}, {SN_get_IsSupported} }; static SimdIntrinsic sse42_methods [] = { {SN_CompareGreaterThan, OP_XCOMPARE, CMP_GT}, {SN_Crc32}, {SN_get_IsSupported} }; static SimdIntrinsic pclmulqdq_methods [] = { {SN_CarrylessMultiply}, {SN_get_IsSupported} }; static SimdIntrinsic aes_methods [] = { {SN_Decrypt, OP_XOP_X_X_X, INTRINS_AESNI_AESDEC}, {SN_DecryptLast, OP_XOP_X_X_X, INTRINS_AESNI_AESDECLAST}, {SN_Encrypt, OP_XOP_X_X_X, INTRINS_AESNI_AESENC}, {SN_EncryptLast, OP_XOP_X_X_X, INTRINS_AESNI_AESENCLAST}, {SN_InverseMixColumns, OP_XOP_X_X, INTRINS_AESNI_AESIMC}, {SN_KeygenAssist}, {SN_get_IsSupported} }; static SimdIntrinsic popcnt_methods [] = { {SN_PopCount}, {SN_get_IsSupported} }; static SimdIntrinsic lzcnt_methods [] = { {SN_LeadingZeroCount}, {SN_get_IsSupported} }; static SimdIntrinsic bmi1_methods [] = { {SN_AndNot}, {SN_BitFieldExtract}, {SN_ExtractLowestSetBit}, {SN_GetMaskUpToLowestSetBit}, {SN_ResetLowestSetBit}, {SN_TrailingZeroCount}, {SN_get_IsSupported} }; static SimdIntrinsic bmi2_methods [] = { {SN_MultiplyNoFlags}, {SN_ParallelBitDeposit}, {SN_ParallelBitExtract}, {SN_ZeroHighBits}, {SN_get_IsSupported} }; static SimdIntrinsic x86base_methods [] = { {SN_BitScanForward}, {SN_BitScanReverse}, {SN_get_IsSupported} }; static const IntrinGroup supported_x86_intrinsics [] = { { "Aes", MONO_CPU_X86_AES, aes_methods, sizeof (aes_methods) }, { "Avx", MONO_CPU_X86_AVX, unsupported, sizeof (unsupported) }, { "Avx2", MONO_CPU_X86_AVX2, unsupported, sizeof (unsupported) }, { "AvxVnni", 0, unsupported, sizeof (unsupported) }, { "Bmi1", 
MONO_CPU_X86_BMI1, bmi1_methods, sizeof (bmi1_methods) }, { "Bmi2", MONO_CPU_X86_BMI2, bmi2_methods, sizeof (bmi2_methods) }, { "Fma", MONO_CPU_X86_FMA, unsupported, sizeof (unsupported) }, { "Lzcnt", MONO_CPU_X86_LZCNT, lzcnt_methods, sizeof (lzcnt_methods), TRUE }, { "Pclmulqdq", MONO_CPU_X86_PCLMUL, pclmulqdq_methods, sizeof (pclmulqdq_methods) }, { "Popcnt", MONO_CPU_X86_POPCNT, popcnt_methods, sizeof (popcnt_methods), TRUE }, { "Sse", MONO_CPU_X86_SSE, sse_methods, sizeof (sse_methods) }, { "Sse2", MONO_CPU_X86_SSE2, sse2_methods, sizeof (sse2_methods) }, { "Sse3", MONO_CPU_X86_SSE3, sse3_methods, sizeof (sse3_methods) }, { "Sse41", MONO_CPU_X86_SSE41, sse41_methods, sizeof (sse41_methods) }, { "Sse42", MONO_CPU_X86_SSE42, sse42_methods, sizeof (sse42_methods) }, { "Ssse3", MONO_CPU_X86_SSSE3, ssse3_methods, sizeof (ssse3_methods) }, { "X86Base", 0, x86base_methods, sizeof (x86base_methods) }, }; static MonoInst* emit_x86_intrinsics ( MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoClass *klass, const IntrinGroup *intrin_group, const SimdIntrinsic *info, int id, MonoTypeEnum arg0_type, gboolean is_64bit) { MonoCPUFeatures feature = intrin_group->feature; const SimdIntrinsic *intrinsics = intrin_group->intrinsics; if (feature == MONO_CPU_X86_SSE) { switch (id) { case SN_Shuffle: return emit_simd_ins_for_sig (cfg, klass, OP_SSE_SHUFPS, 0, arg0_type, fsig, args); case SN_ConvertScalarToVector128Single: { int op = 0; switch (fsig->params [1]->type) { case MONO_TYPE_I4: op = OP_SSE_CVTSI2SS; break; case MONO_TYPE_I8: op = OP_SSE_CVTSI2SS64; break; default: g_assert_not_reached (); break; } return emit_simd_ins_for_sig (cfg, klass, op, 0, 0, fsig, args); } case SN_ReciprocalScalar: case SN_ReciprocalSqrtScalar: case SN_SqrtScalar: { int op = 0; switch (id) { case SN_ReciprocalScalar: op = OP_SSE_RCPSS; break; case SN_ReciprocalSqrtScalar: op = OP_SSE_RSQRTSS; break; case SN_SqrtScalar: op = OP_SSE_SQRTSS; break; }; if (fsig->param_count == 1) return emit_simd_ins (cfg, klass, op, args [0]->dreg, args[0]->dreg); else if (fsig->param_count == 2) return emit_simd_ins (cfg, klass, op, args [0]->dreg, args[1]->dreg); else g_assert_not_reached (); break; } case SN_LoadScalarVector128: return NULL; default: return NULL; } } if (feature == MONO_CPU_X86_SSE2) { switch (id) { case SN_Subtract: return emit_simd_ins_for_sig (cfg, klass, OP_XBINOP, arg0_type == MONO_TYPE_R8 ? OP_FSUB : OP_ISUB, arg0_type, fsig, args); case SN_Add: return emit_simd_ins_for_sig (cfg, klass, OP_XBINOP, arg0_type == MONO_TYPE_R8 ? OP_FADD : OP_IADD, arg0_type, fsig, args); case SN_Average: if (arg0_type == MONO_TYPE_U1) return emit_simd_ins_for_sig (cfg, klass, OP_PAVGB_UN, -1, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_U2) return emit_simd_ins_for_sig (cfg, klass, OP_PAVGW_UN, -1, arg0_type, fsig, args); else return NULL; case SN_CompareNotEqual: return emit_simd_ins_for_sig (cfg, klass, arg0_type == MONO_TYPE_R8 ? OP_XCOMPARE_FP : OP_XCOMPARE, CMP_NE, arg0_type, fsig, args); case SN_CompareEqual: return emit_simd_ins_for_sig (cfg, klass, arg0_type == MONO_TYPE_R8 ? OP_XCOMPARE_FP : OP_XCOMPARE, CMP_EQ, arg0_type, fsig, args); case SN_CompareGreaterThan: return emit_simd_ins_for_sig (cfg, klass, arg0_type == MONO_TYPE_R8 ? OP_XCOMPARE_FP : OP_XCOMPARE, CMP_GT, arg0_type, fsig, args); case SN_CompareLessThan: return emit_simd_ins_for_sig (cfg, klass, arg0_type == MONO_TYPE_R8 ? 
OP_XCOMPARE_FP : OP_XCOMPARE, CMP_LT, arg0_type, fsig, args); case SN_ConvertToInt32: if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_I4_X, INTRINS_SSE_CVTSD2SI, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_I4) return emit_simd_ins_for_sig (cfg, klass, OP_EXTRACT_I4, 0, arg0_type, fsig, args); else return NULL; case SN_ConvertToInt64: if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_I8_X, INTRINS_SSE_CVTSD2SI64, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_I8) return emit_simd_ins_for_sig (cfg, klass, OP_EXTRACT_I8, 0 /*element index*/, arg0_type, fsig, args); else g_assert_not_reached (); break; case SN_ConvertScalarToVector128Double: { int op = OP_SSE2_CVTSS2SD; switch (fsig->params [1]->type) { case MONO_TYPE_I4: op = OP_SSE2_CVTSI2SD; break; case MONO_TYPE_I8: op = OP_SSE2_CVTSI2SD64; break; } return emit_simd_ins_for_sig (cfg, klass, op, 0, 0, fsig, args); } case SN_ConvertScalarToVector128Int32: case SN_ConvertScalarToVector128Int64: case SN_ConvertScalarToVector128UInt32: case SN_ConvertScalarToVector128UInt64: return emit_simd_ins_for_sig (cfg, klass, OP_CREATE_SCALAR, -1, arg0_type, fsig, args); case SN_ConvertToUInt32: return emit_simd_ins_for_sig (cfg, klass, OP_EXTRACT_I4, 0 /*element index*/, arg0_type, fsig, args); case SN_ConvertToUInt64: return emit_simd_ins_for_sig (cfg, klass, OP_EXTRACT_I8, 0 /*element index*/, arg0_type, fsig, args); case SN_ConvertToVector128Double: if (arg0_type == MONO_TYPE_R4) return emit_simd_ins_for_sig (cfg, klass, OP_CVTPS2PD, 0, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_I4) return emit_simd_ins_for_sig (cfg, klass, OP_CVTDQ2PD, 0, arg0_type, fsig, args); else return NULL; case SN_ConvertToVector128Int32: if (arg0_type == MONO_TYPE_R4) return emit_simd_ins_for_sig (cfg, klass, OP_CVTPS2DQ, 0, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_CVTPD2DQ, 0, arg0_type, fsig, args); else return NULL; case SN_ConvertToVector128Int32WithTruncation: if (arg0_type == MONO_TYPE_R4) return emit_simd_ins_for_sig (cfg, klass, OP_CVTTPS2DQ, 0, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_CVTTPD2DQ, 0, arg0_type, fsig, args); else return NULL; case SN_ConvertToVector128Single: if (arg0_type == MONO_TYPE_I4) return emit_simd_ins_for_sig (cfg, klass, OP_CVTDQ2PS, 0, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_CVTPD2PS, 0, arg0_type, fsig, args); else return NULL; case SN_LoadAlignedVector128: return emit_simd_ins_for_sig (cfg, klass, OP_SSE_LOADU, 16 /*alignment*/, arg0_type, fsig, args); case SN_LoadVector128: return emit_simd_ins_for_sig (cfg, klass, OP_SSE_LOADU, 1 /*alignment*/, arg0_type, fsig, args); case SN_MoveScalar: return emit_simd_ins_for_sig (cfg, klass, fsig->param_count == 2 ? 
OP_SSE_MOVS2 : OP_SSE_MOVS, -1, arg0_type, fsig, args); case SN_Max: switch (arg0_type) { case MONO_TYPE_U1: return emit_simd_ins_for_sig (cfg, klass, OP_PMAXB_UN, 0, arg0_type, fsig, args); case MONO_TYPE_I2: return emit_simd_ins_for_sig (cfg, klass, OP_PMAXW, 0, arg0_type, fsig, args); case MONO_TYPE_R8: return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_MAXPD, arg0_type, fsig, args); default: g_assert_not_reached (); break; } break; case SN_Min: switch (arg0_type) { case MONO_TYPE_U1: return emit_simd_ins_for_sig (cfg, klass, OP_PMINB_UN, 0, arg0_type, fsig, args); case MONO_TYPE_I2: return emit_simd_ins_for_sig (cfg, klass, OP_PMINW, 0, arg0_type, fsig, args); case MONO_TYPE_R8: return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_MINPD, arg0_type, fsig, args); default: g_assert_not_reached (); break; } break; case SN_Multiply: if (arg0_type == MONO_TYPE_U4) return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_PMULUDQ, 0, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_MULPD, 0, arg0_type, fsig, args); else g_assert_not_reached (); case SN_MultiplyHigh: if (arg0_type == MONO_TYPE_I2) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PMULHW, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_U2) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PMULHUW, arg0_type, fsig, args); else g_assert_not_reached (); case SN_PackSignedSaturate: if (arg0_type == MONO_TYPE_I2) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PACKSSWB, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_I4) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PACKSSDW, arg0_type, fsig, args); else g_assert_not_reached (); case SN_PackUnsignedSaturate: return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_PACKUS, -1, arg0_type, fsig, args); case SN_Extract: g_assert (arg0_type == MONO_TYPE_U2); return emit_simd_ins_for_sig (cfg, klass, OP_XEXTRACT_I4, 0, arg0_type, fsig, args); case SN_Insert: g_assert (arg0_type == MONO_TYPE_I2 || arg0_type == MONO_TYPE_U2); return emit_simd_ins_for_sig (cfg, klass, OP_XINSERT_I2, 0, arg0_type, fsig, args); case SN_ShiftRightLogical: { gboolean is_imm = fsig->params [1]->type == MONO_TYPE_U1; IntrinsicId op = (IntrinsicId)0; switch (arg0_type) { case MONO_TYPE_I2: case MONO_TYPE_U2: op = is_imm ? INTRINS_SSE_PSRLI_W : INTRINS_SSE_PSRL_W; break; case MONO_TYPE_I4: case MONO_TYPE_U4: op = is_imm ? INTRINS_SSE_PSRLI_D : INTRINS_SSE_PSRL_D; break; case MONO_TYPE_I8: case MONO_TYPE_U8: op = is_imm ? INTRINS_SSE_PSRLI_Q : INTRINS_SSE_PSRL_Q; break; default: g_assert_not_reached (); break; } return emit_simd_ins_for_sig (cfg, klass, is_imm ? OP_XOP_X_X_I4 : OP_XOP_X_X_X, op, arg0_type, fsig, args); } case SN_ShiftRightArithmetic: { gboolean is_imm = fsig->params [1]->type == MONO_TYPE_U1; IntrinsicId op = (IntrinsicId)0; switch (arg0_type) { case MONO_TYPE_I2: case MONO_TYPE_U2: op = is_imm ? INTRINS_SSE_PSRAI_W : INTRINS_SSE_PSRA_W; break; case MONO_TYPE_I4: case MONO_TYPE_U4: op = is_imm ? INTRINS_SSE_PSRAI_D : INTRINS_SSE_PSRA_D; break; default: g_assert_not_reached (); break; } return emit_simd_ins_for_sig (cfg, klass, is_imm ? OP_XOP_X_X_I4 : OP_XOP_X_X_X, op, arg0_type, fsig, args); } case SN_ShiftLeftLogical: { gboolean is_imm = fsig->params [1]->type == MONO_TYPE_U1; IntrinsicId op = (IntrinsicId)0; switch (arg0_type) { case MONO_TYPE_I2: case MONO_TYPE_U2: op = is_imm ? 
INTRINS_SSE_PSLLI_W : INTRINS_SSE_PSLL_W; break; case MONO_TYPE_I4: case MONO_TYPE_U4: op = is_imm ? INTRINS_SSE_PSLLI_D : INTRINS_SSE_PSLL_D; break; case MONO_TYPE_I8: case MONO_TYPE_U8: op = is_imm ? INTRINS_SSE_PSLLI_Q : INTRINS_SSE_PSLL_Q; break; default: g_assert_not_reached (); break; } return emit_simd_ins_for_sig (cfg, klass, is_imm ? OP_XOP_X_X_I4 : OP_XOP_X_X_X, op, arg0_type, fsig, args); } case SN_ShiftLeftLogical128BitLane: return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_PSLLDQ, 0, arg0_type, fsig, args); case SN_ShiftRightLogical128BitLane: return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_PSRLDQ, 0, arg0_type, fsig, args); case SN_Shuffle: { if (fsig->param_count == 2) { g_assert (arg0_type == MONO_TYPE_I4 || arg0_type == MONO_TYPE_U4); return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_PSHUFD, 0, arg0_type, fsig, args); } else if (fsig->param_count == 3) { g_assert (arg0_type == MONO_TYPE_R8); return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_SHUFPD, 0, arg0_type, fsig, args); } else { g_assert_not_reached (); break; } } case SN_ShuffleHigh: g_assert (fsig->param_count == 2); return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_PSHUFHW, 0, arg0_type, fsig, args); case SN_ShuffleLow: g_assert (fsig->param_count == 2); return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_PSHUFLW, 0, arg0_type, fsig, args); case SN_SqrtScalar: { if (fsig->param_count == 1) return emit_simd_ins (cfg, klass, OP_SSE2_SQRTSD, args [0]->dreg, args[0]->dreg); else if (fsig->param_count == 2) return emit_simd_ins (cfg, klass, OP_SSE2_SQRTSD, args [0]->dreg, args[1]->dreg); else { g_assert_not_reached (); break; } } case SN_LoadScalarVector128: { int op = 0; switch (arg0_type) { case MONO_TYPE_I4: case MONO_TYPE_U4: op = OP_SSE2_MOVD; break; case MONO_TYPE_I8: case MONO_TYPE_U8: op = OP_SSE2_MOVQ; break; case MONO_TYPE_R8: op = OP_SSE2_MOVUPD; break; default: g_assert_not_reached(); break; } return emit_simd_ins_for_sig (cfg, klass, op, 0, 0, fsig, args); } default: return NULL; } } if (feature == MONO_CPU_X86_SSE3) { switch (id) { case SN_AddSubtract: if (arg0_type == MONO_TYPE_R4) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_ADDSUBPS, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_ADDSUBPD, arg0_type, fsig, args); else g_assert_not_reached (); break; case SN_HorizontalAdd: if (arg0_type == MONO_TYPE_R4) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_HADDPS, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_HADDPD, arg0_type, fsig, args); else g_assert_not_reached (); break; case SN_HorizontalSubtract: if (arg0_type == MONO_TYPE_R4) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_HSUBPS, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_HSUBPD, arg0_type, fsig, args); else g_assert_not_reached (); break; default: g_assert_not_reached (); break; } } if (feature == MONO_CPU_X86_SSSE3) { switch (id) { case SN_AlignRight: return emit_simd_ins_for_sig (cfg, klass, OP_SSSE3_ALIGNR, 0, arg0_type, fsig, args); case SN_HorizontalAdd: if (arg0_type == MONO_TYPE_I2) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PHADDW, arg0_type, fsig, args); return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PHADDD, arg0_type, fsig, args); case SN_HorizontalSubtract: if (arg0_type == MONO_TYPE_I2) 
return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PHSUBW, arg0_type, fsig, args); return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PHSUBD, arg0_type, fsig, args); case SN_Sign: if (arg0_type == MONO_TYPE_I1) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PSIGNB, arg0_type, fsig, args); if (arg0_type == MONO_TYPE_I2) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PSIGNW, arg0_type, fsig, args); return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PSIGND, arg0_type, fsig, args); default: g_assert_not_reached (); break; } } if (feature == MONO_CPU_X86_SSE41) { switch (id) { case SN_DotProduct: { int op = 0; switch (arg0_type) { case MONO_TYPE_R4: op = OP_SSE41_DPPS; break; case MONO_TYPE_R8: op = OP_SSE41_DPPD; break; default: g_assert_not_reached (); break; } return emit_simd_ins_for_sig (cfg, klass, op, 0, arg0_type, fsig, args); } case SN_MultipleSumAbsoluteDifferences: return emit_simd_ins_for_sig (cfg, klass, OP_SSE41_MPSADBW, 0, arg0_type, fsig, args); case SN_Blend: return emit_simd_ins_for_sig (cfg, klass, OP_SSE41_BLEND, 0, arg0_type, fsig, args); case SN_BlendVariable: return emit_simd_ins_for_sig (cfg, klass, OP_SSE41_BLENDV, -1, arg0_type, fsig, args); case SN_Extract: { int op = 0; switch (arg0_type) { case MONO_TYPE_U1: op = OP_XEXTRACT_I1; break; case MONO_TYPE_U4: case MONO_TYPE_I4: op = OP_XEXTRACT_I4; break; case MONO_TYPE_U8: case MONO_TYPE_I8: op = OP_XEXTRACT_I8; break; case MONO_TYPE_R4: op = OP_XEXTRACT_R4; break; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 op = OP_XEXTRACT_I8; #else op = OP_XEXTRACT_I4; #endif break; default: g_assert_not_reached(); break; } return emit_simd_ins_for_sig (cfg, klass, op, 0, arg0_type, fsig, args); } case SN_Insert: { int op = arg0_type == MONO_TYPE_R4 ? OP_SSE41_INSERTPS : type_to_xinsert_op (arg0_type); return emit_simd_ins_for_sig (cfg, klass, op, -1, arg0_type, fsig, args); } case SN_CeilingScalar: case SN_FloorScalar: case SN_RoundCurrentDirectionScalar: case SN_RoundToNearestIntegerScalar: case SN_RoundToNegativeInfinityScalar: case SN_RoundToPositiveInfinityScalar: case SN_RoundToZeroScalar: if (fsig->param_count == 2) { return emit_simd_ins_for_sig (cfg, klass, OP_SSE41_ROUNDS, info->default_instc0, arg0_type, fsig, args); } else { MonoInst* ins = emit_simd_ins (cfg, klass, OP_SSE41_ROUNDS, args [0]->dreg, args [0]->dreg); ins->inst_c0 = info->default_instc0; ins->inst_c1 = arg0_type; return ins; } break; default: g_assert_not_reached (); break; } } if (feature == MONO_CPU_X86_SSE42) { switch (id) { case SN_Crc32: { MonoTypeEnum arg1_type = get_underlying_type (fsig->params [1]); return emit_simd_ins_for_sig (cfg, klass, arg1_type == MONO_TYPE_U8 ? OP_SSE42_CRC64 : OP_SSE42_CRC32, arg1_type, arg0_type, fsig, args); } default: g_assert_not_reached (); break; } } if (feature == MONO_CPU_X86_PCLMUL) { switch (id) { case SN_CarrylessMultiply: { return emit_simd_ins_for_sig (cfg, klass, OP_PCLMULQDQ, 0, arg0_type, fsig, args); } default: g_assert_not_reached (); break; } } if (feature == MONO_CPU_X86_AES) { switch (id) { case SN_KeygenAssist: { return emit_simd_ins_for_sig (cfg, klass, OP_AES_KEYGENASSIST, 0, arg0_type, fsig, args); } default: g_assert_not_reached (); break; } } MonoInst *ins = NULL; if (feature == MONO_CPU_X86_POPCNT) { switch (id) { case SN_PopCount: MONO_INST_NEW (cfg, ins, is_64bit ? OP_POPCNT64 : OP_POPCNT32); ins->dreg = is_64bit ? 
alloc_lreg (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->type = is_64bit ? STACK_I8 : STACK_I4; MONO_ADD_INS (cfg->cbb, ins); return ins; default: return NULL; } } if (feature == MONO_CPU_X86_LZCNT) { switch (id) { case SN_LeadingZeroCount: return emit_simd_ins_for_sig (cfg, klass, is_64bit ? OP_LZCNT64 : OP_LZCNT32, 0, arg0_type, fsig, args); default: return NULL; } } if (feature == MONO_CPU_X86_BMI1) { switch (id) { case SN_AndNot: { // (a ^ -1) & b // LLVM replaces it with `andn` int tmp_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); int result_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, is_64bit ? OP_LXOR_IMM : OP_IXOR_IMM, tmp_reg, args [0]->dreg, -1); EMIT_NEW_BIALU (cfg, ins, is_64bit ? OP_LAND : OP_IAND, result_reg, tmp_reg, args [1]->dreg); return ins; } case SN_BitFieldExtract: { int ctlreg = args [1]->dreg; if (fsig->param_count == 2) { } else if (fsig->param_count == 3) { MonoInst *ins = NULL; /* This intrinsic is also implemented in managed code. * TODO: remove this if cross-AOT-assembly inlining works */ int startreg = args [1]->dreg; int lenreg = args [2]->dreg; int dreg1 = alloc_ireg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, OP_SHL_IMM, dreg1, lenreg, 8); int dreg2 = alloc_ireg (cfg); EMIT_NEW_BIALU (cfg, ins, OP_IOR, dreg2, startreg, dreg1); ctlreg = dreg2; } else { g_assert_not_reached (); } return emit_simd_ins (cfg, klass, is_64bit ? OP_BMI1_BEXTR64 : OP_BMI1_BEXTR32, args [0]->dreg, ctlreg); } case SN_GetMaskUpToLowestSetBit: { // x ^ (x - 1) // LLVM replaces it with `blsmsk` int tmp_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); int result_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, is_64bit ? OP_LSUB_IMM : OP_ISUB_IMM, tmp_reg, args [0]->dreg, 1); EMIT_NEW_BIALU (cfg, ins, is_64bit ? OP_LXOR : OP_IXOR, result_reg, args [0]->dreg, tmp_reg); return ins; } case SN_ResetLowestSetBit: { // x & (x - 1) // LLVM replaces it with `blsr` int tmp_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); int result_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, is_64bit ? OP_LSUB_IMM : OP_ISUB_IMM, tmp_reg, args [0]->dreg, 1); EMIT_NEW_BIALU (cfg, ins, is_64bit ? OP_LAND : OP_IAND, result_reg, args [0]->dreg, tmp_reg); return ins; } case SN_ExtractLowestSetBit: { // x & (0 - x) // LLVM replaces it with `blsi` int tmp_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); int result_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); int zero_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); MONO_EMIT_NEW_ICONST (cfg, zero_reg, 0); EMIT_NEW_BIALU (cfg, ins, is_64bit ? OP_LSUB : OP_ISUB, tmp_reg, zero_reg, args [0]->dreg); EMIT_NEW_BIALU (cfg, ins, is_64bit ? OP_LAND : OP_IAND, result_reg, args [0]->dreg, tmp_reg); return ins; } case SN_TrailingZeroCount: MONO_INST_NEW (cfg, ins, is_64bit ? OP_CTTZ64 : OP_CTTZ32); ins->dreg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->type = is_64bit ? STACK_I8 : STACK_I4; MONO_ADD_INS (cfg->cbb, ins); return ins; default: g_assert_not_reached (); } } if (feature == MONO_CPU_X86_BMI2) { switch (id) { case SN_MultiplyNoFlags: { int op = 0; if (fsig->param_count == 2) { op = is_64bit ? OP_MULX_H64 : OP_MULX_H32; } else if (fsig->param_count == 3) { op = is_64bit ? OP_MULX_HL64 : OP_MULX_HL32; } else { g_assert_not_reached (); } return emit_simd_ins_for_sig (cfg, klass, op, 0, 0, fsig, args); } case SN_ZeroHighBits: MONO_INST_NEW (cfg, ins, is_64bit ? 
OP_BZHI64 : OP_BZHI32); ins->dreg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->sreg2 = args [1]->dreg; ins->type = is_64bit ? STACK_I8 : STACK_I4; MONO_ADD_INS (cfg->cbb, ins); return ins; case SN_ParallelBitExtract: MONO_INST_NEW (cfg, ins, is_64bit ? OP_PEXT64 : OP_PEXT32); ins->dreg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->sreg2 = args [1]->dreg; ins->type = is_64bit ? STACK_I8 : STACK_I4; MONO_ADD_INS (cfg->cbb, ins); return ins; case SN_ParallelBitDeposit: MONO_INST_NEW (cfg, ins, is_64bit ? OP_PDEP64 : OP_PDEP32); ins->dreg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->sreg2 = args [1]->dreg; ins->type = is_64bit ? STACK_I8 : STACK_I4; MONO_ADD_INS (cfg->cbb, ins); return ins; default: g_assert_not_reached (); } } if (intrinsics == x86base_methods) { switch (id) { case SN_BitScanForward: MONO_INST_NEW (cfg, ins, is_64bit ? OP_X86_BSF64 : OP_X86_BSF32); ins->dreg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->type = is_64bit ? STACK_I8 : STACK_I4; MONO_ADD_INS (cfg->cbb, ins); return ins; case SN_BitScanReverse: MONO_INST_NEW (cfg, ins, is_64bit ? OP_X86_BSR64 : OP_X86_BSR32); ins->dreg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->type = is_64bit ? STACK_I8 : STACK_I4; MONO_ADD_INS (cfg->cbb, ins); return ins; default: g_assert_not_reached (); } } return NULL; } static guint16 vector_256_t_methods [] = { SN_get_Count, }; static MonoInst* emit_vector256_t (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins; MonoType *etype; MonoClass *klass; int size, len, id; id = lookup_intrins (vector_256_t_methods, sizeof (vector_256_t_methods), cmethod); if (id == -1) return NULL; klass = cmethod->klass; etype = mono_class_get_context (klass)->class_inst->type_argv [0]; size = mono_class_value_size (mono_class_from_mono_type_internal (etype), NULL); g_assert (size); len = 32 / size; if (!MONO_TYPE_IS_PRIMITIVE (etype) || etype->type == MONO_TYPE_CHAR || etype->type == MONO_TYPE_BOOLEAN || etype->type == MONO_TYPE_I || etype->type == MONO_TYPE_U) return NULL; if (cfg->verbose_level > 1) { char *name = mono_method_full_name (cmethod, TRUE); printf (" SIMD intrinsic %s\n", name); g_free (name); } switch (id) { case SN_get_Count: if (!(fsig->param_count == 0 && fsig->ret->type == MONO_TYPE_I4)) break; EMIT_NEW_ICONST (cfg, ins, len); return ins; default: break; } return NULL; } static MonoInst* emit_amd64_intrinsics (const char *class_ns, const char *class_name, MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { if (!strcmp (class_ns, "System.Runtime.Intrinsics.X86")) { return emit_hardware_intrinsics (cfg, cmethod, fsig, args, supported_x86_intrinsics, sizeof (supported_x86_intrinsics), emit_x86_intrinsics); } if (!strcmp (class_ns, "System.Runtime.Intrinsics")) { if (!strcmp (class_name, "Vector256`1")) return emit_vector256_t (cfg, cmethod, fsig, args); } if (!strcmp (class_ns, "System.Numerics")) { if (!strcmp (class_name, "Vector")) return emit_sys_numerics_vector (cfg, cmethod, fsig, args); if (!strcmp (class_name, "Vector`1")) return emit_sys_numerics_vector_t (cfg, cmethod, fsig, args); } return NULL; } #endif // !TARGET_ARM64 #ifdef TARGET_ARM64 static MonoInst* emit_simd_intrinsics (const char *class_ns, const char *class_name, MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { // 
FIXME: implement Vector64<T>, Vector128<T> and Vector<T> for Arm64 if (!strcmp (class_ns, "System.Runtime.Intrinsics.Arm")) { return emit_hardware_intrinsics (cfg, cmethod, fsig, args, supported_arm_intrinsics, sizeof (supported_arm_intrinsics), emit_arm64_intrinsics); } return NULL; } #elif TARGET_AMD64 // TODO: test and enable for x86 too static MonoInst* emit_simd_intrinsics (const char *class_ns, const char *class_name, MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *simd_inst = emit_amd64_intrinsics (class_ns, class_name, cfg, cmethod, fsig, args); if (simd_inst != NULL) cfg->uses_simd_intrinsics |= MONO_CFG_USES_SIMD_INTRINSICS; return simd_inst; } #else static MonoInst* emit_simd_intrinsics (const char *class_ns, const char *class_name, MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { return NULL; } #endif MonoInst* mono_emit_simd_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { const char *class_name; const char *class_ns; MonoImage *image = m_class_get_image (cmethod->klass); if (image != mono_get_corlib ()) return NULL; class_ns = m_class_get_name_space (cmethod->klass); class_name = m_class_get_name (cmethod->klass); // If cmethod->klass is nested, the namespace is on the enclosing class. if (m_class_get_nested_in (cmethod->klass)) class_ns = m_class_get_name_space (m_class_get_nested_in (cmethod->klass)); #if defined(TARGET_ARM64) || defined(TARGET_AMD64) if (!strcmp (class_ns, "System.Runtime.Intrinsics")) { if (!strcmp (class_name, "Vector128") || !strcmp (class_name, "Vector64")) return emit_sri_vector (cfg, cmethod, fsig, args); } if (!strcmp (class_ns, "System.Runtime.Intrinsics")) { if (!strcmp (class_name, "Vector128`1") || !strcmp (class_name, "Vector64`1")) return emit_vector64_vector128_t (cfg, cmethod, fsig, args); } #endif // defined(TARGET_ARM64) || defined(TARGET_AMD64) return emit_simd_intrinsics (class_ns, class_name, cfg, cmethod, fsig, args); } /* * Windows x64 value type ABI uses reg/stack references (ArgValuetypeAddrInIReg/ArgValuetypeAddrOnStack) * for function arguments. When using SIMD intrinsics, arguments optimized into OP_ARG need to be decomposed * into corresponding SIMD LOADX/STOREX instructions.
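* Concretely (see decompose_vtype_opt_load_arg/decompose_vtype_opt_store_arg below): a use of an OP_ARG SIMD variable gets a VARLOADA of the argument plus an OP_LOADX_MEMBASE inserted before it, with the instruction's sreg rewired to the freshly loaded xreg; a def is redirected to a fresh xreg that is written back through VARLOADA + OP_STOREX_MEMBASE after the instruction.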
*/ #if defined(TARGET_WIN32) && defined(TARGET_AMD64) static gboolean decompose_vtype_opt_uses_simd_intrinsics (MonoCompile *cfg, MonoInst *ins) { if (cfg->uses_simd_intrinsics & MONO_CFG_USES_SIMD_INTRINSICS) return TRUE; switch (ins->opcode) { case OP_XMOVE: case OP_XZERO: case OP_XPHI: case OP_LOADX_MEMBASE: case OP_LOADX_ALIGNED_MEMBASE: case OP_STOREX_MEMBASE: case OP_STOREX_ALIGNED_MEMBASE_REG: return TRUE; default: return FALSE; } } static void decompose_vtype_opt_load_arg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, gint32 *sreg_int32) { guint32 *sreg = (guint32*)sreg_int32; MonoInst *src_var = get_vreg_to_inst (cfg, *sreg); if (src_var && src_var->opcode == OP_ARG && src_var->klass && MONO_CLASS_IS_SIMD (cfg, src_var->klass)) { MonoInst *varload_ins, *load_ins; NEW_VARLOADA (cfg, varload_ins, src_var, src_var->inst_vtype); mono_bblock_insert_before_ins (bb, ins, varload_ins); MONO_INST_NEW (cfg, load_ins, OP_LOADX_MEMBASE); load_ins->klass = src_var->klass; load_ins->type = STACK_VTYPE; load_ins->sreg1 = varload_ins->dreg; load_ins->dreg = alloc_xreg (cfg); mono_bblock_insert_after_ins (bb, varload_ins, load_ins); *sreg = load_ins->dreg; } } static void decompose_vtype_opt_store_arg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, gint32 *dreg_int32) { guint32 *dreg = (guint32*)dreg_int32; MonoInst *dest_var = get_vreg_to_inst (cfg, *dreg); if (dest_var && dest_var->opcode == OP_ARG && dest_var->klass && MONO_CLASS_IS_SIMD (cfg, dest_var->klass)) { MonoInst *varload_ins, *store_ins; *dreg = alloc_xreg (cfg); NEW_VARLOADA (cfg, varload_ins, dest_var, dest_var->inst_vtype); mono_bblock_insert_after_ins (bb, ins, varload_ins); MONO_INST_NEW (cfg, store_ins, OP_STOREX_MEMBASE); store_ins->klass = dest_var->klass; store_ins->type = STACK_VTYPE; store_ins->sreg1 = *dreg; store_ins->dreg = varload_ins->dreg; mono_bblock_insert_after_ins (bb, varload_ins, store_ins); } } void mono_simd_decompose_intrinsic (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins) { if ((cfg->opt & MONO_OPT_SIMD) && decompose_vtype_opt_uses_simd_intrinsics(cfg, ins)) { const char *spec = INS_INFO (ins->opcode); if (spec [MONO_INST_SRC1] == 'x') decompose_vtype_opt_load_arg (cfg, bb, ins, &(ins->sreg1)); if (spec [MONO_INST_SRC2] == 'x') decompose_vtype_opt_load_arg (cfg, bb, ins, &(ins->sreg2)); if (spec [MONO_INST_SRC3] == 'x') decompose_vtype_opt_load_arg (cfg, bb, ins, &(ins->sreg3)); if (spec [MONO_INST_DEST] == 'x') decompose_vtype_opt_store_arg (cfg, bb, ins, &(ins->dreg)); } } #else void mono_simd_decompose_intrinsic (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins) { } #endif /*defined(TARGET_WIN32) && defined(TARGET_AMD64)*/ void mono_simd_simplify_indirection (MonoCompile *cfg) { } #endif /* DISABLE_JIT */ #endif /* MONO_ARCH_SIMD_INTRINSICS */ #if defined(TARGET_AMD64) void ves_icall_System_Runtime_Intrinsics_X86_X86Base___cpuidex (int abcd[4], int function_id, int subfunction_id) { #ifndef MONO_CROSS_COMPILE mono_hwcap_x86_call_cpuidex (function_id, subfunction_id, &abcd [0], &abcd [1], &abcd [2], &abcd [3]); #endif } #endif MONO_EMPTY_SOURCE_FILE (simd_intrinsics_netcore);
/** * SIMD Intrinsics support for netcore. * Only LLVM is supported as a backend. */ #include <config.h> #include <mono/utils/mono-compiler.h> #include <mono/metadata/icall-decl.h> #include "mini.h" #include "mini-runtime.h" #include "ir-emit.h" #include "llvm-intrinsics-types.h" #ifdef ENABLE_LLVM #include "mini-llvm.h" #include "mini-llvm-cpp.h" #endif #include "mono/utils/bsearch.h" #include <mono/metadata/abi-details.h> #include <mono/metadata/reflection-internals.h> #include <mono/utils/mono-hwcap.h> #if defined (MONO_ARCH_SIMD_INTRINSICS) #if defined(DISABLE_JIT) void mono_simd_intrinsics_init (void) { } #else #define MSGSTRFIELD(line) MSGSTRFIELD1(line) #define MSGSTRFIELD1(line) str##line static const struct msgstr_t { #define METHOD(name) char MSGSTRFIELD(__LINE__) [sizeof (#name)]; #define METHOD2(str,name) char MSGSTRFIELD(__LINE__) [sizeof (str)]; #include "simd-methods.h" #undef METHOD #undef METHOD2 } method_names = { #define METHOD(name) #name, #define METHOD2(str,name) str, #include "simd-methods.h" #undef METHOD #undef METHOD2 }; enum { #define METHOD(name) SN_ ## name = offsetof (struct msgstr_t, MSGSTRFIELD(__LINE__)), #define METHOD2(str,name) SN_ ## name = offsetof (struct msgstr_t, MSGSTRFIELD(__LINE__)), #include "simd-methods.h" }; #define method_name(idx) ((const char*)&method_names + (idx)) static int register_size; #define None 0 typedef struct { uint16_t id; // One of the SN_ constants uint16_t default_op; // ins->opcode uint16_t default_instc0; // ins->inst_c0 uint16_t unsigned_op; uint16_t unsigned_instc0; uint16_t floating_op; uint16_t floating_instc0; } SimdIntrinsic; static const SimdIntrinsic unsupported [] = { {SN_get_IsSupported} }; void mono_simd_intrinsics_init (void) { register_size = 16; #if 0 if ((mini_get_cpu_features () & MONO_CPU_X86_AVX) != 0) register_size = 32; #endif /* Tell the class init code the size of the System.Numerics.Register type */ mono_simd_register_size = register_size; } MonoInst* mono_emit_simd_field_load (MonoCompile *cfg, MonoClassField *field, MonoInst *addr) { return NULL; } static int simd_intrinsic_compare_by_name (const void *key, const void *value) { return strcmp ((const char*)key, method_name (*(guint16*)value)); } static int simd_intrinsic_info_compare_by_name (const void *key, const void *value) { SimdIntrinsic *info = (SimdIntrinsic*)value; return strcmp ((const char*)key, method_name (info->id)); } static int lookup_intrins (guint16 *intrinsics, int size, MonoMethod *cmethod) { const guint16 *result = (const guint16 *)mono_binary_search (cmethod->name, intrinsics, size / sizeof (guint16), sizeof (guint16), &simd_intrinsic_compare_by_name); if (result == NULL) return -1; else return (int)*result; } static SimdIntrinsic* lookup_intrins_info (SimdIntrinsic *intrinsics, int size, MonoMethod *cmethod) { #if 0 for (int i = 0; i < (size / sizeof (SimdIntrinsic)) - 1; ++i) { const char *n1 = method_name (intrinsics [i].id); const char *n2 = method_name (intrinsics [i + 1].id); int len1 = strlen (n1); int len2 = strlen (n2); for (int j = 0; j < len1 && j < len2; ++j) { if (n1 [j] > n2 [j]) { printf ("%s %s\n", n1, n2); g_assert_not_reached (); } else if (n1 [j] < n2 [j]) { break; } } } #endif return (SimdIntrinsic *)mono_binary_search (cmethod->name, intrinsics, size / sizeof (SimdIntrinsic), sizeof (SimdIntrinsic), &simd_intrinsic_info_compare_by_name); } /* * Return a simd vreg for the simd value represented by SRC. * SRC is the 'this' argument to methods. * Set INDIRECT to TRUE if the value was loaded from memory. 
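* The cases handled below, in order: an OP_XMOVE forwards its source vreg; an OP_LDADDR yields the underlying variable's vreg; any instruction that already defines an 'x' register yields its dreg; and a raw pointer (STACK_PTR/STACK_MP) causes an OP_LOADX_MEMBASE to be emitted, with *INDIRECT set to TRUE.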
*/ static int load_simd_vreg_class (MonoCompile *cfg, MonoClass *klass, MonoInst *src, gboolean *indirect) { const char *spec = INS_INFO (src->opcode); if (indirect) *indirect = FALSE; if (src->opcode == OP_XMOVE) { return src->sreg1; } else if (src->opcode == OP_LDADDR) { int res = ((MonoInst*)src->inst_p0)->dreg; return res; } else if (spec [MONO_INST_DEST] == 'x') { return src->dreg; } else if (src->type == STACK_PTR || src->type == STACK_MP) { MonoInst *ins; if (indirect) *indirect = TRUE; MONO_INST_NEW (cfg, ins, OP_LOADX_MEMBASE); ins->klass = klass; ins->sreg1 = src->dreg; ins->type = STACK_VTYPE; ins->dreg = alloc_ireg (cfg); MONO_ADD_INS (cfg->cbb, ins); return ins->dreg; } g_warning ("load_simd_vreg:: could not infer source simd (%d) vreg for op", src->type); mono_print_ins (src); g_assert_not_reached (); } static int load_simd_vreg (MonoCompile *cfg, MonoMethod *cmethod, MonoInst *src, gboolean *indirect) { return load_simd_vreg_class (cfg, cmethod->klass, src, indirect); } /* Create and emit a SIMD instruction, dreg is auto-allocated */ static MonoInst* emit_simd_ins (MonoCompile *cfg, MonoClass *klass, int opcode, int sreg1, int sreg2) { const char *spec = INS_INFO (opcode); MonoInst *ins; MONO_INST_NEW (cfg, ins, opcode); if (spec [MONO_INST_DEST] == 'x') { ins->dreg = alloc_xreg (cfg); ins->type = STACK_VTYPE; } else if (spec [MONO_INST_DEST] == 'i') { ins->dreg = alloc_ireg (cfg); ins->type = STACK_I4; } else if (spec [MONO_INST_DEST] == 'l') { ins->dreg = alloc_lreg (cfg); ins->type = STACK_I8; } else if (spec [MONO_INST_DEST] == 'f') { ins->dreg = alloc_freg (cfg); ins->type = STACK_R8; } else if (spec [MONO_INST_DEST] == 'v') { ins->dreg = alloc_dreg (cfg, STACK_VTYPE); ins->type = STACK_VTYPE; } ins->sreg1 = sreg1; ins->sreg2 = sreg2; ins->klass = klass; MONO_ADD_INS (cfg->cbb, ins); return ins; } static MonoInst* emit_simd_ins_for_sig (MonoCompile *cfg, MonoClass *klass, int opcode, int instc0, int instc1, MonoMethodSignature *fsig, MonoInst **args) { g_assert (fsig->param_count <= 3); MonoInst* ins = emit_simd_ins (cfg, klass, opcode, fsig->param_count > 0 ? args [0]->dreg : -1, fsig->param_count > 1 ? args [1]->dreg : -1); if (instc0 != -1) ins->inst_c0 = instc0; if (instc1 != -1) ins->inst_c1 = instc1; if (fsig->param_count == 3) ins->sreg3 = args [2]->dreg; return ins; } static gboolean is_hw_intrinsics_class (MonoClass *klass, const char *name, gboolean *is_64bit) { const char *class_name = m_class_get_name (klass); if ((!strcmp (class_name, "X64") || !strcmp (class_name, "Arm64")) && m_class_get_nested_in (klass)) { *is_64bit = TRUE; return !strcmp (m_class_get_name (m_class_get_nested_in (klass)), name); } else { *is_64bit = FALSE; return !strcmp (class_name, name); } } static MonoTypeEnum get_underlying_type (MonoType* type) { MonoClass* klass = mono_class_from_mono_type_internal (type); if (type->type == MONO_TYPE_PTR) // e.g. int* => MONO_TYPE_I4 return m_class_get_byval_arg (m_class_get_element_class (klass))->type; else if (type->type == MONO_TYPE_GENERICINST) // e.g. Vector128<int> => MONO_TYPE_I4 return mono_class_get_context (klass)->class_inst->type_argv [0]->type; else return type->type; } static MonoInst* emit_xcompare (MonoCompile *cfg, MonoClass *klass, MonoTypeEnum etype, MonoInst *arg1, MonoInst *arg2) { MonoInst *ins; gboolean is_fp = etype == MONO_TYPE_R4 || etype == MONO_TYPE_R8; ins = emit_simd_ins (cfg, klass, is_fp ? 
OP_XCOMPARE_FP : OP_XCOMPARE, arg1->dreg, arg2->dreg); ins->inst_c0 = CMP_EQ; ins->inst_c1 = etype; return ins; } static MonoInst* emit_xequal (MonoCompile *cfg, MonoClass *klass, MonoInst *arg1, MonoInst *arg2) { return emit_simd_ins (cfg, klass, OP_XEQUAL, arg1->dreg, arg2->dreg); } static MonoInst* emit_not_xequal (MonoCompile *cfg, MonoClass *klass, MonoInst *arg1, MonoInst *arg2) { MonoInst *ins = emit_simd_ins (cfg, klass, OP_XEQUAL, arg1->dreg, arg2->dreg); int sreg = ins->dreg; int dreg = alloc_ireg (cfg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sreg, 0); EMIT_NEW_UNALU (cfg, ins, OP_CEQ, dreg, -1); return ins; } static MonoInst* emit_xzero (MonoCompile *cfg, MonoClass *klass) { return emit_simd_ins (cfg, klass, OP_XZERO, -1, -1); } static gboolean is_intrinsics_vector_type (MonoType *vector_type) { if (vector_type->type != MONO_TYPE_GENERICINST) return FALSE; MonoClass *klass = mono_class_from_mono_type_internal (vector_type); const char *name = m_class_get_name (klass); return !strcmp (name, "Vector64`1") || !strcmp (name, "Vector128`1") || !strcmp (name, "Vector256`1"); } static MonoType* get_vector_t_elem_type (MonoType *vector_type) { MonoClass *klass; MonoType *etype; g_assert (vector_type->type == MONO_TYPE_GENERICINST); klass = mono_class_from_mono_type_internal (vector_type); g_assert ( !strcmp (m_class_get_name (klass), "Vector`1") || !strcmp (m_class_get_name (klass), "Vector64`1") || !strcmp (m_class_get_name (klass), "Vector128`1") || !strcmp (m_class_get_name (klass), "Vector256`1")); etype = mono_class_get_context (klass)->class_inst->type_argv [0]; return etype; } static gboolean type_is_unsigned (MonoType *type) { MonoClass *klass = mono_class_from_mono_type_internal (type); MonoType *etype = mono_class_get_context (klass)->class_inst->type_argv [0]; switch (etype->type) { case MONO_TYPE_U1: case MONO_TYPE_U2: case MONO_TYPE_U4: case MONO_TYPE_U8: case MONO_TYPE_U: return TRUE; } return FALSE; } static gboolean type_is_float (MonoType *type) { MonoClass *klass = mono_class_from_mono_type_internal (type); MonoType *etype = mono_class_get_context (klass)->class_inst->type_argv [0]; switch (etype->type) { case MONO_TYPE_R4: case MONO_TYPE_R8: return TRUE; } return FALSE; } static int type_to_expand_op (MonoType *type) { switch (type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_EXPAND_I1; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_EXPAND_I2; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_EXPAND_I4; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_EXPAND_I8; case MONO_TYPE_R4: return OP_EXPAND_R4; case MONO_TYPE_R8: return OP_EXPAND_R8; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_EXPAND_I8; #else return OP_EXPAND_I4; #endif default: g_assert_not_reached (); } } static int type_to_insert_op (MonoType *type) { switch (type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_INSERT_I1; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_INSERT_I2; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_INSERT_I4; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_INSERT_I8; case MONO_TYPE_R4: return OP_INSERT_R4; case MONO_TYPE_R8: return OP_INSERT_R8; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_INSERT_I8; #else return OP_INSERT_I4; #endif default: g_assert_not_reached (); } } typedef struct { const char *name; MonoCPUFeatures feature; const SimdIntrinsic *intrinsics; int intrinsics_size; gboolean jit_supported; } IntrinGroup; typedef MonoInst * (* EmitIntrinsicFn) ( MonoCompile *cfg, 
MonoMethodSignature *fsig, MonoInst **args, MonoClass *klass, const IntrinGroup *intrin_group, const SimdIntrinsic *info, int id, MonoTypeEnum arg0_type, gboolean is_64bit); static const IntrinGroup unsupported_intrin_group [] = { { "", 0, unsupported, sizeof (unsupported) }, }; static MonoInst * emit_hardware_intrinsics ( MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, const IntrinGroup *groups, int groups_size_bytes, EmitIntrinsicFn custom_emit) { MonoClass *klass = cmethod->klass; const IntrinGroup *intrin_group = unsupported_intrin_group; gboolean is_64bit = FALSE; int groups_size = groups_size_bytes / sizeof (groups [0]); for (int i = 0; i < groups_size; ++i) { const IntrinGroup *group = &groups [i]; if (is_hw_intrinsics_class (klass, group->name, &is_64bit)) { intrin_group = group; break; } } gboolean supported = FALSE; MonoTypeEnum arg0_type = fsig->param_count > 0 ? get_underlying_type (fsig->params [0]) : MONO_TYPE_VOID; int id = -1; uint16_t op = 0; uint16_t c0 = 0; const SimdIntrinsic *intrinsics = intrin_group->intrinsics; int intrinsics_size = intrin_group->intrinsics_size; MonoCPUFeatures feature = intrin_group->feature; const SimdIntrinsic *info = lookup_intrins_info ((SimdIntrinsic *) intrinsics, intrinsics_size, cmethod); { if (!info) goto support_probe_complete; id = info->id; // Hardware intrinsics are LLVM-only. if (!COMPILE_LLVM (cfg) && !intrin_group->jit_supported) goto support_probe_complete; if (intrin_group->intrinsics == unsupported) supported = FALSE; else if (feature) supported = (mini_get_cpu_features (cfg) & feature) != 0; else supported = TRUE; op = info->default_op; c0 = info->default_instc0; gboolean is_unsigned = FALSE; gboolean is_float = FALSE; switch (arg0_type) { case MONO_TYPE_U1: case MONO_TYPE_U2: case MONO_TYPE_U4: case MONO_TYPE_U8: case MONO_TYPE_U: is_unsigned = TRUE; break; case MONO_TYPE_R4: case MONO_TYPE_R8: is_float = TRUE; break; } if (is_unsigned && info->unsigned_op != 0) { op = info->unsigned_op; c0 = info->unsigned_instc0; } else if (is_float && info->floating_op != 0) { op = info->floating_op; c0 = info->floating_instc0; } } support_probe_complete: if (id == SN_get_IsSupported) { MonoInst *ins = NULL; EMIT_NEW_ICONST (cfg, ins, supported ? 
1 : 0); return ins; } if (!supported) { // Can't emit non-supported llvm intrinsics if (cfg->method != cmethod) { // Keep the original call so we end up in the intrinsic method return NULL; } else { // Emit an exception from the intrinsic method mono_emit_jit_icall (cfg, mono_throw_platform_not_supported, NULL); return NULL; } } if (op != 0) return emit_simd_ins_for_sig (cfg, klass, op, c0, arg0_type, fsig, args); return custom_emit (cfg, fsig, args, klass, intrin_group, info, id, arg0_type, is_64bit); } static MonoInst * emit_vector_create_elementwise ( MonoCompile *cfg, MonoMethodSignature *fsig, MonoType *vtype, MonoType *etype, MonoInst **args) { int op = type_to_insert_op (etype); MonoClass *vklass = mono_class_from_mono_type_internal (vtype); MonoInst *ins = emit_xzero (cfg, vklass); for (int i = 0; i < fsig->param_count; ++i) { ins = emit_simd_ins (cfg, vklass, op, ins->dreg, args [i]->dreg); ins->inst_c0 = i; } return ins; } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) static int type_to_xinsert_op (MonoTypeEnum type) { switch (type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_XINSERT_I1; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_XINSERT_I2; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_XINSERT_I4; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_XINSERT_I8; case MONO_TYPE_R4: return OP_XINSERT_R4; case MONO_TYPE_R8: return OP_XINSERT_R8; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_XINSERT_I8; #else return OP_XINSERT_I4; #endif default: g_assert_not_reached (); } } static int type_to_xextract_op (MonoTypeEnum type) { switch (type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_XEXTRACT_I1; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_XEXTRACT_I2; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_XEXTRACT_I4; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_XEXTRACT_I8; case MONO_TYPE_R4: return OP_XEXTRACT_R4; case MONO_TYPE_R8: return OP_XEXTRACT_R8; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_XEXTRACT_I8; #else return OP_XEXTRACT_I4; #endif default: g_assert_not_reached (); } } static int type_to_extract_op (MonoTypeEnum type) { switch (type) { case MONO_TYPE_I1: case MONO_TYPE_U1: return OP_EXTRACT_I1; case MONO_TYPE_I2: case MONO_TYPE_U2: return OP_EXTRACT_I2; case MONO_TYPE_I4: case MONO_TYPE_U4: return OP_EXTRACT_I4; case MONO_TYPE_I8: case MONO_TYPE_U8: return OP_EXTRACT_I8; case MONO_TYPE_R4: return OP_EXTRACT_R4; case MONO_TYPE_R8: return OP_EXTRACT_R8; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 return OP_EXTRACT_I8; #else return OP_EXTRACT_I4; #endif default: g_assert_not_reached (); } } static guint16 sri_vector_methods [] = { SN_Abs, SN_Add, SN_AndNot, SN_As, SN_AsByte, SN_AsDouble, SN_AsInt16, SN_AsInt32, SN_AsInt64, SN_AsSByte, SN_AsSingle, SN_AsUInt16, SN_AsUInt32, SN_AsUInt64, SN_BitwiseAnd, SN_BitwiseOr, SN_AsVector128, SN_AsVector2, SN_AsVector256, SN_AsVector3, SN_AsVector4, SN_Ceiling, SN_ConditionalSelect, SN_ConvertToDouble, SN_ConvertToInt32, SN_ConvertToUInt32, SN_Create, SN_CreateScalar, SN_CreateScalarUnsafe, SN_Divide, SN_Equals, SN_EqualsAll, SN_EqualsAny, SN_Floor, SN_GetElement, SN_GetLower, SN_GetUpper, SN_GreaterThan, SN_GreaterThanOrEqual, SN_LessThan, SN_LessThanOrEqual, SN_Max, SN_Min, SN_Multiply, SN_Negate, SN_OnesComplement, SN_Sqrt, SN_Subtract, SN_ToScalar, SN_ToVector128, SN_ToVector128Unsafe, SN_ToVector256, SN_ToVector256Unsafe, SN_WithElement, SN_Xor, }; /* nint and nuint haven't been enabled yet for System.Runtime.Intrinsics. 
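* The MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE macro below therefore rejects MONO_TYPE_I and MONO_TYPE_U on top of the usual vector-primitive check, so vectors of nint/nuint fall back to the managed implementation instead of being intrinsified here.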
* Remove this once support has been added. */ #define MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE(t) ((MONO_TYPE_IS_VECTOR_PRIMITIVE(t)) && ((t)->type != MONO_TYPE_I) && ((t)->type != MONO_TYPE_U)) static gboolean is_elementwise_create_overload (MonoMethodSignature *fsig, MonoType *ret_type) { uint16_t param_count = fsig->param_count; if (param_count < 1) return FALSE; MonoType *type = fsig->params [0]; if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (type)) return FALSE; if (!mono_metadata_type_equal (ret_type, type)) return FALSE; for (uint16_t i = 1; i < param_count; ++i) if (!mono_metadata_type_equal (type, fsig->params [i])) return FALSE; return TRUE; } static gboolean is_create_from_half_vectors_overload (MonoMethodSignature *fsig) { if (fsig->param_count != 2) return FALSE; if (!is_intrinsics_vector_type (fsig->params [0])) return FALSE; return mono_metadata_type_equal (fsig->params [0], fsig->params [1]); } static MonoInst* emit_sri_vector (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { if (!COMPILE_LLVM (cfg)) return NULL; MonoClass *klass = cmethod->klass; int id = lookup_intrins (sri_vector_methods, sizeof (sri_vector_methods), cmethod); if (id == -1) return NULL; if (!strcmp (m_class_get_name (cfg->method->klass), "Vector256")) return NULL; // TODO: Fix Vector256.WithUpper/WithLower MonoTypeEnum arg0_type = fsig->param_count > 0 ? get_underlying_type (fsig->params [0]) : MONO_TYPE_VOID; switch (id) { case SN_Abs: { #ifdef TARGET_ARM64 switch (arg0_type) { case MONO_TYPE_U1: case MONO_TYPE_U2: case MONO_TYPE_U4: case MONO_TYPE_U8: case MONO_TYPE_U: return NULL; } gboolean is_float = arg0_type == MONO_TYPE_R4 || arg0_type == MONO_TYPE_R8; int iid = is_float ? INTRINS_AARCH64_ADV_SIMD_FABS : INTRINS_AARCH64_ADV_SIMD_ABS; return emit_simd_ins_for_sig (cfg, klass, OP_XOP_OVR_X_X, iid, arg0_type, fsig, args); #else return NULL; #endif } case SN_Add: case SN_Max: case SN_Min: case SN_Multiply: case SN_Subtract: { int instc0 = -1; if (arg0_type == MONO_TYPE_R4 || arg0_type == MONO_TYPE_R8) { switch (id) { case SN_Add: instc0 = OP_FADD; break; case SN_Max: instc0 = OP_FMAX; break; case SN_Min: instc0 = OP_FMIN; break; case SN_Multiply: instc0 = OP_FMUL; break; case SN_Subtract: instc0 = OP_FSUB; break; default: g_assert_not_reached (); } } else { switch (id) { case SN_Add: instc0 = OP_IADD; break; case SN_Max: instc0 = OP_IMAX; break; case SN_Min: instc0 = OP_IMIN; break; case SN_Multiply: instc0 = OP_IMUL; break; case SN_Subtract: instc0 = OP_ISUB; break; default: g_assert_not_reached (); } } return emit_simd_ins_for_sig (cfg, klass, OP_XBINOP, instc0, arg0_type, fsig, args); } case SN_Divide: { if ((arg0_type != MONO_TYPE_R4) && (arg0_type != MONO_TYPE_R8)) return NULL; return emit_simd_ins_for_sig (cfg, klass, OP_XBINOP, OP_FDIV, arg0_type, fsig, args); } case SN_AndNot: #ifdef TARGET_ARM64 return emit_simd_ins_for_sig (cfg, klass, OP_ARM64_BIC, -1, arg0_type, fsig, args); #else return NULL; #endif case SN_BitwiseAnd: return emit_simd_ins_for_sig (cfg, klass, OP_XBINOP, OP_IAND, arg0_type, fsig, args); case SN_BitwiseOr: return emit_simd_ins_for_sig (cfg, klass, OP_XBINOP, OP_IOR, arg0_type, fsig, args); case SN_Xor: { if ((arg0_type == MONO_TYPE_R4) || (arg0_type == MONO_TYPE_R8)) return NULL; return emit_simd_ins_for_sig (cfg, klass, OP_XBINOP, OP_IXOR, arg0_type, fsig, args); } case SN_As: case SN_AsByte: case SN_AsDouble: case SN_AsInt16: case SN_AsInt32: case SN_AsInt64: case SN_AsSByte: case SN_AsSingle: case SN_AsUInt16: case SN_AsUInt32: case 
SN_AsUInt64: { MonoType *ret_type = get_vector_t_elem_type (fsig->ret); MonoType *arg_type = get_vector_t_elem_type (fsig->params [0]); if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (ret_type) || !MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (arg_type)) return NULL; return emit_simd_ins (cfg, klass, OP_XCAST, args [0]->dreg, -1); } case SN_Ceiling: case SN_Floor: { #ifdef TARGET_ARM64 if ((arg0_type != MONO_TYPE_R4) && (arg0_type != MONO_TYPE_R8)) return NULL; int ceil_or_floor = id == SN_Ceiling ? INTRINS_AARCH64_ADV_SIMD_FRINTP : INTRINS_AARCH64_ADV_SIMD_FRINTM; return emit_simd_ins_for_sig (cfg, klass, OP_XOP_OVR_X_X, ceil_or_floor, arg0_type, fsig, args); #else return NULL; #endif } case SN_ConditionalSelect: { #ifdef TARGET_ARM64 return emit_simd_ins_for_sig (cfg, klass, OP_ARM64_BSL, -1, arg0_type, fsig, args); #else return NULL; #endif } case SN_ConvertToDouble: { #ifdef TARGET_ARM64 if ((arg0_type != MONO_TYPE_I8) && (arg0_type != MONO_TYPE_U8)) return NULL; MonoClass *arg_class = mono_class_from_mono_type_internal (fsig->params [0]); int size = mono_class_value_size (arg_class, NULL); int op = -1; if (size == 8) op = arg0_type == MONO_TYPE_I8 ? OP_ARM64_SCVTF_SCALAR : OP_ARM64_UCVTF_SCALAR; else op = arg0_type == MONO_TYPE_I8 ? OP_ARM64_SCVTF : OP_ARM64_UCVTF; return emit_simd_ins_for_sig (cfg, klass, op, -1, arg0_type, fsig, args); #else return NULL; #endif } case SN_ConvertToInt32: case SN_ConvertToUInt32: { #ifdef TARGET_ARM64 if (arg0_type != MONO_TYPE_R4) return NULL; int op = id == SN_ConvertToInt32 ? OP_ARM64_FCVTZS : OP_ARM64_FCVTZU; return emit_simd_ins_for_sig (cfg, klass, op, -1, arg0_type, fsig, args); #else return NULL; #endif } case SN_Create: { MonoType *etype = get_vector_t_elem_type (fsig->ret); if (fsig->param_count == 1 && mono_metadata_type_equal (fsig->params [0], etype)) return emit_simd_ins (cfg, klass, type_to_expand_op (etype), args [0]->dreg, -1); else if (is_create_from_half_vectors_overload (fsig)) return emit_simd_ins (cfg, klass, OP_XCONCAT, args [0]->dreg, args [1]->dreg); else if (is_elementwise_create_overload (fsig, etype)) return emit_vector_create_elementwise (cfg, fsig, fsig->ret, etype, args); break; } case SN_CreateScalar: return emit_simd_ins_for_sig (cfg, klass, OP_CREATE_SCALAR, -1, arg0_type, fsig, args); case SN_CreateScalarUnsafe: return emit_simd_ins_for_sig (cfg, klass, OP_CREATE_SCALAR_UNSAFE, -1, arg0_type, fsig, args); case SN_Equals: case SN_EqualsAll: case SN_EqualsAny: { MonoType *arg_type = get_vector_t_elem_type (fsig->params [0]); if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (arg_type)) return NULL; switch (id) { case SN_Equals: return emit_xcompare (cfg, klass, arg0_type, args [0], args [1]); case SN_EqualsAll: return emit_xequal (cfg, klass, args [0], args [1]); case SN_EqualsAny: { MonoClass *arg_class = mono_class_from_mono_type_internal (fsig->params [0]); MonoInst *cmp_eq = emit_xcompare (cfg, arg_class, arg0_type, args [0], args [1]); MonoInst *zero = emit_xzero (cfg, arg_class); return emit_not_xequal (cfg, arg_class, cmp_eq, zero); } default: g_assert_not_reached (); } } case SN_GetElement: { MonoClass *arg_class = mono_class_from_mono_type_internal (fsig->params [0]); MonoType *etype = mono_class_get_context (arg_class)->class_inst->type_argv [0]; if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (etype)) return NULL; int size = mono_class_value_size (arg_class, NULL); int esize = mono_class_value_size (mono_class_from_mono_type_internal (etype), NULL); int elems = size / esize; MONO_EMIT_NEW_BIALU_IMM (cfg, 
OP_COMPARE_IMM, -1, args [1]->dreg, elems); MONO_EMIT_NEW_COND_EXC (cfg, GE_UN, "ArgumentOutOfRangeException"); int extract_op = type_to_xextract_op (arg0_type); return emit_simd_ins_for_sig (cfg, klass, extract_op, -1, arg0_type, fsig, args); } case SN_GetLower: case SN_GetUpper: { MonoType *arg_type = get_vector_t_elem_type (fsig->params [0]); if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (arg_type)) return NULL; int op = id == SN_GetLower ? OP_XLOWER : OP_XUPPER; return emit_simd_ins_for_sig (cfg, klass, op, 0, arg0_type, fsig, args); } case SN_GreaterThan: case SN_GreaterThanOrEqual: case SN_LessThan: case SN_LessThanOrEqual: { MonoType *arg_type = get_vector_t_elem_type (fsig->params [0]); if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (arg_type)) return NULL; gboolean is_unsigned = type_is_unsigned (fsig->params [0]); MonoInst *ins = emit_xcompare (cfg, klass, arg0_type, args [0], args [1]); switch (id) { case SN_GreaterThan: ins->inst_c0 = is_unsigned ? CMP_GT_UN : CMP_GT; break; case SN_GreaterThanOrEqual: ins->inst_c0 = is_unsigned ? CMP_GE_UN : CMP_GE; break; case SN_LessThan: ins->inst_c0 = is_unsigned ? CMP_LT_UN : CMP_LT; break; case SN_LessThanOrEqual: ins->inst_c0 = is_unsigned ? CMP_LE_UN : CMP_LE; break; default: g_assert_not_reached (); } return ins; } case SN_Negate: case SN_OnesComplement: { #ifdef TARGET_ARM64 int op = id == SN_Negate ? OP_ARM64_XNEG : OP_ARM64_MVN; return emit_simd_ins_for_sig (cfg, klass, op, -1, arg0_type, fsig, args); #else return NULL; #endif } case SN_Sqrt: { #ifdef TARGET_ARM64 if ((arg0_type != MONO_TYPE_R4) && (arg0_type != MONO_TYPE_R8)) return NULL; return emit_simd_ins_for_sig (cfg, klass, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FSQRT, arg0_type, fsig, args); #else return NULL; #endif } case SN_ToScalar: { MonoType *arg_type = get_vector_t_elem_type (fsig->params [0]); if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (arg_type)) return NULL; int extract_op = type_to_extract_op (arg0_type); return emit_simd_ins_for_sig (cfg, klass, extract_op, 0, arg0_type, fsig, args); } case SN_ToVector128: case SN_ToVector128Unsafe: { MonoType *arg_type = get_vector_t_elem_type (fsig->params [0]); if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (arg_type)) return NULL; int op = id == SN_ToVector128 ? OP_XWIDEN : OP_XWIDEN_UNSAFE; return emit_simd_ins_for_sig (cfg, klass, op, 0, arg0_type, fsig, args); } case SN_WithElement: { MonoClass *arg_class = mono_class_from_mono_type_internal (fsig->params [0]); MonoType *etype = mono_class_get_context (arg_class)->class_inst->type_argv [0]; if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (etype)) return NULL; int size = mono_class_value_size (arg_class, NULL); int esize = mono_class_value_size (mono_class_from_mono_type_internal (etype), NULL); int elems = size / esize; MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [1]->dreg, elems); MONO_EMIT_NEW_COND_EXC (cfg, GE_UN, "ArgumentOutOfRangeException"); int insert_op = type_to_xinsert_op (arg0_type); MonoInst *ins = emit_simd_ins (cfg, klass, insert_op, args [0]->dreg, args [2]->dreg); ins->sreg3 = args [1]->dreg; ins->inst_c1 = arg0_type; return ins; } case SN_WithLower: case SN_WithUpper: { MonoType *arg_type = get_vector_t_elem_type (fsig->params [0]); if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (arg_type)) return NULL; int op = id == SN_WithLower ?
OP_XINSERT_LOWER : OP_XINSERT_UPPER; return emit_simd_ins_for_sig (cfg, klass, op, 0, arg0_type, fsig, args); } default: break; } return NULL; } static guint16 vector64_vector128_t_methods [] = { SN_Equals, SN_get_AllBitsSet, SN_get_Count, SN_get_IsSupported, SN_get_Zero, SN_op_Addition, SN_op_Equality, SN_op_Inequality, SN_op_Subtraction, }; static MonoInst* emit_vector64_vector128_t (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { int id = lookup_intrins (vector64_vector128_t_methods, sizeof (vector64_vector128_t_methods), cmethod); if (id == -1) return NULL; MonoClass *klass = cmethod->klass; MonoType *type = m_class_get_byval_arg (klass); MonoType *etype = mono_class_get_context (klass)->class_inst->type_argv [0]; int size = mono_class_value_size (klass, NULL); int esize = mono_class_value_size (mono_class_from_mono_type_internal (etype), NULL); g_assert (size > 0); g_assert (esize > 0); int len = size / esize; if (!MONO_TYPE_IS_INTRINSICS_VECTOR_PRIMITIVE (etype)) return NULL; if (cfg->verbose_level > 1) { char *name = mono_method_full_name (cmethod, TRUE); printf (" SIMD intrinsic %s\n", name); g_free (name); } switch (id) { case SN_get_IsSupported: { MonoInst *ins = NULL; EMIT_NEW_ICONST (cfg, ins, 1); return ins; } default: break; } if (!COMPILE_LLVM (cfg)) return NULL; switch (id) { case SN_get_Count: { MonoInst *ins = NULL; if (!(fsig->param_count == 0 && fsig->ret->type == MONO_TYPE_I4)) break; EMIT_NEW_ICONST (cfg, ins, len); return ins; } case SN_get_Zero: { return emit_xzero (cfg, klass); } case SN_get_AllBitsSet: { MonoInst *ins = emit_xzero (cfg, klass); return emit_xcompare (cfg, klass, etype->type, ins, ins); } case SN_Equals: { if (fsig->param_count == 1 && fsig->ret->type == MONO_TYPE_BOOLEAN && mono_metadata_type_equal (fsig->params [0], type)) { int sreg1 = load_simd_vreg (cfg, cmethod, args [0], NULL); return emit_simd_ins (cfg, klass, OP_XEQUAL, sreg1, args [1]->dreg); } break; } case SN_op_Addition: case SN_op_Subtraction: { if (!(fsig->param_count == 2 && mono_metadata_type_equal (fsig->ret, type) && mono_metadata_type_equal (fsig->params [0], type) && mono_metadata_type_equal (fsig->params [1], type))) return NULL; MonoInst *ins = emit_simd_ins (cfg, klass, OP_XBINOP, args [0]->dreg, args [1]->dreg); ins->inst_c1 = etype->type; if (etype->type == MONO_TYPE_R4 || etype->type == MONO_TYPE_R8) ins->inst_c0 = id == SN_op_Addition ? OP_FADD : OP_FSUB; else ins->inst_c0 = id == SN_op_Addition ? 
OP_IADD : OP_ISUB; return ins; } case SN_op_Equality: case SN_op_Inequality: g_assert (fsig->param_count == 2 && fsig->ret->type == MONO_TYPE_BOOLEAN && mono_metadata_type_equal (fsig->params [0], type) && mono_metadata_type_equal (fsig->params [1], type)); switch (id) { case SN_op_Equality: return emit_xequal (cfg, klass, args [0], args [1]); case SN_op_Inequality: return emit_not_xequal (cfg, klass, args [0], args [1]); default: g_assert_not_reached (); } default: break; } return NULL; } #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) #ifdef TARGET_AMD64 static guint16 vector_methods [] = { SN_ConvertToDouble, SN_ConvertToInt32, SN_ConvertToInt64, SN_ConvertToSingle, SN_ConvertToUInt32, SN_ConvertToUInt64, SN_Narrow, SN_Widen, SN_get_IsHardwareAccelerated, }; static MonoInst* emit_sys_numerics_vector (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins; gboolean supported = FALSE; int id; MonoType *etype; id = lookup_intrins (vector_methods, sizeof (vector_methods), cmethod); if (id == -1) return NULL; //printf ("%s\n", mono_method_full_name (cmethod, 1)); #ifdef MONO_ARCH_SIMD_INTRINSICS supported = TRUE; #endif if (cfg->verbose_level > 1) { char *name = mono_method_full_name (cmethod, TRUE); printf (" SIMD intrinsic %s\n", name); g_free (name); } switch (id) { case SN_get_IsHardwareAccelerated: EMIT_NEW_ICONST (cfg, ins, supported ? 1 : 0); ins->type = STACK_I4; return ins; case SN_ConvertToInt32: etype = get_vector_t_elem_type (fsig->params [0]); g_assert (etype->type == MONO_TYPE_R4); return emit_simd_ins (cfg, mono_class_from_mono_type_internal (fsig->ret), OP_CVTPS2DQ, args [0]->dreg, -1); case SN_ConvertToSingle: etype = get_vector_t_elem_type (fsig->params [0]); g_assert (etype->type == MONO_TYPE_I4 || etype->type == MONO_TYPE_U4); // FIXME: if (etype->type == MONO_TYPE_U4) return NULL; return emit_simd_ins (cfg, mono_class_from_mono_type_internal (fsig->ret), OP_CVTDQ2PS, args [0]->dreg, -1); case SN_ConvertToDouble: case SN_ConvertToInt64: case SN_ConvertToUInt32: case SN_ConvertToUInt64: case SN_Narrow: case SN_Widen: // FIXME: break; default: break; } return NULL; } static guint16 vector_t_methods [] = { SN_ctor, SN_CopyTo, SN_Equals, SN_GreaterThan, SN_GreaterThanOrEqual, SN_LessThan, SN_LessThanOrEqual, SN_Max, SN_Min, SN_get_AllBitsSet, SN_get_Count, SN_get_Item, SN_get_One, SN_get_Zero, SN_op_Addition, SN_op_BitwiseAnd, SN_op_BitwiseOr, SN_op_Division, SN_op_Equality, SN_op_ExclusiveOr, SN_op_Explicit, SN_op_Inequality, SN_op_Multiply, SN_op_Subtraction }; static MonoInst* emit_sys_numerics_vector_t (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins; MonoType *type, *etype; MonoClass *klass; int size, len, id; gboolean is_unsigned; static const float r4_one = 1.0f; static const double r8_one = 1.0; id = lookup_intrins (vector_t_methods, sizeof (vector_t_methods), cmethod); if (id == -1) return NULL; klass = cmethod->klass; type = m_class_get_byval_arg (klass); etype = mono_class_get_context (klass)->class_inst->type_argv [0]; size = mono_class_value_size (mono_class_from_mono_type_internal (etype), NULL); g_assert (size); len = register_size / size; if (!MONO_TYPE_IS_PRIMITIVE (etype) || etype->type == MONO_TYPE_CHAR || etype->type == MONO_TYPE_BOOLEAN) return NULL; if (cfg->verbose_level > 1) { char *name = mono_method_full_name (cmethod, TRUE); printf (" SIMD intrinsic %s\n", name); g_free (name); } switch (id) { case SN_get_Count: if (!(fsig->param_count == 0 && 
fsig->ret->type == MONO_TYPE_I4)) break; EMIT_NEW_ICONST (cfg, ins, len); return ins; case SN_get_Zero: g_assert (fsig->param_count == 0 && mono_metadata_type_equal (fsig->ret, type)); return emit_xzero (cfg, klass); case SN_get_One: { g_assert (fsig->param_count == 0 && mono_metadata_type_equal (fsig->ret, type)); MonoInst *one = NULL; int expand_opcode = type_to_expand_op (etype); MONO_INST_NEW (cfg, one, -1); switch (expand_opcode) { case OP_EXPAND_R4: one->opcode = OP_R4CONST; one->type = STACK_R4; one->inst_p0 = (void *) &r4_one; break; case OP_EXPAND_R8: one->opcode = OP_R8CONST; one->type = STACK_R8; one->inst_p0 = (void *) &r8_one; break; default: one->opcode = OP_ICONST; one->type = STACK_I4; one->inst_c0 = 1; break; } one->dreg = alloc_dreg (cfg, (MonoStackType)one->type); MONO_ADD_INS (cfg->cbb, one); return emit_simd_ins (cfg, klass, expand_opcode, one->dreg, -1); } case SN_get_AllBitsSet: { /* Compare a zero vector with itself */ ins = emit_xzero (cfg, klass); return emit_xcompare (cfg, klass, etype->type, ins, ins); } case SN_get_Item: { if (!COMPILE_LLVM (cfg)) return NULL; MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [1]->dreg, len); MONO_EMIT_NEW_COND_EXC (cfg, GE_UN, "ArgumentOutOfRangeException"); MonoTypeEnum ty = etype->type; int opcode = type_to_xextract_op (ty); int src1 = load_simd_vreg (cfg, cmethod, args [0], NULL); MonoInst *ins = emit_simd_ins (cfg, klass, opcode, src1, args [1]->dreg); ins->inst_c1 = ty; return ins; } case SN_ctor: if (fsig->param_count == 1 && mono_metadata_type_equal (fsig->params [0], etype)) { int dreg = load_simd_vreg (cfg, cmethod, args [0], NULL); int opcode = type_to_expand_op (etype); ins = emit_simd_ins (cfg, klass, opcode, args [1]->dreg, -1); ins->dreg = dreg; return ins; } if ((fsig->param_count == 1 || fsig->param_count == 2) && (fsig->params [0]->type == MONO_TYPE_SZARRAY)) { MonoInst *array_ins = args [1]; MonoInst *index_ins; MonoInst *ldelema_ins; MonoInst *var; int end_index_reg; if (args [0]->opcode != OP_LDADDR) return NULL; /* .ctor (T[]) or .ctor (T[], index) */ if (fsig->param_count == 2) { index_ins = args [2]; } else { EMIT_NEW_ICONST (cfg, index_ins, 0); } /* Emit bounds check for the index (index >= 0) */ mini_emit_bounds_check_offset (cfg, array_ins->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length), index_ins->dreg, "ArgumentOutOfRangeException"); /* Emit bounds check for the end (index + len - 1 < array length) */ end_index_reg = alloc_ireg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, OP_IADD_IMM, end_index_reg, index_ins->dreg, len - 1); mini_emit_bounds_check_offset (cfg, array_ins->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length), end_index_reg, "ArgumentOutOfRangeException"); /* Load the array slice into the simd reg */ ldelema_ins = mini_emit_ldelema_1_ins (cfg, mono_class_from_mono_type_internal (etype), array_ins, index_ins, FALSE, FALSE); g_assert (args [0]->opcode == OP_LDADDR); var = (MonoInst*)args [0]->inst_p0; EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADX_MEMBASE, var->dreg, ldelema_ins->dreg, 0); ins->klass = cmethod->klass; return args [0]; } break; case SN_CopyTo: if ((fsig->param_count == 1 || fsig->param_count == 2) && (fsig->params [0]->type == MONO_TYPE_SZARRAY)) { MonoInst *array_ins = args [1]; MonoInst *index_ins; MonoInst *ldelema_ins; int val_vreg, end_index_reg; val_vreg = load_simd_vreg (cfg, cmethod, args [0], NULL); /* CopyTo (T[]) or CopyTo (T[], index) */ if (fsig->param_count == 2) { index_ins = args [2]; } else { EMIT_NEW_ICONST (cfg, index_ins, 0); } /* CopyTo () does complicated 
argument checks */ mini_emit_bounds_check_offset (cfg, array_ins->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length), index_ins->dreg, "ArgumentOutOfRangeException"); end_index_reg = alloc_ireg (cfg); int len_reg = alloc_ireg (cfg); MONO_EMIT_NEW_LOAD_MEMBASE_OP_FLAGS (cfg, OP_LOADI4_MEMBASE, len_reg, array_ins->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length), MONO_INST_INVARIANT_LOAD); EMIT_NEW_BIALU (cfg, ins, OP_ISUB, end_index_reg, len_reg, index_ins->dreg); MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, end_index_reg, len); MONO_EMIT_NEW_COND_EXC (cfg, LT, "ArgumentException"); /* Load the array slice into the simd reg */ ldelema_ins = mini_emit_ldelema_1_ins (cfg, mono_class_from_mono_type_internal (etype), array_ins, index_ins, FALSE, FALSE); EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREX_MEMBASE, ldelema_ins->dreg, 0, val_vreg); ins->klass = cmethod->klass; return ins; } break; case SN_Equals: if (fsig->param_count == 1 && fsig->ret->type == MONO_TYPE_BOOLEAN && mono_metadata_type_equal (fsig->params [0], type)) { int sreg1 = load_simd_vreg (cfg, cmethod, args [0], NULL); return emit_simd_ins (cfg, klass, OP_XEQUAL, sreg1, args [1]->dreg); } else if (fsig->param_count == 2 && mono_metadata_type_equal (fsig->ret, type) && mono_metadata_type_equal (fsig->params [0], type) && mono_metadata_type_equal (fsig->params [1], type)) { /* Per element equality */ return emit_xcompare (cfg, klass, etype->type, args [0], args [1]); } break; case SN_op_Equality: case SN_op_Inequality: g_assert (fsig->param_count == 2 && fsig->ret->type == MONO_TYPE_BOOLEAN && mono_metadata_type_equal (fsig->params [0], type) && mono_metadata_type_equal (fsig->params [1], type)); switch (id) { case SN_op_Equality: return emit_xequal (cfg, klass, args [0], args [1]); case SN_op_Inequality: return emit_not_xequal (cfg, klass, args [0], args [1]); default: g_assert_not_reached (); } case SN_GreaterThan: case SN_GreaterThanOrEqual: case SN_LessThan: case SN_LessThanOrEqual: g_assert (fsig->param_count == 2 && mono_metadata_type_equal (fsig->ret, type) && mono_metadata_type_equal (fsig->params [0], type) && mono_metadata_type_equal (fsig->params [1], type)); is_unsigned = etype->type == MONO_TYPE_U1 || etype->type == MONO_TYPE_U2 || etype->type == MONO_TYPE_U4 || etype->type == MONO_TYPE_U8 || etype->type == MONO_TYPE_U; ins = emit_xcompare (cfg, klass, etype->type, args [0], args [1]); switch (id) { case SN_GreaterThan: ins->inst_c0 = is_unsigned ? CMP_GT_UN : CMP_GT; break; case SN_GreaterThanOrEqual: ins->inst_c0 = is_unsigned ? CMP_GE_UN : CMP_GE; break; case SN_LessThan: ins->inst_c0 = is_unsigned ? CMP_LT_UN : CMP_LT; break; case SN_LessThanOrEqual: ins->inst_c0 = is_unsigned ? 
CMP_LE_UN : CMP_LE; break; default: g_assert_not_reached (); } return ins; case SN_op_Explicit: return emit_simd_ins (cfg, klass, OP_XCAST, args [0]->dreg, -1); case SN_op_Addition: case SN_op_Subtraction: case SN_op_Division: case SN_op_Multiply: case SN_op_BitwiseAnd: case SN_op_BitwiseOr: case SN_op_ExclusiveOr: case SN_Max: case SN_Min: if (!(fsig->param_count == 2 && mono_metadata_type_equal (fsig->ret, type) && mono_metadata_type_equal (fsig->params [0], type) && mono_metadata_type_equal (fsig->params [1], type))) return NULL; ins = emit_simd_ins (cfg, klass, OP_XBINOP, args [0]->dreg, args [1]->dreg); ins->inst_c1 = etype->type; if (etype->type == MONO_TYPE_R4 || etype->type == MONO_TYPE_R8) { switch (id) { case SN_op_Addition: ins->inst_c0 = OP_FADD; break; case SN_op_Subtraction: ins->inst_c0 = OP_FSUB; break; case SN_op_Multiply: ins->inst_c0 = OP_FMUL; break; case SN_op_Division: ins->inst_c0 = OP_FDIV; break; case SN_Max: ins->inst_c0 = OP_FMAX; break; case SN_Min: ins->inst_c0 = OP_FMIN; break; default: NULLIFY_INS (ins); return NULL; } } else { switch (id) { case SN_op_Addition: ins->inst_c0 = OP_IADD; break; case SN_op_Subtraction: ins->inst_c0 = OP_ISUB; break; /* case SN_op_Division: ins->inst_c0 = OP_IDIV; break; case SN_op_Multiply: ins->inst_c0 = OP_IMUL; break; */ case SN_op_BitwiseAnd: ins->inst_c0 = OP_IAND; break; case SN_op_BitwiseOr: ins->inst_c0 = OP_IOR; break; case SN_op_ExclusiveOr: ins->inst_c0 = OP_IXOR; break; case SN_Max: ins->inst_c0 = OP_IMAX; break; case SN_Min: ins->inst_c0 = OP_IMIN; break; default: NULLIFY_INS (ins); return NULL; } } return ins; default: break; } return NULL; } #endif // TARGET_AMD64 #ifdef TARGET_ARM64 static SimdIntrinsic armbase_methods [] = { {SN_LeadingSignCount}, {SN_LeadingZeroCount}, {SN_MultiplyHigh}, {SN_ReverseElementBits}, {SN_get_IsSupported}, }; static SimdIntrinsic crc32_methods [] = { {SN_ComputeCrc32}, {SN_ComputeCrc32C}, {SN_get_IsSupported} }; static SimdIntrinsic crypto_aes_methods [] = { {SN_Decrypt, OP_XOP_X_X_X, INTRINS_AARCH64_AESD}, {SN_Encrypt, OP_XOP_X_X_X, INTRINS_AARCH64_AESE}, {SN_InverseMixColumns, OP_XOP_X_X, INTRINS_AARCH64_AESIMC}, {SN_MixColumns, OP_XOP_X_X, INTRINS_AARCH64_AESMC}, {SN_PolynomialMultiplyWideningLower}, {SN_PolynomialMultiplyWideningUpper}, {SN_get_IsSupported}, }; static SimdIntrinsic sha1_methods [] = { {SN_FixedRotate, OP_XOP_X_X, INTRINS_AARCH64_SHA1H}, {SN_HashUpdateChoose, OP_XOP_X_X_X_X, INTRINS_AARCH64_SHA1C}, {SN_HashUpdateMajority, OP_XOP_X_X_X_X, INTRINS_AARCH64_SHA1M}, {SN_HashUpdateParity, OP_XOP_X_X_X_X, INTRINS_AARCH64_SHA1P}, {SN_ScheduleUpdate0, OP_XOP_X_X_X_X, INTRINS_AARCH64_SHA1SU0}, {SN_ScheduleUpdate1, OP_XOP_X_X_X, INTRINS_AARCH64_SHA1SU1}, {SN_get_IsSupported} }; static SimdIntrinsic sha256_methods [] = { {SN_HashUpdate1, OP_XOP_X_X_X_X, INTRINS_AARCH64_SHA256H}, {SN_HashUpdate2, OP_XOP_X_X_X_X, INTRINS_AARCH64_SHA256H2}, {SN_ScheduleUpdate0, OP_XOP_X_X_X, INTRINS_AARCH64_SHA256SU0}, {SN_ScheduleUpdate1, OP_XOP_X_X_X_X, INTRINS_AARCH64_SHA256SU1}, {SN_get_IsSupported} }; // This table must be kept in sorted order. ASCII } is sorted after alphanumeric // characters, so blind use of your editor's "sort lines" facility will // mis-order the lines. // // In Vim you can use `sort /.*{[0-9A-z]*/ r` to sort this table. 
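/*
 * Reading the rows below (the field names follow the SimdIntrinsic struct
 * defined earlier in this file): each entry carries up to three
 * (opcode, selector) pairs - default/signed, unsigned, and floating -
 * chosen by the element type at the call site. For example the row
 *
 *   {SN_Add, OP_XBINOP, OP_IADD, None, None, OP_XBINOP, OP_FADD}
 *
 * would presumably lower AdvSimd.Add on Vector128<int> to OP_XBINOP with
 * OP_IADD, and the same call on Vector128<float> to OP_XBINOP with OP_FADD.
 */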
static SimdIntrinsic advsimd_methods [] = { {SN_Abs, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_ABS, None, None, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FABS}, {SN_AbsSaturate, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_SQABS}, {SN_AbsSaturateScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_SQABS}, {SN_AbsScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_ABS, None, None, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FABS}, {SN_AbsoluteCompareGreaterThan}, {SN_AbsoluteCompareGreaterThanOrEqual}, {SN_AbsoluteCompareGreaterThanOrEqualScalar}, {SN_AbsoluteCompareGreaterThanScalar}, {SN_AbsoluteCompareLessThan}, {SN_AbsoluteCompareLessThanOrEqual}, {SN_AbsoluteCompareLessThanOrEqualScalar}, {SN_AbsoluteCompareLessThanScalar}, {SN_AbsoluteDifference, OP_ARM64_SABD, None, OP_ARM64_UABD, None, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FABD}, {SN_AbsoluteDifferenceAdd, OP_ARM64_SABA, None, OP_ARM64_UABA}, {SN_AbsoluteDifferenceScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FABD_SCALAR}, {SN_AbsoluteDifferenceWideningLower, OP_ARM64_SABDL, None, OP_ARM64_UABDL}, {SN_AbsoluteDifferenceWideningLowerAndAdd, OP_ARM64_SABAL, None, OP_ARM64_UABAL}, {SN_AbsoluteDifferenceWideningUpper, OP_ARM64_SABDL2, None, OP_ARM64_UABDL2}, {SN_AbsoluteDifferenceWideningUpperAndAdd, OP_ARM64_SABAL2, None, OP_ARM64_UABAL2}, {SN_Add, OP_XBINOP, OP_IADD, None, None, OP_XBINOP, OP_FADD}, {SN_AddAcross, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_SADDV, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_UADDV}, {SN_AddAcrossWidening, OP_ARM64_SADDLV, None, OP_ARM64_UADDLV}, {SN_AddHighNarrowingLower, OP_ARM64_ADDHN}, {SN_AddHighNarrowingUpper, OP_ARM64_ADDHN2}, {SN_AddPairwise, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_ADDP, None, None, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FADDP}, {SN_AddPairwiseScalar, OP_ARM64_ADDP_SCALAR, None, None, None, OP_ARM64_FADDP_SCALAR}, {SN_AddPairwiseWidening, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_SADDLP, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_UADDLP}, {SN_AddPairwiseWideningAndAdd, OP_ARM64_SADALP, None, OP_ARM64_UADALP}, {SN_AddPairwiseWideningAndAddScalar, OP_ARM64_SADALP, None, OP_ARM64_UADALP}, {SN_AddPairwiseWideningScalar, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_SADDLP, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_UADDLP}, {SN_AddRoundedHighNarrowingLower, OP_ARM64_RADDHN}, {SN_AddRoundedHighNarrowingUpper, OP_ARM64_RADDHN2}, {SN_AddSaturate}, {SN_AddSaturateScalar}, {SN_AddScalar, OP_XBINOP_SCALAR, OP_IADD, None, None, OP_XBINOP_SCALAR, OP_FADD}, {SN_AddWideningLower, OP_ARM64_SADD, None, OP_ARM64_UADD}, {SN_AddWideningUpper, OP_ARM64_SADD2, None, OP_ARM64_UADD2}, {SN_And, OP_XBINOP_FORCEINT, XBINOP_FORCEINT_and}, {SN_BitwiseClear, OP_ARM64_BIC}, {SN_BitwiseSelect, OP_ARM64_BSL}, {SN_Ceiling, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTP}, {SN_CeilingScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTP}, {SN_CompareEqual, OP_XCOMPARE, CMP_EQ, OP_XCOMPARE, CMP_EQ, OP_XCOMPARE_FP, CMP_EQ}, {SN_CompareEqualScalar, OP_XCOMPARE_SCALAR, CMP_EQ, OP_XCOMPARE_SCALAR, CMP_EQ, OP_XCOMPARE_FP_SCALAR, CMP_EQ}, {SN_CompareGreaterThan, OP_XCOMPARE, CMP_GT, OP_XCOMPARE, CMP_GT_UN, OP_XCOMPARE_FP, CMP_GT}, {SN_CompareGreaterThanOrEqual, OP_XCOMPARE, CMP_GE, OP_XCOMPARE, CMP_GE_UN, OP_XCOMPARE_FP, CMP_GE}, {SN_CompareGreaterThanOrEqualScalar, OP_XCOMPARE_SCALAR, CMP_GE, OP_XCOMPARE_SCALAR, CMP_GE_UN, OP_XCOMPARE_FP_SCALAR, CMP_GE}, {SN_CompareGreaterThanScalar, OP_XCOMPARE_SCALAR, CMP_GT, OP_XCOMPARE_SCALAR, CMP_GT_UN, OP_XCOMPARE_FP_SCALAR, CMP_GT}, {SN_CompareLessThan, 
OP_XCOMPARE, CMP_LT, OP_XCOMPARE, CMP_LT_UN, OP_XCOMPARE_FP, CMP_LT}, {SN_CompareLessThanOrEqual, OP_XCOMPARE, CMP_LE, OP_XCOMPARE, CMP_LE_UN, OP_XCOMPARE_FP, CMP_LE}, {SN_CompareLessThanOrEqualScalar, OP_XCOMPARE_SCALAR, CMP_LE, OP_XCOMPARE_SCALAR, CMP_LE_UN, OP_XCOMPARE_FP_SCALAR, CMP_LE}, {SN_CompareLessThanScalar, OP_XCOMPARE_SCALAR, CMP_LT, OP_XCOMPARE_SCALAR, CMP_LT_UN, OP_XCOMPARE_FP_SCALAR, CMP_LT}, {SN_CompareTest, OP_ARM64_CMTST}, {SN_CompareTestScalar, OP_ARM64_CMTST}, {SN_ConvertToDouble, OP_ARM64_SCVTF, None, OP_ARM64_UCVTF, None, OP_ARM64_FCVTL}, {SN_ConvertToDoubleScalar, OP_ARM64_SCVTF_SCALAR, None, OP_ARM64_UCVTF_SCALAR}, {SN_ConvertToDoubleUpper, OP_ARM64_FCVTL2}, {SN_ConvertToInt32RoundAwayFromZero, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAS}, {SN_ConvertToInt32RoundAwayFromZeroScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAS}, {SN_ConvertToInt32RoundToEven, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNS}, {SN_ConvertToInt32RoundToEvenScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNS}, {SN_ConvertToInt32RoundToNegativeInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMS}, {SN_ConvertToInt32RoundToNegativeInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMS}, {SN_ConvertToInt32RoundToPositiveInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPS}, {SN_ConvertToInt32RoundToPositiveInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPS}, {SN_ConvertToInt32RoundToZero, OP_ARM64_FCVTZS}, {SN_ConvertToInt32RoundToZeroScalar, OP_ARM64_FCVTZS_SCALAR}, {SN_ConvertToInt64RoundAwayFromZero, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAS}, {SN_ConvertToInt64RoundAwayFromZeroScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAS}, {SN_ConvertToInt64RoundToEven, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNS}, {SN_ConvertToInt64RoundToEvenScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNS}, {SN_ConvertToInt64RoundToNegativeInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMS}, {SN_ConvertToInt64RoundToNegativeInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMS}, {SN_ConvertToInt64RoundToPositiveInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPS}, {SN_ConvertToInt64RoundToPositiveInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPS}, {SN_ConvertToInt64RoundToZero, OP_ARM64_FCVTZS}, {SN_ConvertToInt64RoundToZeroScalar, OP_ARM64_FCVTZS_SCALAR}, {SN_ConvertToSingle, OP_ARM64_SCVTF, None, OP_ARM64_UCVTF}, {SN_ConvertToSingleLower, OP_ARM64_FCVTN}, {SN_ConvertToSingleRoundToOddLower, OP_ARM64_FCVTXN}, {SN_ConvertToSingleRoundToOddUpper, OP_ARM64_FCVTXN2}, {SN_ConvertToSingleScalar, OP_ARM64_SCVTF_SCALAR, None, OP_ARM64_UCVTF_SCALAR}, {SN_ConvertToSingleUpper, OP_ARM64_FCVTN2}, {SN_ConvertToUInt32RoundAwayFromZero, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAU}, {SN_ConvertToUInt32RoundAwayFromZeroScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAU}, {SN_ConvertToUInt32RoundToEven, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNU}, {SN_ConvertToUInt32RoundToEvenScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNU}, {SN_ConvertToUInt32RoundToNegativeInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMU}, {SN_ConvertToUInt32RoundToNegativeInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMU}, {SN_ConvertToUInt32RoundToPositiveInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPU}, {SN_ConvertToUInt32RoundToPositiveInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPU}, 
{SN_ConvertToUInt32RoundToZero, OP_ARM64_FCVTZU}, {SN_ConvertToUInt32RoundToZeroScalar, OP_ARM64_FCVTZU_SCALAR}, {SN_ConvertToUInt64RoundAwayFromZero, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAU}, {SN_ConvertToUInt64RoundAwayFromZeroScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTAU}, {SN_ConvertToUInt64RoundToEven, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNU}, {SN_ConvertToUInt64RoundToEvenScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTNU}, {SN_ConvertToUInt64RoundToNegativeInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMU}, {SN_ConvertToUInt64RoundToNegativeInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTMU}, {SN_ConvertToUInt64RoundToPositiveInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPU}, {SN_ConvertToUInt64RoundToPositiveInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FCVTPU}, {SN_ConvertToUInt64RoundToZero, OP_ARM64_FCVTZU}, {SN_ConvertToUInt64RoundToZeroScalar, OP_ARM64_FCVTZU_SCALAR}, {SN_Divide, OP_XBINOP, OP_FDIV}, {SN_DivideScalar, OP_XBINOP_SCALAR, OP_FDIV}, {SN_DuplicateSelectedScalarToVector128}, {SN_DuplicateSelectedScalarToVector64}, {SN_DuplicateToVector128}, {SN_DuplicateToVector64}, {SN_Extract}, {SN_ExtractNarrowingLower, OP_ARM64_XTN}, {SN_ExtractNarrowingSaturateLower, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_SQXTN, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_UQXTN}, {SN_ExtractNarrowingSaturateScalar, OP_ARM64_XNARROW_SCALAR, INTRINS_AARCH64_ADV_SIMD_SQXTN, OP_ARM64_XNARROW_SCALAR, INTRINS_AARCH64_ADV_SIMD_UQXTN}, {SN_ExtractNarrowingSaturateUnsignedLower, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_SQXTUN}, {SN_ExtractNarrowingSaturateUnsignedScalar, OP_ARM64_XNARROW_SCALAR, INTRINS_AARCH64_ADV_SIMD_SQXTUN}, {SN_ExtractNarrowingSaturateUnsignedUpper, OP_ARM64_SQXTUN2}, {SN_ExtractNarrowingSaturateUpper, OP_ARM64_SQXTN2, None, OP_ARM64_UQXTN2}, {SN_ExtractNarrowingUpper, OP_ARM64_XTN2}, {SN_ExtractVector128, OP_ARM64_EXT}, {SN_ExtractVector64, OP_ARM64_EXT}, {SN_Floor, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTM}, {SN_FloorScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTM}, {SN_FusedAddHalving, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SHADD, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UHADD}, {SN_FusedAddRoundedHalving, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SRHADD, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_URHADD}, {SN_FusedMultiplyAdd, OP_ARM64_FMADD}, {SN_FusedMultiplyAddByScalar, OP_ARM64_FMADD_BYSCALAR}, {SN_FusedMultiplyAddBySelectedScalar}, {SN_FusedMultiplyAddNegatedScalar, OP_ARM64_FNMADD_SCALAR}, {SN_FusedMultiplyAddScalar, OP_ARM64_FMADD_SCALAR}, {SN_FusedMultiplyAddScalarBySelectedScalar}, {SN_FusedMultiplySubtract, OP_ARM64_FMSUB}, {SN_FusedMultiplySubtractByScalar, OP_ARM64_FMSUB_BYSCALAR}, {SN_FusedMultiplySubtractBySelectedScalar}, {SN_FusedMultiplySubtractNegatedScalar, OP_ARM64_FNMSUB_SCALAR}, {SN_FusedMultiplySubtractScalar, OP_ARM64_FMSUB_SCALAR}, {SN_FusedMultiplySubtractScalarBySelectedScalar}, {SN_FusedSubtractHalving, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SHSUB, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UHSUB}, {SN_Insert}, {SN_InsertScalar}, {SN_InsertSelectedScalar}, {SN_LeadingSignCount, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_CLS}, {SN_LeadingZeroCount, OP_ARM64_CLZ}, {SN_LoadAndInsertScalar, OP_ARM64_LD1_INSERT}, {SN_LoadAndReplicateToVector128, OP_ARM64_LD1R}, {SN_LoadAndReplicateToVector64, OP_ARM64_LD1R}, {SN_LoadPairScalarVector64, OP_ARM64_LDP_SCALAR}, {SN_LoadPairScalarVector64NonTemporal, OP_ARM64_LDNP_SCALAR}, 
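	/* Rows such as the LoadPair* entries here list only the default
	   (opcode, selector) pair; a None in the unsigned or floating columns
	   presumably means "reuse the default lowering" for those element
	   types rather than "unsupported". */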
{SN_LoadPairVector128, OP_ARM64_LDP}, {SN_LoadPairVector128NonTemporal, OP_ARM64_LDNP}, {SN_LoadPairVector64, OP_ARM64_LDP}, {SN_LoadPairVector64NonTemporal, OP_ARM64_LDNP}, {SN_LoadVector128, OP_ARM64_LD1}, {SN_LoadVector64, OP_ARM64_LD1}, {SN_Max, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SMAX, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UMAX, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMAX}, {SN_MaxAcross, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_SMAXV, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_UMAXV, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMAXV}, {SN_MaxNumber, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMAXNM}, {SN_MaxNumberAcross, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMAXNMV}, {SN_MaxNumberPairwise, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMAXNMP}, {SN_MaxNumberPairwiseScalar, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMAXNMV}, {SN_MaxNumberScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMAXNM}, {SN_MaxPairwise, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SMAXP, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UMAXP, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMAXP}, {SN_MaxPairwiseScalar, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMAXV}, {SN_MaxScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMAX}, {SN_Min, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SMIN, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UMIN, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMIN}, {SN_MinAcross, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_SMINV, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_UMINV, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMINV}, {SN_MinNumber, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMINNM}, {SN_MinNumberAcross, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMINNMV}, {SN_MinNumberPairwise, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMINNMP}, {SN_MinNumberPairwiseScalar, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMINNMV}, {SN_MinNumberScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMINNM}, {SN_MinPairwise, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SMINP, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UMINP, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMINP}, {SN_MinPairwiseScalar, OP_ARM64_XHORIZ, INTRINS_AARCH64_ADV_SIMD_FMINV}, {SN_MinScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMIN}, {SN_Multiply, OP_XBINOP, OP_IMUL, None, None, OP_XBINOP, OP_FMUL}, {SN_MultiplyAdd, OP_ARM64_MLA}, {SN_MultiplyAddByScalar, OP_ARM64_MLA_SCALAR}, {SN_MultiplyAddBySelectedScalar}, {SN_MultiplyByScalar, OP_XBINOP_BYSCALAR, OP_IMUL, None, None, OP_XBINOP_BYSCALAR, OP_FMUL}, {SN_MultiplyBySelectedScalar}, {SN_MultiplyBySelectedScalarWideningLower}, {SN_MultiplyBySelectedScalarWideningLowerAndAdd}, {SN_MultiplyBySelectedScalarWideningLowerAndSubtract}, {SN_MultiplyBySelectedScalarWideningUpper}, {SN_MultiplyBySelectedScalarWideningUpperAndAdd}, {SN_MultiplyBySelectedScalarWideningUpperAndSubtract}, {SN_MultiplyDoublingByScalarSaturateHigh, OP_XOP_OVR_BYSCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQDMULH}, {SN_MultiplyDoublingBySelectedScalarSaturateHigh}, {SN_MultiplyDoublingSaturateHigh, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQDMULH}, {SN_MultiplyDoublingSaturateHighScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQDMULH}, {SN_MultiplyDoublingScalarBySelectedScalarSaturateHigh}, {SN_MultiplyDoublingWideningAndAddSaturateScalar, OP_ARM64_SQDMLAL_SCALAR}, {SN_MultiplyDoublingWideningAndSubtractSaturateScalar, OP_ARM64_SQDMLSL_SCALAR}, {SN_MultiplyDoublingWideningLowerAndAddSaturate, OP_ARM64_SQDMLAL}, {SN_MultiplyDoublingWideningLowerAndSubtractSaturate, 
OP_ARM64_SQDMLSL}, {SN_MultiplyDoublingWideningLowerByScalarAndAddSaturate, OP_ARM64_SQDMLAL_BYSCALAR}, {SN_MultiplyDoublingWideningLowerByScalarAndSubtractSaturate, OP_ARM64_SQDMLSL_BYSCALAR}, {SN_MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate}, {SN_MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate}, {SN_MultiplyDoublingWideningSaturateLower, OP_ARM64_SQDMULL}, {SN_MultiplyDoublingWideningSaturateLowerByScalar, OP_ARM64_SQDMULL_BYSCALAR}, {SN_MultiplyDoublingWideningSaturateLowerBySelectedScalar}, {SN_MultiplyDoublingWideningSaturateScalar, OP_ARM64_SQDMULL_SCALAR}, {SN_MultiplyDoublingWideningSaturateScalarBySelectedScalar}, {SN_MultiplyDoublingWideningSaturateUpper, OP_ARM64_SQDMULL2}, {SN_MultiplyDoublingWideningSaturateUpperByScalar, OP_ARM64_SQDMULL2_BYSCALAR}, {SN_MultiplyDoublingWideningSaturateUpperBySelectedScalar}, {SN_MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate}, {SN_MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate}, {SN_MultiplyDoublingWideningUpperAndAddSaturate, OP_ARM64_SQDMLAL2}, {SN_MultiplyDoublingWideningUpperAndSubtractSaturate, OP_ARM64_SQDMLSL2}, {SN_MultiplyDoublingWideningUpperByScalarAndAddSaturate, OP_ARM64_SQDMLAL2_BYSCALAR}, {SN_MultiplyDoublingWideningUpperByScalarAndSubtractSaturate, OP_ARM64_SQDMLSL2_BYSCALAR}, {SN_MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate}, {SN_MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate}, {SN_MultiplyExtended, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMULX}, {SN_MultiplyExtendedByScalar, OP_XOP_OVR_BYSCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMULX}, {SN_MultiplyExtendedBySelectedScalar}, {SN_MultiplyExtendedScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FMULX}, {SN_MultiplyExtendedScalarBySelectedScalar}, {SN_MultiplyRoundedDoublingByScalarSaturateHigh, OP_XOP_OVR_BYSCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQRDMULH}, {SN_MultiplyRoundedDoublingBySelectedScalarSaturateHigh}, {SN_MultiplyRoundedDoublingSaturateHigh, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQRDMULH}, {SN_MultiplyRoundedDoublingSaturateHighScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQRDMULH}, {SN_MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh}, {SN_MultiplyScalar, OP_XBINOP_SCALAR, OP_FMUL}, {SN_MultiplyScalarBySelectedScalar, OP_ARM64_FMUL_SEL}, {SN_MultiplySubtract, OP_ARM64_MLS}, {SN_MultiplySubtractByScalar, OP_ARM64_MLS_SCALAR}, {SN_MultiplySubtractBySelectedScalar}, {SN_MultiplyWideningLower, OP_ARM64_SMULL, None, OP_ARM64_UMULL}, {SN_MultiplyWideningLowerAndAdd, OP_ARM64_SMLAL, None, OP_ARM64_UMLAL}, {SN_MultiplyWideningLowerAndSubtract, OP_ARM64_SMLSL, None, OP_ARM64_UMLSL}, {SN_MultiplyWideningUpper, OP_ARM64_SMULL2, None, OP_ARM64_UMULL2}, {SN_MultiplyWideningUpperAndAdd, OP_ARM64_SMLAL2, None, OP_ARM64_UMLAL2}, {SN_MultiplyWideningUpperAndSubtract, OP_ARM64_SMLSL2, None, OP_ARM64_UMLSL2}, {SN_Negate, OP_ARM64_XNEG}, {SN_NegateSaturate, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_SQNEG}, {SN_NegateSaturateScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_SQNEG}, {SN_NegateScalar, OP_ARM64_XNEG_SCALAR}, {SN_Not, OP_ARM64_MVN}, {SN_Or, OP_XBINOP_FORCEINT, XBINOP_FORCEINT_or}, {SN_OrNot, OP_XBINOP_FORCEINT, XBINOP_FORCEINT_ornot}, {SN_PolynomialMultiply, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_PMUL}, {SN_PolynomialMultiplyWideningLower, OP_ARM64_PMULL}, {SN_PolynomialMultiplyWideningUpper, OP_ARM64_PMULL2}, {SN_PopCount, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_CNT}, {SN_ReciprocalEstimate, None, None, OP_XOP_OVR_X_X, 
INTRINS_AARCH64_ADV_SIMD_URECPE, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRECPE}, {SN_ReciprocalEstimateScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRECPE}, {SN_ReciprocalExponentScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRECPX}, {SN_ReciprocalSquareRootEstimate, None, None, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_URSQRTE, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRSQRTE}, {SN_ReciprocalSquareRootEstimateScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRSQRTE}, {SN_ReciprocalSquareRootStep, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FRSQRTS}, {SN_ReciprocalSquareRootStepScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FRSQRTS}, {SN_ReciprocalStep, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FRECPS}, {SN_ReciprocalStepScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_FRECPS}, {SN_ReverseElement16, OP_ARM64_REVN, 16}, {SN_ReverseElement32, OP_ARM64_REVN, 32}, {SN_ReverseElement8, OP_ARM64_REVN, 8}, {SN_ReverseElementBits, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_RBIT}, {SN_RoundAwayFromZero, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTA}, {SN_RoundAwayFromZeroScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTA}, {SN_RoundToNearest, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTN}, {SN_RoundToNearestScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTN}, {SN_RoundToNegativeInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTM}, {SN_RoundToNegativeInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTM}, {SN_RoundToPositiveInfinity, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTP}, {SN_RoundToPositiveInfinityScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTP}, {SN_RoundToZero, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTZ}, {SN_RoundToZeroScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FRINTZ}, {SN_ShiftArithmetic, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SSHL}, {SN_ShiftArithmeticRounded, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SRSHL}, {SN_ShiftArithmeticRoundedSaturate, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQRSHL}, {SN_ShiftArithmeticRoundedSaturateScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQRSHL}, {SN_ShiftArithmeticRoundedScalar, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SRSHL}, {SN_ShiftArithmeticSaturate, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQSHL}, {SN_ShiftArithmeticSaturateScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQSHL}, {SN_ShiftArithmeticScalar, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SSHL}, {SN_ShiftLeftAndInsert, OP_ARM64_SLI}, {SN_ShiftLeftAndInsertScalar, OP_ARM64_SLI}, {SN_ShiftLeftLogical, OP_ARM64_SHL}, {SN_ShiftLeftLogicalSaturate}, {SN_ShiftLeftLogicalSaturateScalar}, {SN_ShiftLeftLogicalSaturateUnsigned, OP_ARM64_SQSHLU}, {SN_ShiftLeftLogicalSaturateUnsignedScalar, OP_ARM64_SQSHLU_SCALAR}, {SN_ShiftLeftLogicalScalar, OP_ARM64_SHL}, {SN_ShiftLeftLogicalWideningLower, OP_ARM64_SSHLL, None, OP_ARM64_USHLL}, {SN_ShiftLeftLogicalWideningUpper, OP_ARM64_SSHLL2, None, OP_ARM64_USHLL2}, {SN_ShiftLogical, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_USHL}, {SN_ShiftLogicalRounded, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_URSHL}, {SN_ShiftLogicalRoundedSaturate, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UQRSHL}, {SN_ShiftLogicalRoundedSaturateScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UQRSHL}, {SN_ShiftLogicalRoundedScalar, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_URSHL}, {SN_ShiftLogicalSaturate, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UQSHL}, {SN_ShiftLogicalSaturateScalar, 
OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UQSHL}, {SN_ShiftLogicalScalar, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_USHL}, {SN_ShiftRightAndInsert, OP_ARM64_SRI}, {SN_ShiftRightAndInsertScalar, OP_ARM64_SRI}, {SN_ShiftRightArithmetic, OP_ARM64_SSHR}, {SN_ShiftRightArithmeticAdd, OP_ARM64_SSRA}, {SN_ShiftRightArithmeticAddScalar, OP_ARM64_SSRA}, {SN_ShiftRightArithmeticNarrowingSaturateLower, OP_ARM64_XNSHIFT, INTRINS_AARCH64_ADV_SIMD_SQSHRN}, {SN_ShiftRightArithmeticNarrowingSaturateScalar, OP_ARM64_XNSHIFT_SCALAR, INTRINS_AARCH64_ADV_SIMD_SQSHRN}, {SN_ShiftRightArithmeticNarrowingSaturateUnsignedLower, OP_ARM64_XNSHIFT, INTRINS_AARCH64_ADV_SIMD_SQSHRUN}, {SN_ShiftRightArithmeticNarrowingSaturateUnsignedScalar, OP_ARM64_XNSHIFT_SCALAR, INTRINS_AARCH64_ADV_SIMD_SQSHRUN}, {SN_ShiftRightArithmeticNarrowingSaturateUnsignedUpper, OP_ARM64_XNSHIFT2, INTRINS_AARCH64_ADV_SIMD_SQSHRUN}, {SN_ShiftRightArithmeticNarrowingSaturateUpper, OP_ARM64_XNSHIFT2, INTRINS_AARCH64_ADV_SIMD_SQSHRN}, {SN_ShiftRightArithmeticRounded, OP_ARM64_SRSHR}, {SN_ShiftRightArithmeticRoundedAdd, OP_ARM64_SRSRA}, {SN_ShiftRightArithmeticRoundedAddScalar, OP_ARM64_SRSRA}, {SN_ShiftRightArithmeticRoundedNarrowingSaturateLower, OP_ARM64_XNSHIFT, INTRINS_AARCH64_ADV_SIMD_SQRSHRN}, {SN_ShiftRightArithmeticRoundedNarrowingSaturateScalar, OP_ARM64_XNSHIFT_SCALAR, INTRINS_AARCH64_ADV_SIMD_SQRSHRN}, {SN_ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower, OP_ARM64_XNSHIFT, INTRINS_AARCH64_ADV_SIMD_SQRSHRUN}, {SN_ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar, OP_ARM64_XNSHIFT_SCALAR, INTRINS_AARCH64_ADV_SIMD_SQRSHRUN}, {SN_ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper, OP_ARM64_XNSHIFT2, INTRINS_AARCH64_ADV_SIMD_SQRSHRUN}, {SN_ShiftRightArithmeticRoundedNarrowingSaturateUpper, OP_ARM64_XNSHIFT2, INTRINS_AARCH64_ADV_SIMD_SQRSHRN}, {SN_ShiftRightArithmeticRoundedScalar, OP_ARM64_SRSHR}, {SN_ShiftRightArithmeticScalar, OP_ARM64_SSHR}, {SN_ShiftRightLogical, OP_ARM64_USHR}, {SN_ShiftRightLogicalAdd, OP_ARM64_USRA}, {SN_ShiftRightLogicalAddScalar, OP_ARM64_USRA}, {SN_ShiftRightLogicalNarrowingLower, OP_ARM64_SHRN}, {SN_ShiftRightLogicalNarrowingSaturateLower, OP_ARM64_XNSHIFT, INTRINS_AARCH64_ADV_SIMD_UQSHRN}, {SN_ShiftRightLogicalNarrowingSaturateScalar, OP_ARM64_XNSHIFT_SCALAR, INTRINS_AARCH64_ADV_SIMD_UQSHRN}, {SN_ShiftRightLogicalNarrowingSaturateUpper, OP_ARM64_XNSHIFT2, INTRINS_AARCH64_ADV_SIMD_UQSHRN}, {SN_ShiftRightLogicalNarrowingUpper, OP_ARM64_SHRN2}, {SN_ShiftRightLogicalRounded, OP_ARM64_URSHR}, {SN_ShiftRightLogicalRoundedAdd, OP_ARM64_URSRA}, {SN_ShiftRightLogicalRoundedAddScalar, OP_ARM64_URSRA}, {SN_ShiftRightLogicalRoundedNarrowingLower, OP_ARM64_XNSHIFT, INTRINS_AARCH64_ADV_SIMD_RSHRN}, {SN_ShiftRightLogicalRoundedNarrowingSaturateLower, OP_ARM64_XNSHIFT, INTRINS_AARCH64_ADV_SIMD_UQRSHRN}, {SN_ShiftRightLogicalRoundedNarrowingSaturateScalar, OP_ARM64_XNSHIFT_SCALAR, INTRINS_AARCH64_ADV_SIMD_UQRSHRN}, {SN_ShiftRightLogicalRoundedNarrowingSaturateUpper, OP_ARM64_XNSHIFT2, INTRINS_AARCH64_ADV_SIMD_UQRSHRN}, {SN_ShiftRightLogicalRoundedNarrowingUpper, OP_ARM64_XNSHIFT2, INTRINS_AARCH64_ADV_SIMD_RSHRN}, {SN_ShiftRightLogicalRoundedScalar, OP_ARM64_URSHR}, {SN_ShiftRightLogicalScalar, OP_ARM64_USHR}, {SN_SignExtendWideningLower, OP_ARM64_SXTL}, {SN_SignExtendWideningUpper, OP_ARM64_SXTL2}, {SN_Sqrt, OP_XOP_OVR_X_X, INTRINS_AARCH64_ADV_SIMD_FSQRT}, {SN_SqrtScalar, OP_XOP_OVR_SCALAR_X_X, INTRINS_AARCH64_ADV_SIMD_FSQRT}, {SN_Store, OP_ARM64_ST1}, {SN_StorePair, OP_ARM64_STP}, 
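	/* The ShiftRight*Narrowing* rows above share the generic
	   OP_ARM64_XNSHIFT / OP_ARM64_XNSHIFT2 / OP_ARM64_XNSHIFT_SCALAR
	   opcodes and differ only in the AArch64 intrinsic id (SQSHRN, UQSHRN,
	   SQRSHRN, ...) carried in the selector column, so a single lowering
	   path presumably serves the whole family. */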
	{SN_StorePairNonTemporal, OP_ARM64_STNP},
	{SN_StorePairScalar, OP_ARM64_STP_SCALAR},
	{SN_StorePairScalarNonTemporal, OP_ARM64_STNP_SCALAR},
	{SN_StoreSelectedScalar, OP_ARM64_ST1_SCALAR},
	{SN_Subtract, OP_XBINOP, OP_ISUB, None, None, OP_XBINOP, OP_FSUB},
	{SN_SubtractHighNarrowingLower, OP_ARM64_SUBHN},
	{SN_SubtractHighNarrowingUpper, OP_ARM64_SUBHN2},
	{SN_SubtractRoundedHighNarrowingLower, OP_ARM64_RSUBHN},
	{SN_SubtractRoundedHighNarrowingUpper, OP_ARM64_RSUBHN2},
	{SN_SubtractSaturate, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQSUB, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UQSUB},
	{SN_SubtractSaturateScalar, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_SQSUB, OP_XOP_OVR_SCALAR_X_X_X, INTRINS_AARCH64_ADV_SIMD_UQSUB},
	{SN_SubtractScalar, OP_XBINOP_SCALAR, OP_ISUB, None, None, OP_XBINOP_SCALAR, OP_FSUB},
	{SN_SubtractWideningLower, OP_ARM64_SSUB, None, OP_ARM64_USUB},
	{SN_SubtractWideningUpper, OP_ARM64_SSUB2, None, OP_ARM64_USUB2},
	{SN_TransposeEven, OP_ARM64_TRN1},
	{SN_TransposeOdd, OP_ARM64_TRN2},
	{SN_UnzipEven, OP_ARM64_UZP1},
	{SN_UnzipOdd, OP_ARM64_UZP2},
	{SN_VectorTableLookup, OP_XOP_OVR_X_X_X, INTRINS_AARCH64_ADV_SIMD_TBL1},
	{SN_VectorTableLookupExtension, OP_XOP_OVR_X_X_X_X, INTRINS_AARCH64_ADV_SIMD_TBX1},
	{SN_Xor, OP_XBINOP_FORCEINT, XBINOP_FORCEINT_xor},
	{SN_ZeroExtendWideningLower, OP_ARM64_UXTL},
	{SN_ZeroExtendWideningUpper, OP_ARM64_UXTL2},
	{SN_ZipHigh, OP_ARM64_ZIP2},
	{SN_ZipLow, OP_ARM64_ZIP1},
	{SN_get_IsSupported},
};

static const SimdIntrinsic rdm_methods [] = {
	{SN_MultiplyRoundedDoublingAndAddSaturateHigh, OP_ARM64_SQRDMLAH},
	{SN_MultiplyRoundedDoublingAndAddSaturateHighScalar, OP_ARM64_SQRDMLAH_SCALAR},
	{SN_MultiplyRoundedDoublingAndSubtractSaturateHigh, OP_ARM64_SQRDMLSH},
	{SN_MultiplyRoundedDoublingAndSubtractSaturateHighScalar, OP_ARM64_SQRDMLSH_SCALAR},
	{SN_MultiplyRoundedDoublingBySelectedScalarAndAddSaturateHigh},
	{SN_MultiplyRoundedDoublingBySelectedScalarAndSubtractSaturateHigh},
	{SN_MultiplyRoundedDoublingScalarBySelectedScalarAndAddSaturateHigh},
	{SN_MultiplyRoundedDoublingScalarBySelectedScalarAndSubtractSaturateHigh},
	{SN_get_IsSupported},
};

static const SimdIntrinsic dp_methods [] = {
	{SN_DotProduct, OP_XOP_OVR_X_X_X_X, INTRINS_AARCH64_ADV_SIMD_SDOT, OP_XOP_OVR_X_X_X_X, INTRINS_AARCH64_ADV_SIMD_UDOT},
	{SN_DotProductBySelectedQuadruplet},
	{SN_get_IsSupported},
};

static const IntrinGroup supported_arm_intrinsics [] = {
	{ "AdvSimd", MONO_CPU_ARM64_NEON, advsimd_methods, sizeof (advsimd_methods) },
	{ "Aes", MONO_CPU_ARM64_CRYPTO, crypto_aes_methods, sizeof (crypto_aes_methods) },
	{ "ArmBase", MONO_CPU_ARM64_BASE, armbase_methods, sizeof (armbase_methods) },
	{ "Crc32", MONO_CPU_ARM64_CRC, crc32_methods, sizeof (crc32_methods) },
	{ "Dp", MONO_CPU_ARM64_DP, dp_methods, sizeof (dp_methods) },
	{ "Rdm", MONO_CPU_ARM64_RDM, rdm_methods, sizeof (rdm_methods) },
	{ "Sha1", MONO_CPU_ARM64_CRYPTO, sha1_methods, sizeof (sha1_methods) },
	{ "Sha256", MONO_CPU_ARM64_CRYPTO, sha256_methods, sizeof (sha256_methods) },
};

static MonoInst*
emit_arm64_intrinsics (
	MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args,
	MonoClass *klass, const IntrinGroup *intrin_group,
	const SimdIntrinsic *info, int id, MonoTypeEnum arg0_type,
	gboolean is_64bit)
{
	MonoCPUFeatures feature = intrin_group->feature;

	gboolean arg0_i32 = (arg0_type == MONO_TYPE_I4) || (arg0_type == MONO_TYPE_U4);
#if TARGET_SIZEOF_VOID_P == 4
	arg0_i32 = arg0_i32 || (arg0_type == MONO_TYPE_I) || (arg0_type == MONO_TYPE_U);
#endif

	if (feature == MONO_CPU_ARM64_BASE) {
		switch (id) {
		case SN_LeadingZeroCount:
			return emit_simd_ins_for_sig (cfg, klass, arg0_i32 ? OP_LZCNT32 : OP_LZCNT64, 0, arg0_type, fsig, args);
		case SN_LeadingSignCount:
			return emit_simd_ins_for_sig (cfg, klass, arg0_i32 ? OP_LSCNT32 : OP_LSCNT64, 0, arg0_type, fsig, args);
		case SN_MultiplyHigh:
			return emit_simd_ins_for_sig (cfg, klass,
				(arg0_type == MONO_TYPE_I8 ? OP_ARM64_SMULH : OP_ARM64_UMULH), 0, arg0_type, fsig, args);
		case SN_ReverseElementBits:
			return emit_simd_ins_for_sig (cfg, klass,
				(is_64bit ? OP_XOP_I8_I8 : OP_XOP_I4_I4),
				(is_64bit ? INTRINS_BITREVERSE_I64 : INTRINS_BITREVERSE_I32),
				arg0_type, fsig, args);
		default:
			g_assert_not_reached (); // if a new API is added we need to either implement it or change IsSupported to false
		}
	}
	if (feature == MONO_CPU_ARM64_CRC) {
		switch (id) {
		case SN_ComputeCrc32:
		case SN_ComputeCrc32C: {
			IntrinsicId op = (IntrinsicId)0;
			gboolean is_c = info->id == SN_ComputeCrc32C;
			switch (get_underlying_type (fsig->params [1])) {
			case MONO_TYPE_U1: op = is_c ? INTRINS_AARCH64_CRC32CB : INTRINS_AARCH64_CRC32B; break;
			case MONO_TYPE_U2: op = is_c ? INTRINS_AARCH64_CRC32CH : INTRINS_AARCH64_CRC32H; break;
			case MONO_TYPE_U4: op = is_c ? INTRINS_AARCH64_CRC32CW : INTRINS_AARCH64_CRC32W; break;
			case MONO_TYPE_U8: op = is_c ? INTRINS_AARCH64_CRC32CX : INTRINS_AARCH64_CRC32X; break;
			default: g_assert_not_reached (); break;
			}
			return emit_simd_ins_for_sig (cfg, klass, is_64bit ? OP_XOP_I4_I4_I8 : OP_XOP_I4_I4_I4, op, arg0_type, fsig, args);
		}
		default:
			g_assert_not_reached (); // if a new API is added we need to either implement it or change IsSupported to false
		}
	}
	if (feature == MONO_CPU_ARM64_NEON) {
		switch (id) {
		case SN_AbsoluteCompareGreaterThan:
		case SN_AbsoluteCompareGreaterThanOrEqual:
		case SN_AbsoluteCompareLessThan:
		case SN_AbsoluteCompareLessThanOrEqual:
		case SN_AbsoluteCompareGreaterThanScalar:
		case SN_AbsoluteCompareGreaterThanOrEqualScalar:
		case SN_AbsoluteCompareLessThanScalar:
		case SN_AbsoluteCompareLessThanOrEqualScalar: {
			gboolean reverse_args = FALSE;
			gboolean use_geq = FALSE;
			gboolean scalar = FALSE;
			MonoInst *cmp_args [] = { args [0], args [1] };
			switch (id) {
			case SN_AbsoluteCompareGreaterThanScalar: scalar = TRUE;
			case SN_AbsoluteCompareGreaterThan: break;
			case SN_AbsoluteCompareGreaterThanOrEqualScalar: scalar = TRUE;
			case SN_AbsoluteCompareGreaterThanOrEqual: use_geq = TRUE; break;
			case SN_AbsoluteCompareLessThanScalar: scalar = TRUE;
			case SN_AbsoluteCompareLessThan: reverse_args = TRUE; break;
			case SN_AbsoluteCompareLessThanOrEqualScalar: scalar = TRUE;
			case SN_AbsoluteCompareLessThanOrEqual: reverse_args = TRUE; use_geq = TRUE; break;
			}
			if (reverse_args) {
				cmp_args [0] = args [1];
				cmp_args [1] = args [0];
			}
			int iid = use_geq ? INTRINS_AARCH64_ADV_SIMD_FACGE : INTRINS_AARCH64_ADV_SIMD_FACGT;
			return emit_simd_ins_for_sig (cfg, klass, OP_ARM64_ABSCOMPARE, iid, scalar, fsig, cmp_args);
		}
		case SN_AddSaturate:
		case SN_AddSaturateScalar: {
			gboolean arg0_unsigned = type_is_unsigned (fsig->params [0]);
			gboolean arg1_unsigned = type_is_unsigned (fsig->params [1]);
			int iid = 0;
			if (arg0_unsigned && arg1_unsigned)
				iid = INTRINS_AARCH64_ADV_SIMD_UQADD;
			else if (arg0_unsigned && !arg1_unsigned)
				iid = INTRINS_AARCH64_ADV_SIMD_USQADD;
			else if (!arg0_unsigned && arg1_unsigned)
				iid = INTRINS_AARCH64_ADV_SIMD_SUQADD;
			else
				iid = INTRINS_AARCH64_ADV_SIMD_SQADD;
			int op = id == SN_AddSaturateScalar ? OP_XOP_OVR_SCALAR_X_X_X : OP_XOP_OVR_X_X_X;
			return emit_simd_ins_for_sig (cfg, klass, op, iid, arg0_type, fsig, args);
		}
		case SN_DuplicateSelectedScalarToVector128:
		case SN_DuplicateSelectedScalarToVector64:
		case SN_DuplicateToVector64:
		case SN_DuplicateToVector128: {
			MonoClass *ret_klass = mono_class_from_mono_type_internal (fsig->ret);
			MonoType *rtype = get_vector_t_elem_type (fsig->ret);
			int scalar_src_reg = args [0]->dreg;
			switch (id) {
			case SN_DuplicateSelectedScalarToVector128:
			case SN_DuplicateSelectedScalarToVector64: {
				MonoInst *ins = emit_simd_ins (cfg, ret_klass, type_to_xextract_op (rtype->type), args [0]->dreg, args [1]->dreg);
				ins->inst_c1 = arg0_type;
				scalar_src_reg = ins->dreg;
				break;
			}
			}
			return emit_simd_ins (cfg, ret_klass, type_to_expand_op (rtype), scalar_src_reg, -1);
		}
		case SN_Extract: {
			int extract_op = type_to_xextract_op (arg0_type);
			MonoInst *ins = emit_simd_ins (cfg, klass, extract_op, args [0]->dreg, args [1]->dreg);
			ins->inst_c1 = arg0_type;
			return ins;
		}
		case SN_InsertSelectedScalar:
		case SN_InsertScalar:
		case SN_Insert: {
			MonoClass *ret_klass = mono_class_from_mono_type_internal (fsig->ret);
			int insert_op = 0;
			int extract_op = 0;
			switch (arg0_type) {
			case MONO_TYPE_I1: case MONO_TYPE_U1: insert_op = OP_XINSERT_I1; extract_op = OP_EXTRACT_I1; break;
			case MONO_TYPE_I2: case MONO_TYPE_U2: insert_op = OP_XINSERT_I2; extract_op = OP_EXTRACT_I2; break;
			case MONO_TYPE_I4: case MONO_TYPE_U4: insert_op = OP_XINSERT_I4; extract_op = OP_EXTRACT_I4; break;
			case MONO_TYPE_I8: case MONO_TYPE_U8: insert_op = OP_XINSERT_I8; extract_op = OP_EXTRACT_I8; break;
			case MONO_TYPE_R4: insert_op = OP_XINSERT_R4; extract_op = OP_EXTRACT_R4; break;
			case MONO_TYPE_R8: insert_op = OP_XINSERT_R8; extract_op = OP_EXTRACT_R8; break;
			case MONO_TYPE_I: case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
				insert_op = OP_XINSERT_I8;
				extract_op = OP_EXTRACT_I8;
#else
				insert_op = OP_XINSERT_I4;
				extract_op = OP_EXTRACT_I4;
#endif
				break;
			default: g_assert_not_reached ();
			}
			int val_src_reg = args [2]->dreg;
			switch (id) {
			case SN_InsertSelectedScalar: {
				MonoInst *scalar = emit_simd_ins (cfg, klass, OP_ARM64_SELECT_SCALAR, args [2]->dreg, args [3]->dreg);
				val_src_reg = scalar->dreg;
				// fallthrough
			}
			case SN_InsertScalar: {
				MonoInst *ins = emit_simd_ins (cfg, klass, extract_op, val_src_reg, -1);
				ins->inst_c0 = 0;
				ins->inst_c1 = arg0_type;
				val_src_reg = ins->dreg;
				break;
			}
			}
			MonoInst *ins = emit_simd_ins (cfg, ret_klass, insert_op, args [0]->dreg, val_src_reg);
			ins->sreg3 = args [1]->dreg;
			ins->inst_c1 = arg0_type;
			return ins;
		}
		case SN_ShiftLeftLogicalSaturate:
		case SN_ShiftLeftLogicalSaturateScalar: {
			MonoClass *ret_klass = mono_class_from_mono_type_internal (fsig->ret);
			MonoType *etype = get_vector_t_elem_type (fsig->ret);
			gboolean is_unsigned = type_is_unsigned (fsig->ret);
			gboolean scalar = id == SN_ShiftLeftLogicalSaturateScalar;
			int s2v = scalar ? OP_CREATE_SCALAR_UNSAFE : type_to_expand_op (etype);
			int xop = scalar ? OP_XOP_OVR_SCALAR_X_X_X : OP_XOP_OVR_X_X_X;
			int iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UQSHL : INTRINS_AARCH64_ADV_SIMD_SQSHL;
			MonoInst *shift_vector = emit_simd_ins (cfg, ret_klass, s2v, args [1]->dreg, -1);
			shift_vector->inst_c1 = etype->type;
			MonoInst *ret = emit_simd_ins (cfg, ret_klass, xop, args [0]->dreg, shift_vector->dreg);
			ret->inst_c0 = iid;
			ret->inst_c1 = etype->type;
			return ret;
		}
		case SN_MultiplyRoundedDoublingBySelectedScalarSaturateHigh:
		case SN_MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh:
		case SN_MultiplyDoublingScalarBySelectedScalarSaturateHigh:
		case SN_MultiplyDoublingWideningSaturateScalarBySelectedScalar:
		case SN_MultiplyExtendedBySelectedScalar:
		case SN_MultiplyExtendedScalarBySelectedScalar:
		case SN_MultiplyBySelectedScalar:
		case SN_MultiplyBySelectedScalarWideningLower:
		case SN_MultiplyBySelectedScalarWideningUpper:
		case SN_MultiplyDoublingBySelectedScalarSaturateHigh:
		case SN_MultiplyDoublingWideningSaturateLowerBySelectedScalar:
		case SN_MultiplyDoublingWideningSaturateUpperBySelectedScalar: {
			MonoClass *ret_klass = mono_class_from_mono_type_internal (fsig->ret);
			gboolean is_unsigned = type_is_unsigned (fsig->ret);
			gboolean is_float = type_is_float (fsig->ret);
			int opcode = 0;
			int c0 = 0;
			switch (id) {
			case SN_MultiplyRoundedDoublingBySelectedScalarSaturateHigh: opcode = OP_XOP_OVR_BYSCALAR_X_X_X; c0 = INTRINS_AARCH64_ADV_SIMD_SQRDMULH; break;
			case SN_MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh: opcode = OP_XOP_OVR_SCALAR_X_X_X; c0 = INTRINS_AARCH64_ADV_SIMD_SQRDMULH; break;
			case SN_MultiplyDoublingScalarBySelectedScalarSaturateHigh: opcode = OP_XOP_OVR_SCALAR_X_X_X; c0 = INTRINS_AARCH64_ADV_SIMD_SQDMULH; break;
			case SN_MultiplyDoublingWideningSaturateScalarBySelectedScalar: opcode = OP_ARM64_SQDMULL_SCALAR; break;
			case SN_MultiplyExtendedBySelectedScalar: opcode = OP_XOP_OVR_BYSCALAR_X_X_X; c0 = INTRINS_AARCH64_ADV_SIMD_FMULX; break;
			case SN_MultiplyExtendedScalarBySelectedScalar: opcode = OP_XOP_OVR_SCALAR_X_X_X; c0 = INTRINS_AARCH64_ADV_SIMD_FMULX; break;
			case SN_MultiplyBySelectedScalar: opcode = OP_XBINOP_BYSCALAR; c0 = OP_IMUL; break;
			case SN_MultiplyBySelectedScalarWideningLower: opcode = OP_ARM64_SMULL_SCALAR; break;
			case SN_MultiplyBySelectedScalarWideningUpper: opcode = OP_ARM64_SMULL2_SCALAR; break;
			case SN_MultiplyDoublingBySelectedScalarSaturateHigh: opcode = OP_XOP_OVR_BYSCALAR_X_X_X; c0 = INTRINS_AARCH64_ADV_SIMD_SQDMULH; break;
			case SN_MultiplyDoublingWideningSaturateLowerBySelectedScalar: opcode = OP_ARM64_SQDMULL_BYSCALAR; break;
			case SN_MultiplyDoublingWideningSaturateUpperBySelectedScalar: opcode = OP_ARM64_SQDMULL2_BYSCALAR; break;
			default: g_assert_not_reached();
			}
			if (is_unsigned)
				switch (opcode) {
				case OP_ARM64_SMULL_SCALAR: opcode = OP_ARM64_UMULL_SCALAR; break;
				case OP_ARM64_SMULL2_SCALAR: opcode = OP_ARM64_UMULL2_SCALAR; break;
				}
			if (is_float)
				switch (opcode) {
				case OP_XBINOP_BYSCALAR: c0 = OP_FMUL;
				}
			MonoInst *scalar = emit_simd_ins (cfg, ret_klass, OP_ARM64_SELECT_SCALAR, args [1]->dreg, args [2]->dreg);
			MonoInst *ret = emit_simd_ins (cfg, ret_klass, opcode, args [0]->dreg, scalar->dreg);
			ret->inst_c0 = c0;
			ret->inst_c1 = arg0_type;
			return ret;
		}
		case SN_FusedMultiplyAddBySelectedScalar:
		case SN_FusedMultiplyAddScalarBySelectedScalar:
		case SN_FusedMultiplySubtractBySelectedScalar:
		case SN_FusedMultiplySubtractScalarBySelectedScalar:
		case SN_MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate:
		case SN_MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate:
		case SN_MultiplyAddBySelectedScalar:
		case SN_MultiplySubtractBySelectedScalar:
		case SN_MultiplyBySelectedScalarWideningLowerAndAdd:
		case SN_MultiplyBySelectedScalarWideningLowerAndSubtract:
		case SN_MultiplyBySelectedScalarWideningUpperAndAdd:
		case SN_MultiplyBySelectedScalarWideningUpperAndSubtract:
		case SN_MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate:
		case SN_MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate:
		case SN_MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate:
		case SN_MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate: {
			MonoClass *ret_klass = mono_class_from_mono_type_internal (fsig->ret);
			gboolean is_unsigned = type_is_unsigned (fsig->ret);
			int opcode = 0;
			switch (id) {
			case SN_FusedMultiplyAddBySelectedScalar: opcode = OP_ARM64_FMADD_BYSCALAR; break;
			case SN_FusedMultiplyAddScalarBySelectedScalar: opcode = OP_ARM64_FMADD_SCALAR; break;
			case SN_FusedMultiplySubtractBySelectedScalar: opcode = OP_ARM64_FMSUB_BYSCALAR; break;
			case SN_FusedMultiplySubtractScalarBySelectedScalar: opcode = OP_ARM64_FMSUB_SCALAR; break;
			case SN_MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate: opcode = OP_ARM64_SQDMLAL_SCALAR; break;
			case SN_MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate: opcode = OP_ARM64_SQDMLSL_SCALAR; break;
			case SN_MultiplyAddBySelectedScalar: opcode = OP_ARM64_MLA_SCALAR; break;
			case SN_MultiplySubtractBySelectedScalar: opcode = OP_ARM64_MLS_SCALAR; break;
			case SN_MultiplyBySelectedScalarWideningLowerAndAdd: opcode = OP_ARM64_SMLAL_SCALAR; break;
			case SN_MultiplyBySelectedScalarWideningLowerAndSubtract: opcode = OP_ARM64_SMLSL_SCALAR; break;
			case SN_MultiplyBySelectedScalarWideningUpperAndAdd: opcode = OP_ARM64_SMLAL2_SCALAR; break;
			case SN_MultiplyBySelectedScalarWideningUpperAndSubtract: opcode = OP_ARM64_SMLSL2_SCALAR; break;
			case SN_MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate: opcode = OP_ARM64_SQDMLAL_BYSCALAR; break;
			case SN_MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate: opcode = OP_ARM64_SQDMLSL_BYSCALAR; break;
			case SN_MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate: opcode = OP_ARM64_SQDMLAL2_BYSCALAR; break;
			case SN_MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate: opcode = OP_ARM64_SQDMLSL2_BYSCALAR; break;
			default: g_assert_not_reached();
			}
			if (is_unsigned)
				switch (opcode) {
				case OP_ARM64_SMLAL_SCALAR: opcode = OP_ARM64_UMLAL_SCALAR; break;
				case OP_ARM64_SMLSL_SCALAR: opcode = OP_ARM64_UMLSL_SCALAR; break;
				case OP_ARM64_SMLAL2_SCALAR: opcode = OP_ARM64_UMLAL2_SCALAR; break;
				case OP_ARM64_SMLSL2_SCALAR: opcode = OP_ARM64_UMLSL2_SCALAR; break;
				}
			MonoInst *scalar = emit_simd_ins (cfg, ret_klass, OP_ARM64_SELECT_SCALAR, args [2]->dreg, args [3]->dreg);
			MonoInst *ret = emit_simd_ins (cfg, ret_klass, opcode, args [0]->dreg, args [1]->dreg);
			ret->sreg3 = scalar->dreg;
			return ret;
		}
		default:
			g_assert_not_reached ();
		}
	}
	if (feature == MONO_CPU_ARM64_CRYPTO) {
		switch (id) {
		case SN_PolynomialMultiplyWideningLower:
			return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_AARCH64_PMULL64, 0, fsig, args);
		case SN_PolynomialMultiplyWideningUpper:
			return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_AARCH64_PMULL64, 1, fsig, args);
		default:
			g_assert_not_reached ();
		}
	}
	if (feature == MONO_CPU_ARM64_RDM) {
		switch (id) {
		case SN_MultiplyRoundedDoublingBySelectedScalarAndAddSaturateHigh:
		case SN_MultiplyRoundedDoublingBySelectedScalarAndSubtractSaturateHigh:
		case SN_MultiplyRoundedDoublingScalarBySelectedScalarAndAddSaturateHigh:
		case SN_MultiplyRoundedDoublingScalarBySelectedScalarAndSubtractSaturateHigh: {
			MonoClass *ret_klass = mono_class_from_mono_type_internal (fsig->ret);
			int opcode = 0;
			switch (id) {
			case SN_MultiplyRoundedDoublingBySelectedScalarAndAddSaturateHigh: opcode = OP_ARM64_SQRDMLAH_BYSCALAR; break;
			case SN_MultiplyRoundedDoublingBySelectedScalarAndSubtractSaturateHigh: opcode = OP_ARM64_SQRDMLSH_BYSCALAR; break;
			case SN_MultiplyRoundedDoublingScalarBySelectedScalarAndAddSaturateHigh: opcode = OP_ARM64_SQRDMLAH_SCALAR; break;
			case SN_MultiplyRoundedDoublingScalarBySelectedScalarAndSubtractSaturateHigh: opcode = OP_ARM64_SQRDMLSH_SCALAR; break;
			}
			MonoInst *scalar = emit_simd_ins (cfg, ret_klass, OP_ARM64_SELECT_SCALAR, args [2]->dreg, args [3]->dreg);
			MonoInst *ret = emit_simd_ins (cfg, ret_klass, opcode, args [0]->dreg, args [1]->dreg);
			ret->inst_c1 = arg0_type;
			ret->sreg3 = scalar->dreg;
			return ret;
		}
		default:
			g_assert_not_reached ();
		}
	}
	if (feature == MONO_CPU_ARM64_DP) {
		switch (id) {
		case SN_DotProductBySelectedQuadruplet: {
			MonoClass *ret_klass = mono_class_from_mono_type_internal (fsig->ret);
			MonoClass *arg_klass = mono_class_from_mono_type_internal (fsig->params [1]);
			MonoClass *quad_klass = mono_class_from_mono_type_internal (fsig->params [2]);
			gboolean is_unsigned = type_is_unsigned (fsig->ret);
			int iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UDOT : INTRINS_AARCH64_ADV_SIMD_SDOT;
			MonoInst *quad = emit_simd_ins (cfg, arg_klass, OP_ARM64_SELECT_QUAD, args [2]->dreg, args [3]->dreg);
			quad->data.op [1].klass = quad_klass;
			MonoInst *ret = emit_simd_ins (cfg, ret_klass, OP_XOP_OVR_X_X_X_X, args [0]->dreg, args [1]->dreg);
			ret->sreg3 = quad->dreg;
			ret->inst_c0 = iid;
			return ret;
		}
		default:
			g_assert_not_reached ();
		}
	}
	return NULL;
}
#endif // TARGET_ARM64

#ifdef TARGET_AMD64

static SimdIntrinsic sse_methods [] = {
	{SN_Add, OP_XBINOP, OP_FADD},
	{SN_AddScalar, OP_SSE_ADDSS},
	{SN_And, OP_SSE_AND},
	{SN_AndNot, OP_SSE_ANDN},
	{SN_CompareEqual, OP_XCOMPARE_FP, CMP_EQ},
	{SN_CompareGreaterThan, OP_XCOMPARE_FP, CMP_GT},
	{SN_CompareGreaterThanOrEqual, OP_XCOMPARE_FP, CMP_GE},
	{SN_CompareLessThan, OP_XCOMPARE_FP, CMP_LT},
	{SN_CompareLessThanOrEqual, OP_XCOMPARE_FP, CMP_LE},
	{SN_CompareNotEqual, OP_XCOMPARE_FP, CMP_NE},
	{SN_CompareNotGreaterThan, OP_XCOMPARE_FP, CMP_LE_UN},
	{SN_CompareNotGreaterThanOrEqual, OP_XCOMPARE_FP, CMP_LT_UN},
	{SN_CompareNotLessThan, OP_XCOMPARE_FP, CMP_GE_UN},
	{SN_CompareNotLessThanOrEqual, OP_XCOMPARE_FP, CMP_GT_UN},
	{SN_CompareOrdered, OP_XCOMPARE_FP, CMP_ORD},
	{SN_CompareScalarEqual, OP_SSE_CMPSS, CMP_EQ},
	{SN_CompareScalarGreaterThan, OP_SSE_CMPSS, CMP_GT},
	{SN_CompareScalarGreaterThanOrEqual, OP_SSE_CMPSS, CMP_GE},
	{SN_CompareScalarLessThan, OP_SSE_CMPSS, CMP_LT},
	{SN_CompareScalarLessThanOrEqual, OP_SSE_CMPSS, CMP_LE},
	{SN_CompareScalarNotEqual, OP_SSE_CMPSS, CMP_NE},
	{SN_CompareScalarNotGreaterThan, OP_SSE_CMPSS, CMP_LE_UN},
	{SN_CompareScalarNotGreaterThanOrEqual, OP_SSE_CMPSS, CMP_LT_UN},
	{SN_CompareScalarNotLessThan, OP_SSE_CMPSS, CMP_GE_UN},
	{SN_CompareScalarNotLessThanOrEqual, OP_SSE_CMPSS, CMP_GT_UN},
	{SN_CompareScalarOrdered, OP_SSE_CMPSS, CMP_ORD},
	{SN_CompareScalarOrderedEqual, OP_SSE_COMISS, CMP_EQ},
	{SN_CompareScalarOrderedGreaterThan, OP_SSE_COMISS, CMP_GT},
	{SN_CompareScalarOrderedGreaterThanOrEqual, OP_SSE_COMISS, CMP_GE},
	{SN_CompareScalarOrderedLessThan, OP_SSE_COMISS, CMP_LT},
	{SN_CompareScalarOrderedLessThanOrEqual, OP_SSE_COMISS, CMP_LE},
	{SN_CompareScalarOrderedNotEqual, OP_SSE_COMISS, CMP_NE},
	{SN_CompareScalarUnordered, OP_SSE_CMPSS, CMP_UNORD},
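	/* The CompareScalarOrdered* rows above map to COMISS and the
	   CompareScalarUnordered* rows below map to UCOMISS, with the relation
	   kept in the CMP_* column; e.g. a managed call such as
	   Sse.CompareScalarOrderedEqual (a, b) would presumably be emitted as
	   OP_SSE_COMISS with CMP_EQ as its selector. */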
{SN_CompareScalarUnorderedEqual, OP_SSE_UCOMISS, CMP_EQ}, {SN_CompareScalarUnorderedGreaterThan, OP_SSE_UCOMISS, CMP_GT}, {SN_CompareScalarUnorderedGreaterThanOrEqual, OP_SSE_UCOMISS, CMP_GE}, {SN_CompareScalarUnorderedLessThan, OP_SSE_UCOMISS, CMP_LT}, {SN_CompareScalarUnorderedLessThanOrEqual, OP_SSE_UCOMISS, CMP_LE}, {SN_CompareScalarUnorderedNotEqual, OP_SSE_UCOMISS, CMP_NE}, {SN_CompareUnordered, OP_XCOMPARE_FP, CMP_UNORD}, {SN_ConvertScalarToVector128Single}, {SN_ConvertToInt32, OP_XOP_I4_X, INTRINS_SSE_CVTSS2SI}, {SN_ConvertToInt32WithTruncation, OP_XOP_I4_X, INTRINS_SSE_CVTTSS2SI}, {SN_ConvertToInt64, OP_XOP_I8_X, INTRINS_SSE_CVTSS2SI64}, {SN_ConvertToInt64WithTruncation, OP_XOP_I8_X, INTRINS_SSE_CVTTSS2SI64}, {SN_Divide, OP_XBINOP, OP_FDIV}, {SN_DivideScalar, OP_SSE_DIVSS}, {SN_LoadAlignedVector128, OP_SSE_LOADU, 16 /* alignment */}, {SN_LoadHigh, OP_SSE_MOVHPS_LOAD}, {SN_LoadLow, OP_SSE_MOVLPS_LOAD}, {SN_LoadScalarVector128, OP_SSE_MOVSS}, {SN_LoadVector128, OP_SSE_LOADU, 1 /* alignment */}, {SN_Max, OP_XOP_X_X_X, INTRINS_SSE_MAXPS}, {SN_MaxScalar, OP_XOP_X_X_X, INTRINS_SSE_MAXSS}, {SN_Min, OP_XOP_X_X_X, INTRINS_SSE_MINPS}, {SN_MinScalar, OP_XOP_X_X_X, INTRINS_SSE_MINSS}, {SN_MoveHighToLow, OP_SSE_MOVEHL}, {SN_MoveLowToHigh, OP_SSE_MOVELH}, {SN_MoveMask, OP_SSE_MOVMSK}, {SN_MoveScalar, OP_SSE_MOVS2}, {SN_Multiply, OP_XBINOP, OP_FMUL}, {SN_MultiplyScalar, OP_SSE_MULSS}, {SN_Or, OP_SSE_OR}, {SN_Prefetch0, OP_SSE_PREFETCHT0}, {SN_Prefetch1, OP_SSE_PREFETCHT1}, {SN_Prefetch2, OP_SSE_PREFETCHT2}, {SN_PrefetchNonTemporal, OP_SSE_PREFETCHNTA}, {SN_Reciprocal, OP_XOP_X_X, INTRINS_SSE_RCP_PS}, {SN_ReciprocalScalar}, {SN_ReciprocalSqrt, OP_XOP_X_X, INTRINS_SSE_RSQRT_PS}, {SN_ReciprocalSqrtScalar}, {SN_Shuffle}, {SN_Sqrt, OP_XOP_X_X, INTRINS_SSE_SQRT_PS}, {SN_SqrtScalar}, {SN_Store, OP_SSE_STORE, 1 /* alignment */}, {SN_StoreAligned, OP_SSE_STORE, 16 /* alignment */}, {SN_StoreAlignedNonTemporal, OP_SSE_MOVNTPS, 16 /* alignment */}, {SN_StoreFence, OP_XOP, INTRINS_SSE_SFENCE}, {SN_StoreHigh, OP_SSE_MOVHPS_STORE}, {SN_StoreLow, OP_SSE_MOVLPS_STORE}, {SN_StoreScalar, OP_SSE_MOVSS_STORE}, {SN_Subtract, OP_XBINOP, OP_FSUB}, {SN_SubtractScalar, OP_SSE_SUBSS}, {SN_UnpackHigh, OP_SSE_UNPACKHI}, {SN_UnpackLow, OP_SSE_UNPACKLO}, {SN_Xor, OP_SSE_XOR}, {SN_get_IsSupported} }; static SimdIntrinsic sse2_methods [] = { {SN_Add}, {SN_AddSaturate, OP_SSE2_ADDS}, {SN_AddScalar, OP_SSE2_ADDSD}, {SN_And, OP_SSE_AND}, {SN_AndNot, OP_SSE_ANDN}, {SN_Average}, {SN_CompareEqual}, {SN_CompareGreaterThan}, {SN_CompareGreaterThanOrEqual, OP_XCOMPARE_FP, CMP_GE}, {SN_CompareLessThan}, {SN_CompareLessThanOrEqual, OP_XCOMPARE_FP, CMP_LE}, {SN_CompareNotEqual, OP_XCOMPARE_FP, CMP_NE}, {SN_CompareNotGreaterThan, OP_XCOMPARE_FP, CMP_LE_UN}, {SN_CompareNotGreaterThanOrEqual, OP_XCOMPARE_FP, CMP_LT_UN}, {SN_CompareNotLessThan, OP_XCOMPARE_FP, CMP_GE_UN}, {SN_CompareNotLessThanOrEqual, OP_XCOMPARE_FP, CMP_GT_UN}, {SN_CompareOrdered, OP_XCOMPARE_FP, CMP_ORD}, {SN_CompareScalarEqual, OP_SSE2_CMPSD, CMP_EQ}, {SN_CompareScalarGreaterThan, OP_SSE2_CMPSD, CMP_GT}, {SN_CompareScalarGreaterThanOrEqual, OP_SSE2_CMPSD, CMP_GE}, {SN_CompareScalarLessThan, OP_SSE2_CMPSD, CMP_LT}, {SN_CompareScalarLessThanOrEqual, OP_SSE2_CMPSD, CMP_LE}, {SN_CompareScalarNotEqual, OP_SSE2_CMPSD, CMP_NE}, {SN_CompareScalarNotGreaterThan, OP_SSE2_CMPSD, CMP_LE_UN}, {SN_CompareScalarNotGreaterThanOrEqual, OP_SSE2_CMPSD, CMP_LT_UN}, {SN_CompareScalarNotLessThan, OP_SSE2_CMPSD, CMP_GE_UN}, {SN_CompareScalarNotLessThanOrEqual, OP_SSE2_CMPSD, CMP_GT_UN}, 
{SN_CompareScalarOrdered, OP_SSE2_CMPSD, CMP_ORD}, {SN_CompareScalarOrderedEqual, OP_SSE2_COMISD, CMP_EQ}, {SN_CompareScalarOrderedGreaterThan, OP_SSE2_COMISD, CMP_GT}, {SN_CompareScalarOrderedGreaterThanOrEqual, OP_SSE2_COMISD, CMP_GE}, {SN_CompareScalarOrderedLessThan, OP_SSE2_COMISD, CMP_LT}, {SN_CompareScalarOrderedLessThanOrEqual, OP_SSE2_COMISD, CMP_LE}, {SN_CompareScalarOrderedNotEqual, OP_SSE2_COMISD, CMP_NE}, {SN_CompareScalarUnordered, OP_SSE2_CMPSD, CMP_UNORD}, {SN_CompareScalarUnorderedEqual, OP_SSE2_UCOMISD, CMP_EQ}, {SN_CompareScalarUnorderedGreaterThan, OP_SSE2_UCOMISD, CMP_GT}, {SN_CompareScalarUnorderedGreaterThanOrEqual, OP_SSE2_UCOMISD, CMP_GE}, {SN_CompareScalarUnorderedLessThan, OP_SSE2_UCOMISD, CMP_LT}, {SN_CompareScalarUnorderedLessThanOrEqual, OP_SSE2_UCOMISD, CMP_LE}, {SN_CompareScalarUnorderedNotEqual, OP_SSE2_UCOMISD, CMP_NE}, {SN_CompareUnordered, OP_XCOMPARE_FP, CMP_UNORD}, {SN_ConvertScalarToVector128Double}, {SN_ConvertScalarToVector128Int32}, {SN_ConvertScalarToVector128Int64}, {SN_ConvertScalarToVector128Single, OP_XOP_X_X_X, INTRINS_SSE_CVTSD2SS}, {SN_ConvertScalarToVector128UInt32}, {SN_ConvertScalarToVector128UInt64}, {SN_ConvertToInt32}, {SN_ConvertToInt32WithTruncation, OP_XOP_I4_X, INTRINS_SSE_CVTTSD2SI}, {SN_ConvertToInt64}, {SN_ConvertToInt64WithTruncation, OP_XOP_I8_X, INTRINS_SSE_CVTTSD2SI64}, {SN_ConvertToUInt32}, {SN_ConvertToUInt64}, {SN_ConvertToVector128Double}, {SN_ConvertToVector128Int32}, {SN_ConvertToVector128Int32WithTruncation}, {SN_ConvertToVector128Single}, {SN_Divide, OP_XBINOP, OP_FDIV}, {SN_DivideScalar, OP_SSE2_DIVSD}, {SN_Extract}, {SN_Insert}, {SN_LoadAlignedVector128}, {SN_LoadFence, OP_XOP, INTRINS_SSE_LFENCE}, {SN_LoadHigh, OP_SSE2_MOVHPD_LOAD}, {SN_LoadLow, OP_SSE2_MOVLPD_LOAD}, {SN_LoadScalarVector128}, {SN_LoadVector128}, {SN_MaskMove, OP_SSE2_MASKMOVDQU}, {SN_Max}, {SN_MaxScalar, OP_XOP_X_X_X, INTRINS_SSE_MAXSD}, {SN_MemoryFence, OP_XOP, INTRINS_SSE_MFENCE}, {SN_Min}, // FIXME: {SN_MinScalar, OP_XOP_X_X_X, INTRINS_SSE_MINSD}, {SN_MoveMask, OP_SSE_MOVMSK}, {SN_MoveScalar}, {SN_Multiply}, {SN_MultiplyAddAdjacent, OP_XOP_X_X_X, INTRINS_SSE_PMADDWD}, {SN_MultiplyHigh}, {SN_MultiplyLow, OP_PMULW}, {SN_MultiplyScalar, OP_SSE2_MULSD}, {SN_Or, OP_SSE_OR}, {SN_PackSignedSaturate}, {SN_PackUnsignedSaturate}, {SN_ShiftLeftLogical}, {SN_ShiftLeftLogical128BitLane}, {SN_ShiftRightArithmetic}, {SN_ShiftRightLogical}, {SN_ShiftRightLogical128BitLane}, {SN_Shuffle}, {SN_ShuffleHigh}, {SN_ShuffleLow}, {SN_Sqrt, OP_XOP_X_X, INTRINS_SSE_SQRT_PD}, {SN_SqrtScalar}, {SN_Store, OP_SSE_STORE, 1 /* alignment */}, {SN_StoreAligned, OP_SSE_STORE, 16 /* alignment */}, {SN_StoreAlignedNonTemporal, OP_SSE_MOVNTPS, 16 /* alignment */}, {SN_StoreHigh, OP_SSE2_MOVHPD_STORE}, {SN_StoreLow, OP_SSE2_MOVLPD_STORE}, {SN_StoreNonTemporal, OP_SSE_MOVNTPS, 1 /* alignment */}, {SN_StoreScalar, OP_SSE_STORES}, {SN_Subtract}, {SN_SubtractSaturate, OP_SSE2_SUBS}, {SN_SubtractScalar, OP_SSE2_SUBSD}, {SN_SumAbsoluteDifferences, OP_XOP_X_X_X, INTRINS_SSE_PSADBW}, {SN_UnpackHigh, OP_SSE_UNPACKHI}, {SN_UnpackLow, OP_SSE_UNPACKLO}, {SN_Xor, OP_SSE_XOR}, {SN_get_IsSupported} }; static SimdIntrinsic sse3_methods [] = { {SN_AddSubtract}, {SN_HorizontalAdd}, {SN_HorizontalSubtract}, {SN_LoadAndDuplicateToVector128, OP_SSE3_MOVDDUP_MEM}, {SN_LoadDquVector128, OP_XOP_X_I, INTRINS_SSE_LDU_DQ}, {SN_MoveAndDuplicate, OP_SSE3_MOVDDUP}, {SN_MoveHighAndDuplicate, OP_SSE3_MOVSHDUP}, {SN_MoveLowAndDuplicate, OP_SSE3_MOVSLDUP}, {SN_get_IsSupported} }; static SimdIntrinsic ssse3_methods 
[] = { {SN_Abs, OP_SSSE3_ABS}, {SN_AlignRight}, {SN_HorizontalAdd}, {SN_HorizontalAddSaturate, OP_XOP_X_X_X, INTRINS_SSE_PHADDSW}, {SN_HorizontalSubtract}, {SN_HorizontalSubtractSaturate, OP_XOP_X_X_X, INTRINS_SSE_PHSUBSW}, {SN_MultiplyAddAdjacent, OP_XOP_X_X_X, INTRINS_SSE_PMADDUBSW}, {SN_MultiplyHighRoundScale, OP_XOP_X_X_X, INTRINS_SSE_PMULHRSW}, {SN_Shuffle, OP_SSSE3_SHUFFLE}, {SN_Sign}, {SN_get_IsSupported} }; static SimdIntrinsic sse41_methods [] = { {SN_Blend}, {SN_BlendVariable}, {SN_Ceiling, OP_SSE41_ROUNDP, 10 /*round mode*/}, {SN_CeilingScalar, 0, 10 /*round mode*/}, {SN_CompareEqual, OP_XCOMPARE, CMP_EQ}, {SN_ConvertToVector128Int16, OP_SSE_CVTII, MONO_TYPE_I2}, {SN_ConvertToVector128Int32, OP_SSE_CVTII, MONO_TYPE_I4}, {SN_ConvertToVector128Int64, OP_SSE_CVTII, MONO_TYPE_I8}, {SN_DotProduct}, {SN_Extract}, {SN_Floor, OP_SSE41_ROUNDP, 9 /*round mode*/}, {SN_FloorScalar, 0, 9 /*round mode*/}, {SN_Insert}, {SN_LoadAlignedVector128NonTemporal, OP_SSE41_LOADANT}, {SN_Max, OP_XBINOP, OP_IMAX}, {SN_Min, OP_XBINOP, OP_IMIN}, {SN_MinHorizontal, OP_XOP_X_X, INTRINS_SSE_PHMINPOSUW}, {SN_MultipleSumAbsoluteDifferences}, {SN_Multiply, OP_SSE41_MUL}, {SN_MultiplyLow, OP_SSE41_MULLO}, {SN_PackUnsignedSaturate, OP_XOP_X_X_X, INTRINS_SSE_PACKUSDW}, {SN_RoundCurrentDirection, OP_SSE41_ROUNDP, 4 /*round mode*/}, {SN_RoundCurrentDirectionScalar, 0, 4 /*round mode*/}, {SN_RoundToNearestInteger, OP_SSE41_ROUNDP, 8 /*round mode*/}, {SN_RoundToNearestIntegerScalar, 0, 8 /*round mode*/}, {SN_RoundToNegativeInfinity, OP_SSE41_ROUNDP, 9 /*round mode*/}, {SN_RoundToNegativeInfinityScalar, 0, 9 /*round mode*/}, {SN_RoundToPositiveInfinity, OP_SSE41_ROUNDP, 10 /*round mode*/}, {SN_RoundToPositiveInfinityScalar, 0, 10 /*round mode*/}, {SN_RoundToZero, OP_SSE41_ROUNDP, 11 /*round mode*/}, {SN_RoundToZeroScalar, 0, 11 /*round mode*/}, {SN_TestC, OP_XOP_I4_X_X, INTRINS_SSE_TESTC}, {SN_TestNotZAndNotC, OP_XOP_I4_X_X, INTRINS_SSE_TESTNZ}, {SN_TestZ, OP_XOP_I4_X_X, INTRINS_SSE_TESTZ}, {SN_get_IsSupported} }; static SimdIntrinsic sse42_methods [] = { {SN_CompareGreaterThan, OP_XCOMPARE, CMP_GT}, {SN_Crc32}, {SN_get_IsSupported} }; static SimdIntrinsic pclmulqdq_methods [] = { {SN_CarrylessMultiply}, {SN_get_IsSupported} }; static SimdIntrinsic aes_methods [] = { {SN_Decrypt, OP_XOP_X_X_X, INTRINS_AESNI_AESDEC}, {SN_DecryptLast, OP_XOP_X_X_X, INTRINS_AESNI_AESDECLAST}, {SN_Encrypt, OP_XOP_X_X_X, INTRINS_AESNI_AESENC}, {SN_EncryptLast, OP_XOP_X_X_X, INTRINS_AESNI_AESENCLAST}, {SN_InverseMixColumns, OP_XOP_X_X, INTRINS_AESNI_AESIMC}, {SN_KeygenAssist}, {SN_get_IsSupported} }; static SimdIntrinsic popcnt_methods [] = { {SN_PopCount}, {SN_get_IsSupported} }; static SimdIntrinsic lzcnt_methods [] = { {SN_LeadingZeroCount}, {SN_get_IsSupported} }; static SimdIntrinsic bmi1_methods [] = { {SN_AndNot}, {SN_BitFieldExtract}, {SN_ExtractLowestSetBit}, {SN_GetMaskUpToLowestSetBit}, {SN_ResetLowestSetBit}, {SN_TrailingZeroCount}, {SN_get_IsSupported} }; static SimdIntrinsic bmi2_methods [] = { {SN_MultiplyNoFlags}, {SN_ParallelBitDeposit}, {SN_ParallelBitExtract}, {SN_ZeroHighBits}, {SN_get_IsSupported} }; static SimdIntrinsic x86base_methods [] = { {SN_BitScanForward}, {SN_BitScanReverse}, {SN_get_IsSupported} }; static const IntrinGroup supported_x86_intrinsics [] = { { "Aes", MONO_CPU_X86_AES, aes_methods, sizeof (aes_methods) }, { "Avx", MONO_CPU_X86_AVX, unsupported, sizeof (unsupported) }, { "Avx2", MONO_CPU_X86_AVX2, unsupported, sizeof (unsupported) }, { "AvxVnni", 0, unsupported, sizeof (unsupported) }, { "Bmi1", 
MONO_CPU_X86_BMI1, bmi1_methods, sizeof (bmi1_methods) }, { "Bmi2", MONO_CPU_X86_BMI2, bmi2_methods, sizeof (bmi2_methods) }, { "Fma", MONO_CPU_X86_FMA, unsupported, sizeof (unsupported) }, { "Lzcnt", MONO_CPU_X86_LZCNT, lzcnt_methods, sizeof (lzcnt_methods), TRUE }, { "Pclmulqdq", MONO_CPU_X86_PCLMUL, pclmulqdq_methods, sizeof (pclmulqdq_methods) }, { "Popcnt", MONO_CPU_X86_POPCNT, popcnt_methods, sizeof (popcnt_methods), TRUE }, { "Sse", MONO_CPU_X86_SSE, sse_methods, sizeof (sse_methods) }, { "Sse2", MONO_CPU_X86_SSE2, sse2_methods, sizeof (sse2_methods) }, { "Sse3", MONO_CPU_X86_SSE3, sse3_methods, sizeof (sse3_methods) }, { "Sse41", MONO_CPU_X86_SSE41, sse41_methods, sizeof (sse41_methods) }, { "Sse42", MONO_CPU_X86_SSE42, sse42_methods, sizeof (sse42_methods) }, { "Ssse3", MONO_CPU_X86_SSSE3, ssse3_methods, sizeof (ssse3_methods) }, { "X86Base", 0, x86base_methods, sizeof (x86base_methods) }, }; static MonoInst* emit_x86_intrinsics ( MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoClass *klass, const IntrinGroup *intrin_group, const SimdIntrinsic *info, int id, MonoTypeEnum arg0_type, gboolean is_64bit) { MonoCPUFeatures feature = intrin_group->feature; const SimdIntrinsic *intrinsics = intrin_group->intrinsics; if (feature == MONO_CPU_X86_SSE) { switch (id) { case SN_Shuffle: return emit_simd_ins_for_sig (cfg, klass, OP_SSE_SHUFPS, 0, arg0_type, fsig, args); case SN_ConvertScalarToVector128Single: { int op = 0; switch (fsig->params [1]->type) { case MONO_TYPE_I4: op = OP_SSE_CVTSI2SS; break; case MONO_TYPE_I8: op = OP_SSE_CVTSI2SS64; break; default: g_assert_not_reached (); break; } return emit_simd_ins_for_sig (cfg, klass, op, 0, 0, fsig, args); } case SN_ReciprocalScalar: case SN_ReciprocalSqrtScalar: case SN_SqrtScalar: { int op = 0; switch (id) { case SN_ReciprocalScalar: op = OP_SSE_RCPSS; break; case SN_ReciprocalSqrtScalar: op = OP_SSE_RSQRTSS; break; case SN_SqrtScalar: op = OP_SSE_SQRTSS; break; }; if (fsig->param_count == 1) return emit_simd_ins (cfg, klass, op, args [0]->dreg, args[0]->dreg); else if (fsig->param_count == 2) return emit_simd_ins (cfg, klass, op, args [0]->dreg, args[1]->dreg); else g_assert_not_reached (); break; } case SN_LoadScalarVector128: return NULL; default: return NULL; } } if (feature == MONO_CPU_X86_SSE2) { switch (id) { case SN_Subtract: return emit_simd_ins_for_sig (cfg, klass, OP_XBINOP, arg0_type == MONO_TYPE_R8 ? OP_FSUB : OP_ISUB, arg0_type, fsig, args); case SN_Add: return emit_simd_ins_for_sig (cfg, klass, OP_XBINOP, arg0_type == MONO_TYPE_R8 ? OP_FADD : OP_IADD, arg0_type, fsig, args); case SN_Average: if (arg0_type == MONO_TYPE_U1) return emit_simd_ins_for_sig (cfg, klass, OP_PAVGB_UN, -1, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_U2) return emit_simd_ins_for_sig (cfg, klass, OP_PAVGW_UN, -1, arg0_type, fsig, args); else return NULL; case SN_CompareNotEqual: return emit_simd_ins_for_sig (cfg, klass, arg0_type == MONO_TYPE_R8 ? OP_XCOMPARE_FP : OP_XCOMPARE, CMP_NE, arg0_type, fsig, args); case SN_CompareEqual: return emit_simd_ins_for_sig (cfg, klass, arg0_type == MONO_TYPE_R8 ? OP_XCOMPARE_FP : OP_XCOMPARE, CMP_EQ, arg0_type, fsig, args); case SN_CompareGreaterThan: return emit_simd_ins_for_sig (cfg, klass, arg0_type == MONO_TYPE_R8 ? OP_XCOMPARE_FP : OP_XCOMPARE, CMP_GT, arg0_type, fsig, args); case SN_CompareLessThan: return emit_simd_ins_for_sig (cfg, klass, arg0_type == MONO_TYPE_R8 ? 
OP_XCOMPARE_FP : OP_XCOMPARE, CMP_LT, arg0_type, fsig, args); case SN_ConvertToInt32: if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_I4_X, INTRINS_SSE_CVTSD2SI, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_I4) return emit_simd_ins_for_sig (cfg, klass, OP_EXTRACT_I4, 0, arg0_type, fsig, args); else return NULL; case SN_ConvertToInt64: if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_I8_X, INTRINS_SSE_CVTSD2SI64, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_I8) return emit_simd_ins_for_sig (cfg, klass, OP_EXTRACT_I8, 0 /*element index*/, arg0_type, fsig, args); else g_assert_not_reached (); break; case SN_ConvertScalarToVector128Double: { int op = OP_SSE2_CVTSS2SD; switch (fsig->params [1]->type) { case MONO_TYPE_I4: op = OP_SSE2_CVTSI2SD; break; case MONO_TYPE_I8: op = OP_SSE2_CVTSI2SD64; break; } return emit_simd_ins_for_sig (cfg, klass, op, 0, 0, fsig, args); } case SN_ConvertScalarToVector128Int32: case SN_ConvertScalarToVector128Int64: case SN_ConvertScalarToVector128UInt32: case SN_ConvertScalarToVector128UInt64: return emit_simd_ins_for_sig (cfg, klass, OP_CREATE_SCALAR, -1, arg0_type, fsig, args); case SN_ConvertToUInt32: return emit_simd_ins_for_sig (cfg, klass, OP_EXTRACT_I4, 0 /*element index*/, arg0_type, fsig, args); case SN_ConvertToUInt64: return emit_simd_ins_for_sig (cfg, klass, OP_EXTRACT_I8, 0 /*element index*/, arg0_type, fsig, args); case SN_ConvertToVector128Double: if (arg0_type == MONO_TYPE_R4) return emit_simd_ins_for_sig (cfg, klass, OP_CVTPS2PD, 0, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_I4) return emit_simd_ins_for_sig (cfg, klass, OP_CVTDQ2PD, 0, arg0_type, fsig, args); else return NULL; case SN_ConvertToVector128Int32: if (arg0_type == MONO_TYPE_R4) return emit_simd_ins_for_sig (cfg, klass, OP_CVTPS2DQ, 0, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_CVTPD2DQ, 0, arg0_type, fsig, args); else return NULL; case SN_ConvertToVector128Int32WithTruncation: if (arg0_type == MONO_TYPE_R4) return emit_simd_ins_for_sig (cfg, klass, OP_CVTTPS2DQ, 0, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_CVTTPD2DQ, 0, arg0_type, fsig, args); else return NULL; case SN_ConvertToVector128Single: if (arg0_type == MONO_TYPE_I4) return emit_simd_ins_for_sig (cfg, klass, OP_CVTDQ2PS, 0, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_CVTPD2PS, 0, arg0_type, fsig, args); else return NULL; case SN_LoadAlignedVector128: return emit_simd_ins_for_sig (cfg, klass, OP_SSE_LOADU, 16 /*alignment*/, arg0_type, fsig, args); case SN_LoadVector128: return emit_simd_ins_for_sig (cfg, klass, OP_SSE_LOADU, 1 /*alignment*/, arg0_type, fsig, args); case SN_MoveScalar: return emit_simd_ins_for_sig (cfg, klass, fsig->param_count == 2 ? 
OP_SSE_MOVS2 : OP_SSE_MOVS, -1, arg0_type, fsig, args); case SN_Max: switch (arg0_type) { case MONO_TYPE_U1: return emit_simd_ins_for_sig (cfg, klass, OP_PMAXB_UN, 0, arg0_type, fsig, args); case MONO_TYPE_I2: return emit_simd_ins_for_sig (cfg, klass, OP_PMAXW, 0, arg0_type, fsig, args); case MONO_TYPE_R8: return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_MAXPD, arg0_type, fsig, args); default: g_assert_not_reached (); break; } break; case SN_Min: switch (arg0_type) { case MONO_TYPE_U1: return emit_simd_ins_for_sig (cfg, klass, OP_PMINB_UN, 0, arg0_type, fsig, args); case MONO_TYPE_I2: return emit_simd_ins_for_sig (cfg, klass, OP_PMINW, 0, arg0_type, fsig, args); case MONO_TYPE_R8: return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_MINPD, arg0_type, fsig, args); default: g_assert_not_reached (); break; } break; case SN_Multiply: if (arg0_type == MONO_TYPE_U4) return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_PMULUDQ, 0, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_MULPD, 0, arg0_type, fsig, args); else g_assert_not_reached (); case SN_MultiplyHigh: if (arg0_type == MONO_TYPE_I2) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PMULHW, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_U2) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PMULHUW, arg0_type, fsig, args); else g_assert_not_reached (); case SN_PackSignedSaturate: if (arg0_type == MONO_TYPE_I2) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PACKSSWB, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_I4) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PACKSSDW, arg0_type, fsig, args); else g_assert_not_reached (); case SN_PackUnsignedSaturate: return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_PACKUS, -1, arg0_type, fsig, args); case SN_Extract: g_assert (arg0_type == MONO_TYPE_U2); return emit_simd_ins_for_sig (cfg, klass, OP_XEXTRACT_I4, 0, arg0_type, fsig, args); case SN_Insert: g_assert (arg0_type == MONO_TYPE_I2 || arg0_type == MONO_TYPE_U2); return emit_simd_ins_for_sig (cfg, klass, OP_XINSERT_I2, 0, arg0_type, fsig, args); case SN_ShiftRightLogical: { gboolean is_imm = fsig->params [1]->type == MONO_TYPE_U1; IntrinsicId op = (IntrinsicId)0; switch (arg0_type) { case MONO_TYPE_I2: case MONO_TYPE_U2: op = is_imm ? INTRINS_SSE_PSRLI_W : INTRINS_SSE_PSRL_W; break; case MONO_TYPE_I4: case MONO_TYPE_U4: op = is_imm ? INTRINS_SSE_PSRLI_D : INTRINS_SSE_PSRL_D; break; case MONO_TYPE_I8: case MONO_TYPE_U8: op = is_imm ? INTRINS_SSE_PSRLI_Q : INTRINS_SSE_PSRL_Q; break; default: g_assert_not_reached (); break; } return emit_simd_ins_for_sig (cfg, klass, is_imm ? OP_XOP_X_X_I4 : OP_XOP_X_X_X, op, arg0_type, fsig, args); } case SN_ShiftRightArithmetic: { gboolean is_imm = fsig->params [1]->type == MONO_TYPE_U1; IntrinsicId op = (IntrinsicId)0; switch (arg0_type) { case MONO_TYPE_I2: case MONO_TYPE_U2: op = is_imm ? INTRINS_SSE_PSRAI_W : INTRINS_SSE_PSRA_W; break; case MONO_TYPE_I4: case MONO_TYPE_U4: op = is_imm ? INTRINS_SSE_PSRAI_D : INTRINS_SSE_PSRA_D; break; default: g_assert_not_reached (); break; } return emit_simd_ins_for_sig (cfg, klass, is_imm ? OP_XOP_X_X_I4 : OP_XOP_X_X_X, op, arg0_type, fsig, args); } case SN_ShiftLeftLogical: { gboolean is_imm = fsig->params [1]->type == MONO_TYPE_U1; IntrinsicId op = (IntrinsicId)0; switch (arg0_type) { case MONO_TYPE_I2: case MONO_TYPE_U2: op = is_imm ? 
INTRINS_SSE_PSLLI_W : INTRINS_SSE_PSLL_W; break; case MONO_TYPE_I4: case MONO_TYPE_U4: op = is_imm ? INTRINS_SSE_PSLLI_D : INTRINS_SSE_PSLL_D; break; case MONO_TYPE_I8: case MONO_TYPE_U8: op = is_imm ? INTRINS_SSE_PSLLI_Q : INTRINS_SSE_PSLL_Q; break; default: g_assert_not_reached (); break; } return emit_simd_ins_for_sig (cfg, klass, is_imm ? OP_XOP_X_X_I4 : OP_XOP_X_X_X, op, arg0_type, fsig, args); } case SN_ShiftLeftLogical128BitLane: return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_PSLLDQ, 0, arg0_type, fsig, args); case SN_ShiftRightLogical128BitLane: return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_PSRLDQ, 0, arg0_type, fsig, args); case SN_Shuffle: { if (fsig->param_count == 2) { g_assert (arg0_type == MONO_TYPE_I4 || arg0_type == MONO_TYPE_U4); return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_PSHUFD, 0, arg0_type, fsig, args); } else if (fsig->param_count == 3) { g_assert (arg0_type == MONO_TYPE_R8); return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_SHUFPD, 0, arg0_type, fsig, args); } else { g_assert_not_reached (); break; } } case SN_ShuffleHigh: g_assert (fsig->param_count == 2); return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_PSHUFHW, 0, arg0_type, fsig, args); case SN_ShuffleLow: g_assert (fsig->param_count == 2); return emit_simd_ins_for_sig (cfg, klass, OP_SSE2_PSHUFLW, 0, arg0_type, fsig, args); case SN_SqrtScalar: { if (fsig->param_count == 1) return emit_simd_ins (cfg, klass, OP_SSE2_SQRTSD, args [0]->dreg, args[0]->dreg); else if (fsig->param_count == 2) return emit_simd_ins (cfg, klass, OP_SSE2_SQRTSD, args [0]->dreg, args[1]->dreg); else { g_assert_not_reached (); break; } } case SN_LoadScalarVector128: { int op = 0; switch (arg0_type) { case MONO_TYPE_I4: case MONO_TYPE_U4: op = OP_SSE2_MOVD; break; case MONO_TYPE_I8: case MONO_TYPE_U8: op = OP_SSE2_MOVQ; break; case MONO_TYPE_R8: op = OP_SSE2_MOVUPD; break; default: g_assert_not_reached(); break; } return emit_simd_ins_for_sig (cfg, klass, op, 0, 0, fsig, args); } default: return NULL; } } if (feature == MONO_CPU_X86_SSE3) { switch (id) { case SN_AddSubtract: if (arg0_type == MONO_TYPE_R4) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_ADDSUBPS, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_ADDSUBPD, arg0_type, fsig, args); else g_assert_not_reached (); break; case SN_HorizontalAdd: if (arg0_type == MONO_TYPE_R4) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_HADDPS, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_HADDPD, arg0_type, fsig, args); else g_assert_not_reached (); break; case SN_HorizontalSubtract: if (arg0_type == MONO_TYPE_R4) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_HSUBPS, arg0_type, fsig, args); else if (arg0_type == MONO_TYPE_R8) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_HSUBPD, arg0_type, fsig, args); else g_assert_not_reached (); break; default: g_assert_not_reached (); break; } } if (feature == MONO_CPU_X86_SSSE3) { switch (id) { case SN_AlignRight: return emit_simd_ins_for_sig (cfg, klass, OP_SSSE3_ALIGNR, 0, arg0_type, fsig, args); case SN_HorizontalAdd: if (arg0_type == MONO_TYPE_I2) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PHADDW, arg0_type, fsig, args); return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PHADDD, arg0_type, fsig, args); case SN_HorizontalSubtract: if (arg0_type == MONO_TYPE_I2) 
return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PHSUBW, arg0_type, fsig, args); return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PHSUBD, arg0_type, fsig, args); case SN_Sign: if (arg0_type == MONO_TYPE_I1) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PSIGNB, arg0_type, fsig, args); if (arg0_type == MONO_TYPE_I2) return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PSIGNW, arg0_type, fsig, args); return emit_simd_ins_for_sig (cfg, klass, OP_XOP_X_X_X, INTRINS_SSE_PSIGND, arg0_type, fsig, args); default: g_assert_not_reached (); break; } } if (feature == MONO_CPU_X86_SSE41) { switch (id) { case SN_DotProduct: { int op = 0; switch (arg0_type) { case MONO_TYPE_R4: op = OP_SSE41_DPPS; break; case MONO_TYPE_R8: op = OP_SSE41_DPPD; break; default: g_assert_not_reached (); break; } return emit_simd_ins_for_sig (cfg, klass, op, 0, arg0_type, fsig, args); } case SN_MultipleSumAbsoluteDifferences: return emit_simd_ins_for_sig (cfg, klass, OP_SSE41_MPSADBW, 0, arg0_type, fsig, args); case SN_Blend: return emit_simd_ins_for_sig (cfg, klass, OP_SSE41_BLEND, 0, arg0_type, fsig, args); case SN_BlendVariable: return emit_simd_ins_for_sig (cfg, klass, OP_SSE41_BLENDV, -1, arg0_type, fsig, args); case SN_Extract: { int op = 0; switch (arg0_type) { case MONO_TYPE_U1: op = OP_XEXTRACT_I1; break; case MONO_TYPE_U4: case MONO_TYPE_I4: op = OP_XEXTRACT_I4; break; case MONO_TYPE_U8: case MONO_TYPE_I8: op = OP_XEXTRACT_I8; break; case MONO_TYPE_R4: op = OP_XEXTRACT_R4; break; case MONO_TYPE_I: case MONO_TYPE_U: #if TARGET_SIZEOF_VOID_P == 8 op = OP_XEXTRACT_I8; #else op = OP_XEXTRACT_I4; #endif break; default: g_assert_not_reached(); break; } return emit_simd_ins_for_sig (cfg, klass, op, 0, arg0_type, fsig, args); } case SN_Insert: { int op = arg0_type == MONO_TYPE_R4 ? OP_SSE41_INSERTPS : type_to_xinsert_op (arg0_type); return emit_simd_ins_for_sig (cfg, klass, op, -1, arg0_type, fsig, args); } case SN_CeilingScalar: case SN_FloorScalar: case SN_RoundCurrentDirectionScalar: case SN_RoundToNearestIntegerScalar: case SN_RoundToNegativeInfinityScalar: case SN_RoundToPositiveInfinityScalar: case SN_RoundToZeroScalar: if (fsig->param_count == 2) { return emit_simd_ins_for_sig (cfg, klass, OP_SSE41_ROUNDS, info->default_instc0, arg0_type, fsig, args); } else { MonoInst* ins = emit_simd_ins (cfg, klass, OP_SSE41_ROUNDS, args [0]->dreg, args [0]->dreg); ins->inst_c0 = info->default_instc0; ins->inst_c1 = arg0_type; return ins; } break; default: g_assert_not_reached (); break; } } if (feature == MONO_CPU_X86_SSE42) { switch (id) { case SN_Crc32: { MonoTypeEnum arg1_type = get_underlying_type (fsig->params [1]); return emit_simd_ins_for_sig (cfg, klass, arg1_type == MONO_TYPE_U8 ? OP_SSE42_CRC64 : OP_SSE42_CRC32, arg1_type, arg0_type, fsig, args); } default: g_assert_not_reached (); break; } } if (feature == MONO_CPU_X86_PCLMUL) { switch (id) { case SN_CarrylessMultiply: { return emit_simd_ins_for_sig (cfg, klass, OP_PCLMULQDQ, 0, arg0_type, fsig, args); } default: g_assert_not_reached (); break; } } if (feature == MONO_CPU_X86_AES) { switch (id) { case SN_KeygenAssist: { return emit_simd_ins_for_sig (cfg, klass, OP_AES_KEYGENASSIST, 0, arg0_type, fsig, args); } default: g_assert_not_reached (); break; } } MonoInst *ins = NULL; if (feature == MONO_CPU_X86_POPCNT) { switch (id) { case SN_PopCount: MONO_INST_NEW (cfg, ins, is_64bit ? OP_POPCNT64 : OP_POPCNT32); ins->dreg = is_64bit ? 
alloc_lreg (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->type = is_64bit ? STACK_I8 : STACK_I4; MONO_ADD_INS (cfg->cbb, ins); return ins; default: return NULL; } } if (feature == MONO_CPU_X86_LZCNT) { switch (id) { case SN_LeadingZeroCount: return emit_simd_ins_for_sig (cfg, klass, is_64bit ? OP_LZCNT64 : OP_LZCNT32, 0, arg0_type, fsig, args); default: return NULL; } } if (feature == MONO_CPU_X86_BMI1) { switch (id) { case SN_AndNot: { // (a ^ -1) & b // LLVM replaces it with `andn` int tmp_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); int result_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, is_64bit ? OP_LXOR_IMM : OP_IXOR_IMM, tmp_reg, args [0]->dreg, -1); EMIT_NEW_BIALU (cfg, ins, is_64bit ? OP_LAND : OP_IAND, result_reg, tmp_reg, args [1]->dreg); return ins; } case SN_BitFieldExtract: { int ctlreg = args [1]->dreg; if (fsig->param_count == 2) { } else if (fsig->param_count == 3) { MonoInst *ins = NULL; /* This intrinsic is also implemented in managed code. * TODO: remove this if cross-AOT-assembly inlining works */ int startreg = args [1]->dreg; int lenreg = args [2]->dreg; int dreg1 = alloc_ireg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, OP_SHL_IMM, dreg1, lenreg, 8); int dreg2 = alloc_ireg (cfg); EMIT_NEW_BIALU (cfg, ins, OP_IOR, dreg2, startreg, dreg1); ctlreg = dreg2; } else { g_assert_not_reached (); } return emit_simd_ins (cfg, klass, is_64bit ? OP_BMI1_BEXTR64 : OP_BMI1_BEXTR32, args [0]->dreg, ctlreg); } case SN_GetMaskUpToLowestSetBit: { // x ^ (x - 1) // LLVM replaces it with `blsmsk` int tmp_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); int result_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, is_64bit ? OP_LSUB_IMM : OP_ISUB_IMM, tmp_reg, args [0]->dreg, 1); EMIT_NEW_BIALU (cfg, ins, is_64bit ? OP_LXOR : OP_IXOR, result_reg, args [0]->dreg, tmp_reg); return ins; } case SN_ResetLowestSetBit: { // x & (x - 1) // LLVM replaces it with `blsr` int tmp_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); int result_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); EMIT_NEW_BIALU_IMM (cfg, ins, is_64bit ? OP_LSUB_IMM : OP_ISUB_IMM, tmp_reg, args [0]->dreg, 1); EMIT_NEW_BIALU (cfg, ins, is_64bit ? OP_LAND : OP_IAND, result_reg, args [0]->dreg, tmp_reg); return ins; } case SN_ExtractLowestSetBit: { // x & (0 - x) // LLVM replaces it with `blsi` int tmp_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); int result_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); int zero_reg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); MONO_EMIT_NEW_ICONST (cfg, zero_reg, 0); EMIT_NEW_BIALU (cfg, ins, is_64bit ? OP_LSUB : OP_ISUB, tmp_reg, zero_reg, args [0]->dreg); EMIT_NEW_BIALU (cfg, ins, is_64bit ? OP_LAND : OP_IAND, result_reg, args [0]->dreg, tmp_reg); return ins; } case SN_TrailingZeroCount: MONO_INST_NEW (cfg, ins, is_64bit ? OP_CTTZ64 : OP_CTTZ32); ins->dreg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->type = is_64bit ? STACK_I8 : STACK_I4; MONO_ADD_INS (cfg->cbb, ins); return ins; default: g_assert_not_reached (); } } if (feature == MONO_CPU_X86_BMI2) { switch (id) { case SN_MultiplyNoFlags: { int op = 0; if (fsig->param_count == 2) { op = is_64bit ? OP_MULX_H64 : OP_MULX_H32; } else if (fsig->param_count == 3) { op = is_64bit ? OP_MULX_HL64 : OP_MULX_HL32; } else { g_assert_not_reached (); } return emit_simd_ins_for_sig (cfg, klass, op, 0, 0, fsig, args); } case SN_ZeroHighBits: MONO_INST_NEW (cfg, ins, is_64bit ? 
OP_BZHI64 : OP_BZHI32); ins->dreg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->sreg2 = args [1]->dreg; ins->type = is_64bit ? STACK_I8 : STACK_I4; MONO_ADD_INS (cfg->cbb, ins); return ins; case SN_ParallelBitExtract: MONO_INST_NEW (cfg, ins, is_64bit ? OP_PEXT64 : OP_PEXT32); ins->dreg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->sreg2 = args [1]->dreg; ins->type = is_64bit ? STACK_I8 : STACK_I4; MONO_ADD_INS (cfg->cbb, ins); return ins; case SN_ParallelBitDeposit: MONO_INST_NEW (cfg, ins, is_64bit ? OP_PDEP64 : OP_PDEP32); ins->dreg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->sreg2 = args [1]->dreg; ins->type = is_64bit ? STACK_I8 : STACK_I4; MONO_ADD_INS (cfg->cbb, ins); return ins; default: g_assert_not_reached (); } } if (intrinsics == x86base_methods) { switch (id) { case SN_BitScanForward: MONO_INST_NEW (cfg, ins, is_64bit ? OP_X86_BSF64 : OP_X86_BSF32); ins->dreg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->type = is_64bit ? STACK_I8 : STACK_I4; MONO_ADD_INS (cfg->cbb, ins); return ins; case SN_BitScanReverse: MONO_INST_NEW (cfg, ins, is_64bit ? OP_X86_BSR64 : OP_X86_BSR32); ins->dreg = is_64bit ? alloc_lreg (cfg) : alloc_ireg (cfg); ins->sreg1 = args [0]->dreg; ins->type = is_64bit ? STACK_I8 : STACK_I4; MONO_ADD_INS (cfg->cbb, ins); return ins; default: g_assert_not_reached (); } } return NULL; } static guint16 vector_256_t_methods [] = { SN_get_Count, }; static MonoInst* emit_vector256_t (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *ins; MonoType *etype; MonoClass *klass; int size, len, id; id = lookup_intrins (vector_256_t_methods, sizeof (vector_256_t_methods), cmethod); if (id == -1) return NULL; klass = cmethod->klass; etype = mono_class_get_context (klass)->class_inst->type_argv [0]; size = mono_class_value_size (mono_class_from_mono_type_internal (etype), NULL); g_assert (size); len = 32 / size; if (!MONO_TYPE_IS_PRIMITIVE (etype) || etype->type == MONO_TYPE_CHAR || etype->type == MONO_TYPE_BOOLEAN || etype->type == MONO_TYPE_I || etype->type == MONO_TYPE_U) return NULL; if (cfg->verbose_level > 1) { char *name = mono_method_full_name (cmethod, TRUE); printf (" SIMD intrinsic %s\n", name); g_free (name); } switch (id) { case SN_get_Count: if (!(fsig->param_count == 0 && fsig->ret->type == MONO_TYPE_I4)) break; EMIT_NEW_ICONST (cfg, ins, len); return ins; default: break; } return NULL; } static MonoInst* emit_amd64_intrinsics (const char *class_ns, const char *class_name, MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { if (!strcmp (class_ns, "System.Runtime.Intrinsics.X86")) { return emit_hardware_intrinsics (cfg, cmethod, fsig, args, supported_x86_intrinsics, sizeof (supported_x86_intrinsics), emit_x86_intrinsics); } if (!strcmp (class_ns, "System.Runtime.Intrinsics")) { if (!strcmp (class_name, "Vector256`1")) return emit_vector256_t (cfg, cmethod, fsig, args); } if (!strcmp (class_ns, "System.Numerics")) { if (!strcmp (class_name, "Vector")) return emit_sys_numerics_vector (cfg, cmethod, fsig, args); if (!strcmp (class_name, "Vector`1")) return emit_sys_numerics_vector_t (cfg, cmethod, fsig, args); } return NULL; } #endif // !TARGET_ARM64 #ifdef TARGET_ARM64 static MonoInst* emit_simd_intrinsics (const char *class_ns, const char *class_name, MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { // 
FIXME: implement Vector64<T>, Vector128<T> and Vector<T> for Arm64 if (!strcmp (class_ns, "System.Runtime.Intrinsics.Arm")) { return emit_hardware_intrinsics(cfg, cmethod, fsig, args, supported_arm_intrinsics, sizeof (supported_arm_intrinsics), emit_arm64_intrinsics); } return NULL; } #elif TARGET_AMD64 // TODO: test and enable for x86 too static MonoInst* emit_simd_intrinsics (const char *class_ns, const char *class_name, MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { MonoInst *simd_inst = emit_amd64_intrinsics (class_ns, class_name, cfg, cmethod, fsig, args); if (simd_inst != NULL) cfg->uses_simd_intrinsics |= MONO_CFG_USES_SIMD_INTRINSICS; return simd_inst; } #else static MonoInst* emit_simd_intrinsics (const char *class_ns, const char *class_name, MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { return NULL; } #endif MonoInst* mono_emit_simd_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) { const char *class_name; const char *class_ns; MonoImage *image = m_class_get_image (cmethod->klass); if (image != mono_get_corlib ()) return NULL; class_ns = m_class_get_name_space (cmethod->klass); class_name = m_class_get_name (cmethod->klass); // If cmethod->klass is nested, the namespace is on the enclosing class. if (m_class_get_nested_in (cmethod->klass)) class_ns = m_class_get_name_space (m_class_get_nested_in (cmethod->klass)); #if defined(TARGET_ARM64) || defined(TARGET_AMD64) if (!strcmp (class_ns, "System.Runtime.Intrinsics")) { if (!strcmp (class_name, "Vector128") || !strcmp (class_name, "Vector64")) return emit_sri_vector (cfg, cmethod, fsig, args); } if (!strcmp (class_ns, "System.Runtime.Intrinsics")) { if (!strcmp (class_name, "Vector128`1") || !strcmp (class_name, "Vector64`1")) return emit_vector64_vector128_t (cfg, cmethod, fsig, args); } #endif // defined(TARGET_ARM64) || defined(TARGET_AMD64) return emit_simd_intrinsics (class_ns, class_name, cfg, cmethod, fsig, args); } /* * Windows x64 value type ABI uses reg/stack references (ArgValuetypeAddrInIReg/ArgValuetypeAddrOnStack) * for function arguments. When using SIMD intrinsics, arguments optimized into OP_ARG need to be decomposed * into corresponding SIMD LOADX/STOREX instructions. 
*/ #if defined(TARGET_WIN32) && defined(TARGET_AMD64) static gboolean decompose_vtype_opt_uses_simd_intrinsics (MonoCompile *cfg, MonoInst *ins) { if (cfg->uses_simd_intrinsics & MONO_CFG_USES_SIMD_INTRINSICS) return TRUE; switch (ins->opcode) { case OP_XMOVE: case OP_XZERO: case OP_XPHI: case OP_LOADX_MEMBASE: case OP_LOADX_ALIGNED_MEMBASE: case OP_STOREX_MEMBASE: case OP_STOREX_ALIGNED_MEMBASE_REG: return TRUE; default: return FALSE; } } static void decompose_vtype_opt_load_arg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, gint32 *sreg_int32) { guint32 *sreg = (guint32*)sreg_int32; MonoInst *src_var = get_vreg_to_inst (cfg, *sreg); if (src_var && src_var->opcode == OP_ARG && src_var->klass && MONO_CLASS_IS_SIMD (cfg, src_var->klass)) { MonoInst *varload_ins, *load_ins; NEW_VARLOADA (cfg, varload_ins, src_var, src_var->inst_vtype); mono_bblock_insert_before_ins (bb, ins, varload_ins); MONO_INST_NEW (cfg, load_ins, OP_LOADX_MEMBASE); load_ins->klass = src_var->klass; load_ins->type = STACK_VTYPE; load_ins->sreg1 = varload_ins->dreg; load_ins->dreg = alloc_xreg (cfg); mono_bblock_insert_after_ins (bb, varload_ins, load_ins); *sreg = load_ins->dreg; } } static void decompose_vtype_opt_store_arg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, gint32 *dreg_int32) { guint32 *dreg = (guint32*)dreg_int32; MonoInst *dest_var = get_vreg_to_inst (cfg, *dreg); if (dest_var && dest_var->opcode == OP_ARG && dest_var->klass && MONO_CLASS_IS_SIMD (cfg, dest_var->klass)) { MonoInst *varload_ins, *store_ins; *dreg = alloc_xreg (cfg); NEW_VARLOADA (cfg, varload_ins, dest_var, dest_var->inst_vtype); mono_bblock_insert_after_ins (bb, ins, varload_ins); MONO_INST_NEW (cfg, store_ins, OP_STOREX_MEMBASE); store_ins->klass = dest_var->klass; store_ins->type = STACK_VTYPE; store_ins->sreg1 = *dreg; store_ins->dreg = varload_ins->dreg; mono_bblock_insert_after_ins (bb, varload_ins, store_ins); } } void mono_simd_decompose_intrinsic (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins) { if ((cfg->opt & MONO_OPT_SIMD) && decompose_vtype_opt_uses_simd_intrinsics(cfg, ins)) { const char *spec = INS_INFO (ins->opcode); if (spec [MONO_INST_SRC1] == 'x') decompose_vtype_opt_load_arg (cfg, bb, ins, &(ins->sreg1)); if (spec [MONO_INST_SRC2] == 'x') decompose_vtype_opt_load_arg (cfg, bb, ins, &(ins->sreg2)); if (spec [MONO_INST_SRC3] == 'x') decompose_vtype_opt_load_arg (cfg, bb, ins, &(ins->sreg3)); if (spec [MONO_INST_DEST] == 'x') decompose_vtype_opt_store_arg (cfg, bb, ins, &(ins->dreg)); } } #else void mono_simd_decompose_intrinsic (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins) { } #endif /*defined(TARGET_WIN32) && defined(TARGET_AMD64)*/ void mono_simd_simplify_indirection (MonoCompile *cfg) { } #endif /* DISABLE_JIT */ #endif /* MONO_ARCH_SIMD_INTRINSICS */ #if defined(TARGET_AMD64) void ves_icall_System_Runtime_Intrinsics_X86_X86Base___cpuidex (int abcd[4], int function_id, int subfunction_id) { #ifndef MONO_CROSS_COMPILE mono_hwcap_x86_call_cpuidex (function_id, subfunction_id, &abcd [0], &abcd [1], &abcd [2], &abcd [3]); #endif } #endif MONO_EMPTY_SOURCE_FILE (simd_intrinsics_netcore);
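A side note on the BMI1 branch above: for AndNot, ExtractLowestSetBit, GetMaskUpToLowestSetBit and ResetLowestSetBit the JIT deliberately emits plain ALU sequences ((a ^ -1) & b, x & (0 - x), x ^ (x - 1), x & (x - 1)) and relies on LLVM to pattern-match them back into andn/blsi/blsmsk/blsr. A minimal standalone C check of those bit identities, with arbitrary sample values:

#include <assert.h>
#include <stdint.h>

int
main (void)
{
	uint32_t a = 0xF0F0u, b = 0x00FFu, x = 0x00B0u; /* arbitrary sample values */

	/* AndNot: (a ^ -1) & b is ~a & b, which `andn` computes */
	assert (((a ^ 0xFFFFFFFFu) & b) == (~a & b));

	/* ResetLowestSetBit: x & (x - 1) clears the lowest set bit (`blsr`) */
	assert ((x & (x - 1)) == 0x00A0u);

	/* GetMaskUpToLowestSetBit: x ^ (x - 1) sets every bit up to and
	 * including the lowest set bit (`blsmsk`) */
	assert ((x ^ (x - 1)) == 0x001Fu);

	/* ExtractLowestSetBit: x & (0 - x) isolates the lowest set bit (`blsi`) */
	assert ((x & (0u - x)) == 0x0010u);

	return 0;
}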
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/native/libs/System.Security.Cryptography.Native/pal_ocsp.c
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "pal_ocsp.h" void CryptoNative_OcspRequestDestroy(OCSP_REQUEST* request) { if (request != NULL) { OCSP_REQUEST_free(request); } } int32_t CryptoNative_GetOcspRequestDerSize(OCSP_REQUEST* req) { ERR_clear_error(); return i2d_OCSP_REQUEST(req, NULL); } int32_t CryptoNative_EncodeOcspRequest(OCSP_REQUEST* req, uint8_t* buf) { ERR_clear_error(); return i2d_OCSP_REQUEST(req, &buf); } OCSP_RESPONSE* CryptoNative_DecodeOcspResponse(const uint8_t* buf, int32_t len) { ERR_clear_error(); if (buf == NULL || len == 0) { return NULL; } return d2i_OCSP_RESPONSE(NULL, &buf, len); } void CryptoNative_OcspResponseDestroy(OCSP_RESPONSE* response) { if (response != NULL) { OCSP_RESPONSE_free(response); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "pal_ocsp.h" void CryptoNative_OcspRequestDestroy(OCSP_REQUEST* request) { if (request != NULL) { OCSP_REQUEST_free(request); } } int32_t CryptoNative_GetOcspRequestDerSize(OCSP_REQUEST* req) { ERR_clear_error(); return i2d_OCSP_REQUEST(req, NULL); } int32_t CryptoNative_EncodeOcspRequest(OCSP_REQUEST* req, uint8_t* buf) { ERR_clear_error(); return i2d_OCSP_REQUEST(req, &buf); } OCSP_RESPONSE* CryptoNative_DecodeOcspResponse(const uint8_t* buf, int32_t len) { ERR_clear_error(); if (buf == NULL || len == 0) { return NULL; } return d2i_OCSP_RESPONSE(NULL, &buf, len); } void CryptoNative_OcspResponseDestroy(OCSP_RESPONSE* response) { if (response != NULL) { OCSP_RESPONSE_free(response); } }
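The size/encode pair above follows the usual OpenSSL i2d two-call convention: call i2d with a NULL buffer to learn the DER length, then call again with a real buffer to write the encoding. A hedged caller sketch (EncodeRequest is a hypothetical helper; obtaining a valid OCSP_REQUEST is assumed to happen elsewhere):

#include <stdint.h>
#include <stdlib.h>
#include "pal_ocsp.h"

static uint8_t* EncodeRequest(OCSP_REQUEST* req, int32_t* outLen)
{
    // First call: NULL buffer, just ask for the DER size.
    int32_t len = CryptoNative_GetOcspRequestDerSize(req);
    if (len <= 0)
        return NULL;

    uint8_t* buf = (uint8_t*)malloc((size_t)len);
    if (buf == NULL)
        return NULL;

    // Second call: encode into the buffer. i2d_OCSP_REQUEST advances the
    // pointer it is handed, but the shim passes a local copy, so buf still
    // points at the start of the encoding here.
    if (CryptoNative_EncodeOcspRequest(req, buf) != len)
    {
        free(buf);
        return NULL;
    }

    *outLen = len;
    return buf;
}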
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/eglib/test/timer.c
#include <config.h> #include <glib.h> #include <string.h> #include <math.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <stdlib.h> #include <stdio.h> #ifdef G_OS_WIN32 #include <windows.h> #define sleep(t) Sleep((t) * 1000) #endif #include "test.h" static RESULT test_timer (void) { GTimer *timer; gdouble elapsed1, elapsed2; gulong usec = 0; timer = g_timer_new (); sleep (1); elapsed1 = g_timer_elapsed (timer, NULL); if ((elapsed1 + 0.1) < 1.0) return FAILED ("Elapsed time should be around 1s and was %f", elapsed1); g_timer_stop (timer); elapsed1 = g_timer_elapsed (timer, NULL); elapsed2 = g_timer_elapsed (timer, &usec); if (fabs (elapsed1 - elapsed2) > 0.000001) return FAILED ("The elapsed times are not equal %f - %f.", elapsed1, elapsed2); elapsed2 *= 1000000; while (elapsed2 > 1000000) elapsed2 -= 1000000; if (fabs (usec - elapsed2) > 100.0) return FAILED ("usecs are wrong."); g_timer_destroy (timer); return OK; } static Test timer_tests [] = { {"g_timer", test_timer}, {NULL, NULL} }; DEFINE_TEST_GROUP_INIT(timer_tests_init, timer_tests)
#include <config.h> #include <glib.h> #include <string.h> #include <math.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <stdlib.h> #include <stdio.h> #ifdef G_OS_WIN32 #include <windows.h> #define sleep(t) Sleep((t) * 1000) #endif #include "test.h" static RESULT test_timer (void) { GTimer *timer; gdouble elapsed1, elapsed2; gulong usec = 0; timer = g_timer_new (); sleep (1); elapsed1 = g_timer_elapsed (timer, NULL); if ((elapsed1 + 0.1) < 1.0) return FAILED ("Elapsed time should be around 1s and was %f", elapsed1); g_timer_stop (timer); elapsed1 = g_timer_elapsed (timer, NULL); elapsed2 = g_timer_elapsed (timer, &usec); if (fabs (elapsed1 - elapsed2) > 0.000001) return FAILED ("The elapsed times are not equal %f - %f.", elapsed1, elapsed2); elapsed2 *= 1000000; while (elapsed2 > 1000000) elapsed2 -= 1000000; if (fabs (usec - elapsed2) > 100.0) return FAILED ("usecs are wrong."); g_timer_destroy (timer); return OK; } static Test timer_tests [] = { {"g_timer", test_timer}, {NULL, NULL} }; DEFINE_TEST_GROUP_INIT(timer_tests_init, timer_tests)
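For what the multiply/subtract loop in test_timer is doing: it reduces elapsed2 to the sub-second part of the elapsed time, expressed in microseconds. Up to floating-point rounding (and a boundary quirk at exactly one second, since the loop uses a strict >), it matches this fmod-based sketch; sub_second_usecs is a hypothetical helper:

#include <math.h>

/* Hypothetical helper: same result as
 * `elapsed2 *= 1000000; while (elapsed2 > 1000000) elapsed2 -= 1000000;`
 * for non-negative inputs, up to floating-point rounding. */
static double
sub_second_usecs (double elapsed_seconds)
{
	return fmod (elapsed_seconds, 1.0) * 1000000.0;
}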
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./docs/design/features/host-components.md
# Components of the hosting The .NET Core default hosting setup consists of several components which are described here. ## Entry-point hosts .NET Core comes with several executables/libraries which act as the main entry-point to start code execution. These are typically referred to as the "host": * `dotnet` (executable) - which comes from a shared location and is typically the latest version available on the machine. This is also sometimes called the "muxer". * `apphost` (executable) - which is used to give the app an actual executable which can be run directly. The executable will be named using the application name. The advantage of having an app-local executable is that it can be customized for each app (not just the name, but icon, OS behavior and so on). * `comhost` (library) - which is used to enable COM server hosting. A component which wants to expose COM server objects will be built with this dynamic library in its output. The `comhost` then acts as the main entry point for the OS. * `ijwhost` (library) - which is used to enable loading of IJW assemblies. The library exposes functionality needed by the C++ compiler to generate mixed mode assemblies. * `nethost` (library) - which is used by native apps (any app which is not .NET Core) to load .NET Core code dynamically. The entry-point typically does just one thing: it finds the `hostfxr` library and passes control to it. It also exposes the right entry points for its purpose (so the "main" for `dotnet` and `apphost`, the COM exports for `comhost` and so on). * `dotnet` host - `hostfxr` is obtained from the `./host/fxr/<highestversion>` folder (relative to the location of the `dotnet` host). * `apphost`, `comhost` and the others - `hostfxr` is located using this process: 1. The app's folder is searched first. This is either the folder where the entry-point host lives or, in the case of `apphost`, the path it has embedded in it as the app path. 2. If the `DOTNET_ROOT` environment variable is defined, that path is searched. 3. The default shared locations are searched. ## Host FXR This library finds and resolves the runtime and all the frameworks the app needs. Then it loads the `hostpolicy` library and transfers control to it. The host FXR library reads the `.runtimeconfig.json` of the app (and all its dependent frameworks) and resolves the frameworks. It implements the algorithm for framework resolution as described in [SharedFX Lookup](multilevel-sharedfx-lookup.md) and in [Framework version resolution](framework-version-resolution.md). In most cases the latest available version of `hostfxr` is used. Self-contained apps use `hostfxr` from the app folder. The main reason to split the entry-point host and the `hostfxr` is to allow for servicing the logic in `hostfxr` without the need to stop all instances of the executable host currently running. In the case of `apphost` and all the library hosts, servicing the entry-point host is impossible as they are part of the application itself. ## Host Policy The host policy library implements all the policies to actually load the runtime, apply configuration, and resolve all of the app's dependencies, and then calls the runtime to run the app or load the required component. The host policy library lives in the runtime folder and is versioned alongside it. Which version is used is specified by the app, as the app specifies which version of the .NET runtime to use (done directly or indirectly by referencing the `Microsoft.NETCore.App` framework, or carrying everything app-local). 
The host policy library reads the `.deps.json` file of the app (and the `.deps.json` of all the referenced frameworks). It resolves all the assemblies specified in the `.deps.json` for the app and frameworks and creates a list of assembly paths (also called the TPA). It does a similar thing for native dependencies as well. Finally, the host policy library loads the runtime `coreclr` library and initializes it (among other things with the TPA). The version of the runtime (and its location) is now already determined, since the host policy was loaded from the same framework. Then it calls the runtime with the configuration information, which runs the app or performs other requested actions (like COM activation).
# Components of the hosting The .NET Core default hosting setup consists of several components which are described here. ## Entry-point hosts .NET Core comes with several executables/libraries which act as the main entry-point to start code execution. These are typically referred to as the "host": * `dotnet` (executable) - which comes from a shared location and is typically the latest version available on the machine. This is also sometimes called the "muxer". * `apphost` (executable) - which is used to give the app an actual executable which can be run directly. The executable will be named using the application name. The advantage of having an app-local executable is that it can be customized for each app (not just the name, but icon, OS behavior and so on). * `comhost` (library) - which is used to enable COM server hosting. A component which wants to expose COM server objects will be built with this dynamic library in its output. The `comhost` then acts as the main entry point for the OS. * `ijwhost` (library) - which is used to enable loading of IJW assemblies. The library exposes functionality needed by the C++ compiler to generate mixed mode assemblies. * `nethost` (library) - which is used by native apps (any app which is not .NET Core) to load .NET Core code dynamically. The entry-point typically does just one thing: it finds the `hostfxr` library and passes control to it. It also exposes the right entry points for its purpose (so the "main" for `dotnet` and `apphost`, the COM exports for `comhost` and so on). * `dotnet` host - `hostfxr` is obtained from the `./host/fxr/<highestversion>` folder (relative to the location of the `dotnet` host). * `apphost`, `comhost` and the others - `hostfxr` is located using this process: 1. The app's folder is searched first. This is either the folder where the entry-point host lives or, in the case of `apphost`, the path it has embedded in it as the app path. 2. If the `DOTNET_ROOT` environment variable is defined, that path is searched. 3. The default shared locations are searched. ## Host FXR This library finds and resolves the runtime and all the frameworks the app needs. Then it loads the `hostpolicy` library and transfers control to it. The host FXR library reads the `.runtimeconfig.json` of the app (and all its dependent frameworks) and resolves the frameworks. It implements the algorithm for framework resolution as described in [SharedFX Lookup](multilevel-sharedfx-lookup.md) and in [Framework version resolution](framework-version-resolution.md). In most cases the latest available version of `hostfxr` is used. Self-contained apps use `hostfxr` from the app folder. The main reason to split the entry-point host and the `hostfxr` is to allow for servicing the logic in `hostfxr` without the need to stop all instances of the executable host currently running. In the case of `apphost` and all the library hosts, servicing the entry-point host is impossible as they are part of the application itself. ## Host Policy The host policy library implements all the policies to actually load the runtime, apply configuration, and resolve all of the app's dependencies, and then calls the runtime to run the app or load the required component. The host policy library lives in the runtime folder and is versioned alongside it. Which version is used is specified by the app, as the app specifies which version of the .NET runtime to use (done directly or indirectly by referencing the `Microsoft.NETCore.App` framework, or carrying everything app-local). 
The host policy library reads the `.deps.json` file of the app (and the `.deps.json` of all the referenced frameworks). It resolves all the assemblies specified in the `.deps.json` for the app and frameworks and creates a list of assembly paths (also called the TPA). It does a similar thing for native dependencies as well. Finally, the host policy library loads the runtime `coreclr` library and initializes it (among other things with the TPA). The version of the runtime (and its location) is now already determined, since the host policy was loaded from the same framework. Then it calls the runtime with the configuration information, which runs the app or performs other requested actions (like COM activation).
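To make the `nethost` flow described above concrete, here is a minimal Unix-only sketch of a native (non-.NET) app locating `hostfxr` and asking it for a runtime delegate. This is an illustration under stated assumptions, not the authoritative hosting code: error handling is elided, "app.runtimeconfig.json" is a placeholder path, and the snippet assumes the `nethost.h`/`hostfxr.h` headers shipped with the .NET SDK.

#include <nethost.h>
#include <hostfxr.h>
#include <dlfcn.h>
#include <stddef.h>

void load_runtime(void)
{
    // 1. Ask nethost where hostfxr lives (app folder, DOTNET_ROOT, shared locations).
    char_t hostfxr_path[4096];
    size_t path_size = sizeof(hostfxr_path) / sizeof(char_t);
    get_hostfxr_path(hostfxr_path, &path_size, NULL);

    // 2. Load hostfxr and bind the exports used below.
    void* lib = dlopen(hostfxr_path, RTLD_LAZY);
    hostfxr_initialize_for_runtime_config_fn init =
        (hostfxr_initialize_for_runtime_config_fn)dlsym(lib, "hostfxr_initialize_for_runtime_config");
    hostfxr_get_runtime_delegate_fn get_delegate =
        (hostfxr_get_runtime_delegate_fn)dlsym(lib, "hostfxr_get_runtime_delegate");
    hostfxr_close_fn close_fptr = (hostfxr_close_fn)dlsym(lib, "hostfxr_close");

    // 3. hostfxr reads the .runtimeconfig.json and resolves the frameworks;
    //    hostpolicy then builds the TPA and initializes coreclr on first use.
    hostfxr_handle ctx = NULL;
    init("app.runtimeconfig.json", NULL, &ctx);

    // 4. Ask for a delegate that can load an assembly and return a managed
    //    function pointer; this is where the app would enter managed code.
    void* load_assembly_and_get_function_pointer = NULL;
    get_delegate(ctx, hdt_load_assembly_and_get_function_pointer,
                 &load_assembly_and_get_function_pointer);

    close_fptr(ctx);
}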
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/arm/Lcreate_addr_space.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gcreate_addr_space.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gcreate_addr_space.c" #endif
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./eng/native/version/_version.c
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. static char sccsid[] __attribute__((used)) = "@(#)No version information produced";
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. static char sccsid[] __attribute__((used)) = "@(#)No version information produced";
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/eglib/test/spawn.c
#include <config.h> #include <glib.h> #include <string.h> #include <stdio.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include "test.h" #ifdef G_OS_WIN32 #include <io.h> #define read _read #define close _close #endif static RESULT test_spawn_async (void) { #if HAVE_G_SPAWN /* gboolean g_spawn_async_with_pipes (const gchar *working_directory, gchar **argv, gchar **envp, GSpawnFlags flags, GSpawnChildSetupFunc child_setup, gpointer user_data, GPid *child_pid, gint *standard_input, gint *standard_output, gint *standard_error, GError **gerror) */ char *argv [15]; int stdout_fd = -1; char buffer [512]; GPid child_pid = 0; memset (argv, 0, 15 * sizeof (char *)); argv [0] = (char*)"ls"; if (!g_spawn_async_with_pipes (NULL, argv, NULL, G_SPAWN_SEARCH_PATH, NULL, NULL, &child_pid, NULL, &stdout_fd, NULL, NULL)) return FAILED ("1 Failed to run ls"); if (child_pid == 0) return FAILED ("2 child pid not returned"); if (stdout_fd == -1) return FAILED ("3 out fd is -1"); while (read (stdout_fd, buffer, 512) > 0); close (stdout_fd); #endif return OK; } static Test spawn_tests [] = { {"g_spawn_async_with_pipes", test_spawn_async}, {NULL, NULL} }; DEFINE_TEST_GROUP_INIT(spawn_tests_init, spawn_tests)
#include <config.h> #include <glib.h> #include <string.h> #include <stdio.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include "test.h" #ifdef G_OS_WIN32 #include <io.h> #define read _read #define close _close #endif static RESULT test_spawn_async (void) { #if HAVE_G_SPAWN /* gboolean g_spawn_async_with_pipes (const gchar *working_directory, gchar **argv, gchar **envp, GSpawnFlags flags, GSpawnChildSetupFunc child_setup, gpointer user_data, GPid *child_pid, gint *standard_input, gint *standard_output, gint *standard_error, GError **gerror) */ char *argv [15]; int stdout_fd = -1; char buffer [512]; GPid child_pid = 0; memset (argv, 0, 15 * sizeof (char *)); argv [0] = (char*)"ls"; if (!g_spawn_async_with_pipes (NULL, argv, NULL, G_SPAWN_SEARCH_PATH, NULL, NULL, &child_pid, NULL, &stdout_fd, NULL, NULL)) return FAILED ("1 Failed to run ls"); if (child_pid == 0) return FAILED ("2 child pid not returned"); if (stdout_fd == -1) return FAILED ("3 out fd is -1"); while (read (stdout_fd, buffer, 512) > 0); close (stdout_fd); #endif return OK; } static Test spawn_tests [] = { {"g_spawn_async_with_pipes", test_spawn_async}, {NULL, NULL} }; DEFINE_TEST_GROUP_INIT(spawn_tests_init, spawn_tests)
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/native/libs/System.Security.Cryptography.Native/opensslshim.c
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #include <assert.h> #include <dlfcn.h> #include <pthread.h> #include <stdio.h> #include <string.h> #include "opensslshim.h" #include "pal_atomic.h" // Define pointers to all the used OpenSSL functions #define REQUIRED_FUNCTION(fn) TYPEOF(fn) fn##_ptr; #define REQUIRED_FUNCTION_110(fn) TYPEOF(fn) fn##_ptr; #define LIGHTUP_FUNCTION(fn) TYPEOF(fn) fn##_ptr; #define FALLBACK_FUNCTION(fn) TYPEOF(fn) fn##_ptr; #define RENAMED_FUNCTION(fn,oldfn) TYPEOF(fn) fn##_ptr; #define LEGACY_FUNCTION(fn) TYPEOF(fn) fn##_ptr; FOR_ALL_OPENSSL_FUNCTIONS #undef LEGACY_FUNCTION #undef RENAMED_FUNCTION #undef FALLBACK_FUNCTION #undef LIGHTUP_FUNCTION #undef REQUIRED_FUNCTION_110 #undef REQUIRED_FUNCTION // x.x.x, considering the max number of decimal digits for each component #define MaxVersionStringLength 32 static void* volatile libssl = NULL; #ifdef __APPLE__ #define DYLIBNAME_PREFIX "libssl." #define DYLIBNAME_SUFFIX ".dylib" #define MAKELIB(v) DYLIBNAME_PREFIX v DYLIBNAME_SUFFIX #else #define SONAME_BASE "libssl.so." #define MAKELIB(v) SONAME_BASE v #endif static void DlOpen(const char* libraryName) { void* libsslNew = dlopen(libraryName, RTLD_LAZY); // check is someone else has opened and published libssl already if (!pal_atomic_cas_ptr(&libssl, libsslNew, NULL)) { dlclose(libsslNew); } } static void OpenLibraryOnce() { // If there is an override of the version specified using the CLR_OPENSSL_VERSION_OVERRIDE // env variable, try to load that first. // The format of the value in the env variable is expected to be the version numbers, // like 1.0.0, 1.0.2 etc. char* versionOverride = getenv("CLR_OPENSSL_VERSION_OVERRIDE"); if ((versionOverride != NULL) && strnlen(versionOverride, MaxVersionStringLength + 1) <= MaxVersionStringLength) { #ifdef __APPLE__ char soName[sizeof(DYLIBNAME_PREFIX) + MaxVersionStringLength + sizeof(DYLIBNAME_SUFFIX)] = DYLIBNAME_PREFIX; strcat(soName, versionOverride); strcat(soName, DYLIBNAME_SUFFIX); #else char soName[sizeof(SONAME_BASE) + MaxVersionStringLength] = SONAME_BASE; strcat(soName, versionOverride); #endif DlOpen(soName); } if (libssl == NULL) { // Prefer OpenSSL 3.x DlOpen(MAKELIB("3")); } if (libssl == NULL) { DlOpen(MAKELIB("1.1")); } if (libssl == NULL) { // Debian 9 has dropped support for SSLv3 and so they have bumped their soname. Let's try it // before trying the version 1.0.0 to make it less probable that some of our other dependencies // end up loading conflicting version of libssl. DlOpen(MAKELIB("1.0.2")); } if (libssl == NULL) { // Now try the default versioned so naming as described in the OpenSSL doc DlOpen(MAKELIB("1.0.0")); } if (libssl == NULL) { // Fedora derived distros use different naming for the version 1.0.0 DlOpen(MAKELIB("10")); } // FreeBSD uses a different suffix numbering convention. 
// Current supported FreeBSD releases should use the order .11 -> .111 if (libssl == NULL) { DlOpen(MAKELIB("11")); } if (libssl == NULL) { DlOpen(MAKELIB("111")); } } static pthread_once_t g_openLibrary = PTHREAD_ONCE_INIT; int OpenLibrary() { pthread_once(&g_openLibrary, OpenLibraryOnce); if (libssl != NULL) { return 1; } else { return 0; } } void InitializeOpenSSLShim(void) { if (!OpenLibrary()) { fprintf(stderr, "No usable version of libssl was found\n"); abort(); } // A function defined in libcrypto.so.1.0.0/libssl.so.1.0.0 that is not defined in // libcrypto.so.1.1.0/libssl.so.1.1.0 const void* v1_0_sentinel = dlsym(libssl, "SSL_state"); // Only permit a single assignment here so that two assemblies both triggering the initializer doesn't cause a // race where the fn_ptr is nullptr, then properly bound, then goes back to nullptr right before being used (then bound again). void* volatile tmp_ptr; // Get pointers to all the functions that are needed #define REQUIRED_FUNCTION(fn) \ if (!(fn##_ptr = (TYPEOF(fn))(dlsym(libssl, #fn)))) { fprintf(stderr, "Cannot get required symbol " #fn " from libssl\n"); abort(); } #define REQUIRED_FUNCTION_110(fn) \ if (!v1_0_sentinel && !(fn##_ptr = (TYPEOF(fn))(dlsym(libssl, #fn)))) { fprintf(stderr, "Cannot get required symbol " #fn " from libssl\n"); abort(); } #define LIGHTUP_FUNCTION(fn) \ fn##_ptr = (TYPEOF(fn))(dlsym(libssl, #fn)); #define FALLBACK_FUNCTION(fn) \ if (!(tmp_ptr = dlsym(libssl, #fn))) { tmp_ptr = (void*)local_##fn; } \ fn##_ptr = (TYPEOF(fn))tmp_ptr; #define RENAMED_FUNCTION(fn,oldfn) \ tmp_ptr = dlsym(libssl, #fn);\ if (!tmp_ptr && !(tmp_ptr = dlsym(libssl, #oldfn))) { fprintf(stderr, "Cannot get required symbol " #oldfn " from libssl\n"); abort(); } \ fn##_ptr = (TYPEOF(fn))tmp_ptr; #define LEGACY_FUNCTION(fn) \ if (v1_0_sentinel && !(fn##_ptr = (TYPEOF(fn))(dlsym(libssl, #fn)))) { fprintf(stderr, "Cannot get required symbol " #fn " from libssl\n"); abort(); } FOR_ALL_OPENSSL_FUNCTIONS #undef LEGACY_FUNCTION #undef RENAMED_FUNCTION #undef FALLBACK_FUNCTION #undef LIGHTUP_FUNCTION #undef REQUIRED_FUNCTION_110 #undef REQUIRED_FUNCTION // Sanity check that we have at least one functioning way of reporting errors. if (ERR_put_error_ptr == &local_ERR_put_error) { if (ERR_new_ptr == NULL || ERR_set_debug_ptr == NULL || ERR_set_error_ptr == NULL) { fprintf(stderr, "Cannot determine the error reporting routine from libssl\n"); abort(); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #include <assert.h> #include <dlfcn.h> #include <pthread.h> #include <stdio.h> #include <string.h> #include "opensslshim.h" #include "pal_atomic.h" // Define pointers to all the used OpenSSL functions #define REQUIRED_FUNCTION(fn) TYPEOF(fn) fn##_ptr; #define REQUIRED_FUNCTION_110(fn) TYPEOF(fn) fn##_ptr; #define LIGHTUP_FUNCTION(fn) TYPEOF(fn) fn##_ptr; #define FALLBACK_FUNCTION(fn) TYPEOF(fn) fn##_ptr; #define RENAMED_FUNCTION(fn,oldfn) TYPEOF(fn) fn##_ptr; #define LEGACY_FUNCTION(fn) TYPEOF(fn) fn##_ptr; FOR_ALL_OPENSSL_FUNCTIONS #undef LEGACY_FUNCTION #undef RENAMED_FUNCTION #undef FALLBACK_FUNCTION #undef LIGHTUP_FUNCTION #undef REQUIRED_FUNCTION_110 #undef REQUIRED_FUNCTION // x.x.x, considering the max number of decimal digits for each component #define MaxVersionStringLength 32 static void* volatile libssl = NULL; #ifdef __APPLE__ #define DYLIBNAME_PREFIX "libssl." #define DYLIBNAME_SUFFIX ".dylib" #define MAKELIB(v) DYLIBNAME_PREFIX v DYLIBNAME_SUFFIX #else #define SONAME_BASE "libssl.so." #define MAKELIB(v) SONAME_BASE v #endif static void DlOpen(const char* libraryName) { void* libsslNew = dlopen(libraryName, RTLD_LAZY); // check is someone else has opened and published libssl already if (!pal_atomic_cas_ptr(&libssl, libsslNew, NULL)) { dlclose(libsslNew); } } static void OpenLibraryOnce() { // If there is an override of the version specified using the CLR_OPENSSL_VERSION_OVERRIDE // env variable, try to load that first. // The format of the value in the env variable is expected to be the version numbers, // like 1.0.0, 1.0.2 etc. char* versionOverride = getenv("CLR_OPENSSL_VERSION_OVERRIDE"); if ((versionOverride != NULL) && strnlen(versionOverride, MaxVersionStringLength + 1) <= MaxVersionStringLength) { #ifdef __APPLE__ char soName[sizeof(DYLIBNAME_PREFIX) + MaxVersionStringLength + sizeof(DYLIBNAME_SUFFIX)] = DYLIBNAME_PREFIX; strcat(soName, versionOverride); strcat(soName, DYLIBNAME_SUFFIX); #else char soName[sizeof(SONAME_BASE) + MaxVersionStringLength] = SONAME_BASE; strcat(soName, versionOverride); #endif DlOpen(soName); } if (libssl == NULL) { // Prefer OpenSSL 3.x DlOpen(MAKELIB("3")); } if (libssl == NULL) { DlOpen(MAKELIB("1.1")); } if (libssl == NULL) { // Debian 9 has dropped support for SSLv3 and so they have bumped their soname. Let's try it // before trying the version 1.0.0 to make it less probable that some of our other dependencies // end up loading conflicting version of libssl. DlOpen(MAKELIB("1.0.2")); } if (libssl == NULL) { // Now try the default versioned so naming as described in the OpenSSL doc DlOpen(MAKELIB("1.0.0")); } if (libssl == NULL) { // Fedora derived distros use different naming for the version 1.0.0 DlOpen(MAKELIB("10")); } // FreeBSD uses a different suffix numbering convention. 
// Current supported FreeBSD releases should use the order .11 -> .111 if (libssl == NULL) { DlOpen(MAKELIB("11")); } if (libssl == NULL) { DlOpen(MAKELIB("111")); } } static pthread_once_t g_openLibrary = PTHREAD_ONCE_INIT; int OpenLibrary() { pthread_once(&g_openLibrary, OpenLibraryOnce); if (libssl != NULL) { return 1; } else { return 0; } } void InitializeOpenSSLShim(void) { if (!OpenLibrary()) { fprintf(stderr, "No usable version of libssl was found\n"); abort(); } // A function defined in libcrypto.so.1.0.0/libssl.so.1.0.0 that is not defined in // libcrypto.so.1.1.0/libssl.so.1.1.0 const void* v1_0_sentinel = dlsym(libssl, "SSL_state"); // Only permit a single assignment here so that two assemblies both triggering the initializer doesn't cause a // race where the fn_ptr is nullptr, then properly bound, then goes back to nullptr right before being used (then bound again). void* volatile tmp_ptr; // Get pointers to all the functions that are needed #define REQUIRED_FUNCTION(fn) \ if (!(fn##_ptr = (TYPEOF(fn))(dlsym(libssl, #fn)))) { fprintf(stderr, "Cannot get required symbol " #fn " from libssl\n"); abort(); } #define REQUIRED_FUNCTION_110(fn) \ if (!v1_0_sentinel && !(fn##_ptr = (TYPEOF(fn))(dlsym(libssl, #fn)))) { fprintf(stderr, "Cannot get required symbol " #fn " from libssl\n"); abort(); } #define LIGHTUP_FUNCTION(fn) \ fn##_ptr = (TYPEOF(fn))(dlsym(libssl, #fn)); #define FALLBACK_FUNCTION(fn) \ if (!(tmp_ptr = dlsym(libssl, #fn))) { tmp_ptr = (void*)local_##fn; } \ fn##_ptr = (TYPEOF(fn))tmp_ptr; #define RENAMED_FUNCTION(fn,oldfn) \ tmp_ptr = dlsym(libssl, #fn);\ if (!tmp_ptr && !(tmp_ptr = dlsym(libssl, #oldfn))) { fprintf(stderr, "Cannot get required symbol " #oldfn " from libssl\n"); abort(); } \ fn##_ptr = (TYPEOF(fn))tmp_ptr; #define LEGACY_FUNCTION(fn) \ if (v1_0_sentinel && !(fn##_ptr = (TYPEOF(fn))(dlsym(libssl, #fn)))) { fprintf(stderr, "Cannot get required symbol " #fn " from libssl\n"); abort(); } FOR_ALL_OPENSSL_FUNCTIONS #undef LEGACY_FUNCTION #undef RENAMED_FUNCTION #undef FALLBACK_FUNCTION #undef LIGHTUP_FUNCTION #undef REQUIRED_FUNCTION_110 #undef REQUIRED_FUNCTION // Sanity check that we have at least one functioning way of reporting errors. if (ERR_put_error_ptr == &local_ERR_put_error) { if (ERR_new_ptr == NULL || ERR_set_debug_ptr == NULL || ERR_set_error_ptr == NULL) { fprintf(stderr, "Cannot determine the error reporting routine from libssl\n"); abort(); } } }
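For example, the override described in the comments above could be exercised like this; the invocation is hypothetical, but `CLR_OPENSSL_VERSION_OVERRIDE` and the version-number format come straight from the code:

```sh
# Makes the shim try libssl.so.1.1 (or libssl.1.1.dylib on macOS)
# before its built-in fallback order (3, 1.1, 1.0.2, 1.0.0, 10, 11, 111).
CLR_OPENSSL_VERSION_OVERRIDE=1.1 dotnet ./MyApp.dll
```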
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/tests/profiler/native/README.md
# Profiler.dll This directory builds Profilers\Profiler.dll, which contains various implementations of ICorProfilerCallback used in our tests. It is used by ProfilerTestRunner.cs in ../common. ### Goals 1) Easy to run/debug a profiler test manually simply by executing the managed test binary + setting minimal env vars: CORECLR_ENABLE_PROFILING=1 CORECLR_PROFILER={CLSID_of_profiler} CORECLR_PROFILER_PATH=path_to_profiler_dll We should be very careful about adding any additional dependencies such as env vars or assumptions that certain files will reside in certain places. Any such dependencies need to be clearly documented. 2) Easy to understand what the test is doing given only an understanding of the ICorProfiler interfaces and basic C++. This means we make limited use of helper functions, macros, and new interfaces that wrap or abstract the underlying APIs. If we do add another layer, it should represent a non-trivial unit of complexity (eg IL-rewriting) and using it should be optional for only a subset of tests that need it. Tests should also avoid trying to test too much at the same time. Making a new test for new functionality is a relatively quick operation. ### Implementation of this profiler dll: There is a small set of shared implementation for all profiler implementations: 1. profiler.def - the dll exported entrypoints 2. dllmain.cpp - implementation of the exported entrypoints 3. classfactory.h/.cpp - implementation of standard COM IClassFactory, used to instantiate a new profiler 4. profiler.h/.cpp - a base class for all profiler implementations. It provides IUnknown, do-nothing implementations of all ICorProfilerCallbackXXX interfaces, and the pCorProfilerInfo field that allows calling back into the runtime All the rest of the implementation is in test-specific profiler implementations that derive from the Profiler class. Each of these is in a sub-directory. See gcbasicprofiler/gcbasicprofiler.h/.cpp for a simple example. ### Adding a new profiler When you want to test new profiler APIs you will need a new test profiler implementation. I recommend using the GC Basic Events test in gcbasicprofiler as an example. The steps are: 1) Get your new profiler building: - Copy and rename gcbasicprofiler folder. - Rename the source files and the gcbasicprofiler type within the source. - Add the new source files to CMakeLists.txt 2) Make your new profiler creatable via COM: - Create a new GUID and replace the one in YourProfiler::GetClsid() - Update classfactory.cpp to include your new profiler's header and update the list of profiler instances in ClassFactory::CreateInstance Profiler* profilers[] = { new GCBasicProfiler(), // add new profilers here }; 3) Override the profiler callback functions that are relevant for your test and delete the rest. At minimum you will need to ensure that the test prints the phrase "PROFILER TEST PASSES" at some point to indicate this is a passing test. Typically that occurs in the Shutdown() method. It is also likely you want to override Initialize() in order to call SetEventMask so that the profiler receives events.
# Profiler.dll This directory builds Profilers\Profiler.dll, which contains various implementations of ICorProfilerCallback used in our tests. It is used by ProfilerTestRunner.cs in ../common. ### Goals 1) Easy to run/debug a profiler test manually simply by executing the managed test binary + setting minimal env vars: CORECLR_ENABLE_PROFILING=1 CORECLR_PROFILER={CLSID_of_profiler} CORECLR_PROFILER_PATH=path_to_profiler_dll We should be very careful about adding any additional dependencies such as env vars or assumptions that certain files will reside in certain places. Any such dependencies need to be clearly documented. 2) Easy to understand what the test is doing given only an understanding of the ICorProfiler interfaces and basic C++. This means we make limited use of helper functions, macros, and new interfaces that wrap or abstract the underlying APIs. If we do add another layer, it should represent a non-trivial unit of complexity (eg IL-rewriting) and using it should be optional for only a subset of tests that need it. Tests should also avoid trying to test too much at the same time. Making a new test for new functionality is a relatively quick operation. ### Implementation of this profiler dll: There is a small set of shared implementation for all profiler implementations: 1. profiler.def - the dll exported entrypoints 2. dllmain.cpp - implementation of the exported entrypoints 3. classfactory.h/.cpp - implementation of standard COM IClassFactory, used to instantiate a new profiler 4. profiler.h/.cpp - a base class for all profiler implementations. It provides IUnknown, do-nothing implementations of all ICorProfilerCallbackXXX interfaces, and the pCorProfilerInfo field that allows calling back into the runtime All the rest of the implementation is in test-specific profiler implementations that derive from the Profiler class. Each of these is in a sub-directory. See gcbasicprofiler/gcbasicprofiler.h/.cpp for a simple example. ### Adding a new profiler When you want to test new profiler APIs you will need a new test profiler implementation. I recommend using the GC Basic Events test in gcbasicprofiler as an example. The steps are: 1) Get your new profiler building: - Copy and rename gcbasicprofiler folder. - Rename the source files and the gcbasicprofiler type within the source. - Add the new source files to CMakeLists.txt 2) Make your new profiler creatable via COM: - Create a new GUID and replace the one in YourProfiler::GetClsid() - Update classfactory.cpp to include your new profiler's header and update the list of profiler instances in ClassFactory::CreateInstance Profiler* profilers[] = { new GCBasicProfiler(), // add new profilers here }; 3) Override the profiler callback functions that are relevant for your test and delete the rest. At minimum you will need to ensure that the test prints the phrase "PROFILER TEST PASSES" at some point to indicate this is a passing test. Typically that occurs in the Shutdown() method. It is also likely you want to override Initialize() in order to call SetEventMask so that the profiler receives events.
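To make the "Adding a new profiler" steps concrete, here is a minimal sketch of a test profiler, assuming the base class and `pCorProfilerInfo` field described in the README above; the exact override signatures are assumptions, not copied from profiler.h:

```cpp
// Hypothetical minimal test profiler; derives from the shared Profiler base
// class and prints the pass phrase the managed test runner looks for.
#include <cstdio>
#include "../profiler.h"

class MyEventProfiler : public Profiler
{
public:
    HRESULT STDMETHODCALLTYPE Initialize(IUnknown* pICorProfilerInfoUnk) override
    {
        Profiler::Initialize(pICorProfilerInfoUnk);
        // Subscribe only to the events this test cares about (GC here).
        return pCorProfilerInfo->SetEventMask2(COR_PRF_MONITOR_GC,
                                               COR_PRF_HIGH_MONITOR_NONE);
    }

    HRESULT STDMETHODCALLTYPE Shutdown() override
    {
        Profiler::Shutdown();
        // ProfilerTestRunner greps stdout for this exact phrase to pass the test.
        printf("PROFILER TEST PASSES\n");
        fflush(stdout);
        return S_OK;
    }
};
```

The class would still need its own GUID returned from `GetClsid()` and an entry in ClassFactory::CreateInstance, as described in step 2 above.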
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/utils/mono-os-semaphore-win32.c
/** * \file * MonoOSSemaphore on Win32 * * Author: * Ludovic Henry ([email protected]) * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include "mono-os-semaphore.h" MonoSemTimedwaitRet mono_os_sem_timedwait (MonoSemType *sem, guint32 timeout_ms, MonoSemFlags flags) { BOOL res; retry: res = mono_win32_wait_for_single_object_ex (*sem, timeout_ms, flags & MONO_SEM_FLAGS_ALERTABLE); if (G_UNLIKELY (res != WAIT_OBJECT_0 && res != WAIT_IO_COMPLETION && res != WAIT_TIMEOUT)) g_error ("%s: mono_win32_wait_for_single_object_ex failed with error %d", __func__, GetLastError ()); if (res == WAIT_IO_COMPLETION && !(flags & MONO_SEM_FLAGS_ALERTABLE)) goto retry; switch (res) { case WAIT_OBJECT_0: return MONO_SEM_TIMEDWAIT_RET_SUCCESS; case WAIT_IO_COMPLETION: return MONO_SEM_TIMEDWAIT_RET_ALERTED; case WAIT_TIMEOUT: return MONO_SEM_TIMEDWAIT_RET_TIMEDOUT; default: g_assert_not_reached (); } }
/** * \file * MonoOSSemaphore on Win32 * * Author: * Ludovic Henry ([email protected]) * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include "mono-os-semaphore.h" MonoSemTimedwaitRet mono_os_sem_timedwait (MonoSemType *sem, guint32 timeout_ms, MonoSemFlags flags) { BOOL res; retry: res = mono_win32_wait_for_single_object_ex (*sem, timeout_ms, flags & MONO_SEM_FLAGS_ALERTABLE); if (G_UNLIKELY (res != WAIT_OBJECT_0 && res != WAIT_IO_COMPLETION && res != WAIT_TIMEOUT)) g_error ("%s: mono_win32_wait_for_single_object_ex failed with error %d", __func__, GetLastError ()); if (res == WAIT_IO_COMPLETION && !(flags & MONO_SEM_FLAGS_ALERTABLE)) goto retry; switch (res) { case WAIT_OBJECT_0: return MONO_SEM_TIMEDWAIT_RET_SUCCESS; case WAIT_IO_COMPLETION: return MONO_SEM_TIMEDWAIT_RET_ALERTED; case WAIT_TIMEOUT: return MONO_SEM_TIMEDWAIT_RET_TIMEDOUT; default: g_assert_not_reached (); } }
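A small usage sketch for this API follows; the calling code is hypothetical, while the type, flag, and return-value names come from mono-os-semaphore.h:

```c
/* Hypothetical caller (not from the repo): a non-alertable wait with a
 * 100ms timeout, handling each of the three possible outcomes. */
#include "mono-os-semaphore.h"

static void
wait_example (MonoSemType *sem)
{
	switch (mono_os_sem_timedwait (sem, 100, MONO_SEM_FLAGS_NONE)) {
	case MONO_SEM_TIMEDWAIT_RET_SUCCESS:
		/* the semaphore was signaled within 100ms */
		break;
	case MONO_SEM_TIMEDWAIT_RET_TIMEDOUT:
		/* 100ms elapsed without a signal */
		break;
	case MONO_SEM_TIMEDWAIT_RET_ALERTED:
		/* an APC interrupted the wait; as the retry loop in the Win32
		 * implementation shows, this is only surfaced when
		 * MONO_SEM_FLAGS_ALERTABLE is passed */
		break;
	}
}
```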
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/native/libs/System.Native/pal_errno.c
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "pal_errno.h" int32_t SystemNative_ConvertErrorPlatformToPal(int32_t platformErrno) { return ConvertErrorPlatformToPal(platformErrno); } int32_t SystemNative_ConvertErrorPalToPlatform(int32_t error) { return ConvertErrorPalToPlatform(error); } const char* SystemNative_StrErrorR(int32_t platformErrno, char* buffer, int32_t bufferSize) { return StrErrorR(platformErrno, buffer, bufferSize); } int32_t SystemNative_GetErrNo(void) { return errno; } void SystemNative_SetErrNo(int32_t errorCode) { errno = errorCode; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "pal_errno.h" int32_t SystemNative_ConvertErrorPlatformToPal(int32_t platformErrno) { return ConvertErrorPlatformToPal(platformErrno); } int32_t SystemNative_ConvertErrorPalToPlatform(int32_t error) { return ConvertErrorPalToPlatform(error); } const char* SystemNative_StrErrorR(int32_t platformErrno, char* buffer, int32_t bufferSize) { return StrErrorR(platformErrno, buffer, bufferSize); } int32_t SystemNative_GetErrNo(void) { return errno; } void SystemNative_SetErrNo(int32_t errorCode) { errno = errorCode; }
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/ia64/Gparser.c
/* libunwind - a platform-independent unwind library Copyright (C) 2001-2004 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind_i.h" /* forward declaration: */ static int create_state_record_for (struct cursor *c, struct ia64_state_record *sr, unw_word_t ip); typedef unsigned long unw_word; #define alloc_reg_state() (mempool_alloc (&unw.reg_state_pool)) #define free_reg_state(rs) (mempool_free (&unw.reg_state_pool, rs)) #define alloc_labeled_state() (mempool_alloc (&unw.labeled_state_pool)) #define free_labeled_state(s) (mempool_free (&unw.labeled_state_pool, s)) /* Routines to manipulate the state stack. */ static inline void push (struct ia64_state_record *sr) { struct ia64_reg_state *rs; rs = alloc_reg_state (); if (!rs) { print_error ("libunwind: cannot stack reg state!\n"); return; } memcpy (rs, &sr->curr, sizeof (*rs)); sr->curr.next = rs; } static void pop (struct ia64_state_record *sr) { struct ia64_reg_state *rs = sr->curr.next; if (!rs) { print_error ("libunwind: stack underflow!\n"); return; } memcpy (&sr->curr, rs, sizeof (*rs)); free_reg_state (rs); } /* Make a copy of the state stack. Non-recursive to avoid stack overflows. */ static struct ia64_reg_state * dup_state_stack (struct ia64_reg_state *rs) { struct ia64_reg_state *copy, *prev = NULL, *first = NULL; while (rs) { copy = alloc_reg_state (); if (!copy) { print_error ("unwind.dup_state_stack: out of memory\n"); return NULL; } memcpy (copy, rs, sizeof (*copy)); if (first) prev->next = copy; else first = copy; rs = rs->next; prev = copy; } return first; } /* Free all stacked register states (but not RS itself). */ static void free_state_stack (struct ia64_reg_state *rs) { struct ia64_reg_state *p, *next; for (p = rs->next; p != NULL; p = next) { next = p->next; free_reg_state (p); } rs->next = NULL; } /* Unwind decoder routines */ static enum ia64_pregnum CONST_ATTR decode_abreg (unsigned char abreg, int memory) { switch (abreg) { case 0x04 ... 0x07: return IA64_REG_R4 + (abreg - 0x04); case 0x22 ... 0x25: return IA64_REG_F2 + (abreg - 0x22); case 0x30 ... 0x3f: return IA64_REG_F16 + (abreg - 0x30); case 0x41 ... 0x45: return IA64_REG_B1 + (abreg - 0x41); case 0x60: return IA64_REG_PR; case 0x61: return IA64_REG_PSP; case 0x62: return memory ? 
IA64_REG_PRI_UNAT_MEM : IA64_REG_PRI_UNAT_GR; case 0x63: return IA64_REG_IP; case 0x64: return IA64_REG_BSP; case 0x65: return IA64_REG_BSPSTORE; case 0x66: return IA64_REG_RNAT; case 0x67: return IA64_REG_UNAT; case 0x68: return IA64_REG_FPSR; case 0x69: return IA64_REG_PFS; case 0x6a: return IA64_REG_LC; default: break; } Dprintf ("libunwind: bad abreg=0x%x\n", abreg); return IA64_REG_LC; } static void set_reg (struct ia64_reg_info *reg, enum ia64_where where, int when, unsigned long val) { reg->val = val; reg->where = where; if (reg->when == IA64_WHEN_NEVER) reg->when = when; } static void alloc_spill_area (unsigned long *offp, unsigned long regsize, struct ia64_reg_info *lo, struct ia64_reg_info *hi) { struct ia64_reg_info *reg; for (reg = hi; reg >= lo; --reg) { if (reg->where == IA64_WHERE_SPILL_HOME) { reg->where = IA64_WHERE_PSPREL; *offp -= regsize; reg->val = *offp; } } } static inline void spill_next_when (struct ia64_reg_info **regp, struct ia64_reg_info *lim, unw_word t) { struct ia64_reg_info *reg; for (reg = *regp; reg <= lim; ++reg) { if (reg->where == IA64_WHERE_SPILL_HOME) { reg->when = t; *regp = reg + 1; return; } } Dprintf ("libunwind: excess spill!\n"); } static inline void finish_prologue (struct ia64_state_record *sr) { struct ia64_reg_info *reg; unsigned long off; int i; /* First, resolve implicit register save locations (see Section "11.4.2.3 Rules for Using Unwind Descriptors", rule 3). */ for (i = 0; i < (int) ARRAY_SIZE (unw.save_order); ++i) { reg = sr->curr.reg + unw.save_order[i]; if (reg->where == IA64_WHERE_GR_SAVE) { reg->where = IA64_WHERE_GR; reg->val = sr->gr_save_loc++; } } /* Next, compute when the fp, general, and branch registers get saved. This must come before alloc_spill_area() because we need to know which registers are spilled to their home locations. */ if (sr->imask) { unsigned char kind, mask = 0, *cp = sr->imask; unsigned long t; static const unsigned char limit[3] = { IA64_REG_F31, IA64_REG_R7, IA64_REG_B5 }; struct ia64_reg_info *(regs[3]); regs[0] = sr->curr.reg + IA64_REG_F2; regs[1] = sr->curr.reg + IA64_REG_R4; regs[2] = sr->curr.reg + IA64_REG_B1; for (t = 0; (int) t < sr->region_len; ++t) { if ((t & 3) == 0) mask = *cp++; kind = (mask >> 2 * (3 - (t & 3))) & 3; if (kind > 0) spill_next_when (&regs[kind - 1], sr->curr.reg + limit[kind - 1], sr->region_start + t); } } /* Next, lay out the memory stack spill area. */ if (sr->any_spills) { off = sr->spill_offset; alloc_spill_area (&off, 16, sr->curr.reg + IA64_REG_F2, sr->curr.reg + IA64_REG_F31); alloc_spill_area (&off, 8, sr->curr.reg + IA64_REG_B1, sr->curr.reg + IA64_REG_B5); alloc_spill_area (&off, 8, sr->curr.reg + IA64_REG_R4, sr->curr.reg + IA64_REG_R7); } } /* Region header descriptors. 
*/ static void desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave, struct ia64_state_record *sr) { int i, region_start; if (!(sr->in_body || sr->first_region)) finish_prologue (sr); sr->first_region = 0; /* check if we're done: */ if (sr->when_target < sr->region_start + sr->region_len) { sr->done = 1; return; } region_start = sr->region_start + sr->region_len; for (i = 0; i < sr->epilogue_count; ++i) pop (sr); sr->epilogue_count = 0; sr->when_sp_restored = IA64_WHEN_NEVER; sr->region_start = region_start; sr->region_len = rlen; sr->in_body = body; if (!body) { push (sr); if (mask) for (i = 0; i < 4; ++i) { if (mask & 0x8) set_reg (sr->curr.reg + unw.save_order[i], IA64_WHERE_GR, sr->region_start + sr->region_len - 1, grsave++); mask <<= 1; } sr->gr_save_loc = grsave; sr->any_spills = 0; sr->imask = 0; sr->spill_offset = 0x10; /* default to psp+16 */ } } /* Prologue descriptors. */ static inline void desc_abi (unsigned char abi, unsigned char context, struct ia64_state_record *sr) { sr->abi_marker = (abi << 8) | context; } static inline void desc_br_gr (unsigned char brmask, unsigned char gr, struct ia64_state_record *sr) { int i; for (i = 0; i < 5; ++i) { if (brmask & 1) set_reg (sr->curr.reg + IA64_REG_B1 + i, IA64_WHERE_GR, sr->region_start + sr->region_len - 1, gr++); brmask >>= 1; } } static inline void desc_br_mem (unsigned char brmask, struct ia64_state_record *sr) { int i; for (i = 0; i < 5; ++i) { if (brmask & 1) { set_reg (sr->curr.reg + IA64_REG_B1 + i, IA64_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } brmask >>= 1; } } static inline void desc_frgr_mem (unsigned char grmask, unw_word frmask, struct ia64_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((grmask & 1) != 0) { set_reg (sr->curr.reg + IA64_REG_R4 + i, IA64_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } grmask >>= 1; } for (i = 0; i < 20; ++i) { if ((frmask & 1) != 0) { int base = (i < 4) ? 
IA64_REG_F2 : IA64_REG_F16 - 4; set_reg (sr->curr.reg + base + i, IA64_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } frmask >>= 1; } } static inline void desc_fr_mem (unsigned char frmask, struct ia64_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((frmask & 1) != 0) { set_reg (sr->curr.reg + IA64_REG_F2 + i, IA64_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } frmask >>= 1; } } static inline void desc_gr_gr (unsigned char grmask, unsigned char gr, struct ia64_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((grmask & 1) != 0) set_reg (sr->curr.reg + IA64_REG_R4 + i, IA64_WHERE_GR, sr->region_start + sr->region_len - 1, gr++); grmask >>= 1; } } static inline void desc_gr_mem (unsigned char grmask, struct ia64_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((grmask & 1) != 0) { set_reg (sr->curr.reg + IA64_REG_R4 + i, IA64_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } grmask >>= 1; } } static inline void desc_mem_stack_f (unw_word t, unw_word size, struct ia64_state_record *sr) { set_reg (sr->curr.reg + IA64_REG_PSP, IA64_WHERE_NONE, sr->region_start + MIN ((int) t, sr->region_len - 1), 16 * size); } static inline void desc_mem_stack_v (unw_word t, struct ia64_state_record *sr) { sr->curr.reg[IA64_REG_PSP].when = sr->region_start + MIN ((int) t, sr->region_len - 1); } static inline void desc_reg_gr (unsigned char reg, unsigned char dst, struct ia64_state_record *sr) { set_reg (sr->curr.reg + reg, IA64_WHERE_GR, sr->region_start + sr->region_len - 1, dst); } static inline void desc_reg_psprel (unsigned char reg, unw_word pspoff, struct ia64_state_record *sr) { set_reg (sr->curr.reg + reg, IA64_WHERE_PSPREL, sr->region_start + sr->region_len - 1, 0x10 - 4 * pspoff); } static inline void desc_reg_sprel (unsigned char reg, unw_word spoff, struct ia64_state_record *sr) { set_reg (sr->curr.reg + reg, IA64_WHERE_SPREL, sr->region_start + sr->region_len - 1, 4 * spoff); } static inline void desc_rp_br (unsigned char dst, struct ia64_state_record *sr) { sr->return_link_reg = dst; } static inline void desc_reg_when (unsigned char regnum, unw_word t, struct ia64_state_record *sr) { struct ia64_reg_info *reg = sr->curr.reg + regnum; if (reg->where == IA64_WHERE_NONE) reg->where = IA64_WHERE_GR_SAVE; reg->when = sr->region_start + MIN ((int) t, sr->region_len - 1); } static inline void desc_spill_base (unw_word pspoff, struct ia64_state_record *sr) { sr->spill_offset = 0x10 - 4 * pspoff; } static inline unsigned char * desc_spill_mask (unsigned char *imaskp, struct ia64_state_record *sr) { sr->imask = imaskp; return imaskp + (2 * sr->region_len + 7) / 8; } /* Body descriptors. 
*/ static inline void desc_epilogue (unw_word t, unw_word ecount, struct ia64_state_record *sr) { sr->when_sp_restored = sr->region_start + sr->region_len - 1 - t; sr->epilogue_count = ecount + 1; } static inline void desc_copy_state (unw_word label, struct ia64_state_record *sr) { struct ia64_labeled_state *ls; for (ls = sr->labeled_states; ls; ls = ls->next) { if (ls->label == label) { free_state_stack (&sr->curr); memcpy (&sr->curr, &ls->saved_state, sizeof (sr->curr)); sr->curr.next = dup_state_stack (ls->saved_state.next); return; } } print_error ("libunwind: failed to find labeled state\n"); } static inline void desc_label_state (unw_word label, struct ia64_state_record *sr) { struct ia64_labeled_state *ls; ls = alloc_labeled_state (); if (!ls) { print_error ("unwind.desc_label_state(): out of memory\n"); return; } ls->label = label; memcpy (&ls->saved_state, &sr->curr, sizeof (ls->saved_state)); ls->saved_state.next = dup_state_stack (sr->curr.next); /* insert into list of labeled states: */ ls->next = sr->labeled_states; sr->labeled_states = ls; } /* General descriptors. */ static inline int desc_is_active (unsigned char qp, unw_word t, struct ia64_state_record *sr) { if (sr->when_target <= sr->region_start + MIN ((int) t, sr->region_len - 1)) return 0; if (qp > 0) { if ((sr->pr_val & ((unw_word_t) 1 << qp)) == 0) return 0; sr->pr_mask |= ((unw_word_t) 1 << qp); } return 1; } static inline void desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct ia64_state_record *sr) { struct ia64_reg_info *r; if (!desc_is_active (qp, t, sr)) return; r = sr->curr.reg + decode_abreg (abreg, 0); r->where = IA64_WHERE_NONE; r->when = IA64_WHEN_NEVER; r->val = 0; } static inline void desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x, unsigned char ytreg, struct ia64_state_record *sr) { enum ia64_where where = IA64_WHERE_GR; struct ia64_reg_info *r; if (!desc_is_active (qp, t, sr)) return; if (x) where = IA64_WHERE_BR; else if (ytreg & 0x80) where = IA64_WHERE_FR; r = sr->curr.reg + decode_abreg (abreg, 0); r->where = where; r->when = sr->region_start + MIN ((int) t, sr->region_len - 1); r->val = (ytreg & 0x7f); } static inline void desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff, struct ia64_state_record *sr) { struct ia64_reg_info *r; if (!desc_is_active (qp, t, sr)) return; r = sr->curr.reg + decode_abreg (abreg, 1); r->where = IA64_WHERE_PSPREL; r->when = sr->region_start + MIN ((int) t, sr->region_len - 1); r->val = 0x10 - 4 * pspoff; } static inline void desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff, struct ia64_state_record *sr) { struct ia64_reg_info *r; if (!desc_is_active (qp, t, sr)) return; r = sr->curr.reg + decode_abreg (abreg, 1); r->where = IA64_WHERE_SPREL; r->when = sr->region_start + MIN ((int) t, sr->region_len - 1); r->val = 4 * spoff; } #define UNW_DEC_BAD_CODE(code) \ print_error ("libunwind: unknown code encountered\n") /* Register names. */ #define UNW_REG_BSP IA64_REG_BSP #define UNW_REG_BSPSTORE IA64_REG_BSPSTORE #define UNW_REG_FPSR IA64_REG_FPSR #define UNW_REG_LC IA64_REG_LC #define UNW_REG_PFS IA64_REG_PFS #define UNW_REG_PR IA64_REG_PR #define UNW_REG_RNAT IA64_REG_RNAT #define UNW_REG_PSP IA64_REG_PSP #define UNW_REG_RP IA64_REG_IP #define UNW_REG_UNAT IA64_REG_UNAT /* Region headers. 
*/ #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg) #define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg) /* Prologue descriptors. */ #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg) #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg) #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg) #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg) #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg) #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg) #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg) #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg) #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg) #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg) #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg) #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg) #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg) #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) \ desc_reg_when(IA64_REG_PRI_UNAT_GR,t,arg) #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) \ desc_reg_when(IA64_REG_PRI_UNAT_MEM,t,arg) #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) \ desc_reg_gr(IA64_REG_PRI_UNAT_GR,r,arg) #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) \ desc_reg_psprel(IA64_REG_PRI_UNAT_MEM,o,arg) #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) \ desc_reg_sprel(IA64_REG_PRI_UNAT_MEM,o,arg) #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg) #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg) #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg)) /* Body descriptors. */ #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg) #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg) #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg) /* General unwind descriptors. */ #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg) #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg) #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) \ desc_spill_psprel_p(p,t,a,o,arg) #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) \ desc_spill_psprel_p(0,t,a,o,arg) #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg) #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg) #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg) #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg) #include "unwind_decoder.h" #ifdef _U_dyn_op /* parse dynamic unwind info */ static struct ia64_reg_info * lookup_preg (int regnum, int memory, struct ia64_state_record *sr) { int preg; switch (regnum) { case UNW_IA64_AR_BSP: preg = IA64_REG_BSP; break; case UNW_IA64_AR_BSPSTORE: preg = IA64_REG_BSPSTORE; break; case UNW_IA64_AR_FPSR: preg = IA64_REG_FPSR; break; case UNW_IA64_AR_LC: preg = IA64_REG_LC; break; case UNW_IA64_AR_PFS: preg = IA64_REG_PFS; break; case UNW_IA64_AR_RNAT: preg = IA64_REG_RNAT; break; case UNW_IA64_AR_UNAT: preg = IA64_REG_UNAT; break; case UNW_IA64_BR + 0: preg = IA64_REG_IP; break; case UNW_IA64_PR: preg = IA64_REG_PR; break; case UNW_IA64_SP: preg = IA64_REG_PSP; break; case UNW_IA64_NAT: if (memory) preg = IA64_REG_PRI_UNAT_MEM; else preg = IA64_REG_PRI_UNAT_GR; break; case UNW_IA64_GR + 4 ... UNW_IA64_GR + 7: preg = IA64_REG_R4 + (regnum - (UNW_IA64_GR + 4)); break; case UNW_IA64_BR + 1 ... UNW_IA64_BR + 5: preg = IA64_REG_B1 + (regnum - UNW_IA64_BR); break; case UNW_IA64_FR + 2 ... UNW_IA64_FR + 5: preg = IA64_REG_F2 + (regnum - (UNW_IA64_FR + 2)); break; case UNW_IA64_FR + 16 ... 
UNW_IA64_FR + 31: preg = IA64_REG_F16 + (regnum - (UNW_IA64_FR + 16)); break; default: Dprintf ("%s: invalid register number %d\n", __FUNCTION__, regnum); return NULL; } return sr->curr.reg + preg; } /* An alias directive inside a region of length RLEN is interpreted to mean that the region behaves exactly like the first RLEN instructions at the aliased IP. RLEN=0 implies that the current state matches exactly that of before the instruction at the aliased IP is executed. */ static int desc_alias (unw_dyn_op_t *op, struct cursor *c, struct ia64_state_record *sr) { struct ia64_state_record orig_sr = *sr; int i, ret, when, rlen = sr->region_len; unw_word_t new_ip; when = MIN (sr->when_target, rlen); new_ip = op->val + ((when / 3) * 16 + (when % 3)); if ((ret = ia64_fetch_proc_info (c, new_ip, 1)) < 0) return ret; if ((ret = create_state_record_for (c, sr, new_ip)) < 0) return ret; sr->first_region = orig_sr.first_region; sr->done = 0; sr->any_spills |= orig_sr.any_spills; sr->in_body = orig_sr.in_body; sr->region_start = orig_sr.region_start; sr->region_len = orig_sr.region_len; if (sr->when_sp_restored != IA64_WHEN_NEVER) sr->when_sp_restored = op->when + MIN (orig_sr.when_sp_restored, rlen); sr->epilogue_count = orig_sr.epilogue_count; sr->when_target = orig_sr.when_target; for (i = 0; i < IA64_NUM_PREGS; ++i) if (sr->curr.reg[i].when != IA64_WHEN_NEVER) sr->curr.reg[i].when = op->when + MIN (sr->curr.reg[i].when, rlen); ia64_free_state_record (sr); sr->labeled_states = orig_sr.labeled_states; sr->curr.next = orig_sr.curr.next; return 0; } static inline int parse_dynamic (struct cursor *c, struct ia64_state_record *sr) { unw_dyn_info_t *di = c->pi.unwind_info; unw_dyn_proc_info_t *proc = &di->u.pi; unw_dyn_region_info_t *r; struct ia64_reg_info *ri; enum ia64_where where; int32_t when, len; unw_dyn_op_t *op; unw_word_t val; int memory, ret; int8_t qp; for (r = proc->regions; r; r = r->next) { len = r->insn_count; if (len < 0) { if (r->next) { Debug (1, "negative region length allowed in last region only!"); return -UNW_EINVAL; } len = -len; /* hack old region info to set the start where we need it: */ sr->region_start = (di->end_ip - di->start_ip) / 0x10 * 3 - len; sr->region_len = 0; } /* all regions are treated as prologue regions: */ desc_prologue (0, len, 0, 0, sr); if (sr->done) return 0; for (op = r->op; op < r->op + r->op_count; ++op) { when = op->when; val = op->val; qp = op->qp; if (!desc_is_active (qp, when, sr)) continue; when = sr->region_start + MIN ((int) when, sr->region_len - 1); switch (op->tag) { case UNW_DYN_SAVE_REG: memory = 0; if ((unsigned) (val - UNW_IA64_GR) < 128) where = IA64_WHERE_GR; else if ((unsigned) (val - UNW_IA64_FR) < 128) where = IA64_WHERE_FR; else if ((unsigned) (val - UNW_IA64_BR) < 8) where = IA64_WHERE_BR; else { Dprintf ("%s: can't save to register number %d\n", __FUNCTION__, (int) op->reg); return -UNW_EBADREG; } /* fall through */ update_reg_info: ri = lookup_preg (op->reg, memory, sr); if (!ri) return -UNW_EBADREG; ri->where = where; ri->when = when; ri->val = val; break; case UNW_DYN_SPILL_FP_REL: memory = 1; where = IA64_WHERE_PSPREL; val = 0x10 - val; goto update_reg_info; case UNW_DYN_SPILL_SP_REL: memory = 1; where = IA64_WHERE_SPREL; goto update_reg_info; case UNW_DYN_ADD: if (op->reg == UNW_IA64_SP) { if (val & 0xf) { Dprintf ("%s: frame-size %ld not an integer " "multiple of 16\n", __FUNCTION__, (long) op->val); return -UNW_EINVAL; } desc_mem_stack_f (when, -((int64_t) val / 16), sr); } else { Dprintf ("%s: can only ADD to 
stack-pointer\n", __FUNCTION__); return -UNW_EBADREG; } break; case UNW_DYN_POP_FRAMES: sr->when_sp_restored = when; sr->epilogue_count = op->val; break; case UNW_DYN_LABEL_STATE: desc_label_state (op->val, sr); break; case UNW_DYN_COPY_STATE: desc_copy_state (op->val, sr); break; case UNW_DYN_ALIAS: if ((ret = desc_alias (op, c, sr)) < 0) return ret; case UNW_DYN_STOP: goto end_of_ops; } } end_of_ops: ; } return 0; } #else # define parse_dynamic(c,sr) (-UNW_EINVAL) #endif /* _U_dyn_op */ HIDDEN int ia64_fetch_proc_info (struct cursor *c, unw_word_t ip, int need_unwind_info) { int ret, dynamic = 1; if (c->pi_valid && !need_unwind_info) return 0; /* check dynamic info first --- it overrides everything else */ ret = unwi_find_dynamic_proc_info (c->as, ip, &c->pi, need_unwind_info, c->as_arg); if (ret == -UNW_ENOINFO) { dynamic = 0; ret = ia64_find_proc_info (c, ip, need_unwind_info); } c->pi_valid = 1; c->pi_is_dynamic = dynamic; return ret; } static inline void put_unwind_info (struct cursor *c, unw_proc_info_t *pi) { if (!c->pi_valid) return; if (c->pi_is_dynamic) unwi_put_dynamic_unwind_info (c->as, pi, c->as_arg); else ia64_put_unwind_info (c, pi); } static int create_state_record_for (struct cursor *c, struct ia64_state_record *sr, unw_word_t ip) { unw_word_t predicates = c->pr; struct ia64_reg_info *r; uint8_t *dp, *desc_end; int ret; assert (c->pi_valid); /* build state record */ memset (sr, 0, sizeof (*sr)); for (r = sr->curr.reg; r < sr->curr.reg + IA64_NUM_PREGS; ++r) r->when = IA64_WHEN_NEVER; sr->pr_val = predicates; sr->first_region = 1; if (!c->pi.unwind_info) { /* No info, return default unwinder (leaf proc, no mem stack, no saved regs), rp in b0, pfs in ar.pfs. */ Debug (1, "no unwind info for ip=0x%lx (gp=%lx)\n", (long) ip, (long) c->pi.gp); sr->curr.reg[IA64_REG_IP].where = IA64_WHERE_BR; sr->curr.reg[IA64_REG_IP].when = -1; sr->curr.reg[IA64_REG_IP].val = 0; goto out; } sr->when_target = (3 * ((ip & ~(unw_word_t) 0xf) - c->pi.start_ip) / 16 + (ip & 0xf)); switch (c->pi.format) { case UNW_INFO_FORMAT_TABLE: case UNW_INFO_FORMAT_REMOTE_TABLE: dp = c->pi.unwind_info; desc_end = dp + c->pi.unwind_info_size; while (!sr->done && dp < desc_end) dp = unw_decode (dp, sr->in_body, sr); ret = 0; break; case UNW_INFO_FORMAT_DYNAMIC: ret = parse_dynamic (c, sr); break; default: ret = -UNW_EINVAL; } put_unwind_info (c, &c->pi); if (ret < 0) return ret; if (sr->when_target > sr->when_sp_restored) { /* sp has been restored and all values on the memory stack below psp also have been restored. */ sr->curr.reg[IA64_REG_PSP].val = 0; sr->curr.reg[IA64_REG_PSP].where = IA64_WHERE_NONE; sr->curr.reg[IA64_REG_PSP].when = IA64_WHEN_NEVER; for (r = sr->curr.reg; r < sr->curr.reg + IA64_NUM_PREGS; ++r) if ((r->where == IA64_WHERE_PSPREL && r->val <= 0x10) || r->where == IA64_WHERE_SPREL) { r->val = 0; r->where = IA64_WHERE_NONE; r->when = IA64_WHEN_NEVER; } } /* If RP did't get saved, generate entry for the return link register. 
*/ if (sr->curr.reg[IA64_REG_IP].when >= sr->when_target) { sr->curr.reg[IA64_REG_IP].where = IA64_WHERE_BR; sr->curr.reg[IA64_REG_IP].when = -1; sr->curr.reg[IA64_REG_IP].val = sr->return_link_reg; } if (sr->when_target > sr->curr.reg[IA64_REG_BSP].when && sr->when_target > sr->curr.reg[IA64_REG_BSPSTORE].when && sr->when_target > sr->curr.reg[IA64_REG_RNAT].when) { Debug (8, "func 0x%lx may switch the register-backing-store\n", c->pi.start_ip); c->pi.flags |= UNW_PI_FLAG_IA64_RBS_SWITCH; } out: #if UNW_DEBUG if (unwi_debug_level > 2) { Dprintf ("%s: state record for func 0x%lx, t=%u (flags=0x%lx):\n", __FUNCTION__, (long) c->pi.start_ip, sr->when_target, (long) c->pi.flags); for (r = sr->curr.reg; r < sr->curr.reg + IA64_NUM_PREGS; ++r) { if (r->where != IA64_WHERE_NONE || r->when != IA64_WHEN_NEVER) { Dprintf (" %s <- ", unw.preg_name[r - sr->curr.reg]); switch (r->where) { case IA64_WHERE_GR: Dprintf ("r%lu", (long) r->val); break; case IA64_WHERE_FR: Dprintf ("f%lu", (long) r->val); break; case IA64_WHERE_BR: Dprintf ("b%lu", (long) r->val); break; case IA64_WHERE_SPREL: Dprintf ("[sp+0x%lx]", (long) r->val); break; case IA64_WHERE_PSPREL: Dprintf ("[psp+0x%lx]", (long) r->val); break; case IA64_WHERE_NONE: Dprintf ("%s+0x%lx", unw.preg_name[r - sr->curr.reg], (long) r->val); break; default: Dprintf ("BADWHERE(%d)", r->where); break; } Dprintf ("\t\t%d\n", r->when); } } } #endif return 0; } /* The proc-info must be valid for IP before this routine can be called. */ HIDDEN int ia64_create_state_record (struct cursor *c, struct ia64_state_record *sr) { return create_state_record_for (c, sr, c->ip); } HIDDEN int ia64_free_state_record (struct ia64_state_record *sr) { struct ia64_labeled_state *ls, *next; /* free labeled register states & stack: */ for (ls = sr->labeled_states; ls; ls = next) { next = ls->next; free_state_stack (&ls->saved_state); free_labeled_state (ls); } free_state_stack (&sr->curr); return 0; } HIDDEN int ia64_make_proc_info (struct cursor *c) { int ret, caching = c->as->caching_policy != UNW_CACHE_NONE; if (!caching || ia64_get_cached_proc_info (c) < 0) { /* Lookup it up the slow way... */ if ((ret = ia64_fetch_proc_info (c, c->ip, 0)) < 0) return ret; if (caching) ia64_cache_proc_info (c); } return 0; }
/* libunwind - a platform-independent unwind library Copyright (C) 2001-2004 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind_i.h" /* forward declaration: */ static int create_state_record_for (struct cursor *c, struct ia64_state_record *sr, unw_word_t ip); typedef unsigned long unw_word; #define alloc_reg_state() (mempool_alloc (&unw.reg_state_pool)) #define free_reg_state(rs) (mempool_free (&unw.reg_state_pool, rs)) #define alloc_labeled_state() (mempool_alloc (&unw.labeled_state_pool)) #define free_labeled_state(s) (mempool_free (&unw.labeled_state_pool, s)) /* Routines to manipulate the state stack. */ static inline void push (struct ia64_state_record *sr) { struct ia64_reg_state *rs; rs = alloc_reg_state (); if (!rs) { print_error ("libunwind: cannot stack reg state!\n"); return; } memcpy (rs, &sr->curr, sizeof (*rs)); sr->curr.next = rs; } static void pop (struct ia64_state_record *sr) { struct ia64_reg_state *rs = sr->curr.next; if (!rs) { print_error ("libunwind: stack underflow!\n"); return; } memcpy (&sr->curr, rs, sizeof (*rs)); free_reg_state (rs); } /* Make a copy of the state stack. Non-recursive to avoid stack overflows. */ static struct ia64_reg_state * dup_state_stack (struct ia64_reg_state *rs) { struct ia64_reg_state *copy, *prev = NULL, *first = NULL; while (rs) { copy = alloc_reg_state (); if (!copy) { print_error ("unwind.dup_state_stack: out of memory\n"); return NULL; } memcpy (copy, rs, sizeof (*copy)); if (first) prev->next = copy; else first = copy; rs = rs->next; prev = copy; } return first; } /* Free all stacked register states (but not RS itself). */ static void free_state_stack (struct ia64_reg_state *rs) { struct ia64_reg_state *p, *next; for (p = rs->next; p != NULL; p = next) { next = p->next; free_reg_state (p); } rs->next = NULL; } /* Unwind decoder routines */ static enum ia64_pregnum CONST_ATTR decode_abreg (unsigned char abreg, int memory) { switch (abreg) { case 0x04 ... 0x07: return IA64_REG_R4 + (abreg - 0x04); case 0x22 ... 0x25: return IA64_REG_F2 + (abreg - 0x22); case 0x30 ... 0x3f: return IA64_REG_F16 + (abreg - 0x30); case 0x41 ... 0x45: return IA64_REG_B1 + (abreg - 0x41); case 0x60: return IA64_REG_PR; case 0x61: return IA64_REG_PSP; case 0x62: return memory ? 
IA64_REG_PRI_UNAT_MEM : IA64_REG_PRI_UNAT_GR; case 0x63: return IA64_REG_IP; case 0x64: return IA64_REG_BSP; case 0x65: return IA64_REG_BSPSTORE; case 0x66: return IA64_REG_RNAT; case 0x67: return IA64_REG_UNAT; case 0x68: return IA64_REG_FPSR; case 0x69: return IA64_REG_PFS; case 0x6a: return IA64_REG_LC; default: break; } Dprintf ("libunwind: bad abreg=0x%x\n", abreg); return IA64_REG_LC; } static void set_reg (struct ia64_reg_info *reg, enum ia64_where where, int when, unsigned long val) { reg->val = val; reg->where = where; if (reg->when == IA64_WHEN_NEVER) reg->when = when; } static void alloc_spill_area (unsigned long *offp, unsigned long regsize, struct ia64_reg_info *lo, struct ia64_reg_info *hi) { struct ia64_reg_info *reg; for (reg = hi; reg >= lo; --reg) { if (reg->where == IA64_WHERE_SPILL_HOME) { reg->where = IA64_WHERE_PSPREL; *offp -= regsize; reg->val = *offp; } } } static inline void spill_next_when (struct ia64_reg_info **regp, struct ia64_reg_info *lim, unw_word t) { struct ia64_reg_info *reg; for (reg = *regp; reg <= lim; ++reg) { if (reg->where == IA64_WHERE_SPILL_HOME) { reg->when = t; *regp = reg + 1; return; } } Dprintf ("libunwind: excess spill!\n"); } static inline void finish_prologue (struct ia64_state_record *sr) { struct ia64_reg_info *reg; unsigned long off; int i; /* First, resolve implicit register save locations (see Section "11.4.2.3 Rules for Using Unwind Descriptors", rule 3). */ for (i = 0; i < (int) ARRAY_SIZE (unw.save_order); ++i) { reg = sr->curr.reg + unw.save_order[i]; if (reg->where == IA64_WHERE_GR_SAVE) { reg->where = IA64_WHERE_GR; reg->val = sr->gr_save_loc++; } } /* Next, compute when the fp, general, and branch registers get saved. This must come before alloc_spill_area() because we need to know which registers are spilled to their home locations. */ if (sr->imask) { unsigned char kind, mask = 0, *cp = sr->imask; unsigned long t; static const unsigned char limit[3] = { IA64_REG_F31, IA64_REG_R7, IA64_REG_B5 }; struct ia64_reg_info *(regs[3]); regs[0] = sr->curr.reg + IA64_REG_F2; regs[1] = sr->curr.reg + IA64_REG_R4; regs[2] = sr->curr.reg + IA64_REG_B1; for (t = 0; (int) t < sr->region_len; ++t) { if ((t & 3) == 0) mask = *cp++; kind = (mask >> 2 * (3 - (t & 3))) & 3; if (kind > 0) spill_next_when (&regs[kind - 1], sr->curr.reg + limit[kind - 1], sr->region_start + t); } } /* Next, lay out the memory stack spill area. */ if (sr->any_spills) { off = sr->spill_offset; alloc_spill_area (&off, 16, sr->curr.reg + IA64_REG_F2, sr->curr.reg + IA64_REG_F31); alloc_spill_area (&off, 8, sr->curr.reg + IA64_REG_B1, sr->curr.reg + IA64_REG_B5); alloc_spill_area (&off, 8, sr->curr.reg + IA64_REG_R4, sr->curr.reg + IA64_REG_R7); } } /* Region header descriptors. 
*/ static void desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave, struct ia64_state_record *sr) { int i, region_start; if (!(sr->in_body || sr->first_region)) finish_prologue (sr); sr->first_region = 0; /* check if we're done: */ if (sr->when_target < sr->region_start + sr->region_len) { sr->done = 1; return; } region_start = sr->region_start + sr->region_len; for (i = 0; i < sr->epilogue_count; ++i) pop (sr); sr->epilogue_count = 0; sr->when_sp_restored = IA64_WHEN_NEVER; sr->region_start = region_start; sr->region_len = rlen; sr->in_body = body; if (!body) { push (sr); if (mask) for (i = 0; i < 4; ++i) { if (mask & 0x8) set_reg (sr->curr.reg + unw.save_order[i], IA64_WHERE_GR, sr->region_start + sr->region_len - 1, grsave++); mask <<= 1; } sr->gr_save_loc = grsave; sr->any_spills = 0; sr->imask = 0; sr->spill_offset = 0x10; /* default to psp+16 */ } } /* Prologue descriptors. */ static inline void desc_abi (unsigned char abi, unsigned char context, struct ia64_state_record *sr) { sr->abi_marker = (abi << 8) | context; } static inline void desc_br_gr (unsigned char brmask, unsigned char gr, struct ia64_state_record *sr) { int i; for (i = 0; i < 5; ++i) { if (brmask & 1) set_reg (sr->curr.reg + IA64_REG_B1 + i, IA64_WHERE_GR, sr->region_start + sr->region_len - 1, gr++); brmask >>= 1; } } static inline void desc_br_mem (unsigned char brmask, struct ia64_state_record *sr) { int i; for (i = 0; i < 5; ++i) { if (brmask & 1) { set_reg (sr->curr.reg + IA64_REG_B1 + i, IA64_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } brmask >>= 1; } } static inline void desc_frgr_mem (unsigned char grmask, unw_word frmask, struct ia64_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((grmask & 1) != 0) { set_reg (sr->curr.reg + IA64_REG_R4 + i, IA64_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } grmask >>= 1; } for (i = 0; i < 20; ++i) { if ((frmask & 1) != 0) { int base = (i < 4) ? 
IA64_REG_F2 : IA64_REG_F16 - 4; set_reg (sr->curr.reg + base + i, IA64_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } frmask >>= 1; } } static inline void desc_fr_mem (unsigned char frmask, struct ia64_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((frmask & 1) != 0) { set_reg (sr->curr.reg + IA64_REG_F2 + i, IA64_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } frmask >>= 1; } } static inline void desc_gr_gr (unsigned char grmask, unsigned char gr, struct ia64_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((grmask & 1) != 0) set_reg (sr->curr.reg + IA64_REG_R4 + i, IA64_WHERE_GR, sr->region_start + sr->region_len - 1, gr++); grmask >>= 1; } } static inline void desc_gr_mem (unsigned char grmask, struct ia64_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((grmask & 1) != 0) { set_reg (sr->curr.reg + IA64_REG_R4 + i, IA64_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } grmask >>= 1; } } static inline void desc_mem_stack_f (unw_word t, unw_word size, struct ia64_state_record *sr) { set_reg (sr->curr.reg + IA64_REG_PSP, IA64_WHERE_NONE, sr->region_start + MIN ((int) t, sr->region_len - 1), 16 * size); } static inline void desc_mem_stack_v (unw_word t, struct ia64_state_record *sr) { sr->curr.reg[IA64_REG_PSP].when = sr->region_start + MIN ((int) t, sr->region_len - 1); } static inline void desc_reg_gr (unsigned char reg, unsigned char dst, struct ia64_state_record *sr) { set_reg (sr->curr.reg + reg, IA64_WHERE_GR, sr->region_start + sr->region_len - 1, dst); } static inline void desc_reg_psprel (unsigned char reg, unw_word pspoff, struct ia64_state_record *sr) { set_reg (sr->curr.reg + reg, IA64_WHERE_PSPREL, sr->region_start + sr->region_len - 1, 0x10 - 4 * pspoff); } static inline void desc_reg_sprel (unsigned char reg, unw_word spoff, struct ia64_state_record *sr) { set_reg (sr->curr.reg + reg, IA64_WHERE_SPREL, sr->region_start + sr->region_len - 1, 4 * spoff); } static inline void desc_rp_br (unsigned char dst, struct ia64_state_record *sr) { sr->return_link_reg = dst; } static inline void desc_reg_when (unsigned char regnum, unw_word t, struct ia64_state_record *sr) { struct ia64_reg_info *reg = sr->curr.reg + regnum; if (reg->where == IA64_WHERE_NONE) reg->where = IA64_WHERE_GR_SAVE; reg->when = sr->region_start + MIN ((int) t, sr->region_len - 1); } static inline void desc_spill_base (unw_word pspoff, struct ia64_state_record *sr) { sr->spill_offset = 0x10 - 4 * pspoff; } static inline unsigned char * desc_spill_mask (unsigned char *imaskp, struct ia64_state_record *sr) { sr->imask = imaskp; return imaskp + (2 * sr->region_len + 7) / 8; } /* Body descriptors. 
*/ static inline void desc_epilogue (unw_word t, unw_word ecount, struct ia64_state_record *sr) { sr->when_sp_restored = sr->region_start + sr->region_len - 1 - t; sr->epilogue_count = ecount + 1; } static inline void desc_copy_state (unw_word label, struct ia64_state_record *sr) { struct ia64_labeled_state *ls; for (ls = sr->labeled_states; ls; ls = ls->next) { if (ls->label == label) { free_state_stack (&sr->curr); memcpy (&sr->curr, &ls->saved_state, sizeof (sr->curr)); sr->curr.next = dup_state_stack (ls->saved_state.next); return; } } print_error ("libunwind: failed to find labeled state\n"); } static inline void desc_label_state (unw_word label, struct ia64_state_record *sr) { struct ia64_labeled_state *ls; ls = alloc_labeled_state (); if (!ls) { print_error ("unwind.desc_label_state(): out of memory\n"); return; } ls->label = label; memcpy (&ls->saved_state, &sr->curr, sizeof (ls->saved_state)); ls->saved_state.next = dup_state_stack (sr->curr.next); /* insert into list of labeled states: */ ls->next = sr->labeled_states; sr->labeled_states = ls; } /* General descriptors. */ static inline int desc_is_active (unsigned char qp, unw_word t, struct ia64_state_record *sr) { if (sr->when_target <= sr->region_start + MIN ((int) t, sr->region_len - 1)) return 0; if (qp > 0) { if ((sr->pr_val & ((unw_word_t) 1 << qp)) == 0) return 0; sr->pr_mask |= ((unw_word_t) 1 << qp); } return 1; } static inline void desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct ia64_state_record *sr) { struct ia64_reg_info *r; if (!desc_is_active (qp, t, sr)) return; r = sr->curr.reg + decode_abreg (abreg, 0); r->where = IA64_WHERE_NONE; r->when = IA64_WHEN_NEVER; r->val = 0; } static inline void desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x, unsigned char ytreg, struct ia64_state_record *sr) { enum ia64_where where = IA64_WHERE_GR; struct ia64_reg_info *r; if (!desc_is_active (qp, t, sr)) return; if (x) where = IA64_WHERE_BR; else if (ytreg & 0x80) where = IA64_WHERE_FR; r = sr->curr.reg + decode_abreg (abreg, 0); r->where = where; r->when = sr->region_start + MIN ((int) t, sr->region_len - 1); r->val = (ytreg & 0x7f); } static inline void desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff, struct ia64_state_record *sr) { struct ia64_reg_info *r; if (!desc_is_active (qp, t, sr)) return; r = sr->curr.reg + decode_abreg (abreg, 1); r->where = IA64_WHERE_PSPREL; r->when = sr->region_start + MIN ((int) t, sr->region_len - 1); r->val = 0x10 - 4 * pspoff; } static inline void desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff, struct ia64_state_record *sr) { struct ia64_reg_info *r; if (!desc_is_active (qp, t, sr)) return; r = sr->curr.reg + decode_abreg (abreg, 1); r->where = IA64_WHERE_SPREL; r->when = sr->region_start + MIN ((int) t, sr->region_len - 1); r->val = 4 * spoff; } #define UNW_DEC_BAD_CODE(code) \ print_error ("libunwind: unknown code encountered\n") /* Register names. */ #define UNW_REG_BSP IA64_REG_BSP #define UNW_REG_BSPSTORE IA64_REG_BSPSTORE #define UNW_REG_FPSR IA64_REG_FPSR #define UNW_REG_LC IA64_REG_LC #define UNW_REG_PFS IA64_REG_PFS #define UNW_REG_PR IA64_REG_PR #define UNW_REG_RNAT IA64_REG_RNAT #define UNW_REG_PSP IA64_REG_PSP #define UNW_REG_RP IA64_REG_IP #define UNW_REG_UNAT IA64_REG_UNAT /* Region headers. 
*/ #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg) #define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg) /* Prologue descriptors. */ #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg) #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg) #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg) #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg) #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg) #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg) #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg) #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg) #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg) #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg) #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg) #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg) #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg) #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) \ desc_reg_when(IA64_REG_PRI_UNAT_GR,t,arg) #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) \ desc_reg_when(IA64_REG_PRI_UNAT_MEM,t,arg) #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) \ desc_reg_gr(IA64_REG_PRI_UNAT_GR,r,arg) #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) \ desc_reg_psprel(IA64_REG_PRI_UNAT_MEM,o,arg) #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) \ desc_reg_sprel(IA64_REG_PRI_UNAT_MEM,o,arg) #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg) #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg) #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg)) /* Body descriptors. */ #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg) #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg) #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg) /* General unwind descriptors. */ #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg) #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg) #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) \ desc_spill_psprel_p(p,t,a,o,arg) #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) \ desc_spill_psprel_p(0,t,a,o,arg) #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg) #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg) #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg) #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg) #include "unwind_decoder.h" #ifdef _U_dyn_op /* parse dynamic unwind info */ static struct ia64_reg_info * lookup_preg (int regnum, int memory, struct ia64_state_record *sr) { int preg; switch (regnum) { case UNW_IA64_AR_BSP: preg = IA64_REG_BSP; break; case UNW_IA64_AR_BSPSTORE: preg = IA64_REG_BSPSTORE; break; case UNW_IA64_AR_FPSR: preg = IA64_REG_FPSR; break; case UNW_IA64_AR_LC: preg = IA64_REG_LC; break; case UNW_IA64_AR_PFS: preg = IA64_REG_PFS; break; case UNW_IA64_AR_RNAT: preg = IA64_REG_RNAT; break; case UNW_IA64_AR_UNAT: preg = IA64_REG_UNAT; break; case UNW_IA64_BR + 0: preg = IA64_REG_IP; break; case UNW_IA64_PR: preg = IA64_REG_PR; break; case UNW_IA64_SP: preg = IA64_REG_PSP; break; case UNW_IA64_NAT: if (memory) preg = IA64_REG_PRI_UNAT_MEM; else preg = IA64_REG_PRI_UNAT_GR; break; case UNW_IA64_GR + 4 ... UNW_IA64_GR + 7: preg = IA64_REG_R4 + (regnum - (UNW_IA64_GR + 4)); break; case UNW_IA64_BR + 1 ... UNW_IA64_BR + 5: preg = IA64_REG_B1 + (regnum - UNW_IA64_BR); break; case UNW_IA64_FR + 2 ... UNW_IA64_FR + 5: preg = IA64_REG_F2 + (regnum - (UNW_IA64_FR + 2)); break; case UNW_IA64_FR + 16 ... 
UNW_IA64_FR + 31: preg = IA64_REG_F16 + (regnum - (UNW_IA64_FR + 16)); break; default: Dprintf ("%s: invalid register number %d\n", __FUNCTION__, regnum); return NULL; } return sr->curr.reg + preg; } /* An alias directive inside a region of length RLEN is interpreted to mean that the region behaves exactly like the first RLEN instructions at the aliased IP. RLEN=0 implies that the current state matches exactly that of before the instruction at the aliased IP is executed. */ static int desc_alias (unw_dyn_op_t *op, struct cursor *c, struct ia64_state_record *sr) { struct ia64_state_record orig_sr = *sr; int i, ret, when, rlen = sr->region_len; unw_word_t new_ip; when = MIN (sr->when_target, rlen); new_ip = op->val + ((when / 3) * 16 + (when % 3)); if ((ret = ia64_fetch_proc_info (c, new_ip, 1)) < 0) return ret; if ((ret = create_state_record_for (c, sr, new_ip)) < 0) return ret; sr->first_region = orig_sr.first_region; sr->done = 0; sr->any_spills |= orig_sr.any_spills; sr->in_body = orig_sr.in_body; sr->region_start = orig_sr.region_start; sr->region_len = orig_sr.region_len; if (sr->when_sp_restored != IA64_WHEN_NEVER) sr->when_sp_restored = op->when + MIN (orig_sr.when_sp_restored, rlen); sr->epilogue_count = orig_sr.epilogue_count; sr->when_target = orig_sr.when_target; for (i = 0; i < IA64_NUM_PREGS; ++i) if (sr->curr.reg[i].when != IA64_WHEN_NEVER) sr->curr.reg[i].when = op->when + MIN (sr->curr.reg[i].when, rlen); ia64_free_state_record (sr); sr->labeled_states = orig_sr.labeled_states; sr->curr.next = orig_sr.curr.next; return 0; } static inline int parse_dynamic (struct cursor *c, struct ia64_state_record *sr) { unw_dyn_info_t *di = c->pi.unwind_info; unw_dyn_proc_info_t *proc = &di->u.pi; unw_dyn_region_info_t *r; struct ia64_reg_info *ri; enum ia64_where where; int32_t when, len; unw_dyn_op_t *op; unw_word_t val; int memory, ret; int8_t qp; for (r = proc->regions; r; r = r->next) { len = r->insn_count; if (len < 0) { if (r->next) { Debug (1, "negative region length allowed in last region only!"); return -UNW_EINVAL; } len = -len; /* hack old region info to set the start where we need it: */ sr->region_start = (di->end_ip - di->start_ip) / 0x10 * 3 - len; sr->region_len = 0; } /* all regions are treated as prologue regions: */ desc_prologue (0, len, 0, 0, sr); if (sr->done) return 0; for (op = r->op; op < r->op + r->op_count; ++op) { when = op->when; val = op->val; qp = op->qp; if (!desc_is_active (qp, when, sr)) continue; when = sr->region_start + MIN ((int) when, sr->region_len - 1); switch (op->tag) { case UNW_DYN_SAVE_REG: memory = 0; if ((unsigned) (val - UNW_IA64_GR) < 128) where = IA64_WHERE_GR; else if ((unsigned) (val - UNW_IA64_FR) < 128) where = IA64_WHERE_FR; else if ((unsigned) (val - UNW_IA64_BR) < 8) where = IA64_WHERE_BR; else { Dprintf ("%s: can't save to register number %d\n", __FUNCTION__, (int) op->reg); return -UNW_EBADREG; } /* fall through */ update_reg_info: ri = lookup_preg (op->reg, memory, sr); if (!ri) return -UNW_EBADREG; ri->where = where; ri->when = when; ri->val = val; break; case UNW_DYN_SPILL_FP_REL: memory = 1; where = IA64_WHERE_PSPREL; val = 0x10 - val; goto update_reg_info; case UNW_DYN_SPILL_SP_REL: memory = 1; where = IA64_WHERE_SPREL; goto update_reg_info; case UNW_DYN_ADD: if (op->reg == UNW_IA64_SP) { if (val & 0xf) { Dprintf ("%s: frame-size %ld not an integer " "multiple of 16\n", __FUNCTION__, (long) op->val); return -UNW_EINVAL; } desc_mem_stack_f (when, -((int64_t) val / 16), sr); } else { Dprintf ("%s: can only ADD to 
stack-pointer\n", __FUNCTION__); return -UNW_EBADREG; } break; case UNW_DYN_POP_FRAMES: sr->when_sp_restored = when; sr->epilogue_count = op->val; break; case UNW_DYN_LABEL_STATE: desc_label_state (op->val, sr); break; case UNW_DYN_COPY_STATE: desc_copy_state (op->val, sr); break; case UNW_DYN_ALIAS: if ((ret = desc_alias (op, c, sr)) < 0) return ret; case UNW_DYN_STOP: goto end_of_ops; } } end_of_ops: ; } return 0; } #else # define parse_dynamic(c,sr) (-UNW_EINVAL) #endif /* _U_dyn_op */ HIDDEN int ia64_fetch_proc_info (struct cursor *c, unw_word_t ip, int need_unwind_info) { int ret, dynamic = 1; if (c->pi_valid && !need_unwind_info) return 0; /* check dynamic info first --- it overrides everything else */ ret = unwi_find_dynamic_proc_info (c->as, ip, &c->pi, need_unwind_info, c->as_arg); if (ret == -UNW_ENOINFO) { dynamic = 0; ret = ia64_find_proc_info (c, ip, need_unwind_info); } c->pi_valid = 1; c->pi_is_dynamic = dynamic; return ret; } static inline void put_unwind_info (struct cursor *c, unw_proc_info_t *pi) { if (!c->pi_valid) return; if (c->pi_is_dynamic) unwi_put_dynamic_unwind_info (c->as, pi, c->as_arg); else ia64_put_unwind_info (c, pi); } static int create_state_record_for (struct cursor *c, struct ia64_state_record *sr, unw_word_t ip) { unw_word_t predicates = c->pr; struct ia64_reg_info *r; uint8_t *dp, *desc_end; int ret; assert (c->pi_valid); /* build state record */ memset (sr, 0, sizeof (*sr)); for (r = sr->curr.reg; r < sr->curr.reg + IA64_NUM_PREGS; ++r) r->when = IA64_WHEN_NEVER; sr->pr_val = predicates; sr->first_region = 1; if (!c->pi.unwind_info) { /* No info, return default unwinder (leaf proc, no mem stack, no saved regs), rp in b0, pfs in ar.pfs. */ Debug (1, "no unwind info for ip=0x%lx (gp=%lx)\n", (long) ip, (long) c->pi.gp); sr->curr.reg[IA64_REG_IP].where = IA64_WHERE_BR; sr->curr.reg[IA64_REG_IP].when = -1; sr->curr.reg[IA64_REG_IP].val = 0; goto out; } sr->when_target = (3 * ((ip & ~(unw_word_t) 0xf) - c->pi.start_ip) / 16 + (ip & 0xf)); switch (c->pi.format) { case UNW_INFO_FORMAT_TABLE: case UNW_INFO_FORMAT_REMOTE_TABLE: dp = c->pi.unwind_info; desc_end = dp + c->pi.unwind_info_size; while (!sr->done && dp < desc_end) dp = unw_decode (dp, sr->in_body, sr); ret = 0; break; case UNW_INFO_FORMAT_DYNAMIC: ret = parse_dynamic (c, sr); break; default: ret = -UNW_EINVAL; } put_unwind_info (c, &c->pi); if (ret < 0) return ret; if (sr->when_target > sr->when_sp_restored) { /* sp has been restored and all values on the memory stack below psp also have been restored. */ sr->curr.reg[IA64_REG_PSP].val = 0; sr->curr.reg[IA64_REG_PSP].where = IA64_WHERE_NONE; sr->curr.reg[IA64_REG_PSP].when = IA64_WHEN_NEVER; for (r = sr->curr.reg; r < sr->curr.reg + IA64_NUM_PREGS; ++r) if ((r->where == IA64_WHERE_PSPREL && r->val <= 0x10) || r->where == IA64_WHERE_SPREL) { r->val = 0; r->where = IA64_WHERE_NONE; r->when = IA64_WHEN_NEVER; } } /* If RP did't get saved, generate entry for the return link register. 
*/ if (sr->curr.reg[IA64_REG_IP].when >= sr->when_target) { sr->curr.reg[IA64_REG_IP].where = IA64_WHERE_BR; sr->curr.reg[IA64_REG_IP].when = -1; sr->curr.reg[IA64_REG_IP].val = sr->return_link_reg; } if (sr->when_target > sr->curr.reg[IA64_REG_BSP].when && sr->when_target > sr->curr.reg[IA64_REG_BSPSTORE].when && sr->when_target > sr->curr.reg[IA64_REG_RNAT].when) { Debug (8, "func 0x%lx may switch the register-backing-store\n", c->pi.start_ip); c->pi.flags |= UNW_PI_FLAG_IA64_RBS_SWITCH; } out: #if UNW_DEBUG if (unwi_debug_level > 2) { Dprintf ("%s: state record for func 0x%lx, t=%u (flags=0x%lx):\n", __FUNCTION__, (long) c->pi.start_ip, sr->when_target, (long) c->pi.flags); for (r = sr->curr.reg; r < sr->curr.reg + IA64_NUM_PREGS; ++r) { if (r->where != IA64_WHERE_NONE || r->when != IA64_WHEN_NEVER) { Dprintf (" %s <- ", unw.preg_name[r - sr->curr.reg]); switch (r->where) { case IA64_WHERE_GR: Dprintf ("r%lu", (long) r->val); break; case IA64_WHERE_FR: Dprintf ("f%lu", (long) r->val); break; case IA64_WHERE_BR: Dprintf ("b%lu", (long) r->val); break; case IA64_WHERE_SPREL: Dprintf ("[sp+0x%lx]", (long) r->val); break; case IA64_WHERE_PSPREL: Dprintf ("[psp+0x%lx]", (long) r->val); break; case IA64_WHERE_NONE: Dprintf ("%s+0x%lx", unw.preg_name[r - sr->curr.reg], (long) r->val); break; default: Dprintf ("BADWHERE(%d)", r->where); break; } Dprintf ("\t\t%d\n", r->when); } } } #endif return 0; } /* The proc-info must be valid for IP before this routine can be called. */ HIDDEN int ia64_create_state_record (struct cursor *c, struct ia64_state_record *sr) { return create_state_record_for (c, sr, c->ip); } HIDDEN int ia64_free_state_record (struct ia64_state_record *sr) { struct ia64_labeled_state *ls, *next; /* free labeled register states & stack: */ for (ls = sr->labeled_states; ls; ls = next) { next = ls->next; free_state_stack (&ls->saved_state); free_labeled_state (ls); } free_state_stack (&sr->curr); return 0; } HIDDEN int ia64_make_proc_info (struct cursor *c) { int ret, caching = c->as->caching_policy != UNW_CACHE_NONE; if (!caching || ia64_get_cached_proc_info (c) < 0) { /* Look it up the slow way... */ if ((ret = ia64_fetch_proc_info (c, c->ip, 0)) < 0) return ret; if (caching) ia64_cache_proc_info (c); } return 0; }
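A note on the slot arithmetic used by the parser above: IA-64 code is laid out in 16-byte bundles of three instruction slots, and the low 4 bits of an address select the slot, so create_state_record_for() can turn an address into a linear instruction index (its "when" value) and desc_alias() can apply the inverse mapping. A minimal sketch of the two conversions under that encoding; addr_to_when/when_to_addr are hypothetical names, not libunwind API:

#include <stdint.h>

/* Linear instruction index ("when") of ip relative to start_ip,
   mirroring the sr->when_target computation above. */
static uint64_t
addr_to_when (uint64_t start_ip, uint64_t ip)
{
  return 3 * ((ip & ~(uint64_t) 0xf) - start_ip) / 16 + (ip & 0xf);
}

/* Inverse mapping, mirroring desc_alias(): bundle base plus slot. */
static uint64_t
when_to_addr (uint64_t start_ip, uint64_t when)
{
  return start_ip + (when / 3) * 16 + (when % 3);
}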
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/tests/Ltest-exc.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if !defined(UNW_REMOTE_ONLY) #include "Gtest-exc.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if !defined(UNW_REMOTE_ONLY) #include "Gtest-exc.c" #endif
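Ltest-exc.c above shows libunwind's G/L source convention: each generic Gxxx.c file is recompiled as a local-only Lxxx.c variant simply by defining UNW_LOCAL_ONLY before including the generic source. A sketch of that wrapper for a hypothetical Gfoo.c (the file name and the remapping note are illustrative assumptions, not taken from the files shown here):

/* Lfoo.c -- hypothetical local-only counterpart of a generic Gfoo.c.
   Defining UNW_LOCAL_ONLY before <libunwind.h> makes the unw_* names
   resolve to the local-only variants of the entry points, so the same
   generic source compiles into a second, faster flavor. */
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#if !defined(UNW_REMOTE_ONLY)
#include "Gfoo.c"
#endif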
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./.github/PULL_REQUEST_TEMPLATE/servicing_pull_request_template.md
Fixes Issue <!-- Issue Number --> main PR <!-- Link to PR if any that fixed this in the main branch. --> # Description <!-- Give a brief summary of the issue and how the pull request is fixing it. --> # Customer Impact <!-- What is the impact to customers of not taking this fix? --> # Regression <!-- Is this fixing a problem that was introduced in the most recent release, ie., fixing a regression? --> # Testing <!-- What kind of testing has been done with the fix. --> # Risk <!-- Please assess the risk of taking this fix. Provide details backing up your assessment. --> # Package authoring signed off? IMPORTANT: If this change touches code that ships in a NuGet package, please make certain that you have added any necessary [package authoring](https://github.com/dotnet/runtime/blob/main/docs/project/library-servicing.md) and gotten it explicitly reviewed.
Fixes Issue <!-- Issue Number --> main PR <!-- Link to PR if any that fixed this in the main branch. --> # Description <!-- Give a brief summary of the issue and how the pull request is fixing it. --> # Customer Impact <!-- What is the impact to customers of not taking this fix? --> # Regression <!-- Is this fixing a problem that was introduced in the most recent release, ie., fixing a regression? --> # Testing <!-- What kind of testing has been done with the fix. --> # Risk <!-- Please assess the risk of taking this fix. Provide details backing up your assessment. --> # Package authoring signed off? IMPORTANT: If this change touches code that ships in a NuGet package, please make certain that you have added any necessary [package authoring](https://github.com/dotnet/runtime/blob/main/docs/project/library-servicing.md) and gotten it explicitly reviewed.
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/mi/Lset_fpreg.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gset_fpreg.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gset_fpreg.c" #endif
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/x86_64/Gtrace.c
/* libunwind - a platform-independent unwind library Copyright (C) 2010, 2011 by FERMI NATIONAL ACCELERATOR LABORATORY This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "libunwind_i.h" #include "unwind_i.h" #include "ucontext_i.h" #include <signal.h> #include <limits.h> #pragma weak pthread_once #pragma weak pthread_key_create #pragma weak pthread_getspecific #pragma weak pthread_setspecific /* Initial hash table size. Table expands by 2 bits (times four). */ #define HASH_MIN_BITS 14 typedef struct { unw_tdep_frame_t *frames; size_t log_size; size_t used; size_t dtor_count; /* Counts how many times our destructor has already been called. */ } unw_trace_cache_t; static const unw_tdep_frame_t empty_frame = { 0, UNW_X86_64_FRAME_OTHER, -1, -1, 0, -1, -1 }; static define_lock (trace_init_lock); static pthread_once_t trace_cache_once = PTHREAD_ONCE_INIT; static sig_atomic_t trace_cache_once_happen; static pthread_key_t trace_cache_key; static struct mempool trace_cache_pool; static _Thread_local unw_trace_cache_t *tls_cache; static _Thread_local int tls_cache_destroyed; /* Free memory for a thread's trace cache. */ static void trace_cache_free (void *arg) { unw_trace_cache_t *cache = arg; if (++cache->dtor_count < PTHREAD_DESTRUCTOR_ITERATIONS) { /* Not yet our turn to get destroyed. Re-install ourselves into the key. */ pthread_setspecific(trace_cache_key, cache); Debug(5, "delayed freeing cache %p (%zx to go)\n", cache, PTHREAD_DESTRUCTOR_ITERATIONS - cache->dtor_count); return; } tls_cache_destroyed = 1; tls_cache = NULL; munmap (cache->frames, (1u << cache->log_size) * sizeof(unw_tdep_frame_t)); mempool_free (&trace_cache_pool, cache); Debug(5, "freed cache %p\n", cache); } /* Initialise frame tracing for threaded use. */ static void trace_cache_init_once (void) { pthread_key_create (&trace_cache_key, &trace_cache_free); mempool_init (&trace_cache_pool, sizeof (unw_trace_cache_t), 0); trace_cache_once_happen = 1; } static unw_tdep_frame_t * trace_cache_buckets (size_t n) { unw_tdep_frame_t *frames; size_t i; GET_MEMORY(frames, n * sizeof (unw_tdep_frame_t)); if (likely(frames != NULL)) for (i = 0; i < n; ++i) frames[i] = empty_frame; return frames; } /* Allocate and initialise hash table for frame cache lookups. Returns the cache initialised with (1u << HASH_MIN_BITS) hash buckets, or NULL if there was a memory allocation problem. */ static unw_trace_cache_t * trace_cache_create (void) { unw_trace_cache_t *cache; if (tls_cache_destroyed) { /* The current thread is in the process of exiting. 
Don't recreate cache, as we wouldn't have another chance to free it. */ Debug(5, "refusing to reallocate cache: " "thread-locals are being deallocated\n"); return NULL; } if (! (cache = mempool_alloc(&trace_cache_pool))) { Debug(5, "failed to allocate cache\n"); return NULL; } if (! (cache->frames = trace_cache_buckets(1u << HASH_MIN_BITS))) { Debug(5, "failed to allocate buckets\n"); mempool_free(&trace_cache_pool, cache); return NULL; } cache->log_size = HASH_MIN_BITS; cache->used = 0; cache->dtor_count = 0; tls_cache_destroyed = 0; /* Paranoia: should already be 0. */ Debug(5, "allocated cache %p\n", cache); return cache; } /* Expand the hash table in the frame cache if possible. This always quadruples the hash size, and clears all previous frame entries. */ static int trace_cache_expand (unw_trace_cache_t *cache) { size_t old_size = (1u << cache->log_size); size_t new_log_size = cache->log_size + 2; unw_tdep_frame_t *new_frames = trace_cache_buckets (1u << new_log_size); if (unlikely(! new_frames)) { Debug(5, "failed to expand cache to 2^%lu buckets\n", new_log_size); return -UNW_ENOMEM; } Debug(5, "expanded cache from 2^%lu to 2^%lu buckets\n", cache->log_size, new_log_size); munmap(cache->frames, old_size * sizeof(unw_tdep_frame_t)); cache->frames = new_frames; cache->log_size = new_log_size; cache->used = 0; return 0; } static unw_trace_cache_t * trace_cache_get_unthreaded (void) { unw_trace_cache_t *cache; intrmask_t saved_mask; static unw_trace_cache_t *global_cache = NULL; lock_acquire (&trace_init_lock, saved_mask); if (! global_cache) { mempool_init (&trace_cache_pool, sizeof (unw_trace_cache_t), 0); global_cache = trace_cache_create (); } cache = global_cache; lock_release (&trace_init_lock, saved_mask); Debug(5, "using cache %p\n", cache); return cache; } /* Get the frame cache for the current thread. Create it if there is none. */ static unw_trace_cache_t * trace_cache_get (void) { unw_trace_cache_t *cache; if (likely (pthread_once != NULL)) { pthread_once(&trace_cache_once, &trace_cache_init_once); if (!trace_cache_once_happen) { return trace_cache_get_unthreaded(); } if (! (cache = tls_cache)) { cache = trace_cache_create(); pthread_setspecific(trace_cache_key, cache); tls_cache = cache; } Debug(5, "using cache %p\n", cache); return cache; } else { return trace_cache_get_unthreaded(); } } /* Initialise frame properties for address cache slot F at address RIP using current CFA, RBP and RSP values. Modifies CURSOR to that location, performs one unw_step(), and fills F with what was discovered about the location. Returns F. */ static unw_tdep_frame_t * trace_init_addr (unw_tdep_frame_t *f, unw_cursor_t *cursor, unw_word_t cfa, unw_word_t rip, unw_word_t rbp, unw_word_t rsp) { struct cursor *c = (struct cursor *) cursor; struct dwarf_cursor *d = &c->dwarf; int ret = -UNW_EINVAL; /* Initialise frame properties: unknown, not last. */ f->virtual_address = rip; f->frame_type = UNW_X86_64_FRAME_OTHER; f->last_frame = 0; f->cfa_reg_rsp = -1; f->cfa_reg_offset = 0; f->rbp_cfa_offset = -1; f->rsp_cfa_offset = -1; /* Reinitialise cursor to this instruction - but undo next/prev RIP adjustment because unw_step will redo it - and force RIP, RBP RSP into register locations (=~ ucontext we keep), then set their desired values. Then perform the step. 
*/ d->ip = rip + d->use_prev_instr; d->cfa = cfa; for(int i = 0; i < DWARF_NUM_PRESERVED_REGS; i++) { d->loc[i] = DWARF_NULL_LOC; } d->loc[UNW_X86_64_RIP] = DWARF_REG_LOC (d, UNW_X86_64_RIP); d->loc[UNW_X86_64_RBP] = DWARF_REG_LOC (d, UNW_X86_64_RBP); d->loc[UNW_X86_64_RSP] = DWARF_REG_LOC (d, UNW_X86_64_RSP); c->frame_info = *f; if (likely(dwarf_put (d, d->loc[UNW_X86_64_RIP], rip) >= 0) && likely(dwarf_put (d, d->loc[UNW_X86_64_RBP], rbp) >= 0) && likely(dwarf_put (d, d->loc[UNW_X86_64_RSP], rsp) >= 0) && likely((ret = unw_step (cursor)) >= 0)) *f = c->frame_info; /* If unw_step() stopped voluntarily, remember that, even if it otherwise could not determine anything useful. This avoids failing trace if we hit frames without unwind info, which is common for the outermost frame (CRT stuff) on many systems. This avoids failing trace in very common circumstances; failing to unw_step() loop wouldn't produce any better result. */ if (ret == 0) f->last_frame = -1; Debug (3, "frame va %lx type %d last %d cfa %s+%d rbp @ cfa%+d rsp @ cfa%+d\n", f->virtual_address, f->frame_type, f->last_frame, f->cfa_reg_rsp ? "rsp" : "rbp", f->cfa_reg_offset, f->rbp_cfa_offset, f->rsp_cfa_offset); return f; } /* Look up and if necessary fill in frame attributes for address RIP in CACHE using current CFA, RBP and RSP values. Uses CURSOR to perform any unwind steps necessary to fill the cache. Returns the frame cache slot which describes RIP. */ static unw_tdep_frame_t * trace_lookup (unw_cursor_t *cursor, unw_trace_cache_t *cache, unw_word_t cfa, unw_word_t rip, unw_word_t rbp, unw_word_t rsp) { /* First look up for previously cached information using cache as linear probing hash table with probe step of 1. Majority of lookups should be completed within few steps, but it is very important the hash table does not fill up, or performance falls off the cliff. */ uint64_t i, addr; uint64_t cache_size = 1u << cache->log_size; uint64_t slot = ((rip * 0x9e3779b97f4a7c16) >> 43) & (cache_size-1); unw_tdep_frame_t *frame; for (i = 0; i < 16; ++i) { frame = &cache->frames[slot]; addr = frame->virtual_address; /* Return if we found the address. */ if (likely(addr == rip)) { Debug (4, "found address after %ld steps\n", i); return frame; } /* If slot is empty, reuse it. */ if (likely(! addr)) break; /* Linear probe to next slot candidate, step = 1. */ if (++slot >= cache_size) slot -= cache_size; } /* If we collided after 16 steps, or if the hash is more than half full, force the hash to expand. Fill the selected slot, whether it's free or collides. Note that hash expansion drops previous contents; further lookups will refill the hash. */ Debug (4, "updating slot %lu after %ld steps, replacing 0x%lx\n", slot, i, addr); if (unlikely(addr || cache->used >= cache_size / 2)) { if (unlikely(trace_cache_expand (cache) < 0)) return NULL; cache_size = 1u << cache->log_size; slot = ((rip * 0x9e3779b97f4a7c16) >> 43) & (cache_size-1); frame = &cache->frames[slot]; addr = frame->virtual_address; } if (! addr) ++cache->used; return trace_init_addr (frame, cursor, cfa, rip, rbp, rsp); } /* Fast stack backtrace for x86-64. This is used by backtrace() implementation to accelerate frequent queries for current stack, without any desire to unwind. It fills BUFFER with the call tree from CURSOR upwards for at most SIZE stack levels. The first frame, backtrace itself, is omitted. When called, SIZE should give the maximum number of entries that can be stored into BUFFER. Uses an internal thread-specific cache to accelerate queries. 
The caller should fall back to a unw_step() loop if this function fails by returning -UNW_ESTOPUNWIND, meaning the routine hit a stack frame that is too complex to be traced in the fast path. This function is tuned for clients which only need to walk the stack to get the call tree as fast as possible but without any other details, for example profilers sampling the stack thousands to millions of times per second. The routine handles the most common x86-64 ABI stack layouts: CFA is RBP or RSP plus/minus constant offset, return address is at CFA-8, and RBP and RSP are either unchanged or saved on stack at constant offset from the CFA; the signal return frame; and frames without unwind info provided they are at the outermost (final) frame or can conservatively be assumed to be frame-pointer based. Any other stack layout will cause the routine to give up. There are only a handful of relatively rarely used functions which do not have a stack in the standard form: vfork, longjmp, setcontext and _dl_runtime_profile on common Linux systems for example. On success BUFFER and *SIZE reflect the trace progress up to *SIZE stack levels or the outermost frame, whichever is less. It may stop short of the outermost frame if the unw_step() loop would also do so, e.g. if there is no more unwind information; this is not reported as an error. The function returns a negative value for errors, -UNW_ESTOPUNWIND if tracing stopped because of unusual frame unwind info. The BUFFER and *SIZE reflect tracing progress up to the error frame. Callers of this function would normally look like this: unw_cursor_t cur; unw_context_t ctx; void *addrs[128]; int depth = 128; int ret; unw_getcontext(&ctx); unw_init_local(&cur, &ctx); if ((ret = unw_tdep_trace(&cur, addrs, &depth)) < 0) { depth = 0; unw_getcontext(&ctx); unw_init_local(&cur, &ctx); while ((ret = unw_step(&cur)) > 0 && depth < 128) { unw_word_t ip; unw_get_reg(&cur, UNW_REG_IP, &ip); addrs[depth++] = (void *) ip; } } */ HIDDEN int tdep_trace (unw_cursor_t *cursor, void **buffer, int *size) { struct cursor *c = (struct cursor *) cursor; struct dwarf_cursor *d = &c->dwarf; unw_trace_cache_t *cache; unw_word_t rbp, rsp, rip, cfa; int maxdepth = 0; int depth = 0; int ret; int validate = 0; /* Check input parameters. */ if (unlikely(! cursor || ! buffer || ! size || (maxdepth = *size) <= 0)) return -UNW_EINVAL; Debug (1, "begin ip 0x%lx cfa 0x%lx\n", d->ip, d->cfa); /* Tell core dwarf routines to call back to us. */ d->stash_frames = 1; /* Determine initial register values. These are direct access safe because we know they come from the initial machine context. */ rip = d->ip; rsp = cfa = d->cfa; ACCESS_MEM_FAST(ret, 0, d, DWARF_GET_LOC(d->loc[UNW_X86_64_RBP]), rbp); assert(ret == 0); /* Get frame cache. */ if (unlikely(! (cache = trace_cache_get()))) { Debug (1, "returning %d, cannot get trace cache\n", -UNW_ENOMEM); *size = 0; d->stash_frames = 0; return -UNW_ENOMEM; } /* Trace the stack upwards, starting from current RIP. Adjust the RIP address for previous/next instruction as the main unwinding logic would also do. We undo this before calling back into unw_step(). */ while (depth < maxdepth) { rip -= d->use_prev_instr; Debug (2, "depth %d cfa 0x%lx rip 0x%lx rsp 0x%lx rbp 0x%lx\n", depth, cfa, rip, rsp, rbp); /* See if we have this address cached. If not, evaluate enough of the dwarf unwind information to fill the cache line data, or to decide this frame cannot be handled in fast trace mode. 
We cache negative results too to prevent unnecessary dwarf parsing for common failures. */ unw_tdep_frame_t *f = trace_lookup (cursor, cache, cfa, rip, rbp, rsp); /* If we don't have information for this frame, give up. */ if (unlikely(! f)) { ret = -UNW_ENOINFO; break; } Debug (3, "frame va %lx type %d last %d cfa %s+%d rbp @ cfa%+d rsp @ cfa%+d\n", f->virtual_address, f->frame_type, f->last_frame, f->cfa_reg_rsp ? "rsp" : "rbp", f->cfa_reg_offset, f->rbp_cfa_offset, f->rsp_cfa_offset); assert (f->virtual_address == rip); /* Stop if this was the last frame. In particular don't evaluate new register values as it may not be safe - we don't normally run with full validation on, and do not want to - and there's enough bad unwind info floating around that we need to trust what unw_step() previously said, in potentially bogus frames. */ if (f->last_frame) break; /* Evaluate CFA and registers for the next frame. */ switch (f->frame_type) { case UNW_X86_64_FRAME_GUESSED: /* Fall thru to standard processing after forcing validation. */ if (d->as == unw_local_addr_space) dwarf_set_validate(d, 1); case UNW_X86_64_FRAME_STANDARD: /* Advance standard traceable frame. */ cfa = (f->cfa_reg_rsp ? rsp : rbp) + f->cfa_reg_offset; if (d->as == unw_local_addr_space) validate = dwarf_get_validate(d); ACCESS_MEM_FAST(ret, validate, d, cfa - 8, rip); if (likely(ret >= 0) && likely(f->rbp_cfa_offset != -1)) ACCESS_MEM_FAST(ret, validate, d, cfa + f->rbp_cfa_offset, rbp); /* Don't bother reading RSP from DWARF, CFA becomes new RSP. */ rsp = cfa; /* Next frame needs to back up for unwind info lookup. */ d->use_prev_instr = 1; break; case UNW_X86_64_FRAME_SIGRETURN: cfa = cfa + f->cfa_reg_offset; /* cfa now points to ucontext_t. */ if (d->as == unw_local_addr_space) validate = dwarf_get_validate(d); ACCESS_MEM_FAST(ret, validate, d, cfa + UC_MCONTEXT_GREGS_RIP, rip); if (likely(ret >= 0)) ACCESS_MEM_FAST(ret, validate, d, cfa + UC_MCONTEXT_GREGS_RBP, rbp); if (likely(ret >= 0)) ACCESS_MEM_FAST(ret, validate, d, cfa + UC_MCONTEXT_GREGS_RSP, rsp); /* Resume stack at signal restoration point. The stack is not necessarily continuous here, especially with sigaltstack(). */ cfa = rsp; /* Next frame should not back up. */ d->use_prev_instr = 0; break; case UNW_X86_64_FRAME_ALIGNED: /* Address of RIP was pushed on the stack via a simple * def_cfa_expr - result stack offset stored in cfa_reg_offset */ cfa = (f->cfa_reg_rsp ? rsp : rbp) + f->cfa_reg_offset; if (d->as == unw_local_addr_space) validate = dwarf_get_validate(d); ACCESS_MEM_FAST(ret, validate, d, cfa, cfa); if (likely(ret >= 0)) ACCESS_MEM_FAST(ret, validate, d, cfa - 8, rip); if (likely(ret >= 0)) ACCESS_MEM_FAST(ret, validate, d, rbp, rbp); /* Don't bother reading RSP from DWARF, CFA becomes new RSP. */ rsp = cfa; /* Next frame needs to back up for unwind info lookup. */ d->use_prev_instr = 1; break; default: /* We cannot trace through this frame, give up and tell the caller we had to stop. Data collected so far may still be useful to the caller, so let it know how far we got. */ ret = -UNW_ESTOPUNWIND; break; } Debug (4, "new cfa 0x%lx rip 0x%lx rsp 0x%lx rbp 0x%lx\n", cfa, rip, rsp, rbp); /* If we failed or ended up somewhere bogus, stop. */ if (unlikely(ret < 0 || rip < 0x4000)) break; /* Record this address in stack trace. We skipped the first address. */ buffer[depth++] = (void *) rip; } #if UNW_DEBUG Debug (1, "returning %d, depth %d\n", ret, depth); #endif *size = depth; return ret; }
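trace_lookup() above hashes RIP with a 64-bit multiplicative constant and resolves collisions by linear probing with step 1, giving up after 16 steps so the table expands before probe chains grow long. A stripped-down sketch of just that probe loop, with the table simplified to a bare array of cached addresses (the constants come from the code above; probe() is a hypothetical helper, not part of libunwind):

#include <stdint.h>

#define PROBE_LIMIT 16

/* Returns the slot holding rip, or a free slot to fill, or -1 if the
   probe limit was hit and the table should be expanded. */
static int64_t
probe (const uint64_t *slots, uint64_t log_size, uint64_t rip)
{
  uint64_t size = (uint64_t) 1 << log_size;
  uint64_t slot = ((rip * 0x9e3779b97f4a7c16) >> 43) & (size - 1);

  for (int i = 0; i < PROBE_LIMIT; ++i)
    {
      if (slots[slot] == rip || slots[slot] == 0)
        return (int64_t) slot;
      if (++slot >= size)        /* linear probe, step 1, with wrap */
        slot -= size;
    }
  return -1;
}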
/* libunwind - a platform-independent unwind library Copyright (C) 2010, 2011 by FERMI NATIONAL ACCELERATOR LABORATORY This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "libunwind_i.h" #include "unwind_i.h" #include "ucontext_i.h" #include <signal.h> #include <limits.h> #pragma weak pthread_once #pragma weak pthread_key_create #pragma weak pthread_getspecific #pragma weak pthread_setspecific /* Initial hash table size. Table expands by 2 bits (times four). */ #define HASH_MIN_BITS 14 typedef struct { unw_tdep_frame_t *frames; size_t log_size; size_t used; size_t dtor_count; /* Counts how many times our destructor has already been called. */ } unw_trace_cache_t; static const unw_tdep_frame_t empty_frame = { 0, UNW_X86_64_FRAME_OTHER, -1, -1, 0, -1, -1 }; static define_lock (trace_init_lock); static pthread_once_t trace_cache_once = PTHREAD_ONCE_INIT; static sig_atomic_t trace_cache_once_happen; static pthread_key_t trace_cache_key; static struct mempool trace_cache_pool; static _Thread_local unw_trace_cache_t *tls_cache; static _Thread_local int tls_cache_destroyed; /* Free memory for a thread's trace cache. */ static void trace_cache_free (void *arg) { unw_trace_cache_t *cache = arg; if (++cache->dtor_count < PTHREAD_DESTRUCTOR_ITERATIONS) { /* Not yet our turn to get destroyed. Re-install ourselves into the key. */ pthread_setspecific(trace_cache_key, cache); Debug(5, "delayed freeing cache %p (%zx to go)\n", cache, PTHREAD_DESTRUCTOR_ITERATIONS - cache->dtor_count); return; } tls_cache_destroyed = 1; tls_cache = NULL; munmap (cache->frames, (1u << cache->log_size) * sizeof(unw_tdep_frame_t)); mempool_free (&trace_cache_pool, cache); Debug(5, "freed cache %p\n", cache); } /* Initialise frame tracing for threaded use. */ static void trace_cache_init_once (void) { pthread_key_create (&trace_cache_key, &trace_cache_free); mempool_init (&trace_cache_pool, sizeof (unw_trace_cache_t), 0); trace_cache_once_happen = 1; } static unw_tdep_frame_t * trace_cache_buckets (size_t n) { unw_tdep_frame_t *frames; size_t i; GET_MEMORY(frames, n * sizeof (unw_tdep_frame_t)); if (likely(frames != NULL)) for (i = 0; i < n; ++i) frames[i] = empty_frame; return frames; } /* Allocate and initialise hash table for frame cache lookups. Returns the cache initialised with (1u << HASH_MIN_BITS) hash buckets, or NULL if there was a memory allocation problem. */ static unw_trace_cache_t * trace_cache_create (void) { unw_trace_cache_t *cache; if (tls_cache_destroyed) { /* The current thread is in the process of exiting. 
Don't recreate cache, as we wouldn't have another chance to free it. */ Debug(5, "refusing to reallocate cache: " "thread-locals are being deallocated\n"); return NULL; } if (! (cache = mempool_alloc(&trace_cache_pool))) { Debug(5, "failed to allocate cache\n"); return NULL; } if (! (cache->frames = trace_cache_buckets(1u << HASH_MIN_BITS))) { Debug(5, "failed to allocate buckets\n"); mempool_free(&trace_cache_pool, cache); return NULL; } cache->log_size = HASH_MIN_BITS; cache->used = 0; cache->dtor_count = 0; tls_cache_destroyed = 0; /* Paranoia: should already be 0. */ Debug(5, "allocated cache %p\n", cache); return cache; } /* Expand the hash table in the frame cache if possible. This always quadruples the hash size, and clears all previous frame entries. */ static int trace_cache_expand (unw_trace_cache_t *cache) { size_t old_size = (1u << cache->log_size); size_t new_log_size = cache->log_size + 2; unw_tdep_frame_t *new_frames = trace_cache_buckets (1u << new_log_size); if (unlikely(! new_frames)) { Debug(5, "failed to expand cache to 2^%lu buckets\n", new_log_size); return -UNW_ENOMEM; } Debug(5, "expanded cache from 2^%lu to 2^%lu buckets\n", cache->log_size, new_log_size); munmap(cache->frames, old_size * sizeof(unw_tdep_frame_t)); cache->frames = new_frames; cache->log_size = new_log_size; cache->used = 0; return 0; } static unw_trace_cache_t * trace_cache_get_unthreaded (void) { unw_trace_cache_t *cache; intrmask_t saved_mask; static unw_trace_cache_t *global_cache = NULL; lock_acquire (&trace_init_lock, saved_mask); if (! global_cache) { mempool_init (&trace_cache_pool, sizeof (unw_trace_cache_t), 0); global_cache = trace_cache_create (); } cache = global_cache; lock_release (&trace_init_lock, saved_mask); Debug(5, "using cache %p\n", cache); return cache; } /* Get the frame cache for the current thread. Create it if there is none. */ static unw_trace_cache_t * trace_cache_get (void) { unw_trace_cache_t *cache; if (likely (pthread_once != NULL)) { pthread_once(&trace_cache_once, &trace_cache_init_once); if (!trace_cache_once_happen) { return trace_cache_get_unthreaded(); } if (! (cache = tls_cache)) { cache = trace_cache_create(); pthread_setspecific(trace_cache_key, cache); tls_cache = cache; } Debug(5, "using cache %p\n", cache); return cache; } else { return trace_cache_get_unthreaded(); } } /* Initialise frame properties for address cache slot F at address RIP using current CFA, RBP and RSP values. Modifies CURSOR to that location, performs one unw_step(), and fills F with what was discovered about the location. Returns F. */ static unw_tdep_frame_t * trace_init_addr (unw_tdep_frame_t *f, unw_cursor_t *cursor, unw_word_t cfa, unw_word_t rip, unw_word_t rbp, unw_word_t rsp) { struct cursor *c = (struct cursor *) cursor; struct dwarf_cursor *d = &c->dwarf; int ret = -UNW_EINVAL; /* Initialise frame properties: unknown, not last. */ f->virtual_address = rip; f->frame_type = UNW_X86_64_FRAME_OTHER; f->last_frame = 0; f->cfa_reg_rsp = -1; f->cfa_reg_offset = 0; f->rbp_cfa_offset = -1; f->rsp_cfa_offset = -1; /* Reinitialise cursor to this instruction - but undo next/prev RIP adjustment because unw_step will redo it - and force RIP, RBP RSP into register locations (=~ ucontext we keep), then set their desired values. Then perform the step. 
*/ d->ip = rip + d->use_prev_instr; d->cfa = cfa; for(int i = 0; i < DWARF_NUM_PRESERVED_REGS; i++) { d->loc[i] = DWARF_NULL_LOC; } d->loc[UNW_X86_64_RIP] = DWARF_REG_LOC (d, UNW_X86_64_RIP); d->loc[UNW_X86_64_RBP] = DWARF_REG_LOC (d, UNW_X86_64_RBP); d->loc[UNW_X86_64_RSP] = DWARF_REG_LOC (d, UNW_X86_64_RSP); c->frame_info = *f; if (likely(dwarf_put (d, d->loc[UNW_X86_64_RIP], rip) >= 0) && likely(dwarf_put (d, d->loc[UNW_X86_64_RBP], rbp) >= 0) && likely(dwarf_put (d, d->loc[UNW_X86_64_RSP], rsp) >= 0) && likely((ret = unw_step (cursor)) >= 0)) *f = c->frame_info; /* If unw_step() stopped voluntarily, remember that, even if it otherwise could not determine anything useful. This avoids failing trace if we hit frames without unwind info, which is common for the outermost frame (CRT stuff) on many systems. This avoids failing trace in very common circumstances; failing to unw_step() loop wouldn't produce any better result. */ if (ret == 0) f->last_frame = -1; Debug (3, "frame va %lx type %d last %d cfa %s+%d rbp @ cfa%+d rsp @ cfa%+d\n", f->virtual_address, f->frame_type, f->last_frame, f->cfa_reg_rsp ? "rsp" : "rbp", f->cfa_reg_offset, f->rbp_cfa_offset, f->rsp_cfa_offset); return f; } /* Look up and if necessary fill in frame attributes for address RIP in CACHE using current CFA, RBP and RSP values. Uses CURSOR to perform any unwind steps necessary to fill the cache. Returns the frame cache slot which describes RIP. */ static unw_tdep_frame_t * trace_lookup (unw_cursor_t *cursor, unw_trace_cache_t *cache, unw_word_t cfa, unw_word_t rip, unw_word_t rbp, unw_word_t rsp) { /* First look up for previously cached information using cache as linear probing hash table with probe step of 1. Majority of lookups should be completed within few steps, but it is very important the hash table does not fill up, or performance falls off the cliff. */ uint64_t i, addr; uint64_t cache_size = 1u << cache->log_size; uint64_t slot = ((rip * 0x9e3779b97f4a7c16) >> 43) & (cache_size-1); unw_tdep_frame_t *frame; for (i = 0; i < 16; ++i) { frame = &cache->frames[slot]; addr = frame->virtual_address; /* Return if we found the address. */ if (likely(addr == rip)) { Debug (4, "found address after %ld steps\n", i); return frame; } /* If slot is empty, reuse it. */ if (likely(! addr)) break; /* Linear probe to next slot candidate, step = 1. */ if (++slot >= cache_size) slot -= cache_size; } /* If we collided after 16 steps, or if the hash is more than half full, force the hash to expand. Fill the selected slot, whether it's free or collides. Note that hash expansion drops previous contents; further lookups will refill the hash. */ Debug (4, "updating slot %lu after %ld steps, replacing 0x%lx\n", slot, i, addr); if (unlikely(addr || cache->used >= cache_size / 2)) { if (unlikely(trace_cache_expand (cache) < 0)) return NULL; cache_size = 1u << cache->log_size; slot = ((rip * 0x9e3779b97f4a7c16) >> 43) & (cache_size-1); frame = &cache->frames[slot]; addr = frame->virtual_address; } if (! addr) ++cache->used; return trace_init_addr (frame, cursor, cfa, rip, rbp, rsp); } /* Fast stack backtrace for x86-64. This is used by backtrace() implementation to accelerate frequent queries for current stack, without any desire to unwind. It fills BUFFER with the call tree from CURSOR upwards for at most SIZE stack levels. The first frame, backtrace itself, is omitted. When called, SIZE should give the maximum number of entries that can be stored into BUFFER. Uses an internal thread-specific cache to accelerate queries. 
The caller should fall back to a unw_step() loop if this function fails by returning -UNW_ESTOPUNWIND, meaning the routine hit a stack frame that is too complex to be traced in the fast path. This function is tuned for clients which only need to walk the stack to get the call tree as fast as possible but without any other details, for example profilers sampling the stack thousands to millions of times per second. The routine handles the most common x86-64 ABI stack layouts: CFA is RBP or RSP plus/minus constant offset, return address is at CFA-8, and RBP and RSP are either unchanged or saved on stack at constant offset from the CFA; the signal return frame; and frames without unwind info provided they are at the outermost (final) frame or can conservatively be assumed to be frame-pointer based. Any other stack layout will cause the routine to give up. There are only a handful of relatively rarely used functions which do not have a stack in the standard form: vfork, longjmp, setcontext and _dl_runtime_profile on common Linux systems for example. On success BUFFER and *SIZE reflect the trace progress up to *SIZE stack levels or the outermost frame, whichever is less. It may stop short of the outermost frame if the unw_step() loop would also do so, e.g. if there is no more unwind information; this is not reported as an error. The function returns a negative value for errors, -UNW_ESTOPUNWIND if tracing stopped because of unusual frame unwind info. The BUFFER and *SIZE reflect tracing progress up to the error frame. Callers of this function would normally look like this: unw_cursor_t cur; unw_context_t ctx; void *addrs[128]; int depth = 128; int ret; unw_getcontext(&ctx); unw_init_local(&cur, &ctx); if ((ret = unw_tdep_trace(&cur, addrs, &depth)) < 0) { depth = 0; unw_getcontext(&ctx); unw_init_local(&cur, &ctx); while ((ret = unw_step(&cur)) > 0 && depth < 128) { unw_word_t ip; unw_get_reg(&cur, UNW_REG_IP, &ip); addrs[depth++] = (void *) ip; } } */ HIDDEN int tdep_trace (unw_cursor_t *cursor, void **buffer, int *size) { struct cursor *c = (struct cursor *) cursor; struct dwarf_cursor *d = &c->dwarf; unw_trace_cache_t *cache; unw_word_t rbp, rsp, rip, cfa; int maxdepth = 0; int depth = 0; int ret; int validate = 0; /* Check input parameters. */ if (unlikely(! cursor || ! buffer || ! size || (maxdepth = *size) <= 0)) return -UNW_EINVAL; Debug (1, "begin ip 0x%lx cfa 0x%lx\n", d->ip, d->cfa); /* Tell core dwarf routines to call back to us. */ d->stash_frames = 1; /* Determine initial register values. These are direct access safe because we know they come from the initial machine context. */ rip = d->ip; rsp = cfa = d->cfa; ACCESS_MEM_FAST(ret, 0, d, DWARF_GET_LOC(d->loc[UNW_X86_64_RBP]), rbp); assert(ret == 0); /* Get frame cache. */ if (unlikely(! (cache = trace_cache_get()))) { Debug (1, "returning %d, cannot get trace cache\n", -UNW_ENOMEM); *size = 0; d->stash_frames = 0; return -UNW_ENOMEM; } /* Trace the stack upwards, starting from current RIP. Adjust the RIP address for previous/next instruction as the main unwinding logic would also do. We undo this before calling back into unw_step(). */ while (depth < maxdepth) { rip -= d->use_prev_instr; Debug (2, "depth %d cfa 0x%lx rip 0x%lx rsp 0x%lx rbp 0x%lx\n", depth, cfa, rip, rsp, rbp); /* See if we have this address cached. If not, evaluate enough of the dwarf unwind information to fill the cache line data, or to decide this frame cannot be handled in fast trace mode. 
We cache negative results too to prevent unnecessary dwarf parsing for common failures. */ unw_tdep_frame_t *f = trace_lookup (cursor, cache, cfa, rip, rbp, rsp); /* If we don't have information for this frame, give up. */ if (unlikely(! f)) { ret = -UNW_ENOINFO; break; } Debug (3, "frame va %lx type %d last %d cfa %s+%d rbp @ cfa%+d rsp @ cfa%+d\n", f->virtual_address, f->frame_type, f->last_frame, f->cfa_reg_rsp ? "rsp" : "rbp", f->cfa_reg_offset, f->rbp_cfa_offset, f->rsp_cfa_offset); assert (f->virtual_address == rip); /* Stop if this was the last frame. In particular don't evaluate new register values as it may not be safe - we don't normally run with full validation on, and do not want to - and there's enough bad unwind info floating around that we need to trust what unw_step() previously said, in potentially bogus frames. */ if (f->last_frame) break; /* Evaluate CFA and registers for the next frame. */ switch (f->frame_type) { case UNW_X86_64_FRAME_GUESSED: /* Fall thru to standard processing after forcing validation. */ if (d->as == unw_local_addr_space) dwarf_set_validate(d, 1); case UNW_X86_64_FRAME_STANDARD: /* Advance standard traceable frame. */ cfa = (f->cfa_reg_rsp ? rsp : rbp) + f->cfa_reg_offset; if (d->as == unw_local_addr_space) validate = dwarf_get_validate(d); ACCESS_MEM_FAST(ret, validate, d, cfa - 8, rip); if (likely(ret >= 0) && likely(f->rbp_cfa_offset != -1)) ACCESS_MEM_FAST(ret, validate, d, cfa + f->rbp_cfa_offset, rbp); /* Don't bother reading RSP from DWARF, CFA becomes new RSP. */ rsp = cfa; /* Next frame needs to back up for unwind info lookup. */ d->use_prev_instr = 1; break; case UNW_X86_64_FRAME_SIGRETURN: cfa = cfa + f->cfa_reg_offset; /* cfa now points to ucontext_t. */ if (d->as == unw_local_addr_space) validate = dwarf_get_validate(d); ACCESS_MEM_FAST(ret, validate, d, cfa + UC_MCONTEXT_GREGS_RIP, rip); if (likely(ret >= 0)) ACCESS_MEM_FAST(ret, validate, d, cfa + UC_MCONTEXT_GREGS_RBP, rbp); if (likely(ret >= 0)) ACCESS_MEM_FAST(ret, validate, d, cfa + UC_MCONTEXT_GREGS_RSP, rsp); /* Resume stack at signal restoration point. The stack is not necessarily continuous here, especially with sigaltstack(). */ cfa = rsp; /* Next frame should not back up. */ d->use_prev_instr = 0; break; case UNW_X86_64_FRAME_ALIGNED: /* Address of RIP was pushed on the stack via a simple * def_cfa_expr - result stack offset stored in cfa_reg_offset */ cfa = (f->cfa_reg_rsp ? rsp : rbp) + f->cfa_reg_offset; if (d->as == unw_local_addr_space) validate = dwarf_get_validate(d); ACCESS_MEM_FAST(ret, validate, d, cfa, cfa); if (likely(ret >= 0)) ACCESS_MEM_FAST(ret, validate, d, cfa - 8, rip); if (likely(ret >= 0)) ACCESS_MEM_FAST(ret, validate, d, rbp, rbp); /* Don't bother reading RSP from DWARF, CFA becomes new RSP. */ rsp = cfa; /* Next frame needs to back up for unwind info lookup. */ d->use_prev_instr = 1; break; default: /* We cannot trace through this frame, give up and tell the caller we had to stop. Data collected so far may still be useful to the caller, so let it know how far we got. */ ret = -UNW_ESTOPUNWIND; break; } Debug (4, "new cfa 0x%lx rip 0x%lx rsp 0x%lx rbp 0x%lx\n", cfa, rip, rsp, rbp); /* If we failed or ended up somewhere bogus, stop. */ if (unlikely(ret < 0 || rip < 0x4000)) break; /* Record this address in stack trace. We skipped the first address. */ buffer[depth++] = (void *) rip; } #if UNW_DEBUG Debug (1, "returning %d, depth %d\n", ret, depth); #endif *size = depth; return ret; }
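The fallback pattern documented in the tdep_trace() comment above, lifted into a self-contained helper (backtrace_fast and the maxdepth parameter are hypothetical; the comment's fixed 128 becomes an argument):

#define UNW_LOCAL_ONLY
#include <libunwind.h>

/* Try the fast trace first; if it fails, redo the walk with the
   generic unw_step() loop, exactly as the comment above recommends.
   Returns the number of return addresses stored in addrs. */
static int
backtrace_fast (void **addrs, int maxdepth)
{
  unw_cursor_t cur;
  unw_context_t ctx;
  int depth = maxdepth;

  unw_getcontext (&ctx);
  unw_init_local (&cur, &ctx);
  if (unw_tdep_trace (&cur, addrs, &depth) < 0)
    {
      depth = 0;
      unw_getcontext (&ctx);
      unw_init_local (&cur, &ctx);
      while (unw_step (&cur) > 0 && depth < maxdepth)
        {
          unw_word_t ip;
          unw_get_reg (&cur, UNW_REG_IP, &ip);
          addrs[depth++] = (void *) ip;
        }
    }
  return depth;
}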
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/tests/crasher.c
/* This program should crash and produce a coredump */ #include "compiler.h" #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <unistd.h> #ifdef __FreeBSD__ #include <sys/types.h> #include <sys/sysctl.h> #include <sys/user.h> #endif #if defined(__linux__) void write_maps(char *fname) { char buf[512], path[128]; char exec; uintmax_t addr; FILE *maps = fopen("/proc/self/maps", "r"); FILE *out = fopen(fname, "w"); if (!maps || !out) exit(EXIT_FAILURE); while (fgets(buf, sizeof(buf), maps)) { if (sscanf(buf, "%jx-%*x %*c%*c%c%*c %*x %*s %*d /%126[^\n]", &addr, &exec, path+1) != 3) continue; if (exec != 'x') continue; path[0] = '/'; fprintf(out, "0x%jx:%s ", addr, path); } fprintf(out, "\n"); fclose(out); fclose(maps); } #elif defined(__FreeBSD__) void write_maps(char *fname) { FILE *out; char *buf, *bp, *eb; struct kinfo_vmentry *kv; int mib[4], error; size_t len; out = fopen(fname, "w"); if (out == NULL) exit(EXIT_FAILURE); len = 0; mib[0] = CTL_KERN; mib[1] = KERN_PROC; mib[2] = KERN_PROC_VMMAP; mib[3] = getpid(); error = sysctl(mib, 4, NULL, &len, NULL, 0); if (error == -1) exit(EXIT_FAILURE); len = len * 4 / 3; buf = malloc(len); if (buf == NULL) exit(EXIT_FAILURE); error = sysctl(mib, 4, buf, &len, NULL, 0); if (error == -1) exit(EXIT_FAILURE); for (bp = buf, eb = buf + len; bp < eb; bp += kv->kve_structsize) { kv = (struct kinfo_vmentry *)(uintptr_t)bp; if (kv->kve_type == KVME_TYPE_VNODE && (kv->kve_protection & KVME_PROT_EXEC) != 0) { fprintf(out, "0x%jx:%s ", kv->kve_start, kv->kve_path); } } fprintf(out, "\n"); fclose(out); free(buf); } #else #error Port me #endif #ifdef __GNUC__ #ifndef __clang__ // Gcc >= 8 became too good at inlining alias c into b when using -O2 or -O3, // so force -O1 in all cases, otherwise a frame will be missing in the tests. #pragma GCC optimize "-O1" #endif int c(int x) NOINLINE ALIAS(b); #define compiler_barrier() __asm__ __volatile__ (""); #else int c(int x); #define compiler_barrier() #endif int NOINLINE a(void) { *(volatile int *)32 = 1; return 1; } int NOINLINE b(int x) { int r; compiler_barrier(); if (x) r = a(); else r = c(1); return r + 1; } int main (int argc, char **argv) { if (argc > 1) write_maps(argv[1]); b(0); return 0; }
/* This program should crash and produce a coredump */ #include "compiler.h" #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <unistd.h> #ifdef __FreeBSD__ #include <sys/types.h> #include <sys/sysctl.h> #include <sys/user.h> #endif #if defined(__linux__) void write_maps(char *fname) { char buf[512], path[128]; char exec; uintmax_t addr; FILE *maps = fopen("/proc/self/maps", "r"); FILE *out = fopen(fname, "w"); if (!maps || !out) exit(EXIT_FAILURE); while (fgets(buf, sizeof(buf), maps)) { if (sscanf(buf, "%jx-%*x %*c%*c%c%*c %*x %*s %*d /%126[^\n]", &addr, &exec, path+1) != 3) continue; if (exec != 'x') continue; path[0] = '/'; fprintf(out, "0x%jx:%s ", addr, path); } fprintf(out, "\n"); fclose(out); fclose(maps); } #elif defined(__FreeBSD__) void write_maps(char *fname) { FILE *out; char *buf, *bp, *eb; struct kinfo_vmentry *kv; int mib[4], error; size_t len; out = fopen(fname, "w"); if (out == NULL) exit(EXIT_FAILURE); len = 0; mib[0] = CTL_KERN; mib[1] = KERN_PROC; mib[2] = KERN_PROC_VMMAP; mib[3] = getpid(); error = sysctl(mib, 4, NULL, &len, NULL, 0); if (error == -1) exit(EXIT_FAILURE); len = len * 4 / 3; buf = malloc(len); if (buf == NULL) exit(EXIT_FAILURE); error = sysctl(mib, 4, buf, &len, NULL, 0); if (error == -1) exit(EXIT_FAILURE); for (bp = buf, eb = buf + len; bp < eb; bp += kv->kve_structsize) { kv = (struct kinfo_vmentry *)(uintptr_t)bp; if (kv->kve_type == KVME_TYPE_VNODE && (kv->kve_protection & KVME_PROT_EXEC) != 0) { fprintf(out, "0x%jx:%s ", kv->kve_start, kv->kve_path); } } fprintf(out, "\n"); fclose(out); free(buf); } #else #error Port me #endif #ifdef __GNUC__ #ifndef __clang__ // Gcc >= 8 became too good at inlining alias c into b when using -O2 or -O3, // so force -O1 in all cases, otherwise a frame will be missing in the tests. #pragma GCC optimize "-O1" #endif int c(int x) NOINLINE ALIAS(b); #define compiler_barrier() __asm__ __volatile__ (""); #else int c(int x); #define compiler_barrier() #endif int NOINLINE a(void) { *(volatile int *)32 = 1; return 1; } int NOINLINE b(int x) { int r; compiler_barrier(); if (x) r = a(); else r = c(1); return r + 1; } int main (int argc, char **argv) { if (argc > 1) write_maps(argv[1]); b(0); return 0; }
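Purely as an illustration (this reader is hypothetical, not part of the test suite), the "0x%jx:%s " records that write_maps() emits can be parsed back with a mirrored scanf pattern; %jx accepts the 0x prefix and the width-limited %s stops at the separating space:

```
#include <stdio.h>
#include <stdint.h>

int read_maps(const char *fname)
{
    FILE *in = fopen(fname, "r");
    uintmax_t addr;
    char path[128];

    if (in == NULL)
        return -1;
    /* Each record is "0xADDR:/path " followed by a space separator. */
    while (fscanf(in, "%jx:%127s", &addr, path) == 2)
        printf("executable mapping %s at 0x%jx\n", path, addr);
    fclose(in);
    return 0;
}
```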
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./docs/design/coreclr/profiling/davbr-blog-archive/ELT Hooks - tail calls.md
*This blog post originally appeared on David Broman's blog on 6/20/2007* For most people the idea of entering or returning from a function seems straightforward. Your profiler's Enter hook is called at the beginning of a function, and its Leave hook is called just before the function returns. But the idea of a tail call and exactly what that means for the Profiling API is less straightforward. In [Part 1](ELT Hooks - The Basics.md) I talked about the basics of the Enter / Leave / Tailcall hooks and generally how they work. You may want to review that post first if you haven't seen it yet. This post builds on that one by talking exclusively about the Tailcall hook, how it works, and what profilers should do inside their Tailcall hooks. ## Tail calling in general Tail calling is a compiler optimization that saves execution of instructions and saves reads and writes of stack memory. When the last thing a function does is call another function (and other conditions are favorable), the compiler may consider implementing that call as a tail call, instead of a regular call. ``` static public void Main() { Helper(); } static public void Helper() { One(); Two(); Three(); } static public void Three() { ... } ``` In the code above, the compiler may consider implementing the call from Helper() to Three() as a tail call. What does that mean, and why would that optimize anything? Well, imagine this is compiled without a tail call optimization. By the time Three() is called, the stack looks like this (my stacks grow UP): ``` Three Helper Main ``` Each of those functions causes a separate frame to be allocated on the stack. All the usual contents of a frame, including locals, parameters, the return address, saved registers, etc., get stored in each of those frames. And when each of those functions returns, the return address is read from the frame, and the stack pointer is adjusted to collapse the frame of the returning function. That's just the usual overhead associated with making a function call. Now, if the call from Helper() to Three() were implemented as a tail call, we'd avoid that overhead, and Three() would just "reuse" the stack frame that had been set up for Helper(). While Three() is executing, the call stack would look like this: ``` Three Main ``` And when Three() returns, it returns directly to Main() without popping back through Helper() first. Folks who live in functional programming languages like Scheme use recursion at least as often as C++ or C# folks use while and for loops. Such functional programming languages depend on tail call optimizations (in particular tail recursion) to avoid overflowing the stack. While imperative languages like C++ or C# don't have such a vital need for tail call optimizations, it's still pretty handy as it reduces the number of instructions executed and the writes to the stack. Also, it's worth noting that the amount of stack space used for a single frame can be more than you'd expect. For example, in CLR x64, each regular call (without the tail call optimization) uses a minimum of 48 bytes of stack space, even if it takes no arguments, has no locals, and returns nothing. So for small functions, the tail call optimization can provide a significant overhead reduction in terms of stack space. ## The CLR and tail calls When you're dealing with languages managed by the CLR, there are two kinds of compilers in play. 
There's the compiler that goes from your language's source code down to IL (C# developers know this as csc.exe), and then there's the compiler that goes from IL to native code (the JIT 32/64 bit compilers that are invoked at run time or NGEN time). Both the source-\>IL and IL-\>native compilers understand the tail call optimization. But the IL-\>native compiler--which I'll just refer to as JIT--has the final say on whether the tail call optimization will ultimately be used. The source-\>IL compiler can help to generate IL that is conducive to making tail calls, including the use of the "tail." IL prefix (more on that later). In this way, the source-\>IL compiler can structure the IL it generates to persuade the JIT into making a tail call. But the JIT always has the option to do whatever it wants. ### When does the JIT make tail calls? I asked Fei Chen and [Grant Richins](https://docs.microsoft.com/en-us/archive/blogs/grantri/), neighbors down the hall from me who happen to work on the JIT, under what conditions the various JITs will employ the tail call optimization. The full answer is rather detailed. The quick summary is that the JITs try to use the tail call optimization whenever they can, but there are lots of reasons why the tail call optimization can't be used. Some reasons why tail calling is a non-option: - Caller doesn't return immediately after the call (duh :-)) - Stack arguments between caller and callee are incompatible in a way that would require shifting things around in the caller's frame before the callee could execute - Caller and callee return different types - We inline the call instead (inlining is way better than tail calling, and opens the door to many more optimizations) - Security gets in the way - The debugger / profiler turned off JIT optimizations [Here](Tail call JIT conditions.md) are their full, detailed answers. _Note that how the JIT decides whether to use the tail calling optimization is an implementation detail that is prone to change at whim. **You must not take dependencies on this behavior**. Use this information for your own personal entertainment only._ ## Your Profiler's Tailcall hook I'm assuming you've already read through [Part 1](ELT Hooks - The Basics.md) and are familiar with how your profiler sets up its Enter/Leave/Tailcall hooks, so I'm not repeating any of those details here. I will focus on what kind of code you will typically want to place inside your Tailcall hook: ``` typedef void FunctionTailcall2( FunctionID funcId, UINT_PTR clientData, COR_PRF_FRAME_INFO func); ``` **Tip** : More than once I've seen profiler writers make the following mistake. They will take their naked assembly-language wrapper for their Enter2 and Leave2 hooks, and paste it again to use as the Tailcall2 assembly-language wrapper. The problem is they forget that the Tailcall2 hook takes a different number of parameters than the Enter2 / Leave2 hooks (or, more to the point, a different number of _bytes_ is passed on the stack to invoke the Tailcall2 hook). So, they'll take the "ret 16" at the end of their Enter2/Leave2 hook wrappers and stick that into their Tailcall2 hook wrapper, forgetting to change it to a "ret 12". Don't make the same mistake! It's worth noting what these parameters mean. With the Enter and Leave hooks it's pretty obvious that the parameters your hook is given (e.g., funcId) apply to the function being Entered or Left. But what about the Tailcall hook? 
Do the Tailcall hook's parameters describe the caller (function making the tail call) or the callee (function being tail called into)? Answer: the parameters refer to the tail call**er**. The way I remember it is that the Tailcall hook is like an "Alternative Leave" hook. A function ends either by returning (in which case the CLR invokes your Leave hook) or a function ends by tail calling out to somewhere else (in which case the CLR invokes your Tailcall hook). In either case (Leave hook or Tailcall hook) the hook's parameters tell you about the function that's _ending_. If a function happens to end by making a tail call, your profiler is not told the target of that tail call. (The astute reader will realize that actually your profiler _is_ told what the target of the tail call is--you need only wait until your Enter hook is called next, and that function will be the tail call target, or "tail callee". (Well, actually, this is true most of the time, but not all! (More on that later, but consider this confusing, nested series of afterthoughts a hint to a question I pose further down in this post.))) Did you just count the number of closing parentheses to ensure I got it right? If so, I'd like to make fun of you but I won't--I'd have counted the parentheses, too. My house is glass. Ok, enough dilly-dallying. What should your profiler do in its Tailcall hook? Two of the more common reasons profilers use Enter/Leave/Tailcall hooks in the first place are to keep **shadow stacks** or to maintain **call traces** (sometimes with timing information). ### Shadow stacks The [CLRProfiler](http://www.microsoft.com/downloads/details.aspx?FamilyID=a362781c-3870-43be-8926-862b40aa0cd0&DisplayLang=en) is a great example of using Enter/Leave/Tailcall hooks to maintain shadow stacks. A shadow stack is your profiler's own copy of the current stack of function calls on a given thread at any given time. Upon Enter of a function, you push that FunctionID (and whatever other info interests you, such as arguments) onto your data structure that represents that thread's stack. Upon Leave of a function, you pop that FunctionID. This gives you a live list of managed calls in play on the thread. The CLRProfiler uses shadow stacks so that whenever the managed app being profiled chooses to allocate a new object, the CLRProfiler can know the managed call stack that led to the allocation. (Note that an alternate way of accomplishing this would be to call DoStackSnapshot at every allocation point instead of maintaining a shadow stack. Since objects are allocated so frequently, however, you'd end up calling DoStackSnapshot extremely frequently and will often see worse performance than if you had been maintaining shadow stacks in the first place.) OK, so when your profiler maintains a shadow stack, it's clear what your profiler should do on Enter or Leave, but what should it do on Tailcall? There are a couple ways one could imagine answering that question, but only one of them will work! Taking the example from the top of this post, imagine the stack looks like this: ``` Helper Main ``` and Helper is about to make a tail call into Three(). What should your profiler do? Method 1: On tailcall, pop the last FunctionID. (In other words, treat Tailcall just like Leave.) So, in this example, when Helper() calls Three(), we'd pop Helper(). 
As soon as Three() is called, our profiler would receive an Enter for Three(), and our shadow stack would look like this: ``` Three Main ``` This approach mirrors reality, because this is what the actual physical stack will look like. Indeed, if one attaches a debugger to a live process, and breaks in while the process is inside a tail call, the debugger will show a call stack just like this, where you see the tail callee, but not the tail caller. However, it might be a little confusing to a user of your profiler who looks at his source code and sees that Helper() (not Main()) calls Three(). He may have no idea that when Helper() called Three(), the JIT chose to turn that into a tail call. In fact, your user may not even know what a tail call is. You might therefore be tempted to try this instead: Method 2: On tailcall, "mark" the FunctionID at the top of your stack as needing a "deferred pop" when its callee is popped, but don't pop yet. With this strategy, for the duration of the call to Three(), the shadow stack will look like this: ``` Three Helper (marked for deferred pop) Main ``` which some might consider more user-friendly. And as soon as Three() returns, your profiler will sneakily do a double-pop leaving just this: ``` Main ``` So which should your profiler use: Method 1 or Method 2? Before I answer, take some time to think about this, invoking that hint I cryptically placed above in nested parentheses. And no, the fact that the parentheses were nested is not part of the actual hint. The answer: Method 1. In principle, either method should be fine. However, the behavior of the CLR under certain circumstances will break Method 2. Those "certain circumstances" are what I alluded to when I mentioned "this is true most of the time, but not all" above. These mysterious "certain circumstances" involve a managed function tail calling into a native helper function inside the runtime. Here's an example: ``` static public void Main() { Thread.Sleep(44); Helper(); } ``` It just so happens that the implementation of Thread.Sleep makes a call into a native helper function in the bowels of the runtime. And that call happens to be the last thing Thread.Sleep does. So the JIT may helpfully optimize that call into a tail call. Here are the hook calls your profiler will see in this case: ``` (1) Enter (into Main) (2) Enter (into Thread.Sleep) (3) Tailcall (from Thread.Sleep) (4) Enter (into Helper) (5) Leave (from Helper) (6) Leave (from Main) ``` Note that after you get a Tailcall telling you that Thread.Sleep is done, (in (3)), the very next Enter you get (in (4)) is NOT the Enter for the function being tail called. This is because the CLR only provides Enter/Leave/Tailcall hooks for _managed_ functions, and the very next managed function being entered is Helper(). So, how will Method 1 and Method 2 fare in this example? Method 1: Shadow stack works By popping on every Tailcall hook, your shadow stack stays up to date. Method 2: Shadow stack fails At stage (4), the shadow stack looks like this: ``` Helper Thread.Sleep (marked for "deferred pop") Main ``` If you think it might be complicated to explain tail calls to your users so they can understand the Method 1 form of shadow stack presentation, just try explaining why it makes sense to present to them that Thread.Sleep() is calling Helper()! 
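In code, Method 1 is also by far the simpler of the two. Here is a minimal sketch (not the CLRProfiler's actual implementation): FunctionID stands in for the profiling API's opaque handle, and the fixed-depth, single-threaded bookkeeping is an assumption for brevity; a real profiler keeps one stack per thread.

```
#include <stdint.h>

typedef uintptr_t FunctionID;   /* stand-in for the profiling API handle */

#define MAX_DEPTH 1024
static FunctionID shadow[MAX_DEPTH];
static int top;

void OnEnter(FunctionID f)    { if (top < MAX_DEPTH) shadow[top++] = f; }
void OnLeave(FunctionID f)    { (void) f; if (top > 0) top--; }
/* Method 1: a tail call ends the caller's frame exactly like a leave. */
void OnTailcall(FunctionID f) { (void) f; if (top > 0) top--; }
```

Method 2, by contrast, needs deferred-pop bookkeeping, and as the next example shows, that bookkeeping gets ugly fast.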
And of course, this can get arbitrarily nasty: ``` static public void Main() { Thread.Sleep(44); Thread.Sleep(44); Thread.Sleep(44); Thread.Sleep(44); Helper(); } ``` would yield: ``` Helper Thread.Sleep (marked for "deferred pop") Thread.Sleep (marked for "deferred pop") Thread.Sleep (marked for "deferred pop") Thread.Sleep (marked for "deferred pop") Main ``` And things get more complicated if you start to think about when you actually pop a frame marked for "deferred pop". In all the above examples, you would do so as soon as the frame above it gets popped. So once Helper() is popped (due to Leave()), you'd cascade-pop all the Thread.Sleeps. But what if there is no frame above the frames marked for "deferred pop"? To wit: ``` static public void Main() { Helper() } static public void Helper() { Thread.Sleep(44); Thread.Sleep(44); Thread.Sleep(44); Thread.Sleep(44); } ``` would yield: ``` Thread.Sleep (marked for "deferred pop") Thread.Sleep (marked for "deferred pop") Thread.Sleep (marked for "deferred pop") Thread.Sleep (marked for "deferred pop") Helper Main ``` until you get a Leave hook for Helper(). At this point, you need to pop Helper() from your shadow stack, but he's not at the top-- he's buried under all your "deferred pop" frames. So your profiler would need to perform the deferred pops if a frame above OR below them gets popped. Hopefully, the yuckiness of this implementation will scare you straight. But the confusion of presenting invalid stacks to the user is the real reason to abandon Method 2 and go with Method 1. ### Call tracing The important lesson to learn from the above section is that sometimes a Tailcall hook will match up with the next Enter hook (i.e., the tail call you're notified of in your Tailcall hook will have as its callee the very function you're notified of in the next Enter hook), and sometimes the Tailcall hook will NOT match with the next Enter hook (in particular when the Tailcall hook refers to a tail call into a native helper in the runtime). And the sad fact is that the Enter/Leave/Tailcall hook design does not currently allow you to predict whether a Tailcall will match the next Enter. As an illustration, consider two simple tail call examples: **Matching Example** ``` static public void Main() { One(); ...(other code here)... } static public void One() { Two(); } ``` **Non-matching Example** ``` static public void Main() { Thread.Sleep(44); Two(); } ``` In either case, your profiler will see the following hook calls ``` (1) Enter (into Main) (2) Enter (into One / Thread.Sleep) (3) Tailcall (from One / Thread.Sleep) (4) Enter (into Two) ... ``` In the first example, (3) and (4) match (i.e., the tail call really does call into Two()). But in the second example, they do not (the tail call does NOT call into Two()). Since you don't know when Tailcall will match the next Enter, your implementation of call tracing, like shadow stack maintenance, must treat a Tailcall hook just like a Leave. If you're logging when functions begin and end, potentially with the amount of time spent inside the function, then your Tailcall hook should basically do the same thing as your Leave hook. A call to your Tailcall hook indicates that the specified function is over and done with, just like a call to your Leave hook. As with shadow stacks, this will sometimes lead to call graphs that could be confusing. 
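The same "Tailcall is a Leave" rule carries over to timed call tracing. As another hedged sketch (now_ns() is a hypothetical monotonic-clock helper, and single-threaded bookkeeping is again assumed), a duration logger simply funnels both hooks into one shared function-ended path:

```
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t FunctionID;
extern uint64_t now_ns(void);   /* hypothetical monotonic clock */

#define MAX_DEPTH 1024
static struct { FunctionID f; uint64_t t0; } frames[MAX_DEPTH];
static int depth;

void OnEnter(FunctionID f)
{
    if (depth < MAX_DEPTH) {
        frames[depth].f  = f;
        frames[depth].t0 = now_ns();
        depth++;
    }
}

/* Shared exit path: both Leave and Tailcall end the current function. */
static void function_ended(const char *how)
{
    if (depth > 0) {
        depth--;
        printf("%s: fn %#lx ran %llu ns\n", how,
               (unsigned long) frames[depth].f,
               (unsigned long long) (now_ns() - frames[depth].t0));
    }
}

void OnLeave(FunctionID f)    { (void) f; function_ended("leave"); }
void OnTailcall(FunctionID f) { (void) f; function_ended("tailcall"); }
```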
"Matching Example" had One tail call Two, but your graph will look like this: ``` Main | |-- One |-- Two ``` But at least this effect is explainable to your users, and is self-correcting after the tail call is complete, while yielding graphs that are consistent with your timing measurements. If instead you try to outsmart this situation and assume Tailcalls match the following Enter, the errors can snowball into incomprehensible graphs (see the nasty examples from the shadow stack section above). ### How often does this happen? So when does a managed function in the .NET Framework tail call into a native helper function inside the CLR? In the grand scheme of things, not a lot. But it's a pretty random and fragile list that depends on which JIT is in use (x86, x64, ia64), and can easily change as parts of the runtime are rev'd, or even as JIT compilation flags are modified by debuggers, profilers, and other environmental factors while a process is active. So you should not try to guess this list and make dependencies on it. ### Can't I just turn tail calling off?! If all this confusion is getting you down, you might be tempted to just avoid the problem in the first place. And yes, there is a way to do so, but I wouldn't recommend it in general. If you call SetEventMask, specifying COR\_PRF\_DISABLE\_OPTIMIZATIONS inside your mask, that will tell the JIT to turn off the tail call optimization. But the JIT will also turn off ALL optimizations. Profilers that shouldn't perturb the behavior of the app should definitely _not_ do this, as the code generation will be very different. ## Watching CLR tail calls in action If you're writing a profiler with Enter/Leave/Tailcall hooks, you'll want to make sure you exercise all your hooks so they're properly tested. It's easy enough to make sure your Enter/Leave hooks are called--just make sure the test app your profiler runs against has a Main()! But how to make sure your Tailcall hook is called? The surest way is to have a simple managed app that includes an obvious tail call candidate, and make sure the "tail." IL prefix is in place. You can use ilasm / ildasm to help build such an assembly. Here's an example I tried on x86 using C#. Start with some simple code that makes a call that should easily be optimized into a tail call: ``` using System; class Class1 { static int Main(string[] args) { return Helper(4); } static int Helper(int i) { Random r = new Random(); i = (i / 1000) + r.Next(); i = (i / 1000) + r.Next(); return MakeThisATailcall(i); } static int MakeThisATailcall(int i) { Random r = new Random(); i = (i / 1000) + r.Next(); i = (i / 1000) + r.Next(); return i; } } ``` You'll notice there's some extra gunk, like calls to Random.Next(), to make the functions big enough that the JIT won't inline them. There are other ways to avoid inlining (including from the profiling API itself), but padding your test functions is one of the easier ways to get started without impacting the code generation of the entire process. Now, compile that C# code into an IL assembly: ``` csc /o+ Class1.cs ``` (If you're wondering why I specified /o+, I've found that if I _don't_, then suboptimal IL gets generated, and some extraneous instructions appear inside Helper between the call to MakeThisATailcall and the return from Helper. Those extra instructions would prevent the JIT from making a tail call.) 
Run ildasm to get at the generated IL ``` ildasm Class1.exe ``` Inside ildasm, use File.Dump to generate a text file that contains a textual representation of the IL from Class1.exe. Call it Class1WithTail.il. Open up that file and add the tail. prefix just before the call you want optimized into a tail call (see highlighted yellow for changes): ``` .method private hidebysig static int32 Helper(int32 i) cil managed { ~~// Code size 45 (0x2d) ~~ // Code size 46 (0x2e) .maxstack 2 .locals init (class [mscorlib]System.Random V_0) IL_0000: newobj instance void [mscorlib]System.Random::.ctor() IL_0005: stloc.0 IL_0006: ldarg.0 IL_0007: ldc.i4 0x3e8 IL_000c: div IL_000d: ldloc.0 IL_000e: callvirt instance int32 [mscorlib]System.Random::Next() IL_0013: add IL_0014: starg.s i IL_0016: ldarg.0 IL_0017: ldc.i4 0x3e8 IL_001c: div IL_001d: ldloc.0 IL_001e: callvirt instance int32 [mscorlib]System.Random::Next() IL_0023: add IL_0024: starg.s i IL_0026: ldarg.0 ~~IL_0027: call int32 Class1::MakeThisATailcall(int32) IL_002c: ret ~~ IL_0027: tail. IL_0028: call int32 Class1::MakeThisATailcall(int32) IL_002d: ret } // end of method Class1::Helper ``` Now you can use ilasm to recompile your modified textual IL back into an executable assembly. ``` ilasm /debug=opt Class1WithTail.il ``` You now have Class1WithTail.exe that you can run! Hook up your profiler and step through your Tailcall hook. ## You Can Wake Up Now If you didn't learn anything, I hope you at least got some refreshing sleep thanks to this post. Here's a quick recap of what I wrote while you were napping: - If the last thing a function does is call another function, that call may be optimized into a simple jump (i.e., "tail call"). Tail calling is an optimization to save the time of stack manipulation and the space of generating an extra call frame. - In the CLR, the JIT has the final say on when it employs the tail call optimization. The JIT does this whenever it can, except for a huge list of exceptions. Note that the x86, x64, and ia64 JITs are all different, and you'll see different behavior on when they'll use the tail call optimizations. - Since some managed functions may tail call into native helper functions inside the CLR (for which you won't get an Enter hook notification), your Tailcall hook should treat the tail call as if it were a Leave, and not depend on the next Enter hook correlating to the target of the last tail call. With shadow stacks, for example, this means you should simply pop the calling function off your shadow stack in your Tailcall hook. - Since tail calls can be elusive to find in practice, it's well worth your while to use ildasm/ilasm to manufacture explicit tail calls so you can step through your Tailcall hook and test its logic. _David has been a developer at Microsoft for over 70 years (allowing for his upcoming time-displacement correction). He joined Microsoft in 2079, first starting in the experimental time-travel group. His current assignment is to apply his knowledge of the future to eliminate the "Wait for V3" effect customers commonly experience in his source universe. By using Retroactive Hindsight-ellisenseTM his goal is to "get it right the first time, this time" in a variety of product groups._
*This blog post originally appeared on David Broman's blog on 6/20/2007* For most people the idea of entering or returning from a function seems straightforward. Your profiler's Enter hook is called at the beginning of a function, and its Leave hook is called just before the function returns. But the idea of a tail call and exactly what that means for the Profiling API is less straightforward. In [Part 1](ELT Hooks - The Basics.md) I talked about the basics of the Enter / Leave / Tailcall hooks and generally how they work. You may want to review that post first if you haven't seen it yet. This post builds on that one by talking exclusively about the Tailcall hook, how it works, and what profilers should do inside their Tailcall hooks. ## Tail calling in general Tail calling is a compiler optimization that saves execution of instructions and saves reads and writes of stack memory. When the last thing a function does is call another function (and other conditions are favorable), the compiler may consider implementing that call as a tail call, instead of a regular call. ``` static public void Main() { Helper(); } static public void Helper() { One(); Two(); Three(); } static public void Three() { ... } ``` In the code above, the compiler may consider implementing the call from Helper() to Three() as a tail call. What does that mean, and why would that optimize anything? Well, imagine this is compiled without a tail call optimization. By the time Three() is called, the stack looks like this (my stacks grow UP): ``` Three Helper Main ``` Each of those functions causes a separate frame to be allocated on the stack. All the usual contents of a frame, including locals, parameters, the return address, saved registers, etc., get stored in each of those frames. And when each of those functions returns, the return address is read from the frame, and the stack pointer is adjusted to collapse the frame of the returning function. That's just the usual overhead associated with making a function call. Now, if the call from Helper() to Three() were implemented as a tail call, we'd avoid that overhead, and Three() would just "reuse" the stack frame that had been set up for Helper(). While Three() is executing, the call stack would look like this: ``` Three Main ``` And when Three() returns, it returns directly to Main() without popping back through Helper() first. Folks who live in functional programming languages like Scheme use recursion at least as often as C++ or C# folks use while and for loops. Such functional programming languages depend on tail call optimizations (in particular tail recursion) to avoid overflowing the stack. While imperative languages like C++ or C# don't have such a vital need for tail call optimizations, it's still pretty handy as it reduces the number of instructions executed and the writes to the stack. Also, it's worth noting that the amount of stack space used for a single frame can be more than you'd expect. For example, in CLR x64, each regular call (without the tail call optimization) uses a minimum of 48 bytes of stack space, even if it takes no arguments, has no locals, and returns nothing. So for small functions, the tail call optimization can provide a significant overhead reduction in terms of stack space. ## The CLR and tail calls When you're dealing with languages managed by the CLR, there are two kinds of compilers in play. 
There's the compiler that goes from your language's source code down to IL (C# developers know this as csc.exe), and then there's the compiler that goes from IL to native code (the JIT 32/64 bit compilers that are invoked at run time or NGEN time). Both the source-\>IL and IL-\>native compilers understand the tail call optimization. But the IL-\>native compiler--which I'll just refer to as JIT--has the final say on whether the tail call optimization will ultimately be used. The source-\>IL compiler can help to generate IL that is conducive to making tail calls, including the use of the "tail." IL prefix (more on that later). In this way, the source-\>IL compiler can structure the IL it generates to persuade the JIT into making a tail call. But the JIT always has the option to do whatever it wants. ### When does the JIT make tail calls? I asked Fei Chen and [Grant Richins](https://docs.microsoft.com/en-us/archive/blogs/grantri/), neighbors down the hall from me who happen to work on the JIT, under what conditions the various JITs will employ the tail call optimization. The full answer is rather detailed. The quick summary is that the JITs try to use the tail call optimization whenever they can, but there are lots of reasons why the tail call optimization can't be used. Some reasons why tail calling is a non-option: - Caller doesn't return immediately after the call (duh :-)) - Stack arguments between caller and callee are incompatible in a way that would require shifting things around in the caller's frame before the callee could execute - Caller and callee return different types - We inline the call instead (inlining is way better than tail calling, and opens the door to many more optimizations) - Security gets in the way - The debugger / profiler turned off JIT optimizations [Here](Tail call JIT conditions.md) are their full, detailed answers. _Note that how the JIT decides whether to use the tail calling optimization is an implementation detail that is prone to change at whim. **You must not take dependencies on this behavior**. Use this information for your own personal entertainment only._ ## Your Profiler's Tailcall hook I'm assuming you've already read through [Part 1](ELT Hooks - The Basics.md) and are familiar with how your profiler sets up its Enter/Leave/Tailcall hooks, so I'm not repeating any of those details here. I will focus on what kind of code you will typically want to place inside your Tailcall hook: ``` typedef void FunctionTailcall2( FunctionID funcId, UINT_PTR clientData, COR_PRF_FRAME_INFO func); ``` **Tip** : More than once I've seen profiler writers make the following mistake. They will take their naked assembly-language wrapper for their Enter2 and Leave2 hooks, and paste it again to use as the Tailcall2 assembly-language wrapper. The problem is they forget that the Tailcall2 hook takes a different number of parameters than the Enter2 / Leave2 hooks (or, more to the point, a different number of _bytes_ is passed on the stack to invoke the Tailcall2 hook). So, they'll take the "ret 16" at the end of their Enter2/Leave2 hook wrappers and stick that into their Tailcall2 hook wrapper, forgetting to change it to a "ret 12". Don't make the same mistake! It's worth noting what these parameters mean. With the Enter and Leave hooks it's pretty obvious that the parameters your hook is given (e.g., funcId) apply to the function being Entered or Left. But what about the Tailcall hook? 
Do the Tailcall hook's parameters describe the caller (function making the tail call) or the callee (function being tail called into)? Answer: the parameters refer to the tail call**er**. The way I remember it is that the Tailcall hook is like an "Alternative Leave" hook. A function ends either by returning (in which case the CLR invokes your Leave hook) or a function ends by tail calling out to somewhere else (in which case the CLR invokes your Tailcall hook). In either case (Leave hook or Tailcall hook) the hook's parameters tell you about the function that's _ending_. If a function happens to end by making a tail call, your profiler is not told the target of that tail call. (The astute reader will realize that actually your profiler _is_ told what the target of the tail call is--you need only wait until your Enter hook is called next, and that function will be the tail call target, or "tail callee". (Well, actually, this is true most of the time, but not all! (More on that later, but consider this confusing, nested series of afterthoughts a hint to a question I pose further down in this post.))) Did you just count the number of closing parentheses to ensure I got it right? If so, I'd like to make fun of you but I won't--I'd have counted the parentheses, too. My house is glass. Ok, enough dilly-dallying. What should your profiler do in its Tailcall hook? Two of the more common reasons profilers use Enter/Leave/Tailcall hooks in the first place are to keep **shadow stacks** or to maintain **call traces** (sometimes with timing information). ### Shadow stacks The [CLRProfiler](http://www.microsoft.com/downloads/details.aspx?FamilyID=a362781c-3870-43be-8926-862b40aa0cd0&DisplayLang=en) is a great example of using Enter/Leave/Tailcall hooks to maintain shadow stacks. A shadow stack is your profiler's own copy of the current stack of function calls on a given thread at any given time. Upon Enter of a function, you push that FunctionID (and whatever other info interests you, such as arguments) onto your data structure that represents that thread's stack. Upon Leave of a function, you pop that FunctionID. This gives you a live list of managed calls in play on the thread. The CLRProfiler uses shadow stacks so that whenever the managed app being profiled chooses to allocate a new object, the CLRProfiler can know the managed call stack that led to the allocation. (Note that an alternate way of accomplishing this would be to call DoStackSnapshot at every allocation point instead of maintaining a shadow stack. Since objects are allocated so frequently, however, you'd end up calling DoStackSnapshot extremely frequently and will often see worse performance than if you had been maintaining shadow stacks in the first place.) OK, so when your profiler maintains a shadow stack, it's clear what your profiler should do on Enter or Leave, but what should it do on Tailcall? There are a couple ways one could imagine answering that question, but only one of them will work! Taking the example from the top of this post, imagine the stack looks like this: ``` Helper Main ``` and Helper is about to make a tail call into Three(). What should your profiler do? Method 1: On tailcall, pop the last FunctionID. (In other words, treat Tailcall just like Leave.) So, in this example, when Helper() calls Three(), we'd pop Helper(). 
As soon as Three() is called, our profiler would receive an Enter for Three(), and our shadow stack would look like this: ``` Three Main ``` This approach mirrors reality, because this is what the actual physical stack will look like. Indeed, if one attaches a debugger to a live process, and breaks in while the process is inside a tail call, the debugger will show a call stack just like this, where you see the tail callee, but not the tail caller. However, it might be a little confusing to a user of your profiler who looks at his source code and sees that Helper() (not Main()) calls Three(). He may have no idea that when Helper() called Three(), the JIT chose to turn that into a tail call. In fact, your user may not even know what a tail call is. You might therefore be tempted to try this instead: Method 2: On tailcall, "mark" the FunctionID at the top of your stack as needing a "deferred pop" when its callee is popped, but don't pop yet. With this strategy, for the duration of the call to Three(), the shadow stack will look like this: ``` Three Helper (marked for deferred pop) Main ``` which some might consider more user-friendly. And as soon as Three() returns, your profiler will sneakily do a double-pop leaving just this: ``` Main ``` So which should your profiler use: Method 1 or Method 2? Before I answer, take some time to think about this, invoking that hint I cryptically placed above in nested parentheses. And no, the fact that the parentheses were nested is not part of the actual hint. The answer: Method 1. In principle, either method should be fine. However, the behavior of the CLR under certain circumstances will break Method 2. Those "certain circumstances" are what I alluded to when I mentioned "this is true most of the time, but not all" above. These mysterious "certain circumstances" involve a managed function tail calling into a native helper function inside the runtime. Here's an example: ``` static public void Main() { Thread.Sleep(44); Helper(); } ``` It just so happens that the implementation of Thread.Sleep makes a call into a native helper function in the bowels of the runtime. And that call happens to be the last thing Thread.Sleep does. So the JIT may helpfully optimize that call into a tail call. Here are the hook calls your profiler will see in this case: ``` (1) Enter (into Main) (2) Enter (into Thread.Sleep) (3) Tailcall (from Thread.Sleep) (4) Enter (into Helper) (5) Leave (from Helper) (6) Leave (from Main) ``` Note that after you get a Tailcall telling you that Thread.Sleep is done, (in (3)), the very next Enter you get (in (4)) is NOT the Enter for the function being tail called. This is because the CLR only provides Enter/Leave/Tailcall hooks for _managed_ functions, and the very next managed function being entered is Helper(). So, how will Method 1 and Method 2 fare in this example? Method 1: Shadow stack works By popping on every Tailcall hook, your shadow stack stays up to date. Method 2: Shadow stack fails At stage (4), the shadow stack looks like this: ``` Helper Thread.Sleep (marked for "deferred pop") Main ``` If you think it might be complicated to explain tail calls to your users so they can understand the Method 1 form of shadow stack presentation, just try explaining why it makes sense to present to them that Thread.Sleep() is calling Helper()! 
And of course, this can get arbitrarily nasty: ``` static public void Main() { Thread.Sleep(44); Thread.Sleep(44); Thread.Sleep(44); Thread.Sleep(44); Helper(); } ``` would yield: ``` Helper Thread.Sleep (marked for "deferred pop") Thread.Sleep (marked for "deferred pop") Thread.Sleep (marked for "deferred pop") Thread.Sleep (marked for "deferred pop") Main ``` And things get more complicated if you start to think about when you actually pop a frame marked for "deferred pop". In all the above examples, you would do so as soon as the frame above it gets popped. So once Helper() is popped (due to Leave()), you'd cascade-pop all the Thread.Sleeps. But what if there is no frame above the frames marked for "deferred pop"? To wit: ``` static public void Main() { Helper() } static public void Helper() { Thread.Sleep(44); Thread.Sleep(44); Thread.Sleep(44); Thread.Sleep(44); } ``` would yield: ``` Thread.Sleep (marked for "deferred pop") Thread.Sleep (marked for "deferred pop") Thread.Sleep (marked for "deferred pop") Thread.Sleep (marked for "deferred pop") Helper Main ``` until you get a Leave hook for Helper(). At this point, you need to pop Helper() from your shadow stack, but he's not at the top-- he's buried under all your "deferred pop" frames. So your profiler would need to perform the deferred pops if a frame above OR below them gets popped. Hopefully, the yuckiness of this implementation will scare you straight. But the confusion of presenting invalid stacks to the user is the real reason to abandon Method 2 and go with Method 1. ### Call tracing The important lesson to learn from the above section is that sometimes a Tailcall hook will match up with the next Enter hook (i.e., the tail call you're notified of in your Tailcall hook will have as its callee the very function you're notified of in the next Enter hook), and sometimes the Tailcall hook will NOT match with the next Enter hook (in particular when the Tailcall hook refers to a tail call into a native helper in the runtime). And the sad fact is that the Enter/Leave/Tailcall hook design does not currently allow you to predict whether a Tailcall will match the next Enter. As an illustration, consider two simple tail call examples: **Matching Example** ``` static public void Main() { One(); ...(other code here)... } static public void One() { Two(); } ``` **Non-matching Example** ``` static public void Main() { Thread.Sleep(44); Two(); } ``` In either case, your profiler will see the following hook calls ``` (1) Enter (into Main) (2) Enter (into One / Thread.Sleep) (3) Tailcall (from One / Thread.Sleep) (4) Enter (into Two) ... ``` In the first example, (3) and (4) match (i.e., the tail call really does call into Two()). But in the second example, they do not (the tail call does NOT call into Two()). Since you don't know when Tailcall will match the next Enter, your implementation of call tracing, like shadow stack maintenance, must treat a Tailcall hook just like a Leave. If you're logging when functions begin and end, potentially with the amount of time spent inside the function, then your Tailcall hook should basically do the same thing as your Leave hook. A call to your Tailcall hook indicates that the specified function is over and done with, just like a call to your Leave hook. As with shadow stacks, this will sometimes lead to call graphs that could be confusing. 
"Matching Example" had One tail call Two, but your graph will look like this: ``` Main | |-- One |-- Two ``` But at least this effect is explainable to your users, and is self-correcting after the tail call is complete, while yielding graphs that are consistent with your timing measurements. If instead you try to outsmart this situation and assume Tailcalls match the following Enter, the errors can snowball into incomprehensible graphs (see the nasty examples from the shadow stack section above). ### How often does this happen? So when does a managed function in the .NET Framework tail call into a native helper function inside the CLR? In the grand scheme of things, not a lot. But it's a pretty random and fragile list that depends on which JIT is in use (x86, x64, ia64), and can easily change as parts of the runtime are rev'd, or even as JIT compilation flags are modified by debuggers, profilers, and other environmental factors while a process is active. So you should not try to guess this list and make dependencies on it. ### Can't I just turn tail calling off?! If all this confusion is getting you down, you might be tempted to just avoid the problem in the first place. And yes, there is a way to do so, but I wouldn't recommend it in general. If you call SetEventMask, specifying COR\_PRF\_DISABLE\_OPTIMIZATIONS inside your mask, that will tell the JIT to turn off the tail call optimization. But the JIT will also turn off ALL optimizations. Profilers that shouldn't perturb the behavior of the app should definitely _not_ do this, as the code generation will be very different. ## Watching CLR tail calls in action If you're writing a profiler with Enter/Leave/Tailcall hooks, you'll want to make sure you exercise all your hooks so they're properly tested. It's easy enough to make sure your Enter/Leave hooks are called--just make sure the test app your profiler runs against has a Main()! But how to make sure your Tailcall hook is called? The surest way is to have a simple managed app that includes an obvious tail call candidate, and make sure the "tail." IL prefix is in place. You can use ilasm / ildasm to help build such an assembly. Here's an example I tried on x86 using C#. Start with some simple code that makes a call that should easily be optimized into a tail call: ``` using System; class Class1 { static int Main(string[] args) { return Helper(4); } static int Helper(int i) { Random r = new Random(); i = (i / 1000) + r.Next(); i = (i / 1000) + r.Next(); return MakeThisATailcall(i); } static int MakeThisATailcall(int i) { Random r = new Random(); i = (i / 1000) + r.Next(); i = (i / 1000) + r.Next(); return i; } } ``` You'll notice there's some extra gunk, like calls to Random.Next(), to make the functions big enough that the JIT won't inline them. There are other ways to avoid inlining (including from the profiling API itself), but padding your test functions is one of the easier ways to get started without impacting the code generation of the entire process. Now, compile that C# code into an IL assembly: ``` csc /o+ Class1.cs ``` (If you're wondering why I specified /o+, I've found that if I _don't_, then suboptimal IL gets generated, and some extraneous instructions appear inside Helper between the call to MakeThisATailcall and the return from Helper. Those extra instructions would prevent the JIT from making a tail call.) 
Run ildasm to get at the generated IL ``` ildasm Class1.exe ``` Inside ildasm, use File.Dump to generate a text file that contains a textual representation of the IL from Class1.exe. Call it Class1WithTail.il. Open up that file and add the tail. prefix just before the call you want optimized into a tail call (see highlighted yellow for changes): ``` .method private hidebysig static int32 Helper(int32 i) cil managed { ~~// Code size 45 (0x2d) ~~ // Code size 46 (0x2e) .maxstack 2 .locals init (class [mscorlib]System.Random V_0) IL_0000: newobj instance void [mscorlib]System.Random::.ctor() IL_0005: stloc.0 IL_0006: ldarg.0 IL_0007: ldc.i4 0x3e8 IL_000c: div IL_000d: ldloc.0 IL_000e: callvirt instance int32 [mscorlib]System.Random::Next() IL_0013: add IL_0014: starg.s i IL_0016: ldarg.0 IL_0017: ldc.i4 0x3e8 IL_001c: div IL_001d: ldloc.0 IL_001e: callvirt instance int32 [mscorlib]System.Random::Next() IL_0023: add IL_0024: starg.s i IL_0026: ldarg.0 ~~IL_0027: call int32 Class1::MakeThisATailcall(int32) IL_002c: ret ~~ IL_0027: tail. IL_0028: call int32 Class1::MakeThisATailcall(int32) IL_002d: ret } // end of method Class1::Helper ``` Now you can use ilasm to recompile your modified textual IL back into an executable assembly. ``` ilasm /debug=opt Class1WithTail.il ``` You now have Class1WithTail.exe that you can run! Hook up your profiler and step through your Tailcall hook. ## You Can Wake Up Now If you didn't learn anything, I hope you at least got some refreshing sleep thanks to this post. Here's a quick recap of what I wrote while you were napping: - If the last thing a function does is call another function, that call may be optimized into a simple jump (i.e., "tail call"). Tail calling is an optimization to save the time of stack manipulation and the space of generating an extra call frame. - In the CLR, the JIT has the final say on when it employs the tail call optimization. The JIT does this whenever it can, except for a huge list of exceptions. Note that the x86, x64, and ia64 JITs are all different, and you'll see different behavior on when they'll use the tail call optimizations. - Since some managed functions may tail call into native helper functions inside the CLR (for which you won't get an Enter hook notification), your Tailcall hook should treat the tail call as if it were a Leave, and not depend on the next Enter hook correlating to the target of the last tail call. With shadow stacks, for example, this means you should simply pop the calling function off your shadow stack in your Tailcall hook. - Since tail calls can be elusive to find in practice, it's well worth your while to use ildasm/ilasm to manufacture explicit tail calls so you can step through your Tailcall hook and test its logic. _David has been a developer at Microsoft for over 70 years (allowing for his upcoming time-displacement correction). He joined Microsoft in 2079, first starting in the experimental time-travel group. His current assignment is to apply his knowledge of the future to eliminate the "Wait for V3" effect customers commonly experience in his source universe. By using Retroactive Hindsight-ellisenseTM his goal is to "get it right the first time, this time" in a variety of product groups._
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/arm/Linit_local.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Ginit_local.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Ginit_local.c" #endif
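These two-line L*.c wrappers exist so the build can produce both generic (G-prefixed) and local-only (L-prefixed) variants from one source file. From the client's side, the pattern they support looks like the sketch below: defining UNW_LOCAL_ONLY before including libunwind.h maps the unw_* entry points onto the faster local-only builds.

```
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#include <stdio.h>

void print_backtrace(void)
{
    unw_context_t ctx;
    unw_cursor_t cur;
    unw_word_t ip;

    unw_getcontext(&ctx);
    unw_init_local(&cur, &ctx);
    /* Walk the current thread's stack one frame at a time. */
    while (unw_step(&cur) > 0) {
        unw_get_reg(&cur, UNW_REG_IP, &ip);
        printf("ip = %#lx\n", (unsigned long) ip);
    }
}
```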
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/loongarch64/Lstep.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gstep.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gstep.c" #endif
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/unwind/GetRegionStart.c
/* libunwind - a platform-independent unwind library Copyright (C) 2003-2004 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind-internal.h" unsigned long _Unwind_GetRegionStart (struct _Unwind_Context *context) { unw_proc_info_t pi; pi.start_ip = 0; unw_get_proc_info (&context->cursor, &pi); return pi.start_ip; } unsigned long __libunwind_Unwind_GetRegionStart (struct _Unwind_Context *) ALIAS (_Unwind_GetRegionStart);
/* libunwind - a platform-independent unwind library Copyright (C) 2003-2004 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind-internal.h" unsigned long _Unwind_GetRegionStart (struct _Unwind_Context *context) { unw_proc_info_t pi; pi.start_ip = 0; unw_get_proc_info (&context->cursor, &pi); return pi.start_ip; } unsigned long __libunwind_Unwind_GetRegionStart (struct _Unwind_Context *) ALIAS (_Unwind_GetRegionStart);
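As a usage note, and assuming only the standard Itanium-ABI <unwind.h> interface rather than anything specific to this repository, a personality routine typically pairs this accessor with _Unwind_GetIP to turn the current IP into a region-relative offset, the form in which LSDA call-site tables record ranges:

```
#include <unwind.h>
#include <stdio.h>

static void show_offset(struct _Unwind_Context *ctx)
{
    /* Both accessors are part of the standard unwind interface. */
    unsigned long start = (unsigned long) _Unwind_GetRegionStart(ctx);
    unsigned long ip    = (unsigned long) _Unwind_GetIP(ctx);

    printf("ip is %#lx bytes into the current region\n", ip - start);
}
```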
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/eglib/test/ptrarray.c
#include <stdio.h> #include <glib.h> #include "test.h" /* Redefine the private structure only to verify proper allocations */ typedef struct _GPtrArrayPriv { gpointer *pdata; guint len; guint size; } GPtrArrayPriv; /* Don't add more than 32 items to this please */ static const char *items [] = { "Apples", "Oranges", "Plumbs", "Goats", "Snorps", "Grapes", "Tickle", "Place", "Coffee", "Cookies", "Cake", "Cheese", "Tseng", "Holiday", "Avenue", "Smashing", "Water", "Toilet", NULL }; static GPtrArray *ptrarray_alloc_and_fill(guint *item_count) { GPtrArray *array = g_ptr_array_new(); gint i; for(i = 0; items[i] != NULL; i++) { g_ptr_array_add(array, (gpointer)items[i]); } if (item_count != NULL) { *item_count = i; } return array; } static guint guess_size(guint length) { guint size = 1; while (size < length) { size <<= 1; } return size; } static RESULT ptrarray_alloc (void) { GPtrArrayPriv *array; guint i; array = (GPtrArrayPriv *)ptrarray_alloc_and_fill(&i); if (array->size != guess_size(array->len)) { return FAILED("Size should be %d, but it is %d", guess_size(array->len), array->size); } if (array->len != i) { return FAILED("Expected %d node(s) in the array", i); } g_ptr_array_free((GPtrArray *)array, TRUE); return OK; } static RESULT ptrarray_for_iterate (void) { GPtrArray *array = ptrarray_alloc_and_fill(NULL); guint i; for (i = 0; i < array->len; i++) { char *item = (char *)g_ptr_array_index(array, i); if (item != items[i]) { return FAILED( "Expected item at %d to be %s, but it was %s", i, items[i], item); } } g_ptr_array_free(array, TRUE); return OK; } static gint foreach_iterate_index = 0; static char *foreach_iterate_error = NULL; static void foreach_callback (gpointer data, gpointer user_data) { char *item = (char *)data; const char *item_cmp = items[foreach_iterate_index++]; if (foreach_iterate_error != NULL) { return; } if (item != item_cmp) { foreach_iterate_error = FAILED( "Expected item at %d to be %s, but it was %s", foreach_iterate_index - 1, item_cmp, item); } } static RESULT ptrarray_foreach_iterate (void) { GPtrArray *array = ptrarray_alloc_and_fill(NULL); foreach_iterate_index = 0; foreach_iterate_error = NULL; g_ptr_array_foreach(array, foreach_callback, array); g_ptr_array_free(array, TRUE); return foreach_iterate_error; } static RESULT ptrarray_set_size (void) { GPtrArray *array = g_ptr_array_new(); guint i, grow_length = 50; g_ptr_array_add(array, (gpointer)items[0]); g_ptr_array_add(array, (gpointer)items[1]); g_ptr_array_set_size(array, grow_length); if (array->len != grow_length) { return FAILED("Array length should be 50, it is %d", array->len); } else if (array->pdata[0] != items[0]) { return FAILED("Item 0 was overwritten, should be %s", items[0]); } else if (array->pdata[1] != items[1]) { return FAILED("Item 1 was overwritten, should be %s", items[1]); } for (i = 2; i < array->len; i++) { if (array->pdata[i] != NULL) { return FAILED("Item %d is not NULL, it is %p", i, array->pdata[i]); } } g_ptr_array_free(array, TRUE); return OK; } static RESULT ptrarray_remove_index (void) { GPtrArray *array; guint i; array = ptrarray_alloc_and_fill(&i); g_ptr_array_remove_index(array, 0); if (array->pdata[0] != items[1]) { return FAILED("First item is not %s, it is %s", items[1], array->pdata[0]); } g_ptr_array_remove_index(array, array->len - 1); if (array->pdata[array->len - 1] != items[array->len]) { return FAILED("Last item is not %s, it is %s", items[array->len - 2], array->pdata[array->len - 1]); } g_ptr_array_free(array, TRUE); return OK; } static RESULT 
ptrarray_remove_index_fast (void) { GPtrArray *array; guint i; array = ptrarray_alloc_and_fill(&i); g_ptr_array_remove_index_fast(array, 0); if (array->pdata[0] != items[array->len]) { return FAILED("First item is not %s, it is %s", items[array->len], array->pdata[0]); } g_ptr_array_remove_index_fast(array, array->len - 1); if (array->pdata[array->len - 1] != items[array->len - 1]) { return FAILED("Last item is not %s, it is %s", items[array->len - 1], array->pdata[array->len - 1]); } g_ptr_array_free(array, TRUE); return OK; } static RESULT ptrarray_remove (void) { GPtrArray *array; guint i; array = ptrarray_alloc_and_fill(&i); g_ptr_array_remove(array, (gpointer)items[7]); if (!g_ptr_array_remove(array, (gpointer)items[4])) { return FAILED("Item %s not removed", items[4]); } if (g_ptr_array_remove(array, (gpointer)items[4])) { return FAILED("Item %s still in array after removal", items[4]); } if (array->pdata[array->len - 1] != items[array->len + 1]) { return FAILED("Last item in GPtrArray not correct"); } g_ptr_array_free(array, TRUE); return OK; } static gint ptrarray_sort_compare (gconstpointer a, gconstpointer b) { gchar *stra = *(gchar **) a; gchar *strb = *(gchar **) b; return strcmp(stra, strb); } static RESULT ptrarray_sort (void) { GPtrArray *array = g_ptr_array_new(); guint i; static gchar * const letters [] = { (char*)"A", (char*)"B", (char*)"C", (char*)"D", (char*)"E" }; g_ptr_array_add(array, letters[0]); g_ptr_array_add(array, letters[1]); g_ptr_array_add(array, letters[2]); g_ptr_array_add(array, letters[3]); g_ptr_array_add(array, letters[4]); g_ptr_array_sort(array, ptrarray_sort_compare); for (i = 0; i < array->len; i++) { if (array->pdata[i] != letters[i]) { return FAILED("Array out of order, expected %s got %s at position %d", letters [i], (gchar *) array->pdata [i], i); } } g_ptr_array_free(array, TRUE); return OK; } static gint ptrarray_sort_compare_with_data (gconstpointer a, gconstpointer b, gpointer user_data) { gchar *stra = *(gchar **) a; gchar *strb = *(gchar **) b; if (strcmp (user_data, "this is the data for qsort") != 0) fprintf (stderr, "oops at compare with_data\n"); return strcmp(stra, strb); } static RESULT ptrarray_remove_fast (void) { GPtrArray *array = g_ptr_array_new(); static gchar * const letters [] = { (char*)"A", (char*)"B", (char*)"C", (char*)"D", (char*)"E" }; if (g_ptr_array_remove_fast (array, NULL)) return FAILED ("Removing NULL succeeded"); g_ptr_array_add(array, letters[0]); if (!g_ptr_array_remove_fast (array, letters[0]) || array->len != 0) return FAILED ("Removing last element failed"); g_ptr_array_add(array, letters[0]); g_ptr_array_add(array, letters[1]); g_ptr_array_add(array, letters[2]); g_ptr_array_add(array, letters[3]); g_ptr_array_add(array, letters[4]); if (!g_ptr_array_remove_fast (array, letters[0]) || array->len != 4) return FAILED ("Removing first element failed"); if (array->pdata [0] != letters [4]) return FAILED ("First element wasn't replaced with last upon removal"); if (g_ptr_array_remove_fast (array, letters[0])) return FAILED ("Succedeed removing a non-existing element"); if (!g_ptr_array_remove_fast (array, letters[3]) || array->len != 3) return FAILED ("Failed removing \"D\""); if (!g_ptr_array_remove_fast (array, letters[1]) || array->len != 2) return FAILED ("Failed removing \"B\""); if (array->pdata [0] != letters [4] || array->pdata [1] != letters [2]) return FAILED ("Last two elements are wrong"); g_ptr_array_free(array, TRUE); return OK; } static Test ptrarray_tests [] = { {"alloc", ptrarray_alloc}, 
{"for_iterate", ptrarray_for_iterate}, {"foreach_iterate", ptrarray_foreach_iterate}, {"set_size", ptrarray_set_size}, {"remove_index", ptrarray_remove_index}, {"remove_index_fast", ptrarray_remove_index_fast}, {"remove", ptrarray_remove}, {"sort", ptrarray_sort}, {"remove_fast", ptrarray_remove_fast}, {NULL, NULL} }; DEFINE_TEST_GROUP_INIT(ptrarray_tests_init, ptrarray_tests)
#include <stdio.h> #include <glib.h> #include "test.h" /* Redefine the private structure only to verify proper allocations */ typedef struct _GPtrArrayPriv { gpointer *pdata; guint len; guint size; } GPtrArrayPriv; /* Don't add more than 32 items to this please */ static const char *items [] = { "Apples", "Oranges", "Plumbs", "Goats", "Snorps", "Grapes", "Tickle", "Place", "Coffee", "Cookies", "Cake", "Cheese", "Tseng", "Holiday", "Avenue", "Smashing", "Water", "Toilet", NULL }; static GPtrArray *ptrarray_alloc_and_fill(guint *item_count) { GPtrArray *array = g_ptr_array_new(); gint i; for(i = 0; items[i] != NULL; i++) { g_ptr_array_add(array, (gpointer)items[i]); } if (item_count != NULL) { *item_count = i; } return array; } static guint guess_size(guint length) { guint size = 1; while (size < length) { size <<= 1; } return size; } static RESULT ptrarray_alloc (void) { GPtrArrayPriv *array; guint i; array = (GPtrArrayPriv *)ptrarray_alloc_and_fill(&i); if (array->size != guess_size(array->len)) { return FAILED("Size should be %d, but it is %d", guess_size(array->len), array->size); } if (array->len != i) { return FAILED("Expected %d node(s) in the array", i); } g_ptr_array_free((GPtrArray *)array, TRUE); return OK; } static RESULT ptrarray_for_iterate (void) { GPtrArray *array = ptrarray_alloc_and_fill(NULL); guint i; for (i = 0; i < array->len; i++) { char *item = (char *)g_ptr_array_index(array, i); if (item != items[i]) { return FAILED( "Expected item at %d to be %s, but it was %s", i, items[i], item); } } g_ptr_array_free(array, TRUE); return OK; } static gint foreach_iterate_index = 0; static char *foreach_iterate_error = NULL; static void foreach_callback (gpointer data, gpointer user_data) { char *item = (char *)data; const char *item_cmp = items[foreach_iterate_index++]; if (foreach_iterate_error != NULL) { return; } if (item != item_cmp) { foreach_iterate_error = FAILED( "Expected item at %d to be %s, but it was %s", foreach_iterate_index - 1, item_cmp, item); } } static RESULT ptrarray_foreach_iterate (void) { GPtrArray *array = ptrarray_alloc_and_fill(NULL); foreach_iterate_index = 0; foreach_iterate_error = NULL; g_ptr_array_foreach(array, foreach_callback, array); g_ptr_array_free(array, TRUE); return foreach_iterate_error; } static RESULT ptrarray_set_size (void) { GPtrArray *array = g_ptr_array_new(); guint i, grow_length = 50; g_ptr_array_add(array, (gpointer)items[0]); g_ptr_array_add(array, (gpointer)items[1]); g_ptr_array_set_size(array, grow_length); if (array->len != grow_length) { return FAILED("Array length should be 50, it is %d", array->len); } else if (array->pdata[0] != items[0]) { return FAILED("Item 0 was overwritten, should be %s", items[0]); } else if (array->pdata[1] != items[1]) { return FAILED("Item 1 was overwritten, should be %s", items[1]); } for (i = 2; i < array->len; i++) { if (array->pdata[i] != NULL) { return FAILED("Item %d is not NULL, it is %p", i, array->pdata[i]); } } g_ptr_array_free(array, TRUE); return OK; } static RESULT ptrarray_remove_index (void) { GPtrArray *array; guint i; array = ptrarray_alloc_and_fill(&i); g_ptr_array_remove_index(array, 0); if (array->pdata[0] != items[1]) { return FAILED("First item is not %s, it is %s", items[1], array->pdata[0]); } g_ptr_array_remove_index(array, array->len - 1); if (array->pdata[array->len - 1] != items[array->len]) { return FAILED("Last item is not %s, it is %s", items[array->len - 2], array->pdata[array->len - 1]); } g_ptr_array_free(array, TRUE); return OK; } static RESULT 
ptrarray_remove_index_fast (void) { GPtrArray *array; guint i; array = ptrarray_alloc_and_fill(&i); g_ptr_array_remove_index_fast(array, 0); if (array->pdata[0] != items[array->len]) { return FAILED("First item is not %s, it is %s", items[array->len], array->pdata[0]); } g_ptr_array_remove_index_fast(array, array->len - 1); if (array->pdata[array->len - 1] != items[array->len - 1]) { return FAILED("Last item is not %s, it is %s", items[array->len - 1], array->pdata[array->len - 1]); } g_ptr_array_free(array, TRUE); return OK; } static RESULT ptrarray_remove (void) { GPtrArray *array; guint i; array = ptrarray_alloc_and_fill(&i); g_ptr_array_remove(array, (gpointer)items[7]); if (!g_ptr_array_remove(array, (gpointer)items[4])) { return FAILED("Item %s not removed", items[4]); } if (g_ptr_array_remove(array, (gpointer)items[4])) { return FAILED("Item %s still in array after removal", items[4]); } if (array->pdata[array->len - 1] != items[array->len + 1]) { return FAILED("Last item in GPtrArray not correct"); } g_ptr_array_free(array, TRUE); return OK; } static gint ptrarray_sort_compare (gconstpointer a, gconstpointer b) { gchar *stra = *(gchar **) a; gchar *strb = *(gchar **) b; return strcmp(stra, strb); } static RESULT ptrarray_sort (void) { GPtrArray *array = g_ptr_array_new(); guint i; static gchar * const letters [] = { (char*)"A", (char*)"B", (char*)"C", (char*)"D", (char*)"E" }; g_ptr_array_add(array, letters[0]); g_ptr_array_add(array, letters[1]); g_ptr_array_add(array, letters[2]); g_ptr_array_add(array, letters[3]); g_ptr_array_add(array, letters[4]); g_ptr_array_sort(array, ptrarray_sort_compare); for (i = 0; i < array->len; i++) { if (array->pdata[i] != letters[i]) { return FAILED("Array out of order, expected %s got %s at position %d", letters [i], (gchar *) array->pdata [i], i); } } g_ptr_array_free(array, TRUE); return OK; } static gint ptrarray_sort_compare_with_data (gconstpointer a, gconstpointer b, gpointer user_data) { gchar *stra = *(gchar **) a; gchar *strb = *(gchar **) b; if (strcmp (user_data, "this is the data for qsort") != 0) fprintf (stderr, "oops at compare with_data\n"); return strcmp(stra, strb); } static RESULT ptrarray_remove_fast (void) { GPtrArray *array = g_ptr_array_new(); static gchar * const letters [] = { (char*)"A", (char*)"B", (char*)"C", (char*)"D", (char*)"E" }; if (g_ptr_array_remove_fast (array, NULL)) return FAILED ("Removing NULL succeeded"); g_ptr_array_add(array, letters[0]); if (!g_ptr_array_remove_fast (array, letters[0]) || array->len != 0) return FAILED ("Removing last element failed"); g_ptr_array_add(array, letters[0]); g_ptr_array_add(array, letters[1]); g_ptr_array_add(array, letters[2]); g_ptr_array_add(array, letters[3]); g_ptr_array_add(array, letters[4]); if (!g_ptr_array_remove_fast (array, letters[0]) || array->len != 4) return FAILED ("Removing first element failed"); if (array->pdata [0] != letters [4]) return FAILED ("First element wasn't replaced with last upon removal"); if (g_ptr_array_remove_fast (array, letters[0])) return FAILED ("Succedeed removing a non-existing element"); if (!g_ptr_array_remove_fast (array, letters[3]) || array->len != 3) return FAILED ("Failed removing \"D\""); if (!g_ptr_array_remove_fast (array, letters[1]) || array->len != 2) return FAILED ("Failed removing \"B\""); if (array->pdata [0] != letters [4] || array->pdata [1] != letters [2]) return FAILED ("Last two elements are wrong"); g_ptr_array_free(array, TRUE); return OK; } static Test ptrarray_tests [] = { {"alloc", ptrarray_alloc}, 
{"for_iterate", ptrarray_for_iterate}, {"foreach_iterate", ptrarray_foreach_iterate}, {"set_size", ptrarray_set_size}, {"remove_index", ptrarray_remove_index}, {"remove_index_fast", ptrarray_remove_index_fast}, {"remove", ptrarray_remove}, {"sort", ptrarray_sort}, {"remove_fast", ptrarray_remove_fast}, {NULL, NULL} }; DEFINE_TEST_GROUP_INIT(ptrarray_tests_init, ptrarray_tests)
-1
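As a reference for the GPtrArray tests in the record above, here is a minimal usage sketch. It is not part of the test suite; it assumes only the eglib calls the tests themselves exercise (g_ptr_array_new, g_ptr_array_add, g_ptr_array_remove_fast, g_ptr_array_index, g_ptr_array_free) and illustrates why remove_fast does not preserve ordering.

#include <glib.h>
#include <stdio.h>

int main(void)
{
    GPtrArray *array = g_ptr_array_new();
    char *apples = "Apples"; /* keep one pointer so removal-by-pointer matches */
    g_ptr_array_add(array, (gpointer)apples);
    g_ptr_array_add(array, (gpointer)"Oranges");
    g_ptr_array_add(array, (gpointer)"Plumbs");

    /* remove_fast swaps the last element into the freed slot instead of
       shifting, so it is O(1) but reorders the array: after removing
       "Apples", index 0 holds "Plumbs", exactly as the remove_fast and
       remove_index_fast tests above verify. */
    g_ptr_array_remove_fast(array, (gpointer)apples);

    printf("len=%u, first=%s\n", array->len, (char *)g_ptr_array_index(array, 0));

    g_ptr_array_free(array, TRUE); /* TRUE frees pdata, not the elements */
    return 0;
}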
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/tests/JIT/Directed/arglist/varargnative.c
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdarg.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #ifdef _MSC_VER #define DLLEXPORT __declspec(dllexport) #else #define DLLEXPORT __attribute__((visibility("default"))) #if __i386__ #define _cdecl __attribute__((cdecl)) #else #define _cdecl #endif #define __int32 int #define __int16 short int #define __int8 char // assumes char is signed #ifdef HOST_64BIT #define __int64 long #else // HOST_64BIT #define __int64 long long #endif // HOST_64BIT #endif // !_MSC_VER /* Structures */ /* * struct one_byte_struct (4 bytes) */ typedef struct { int one; } one_int_struct; /* * struct two_int_struct (8 bytes) */ typedef struct { int one; int two; } two_int_struct; /* * struct one_long_long_struct (8 bytes) */ typedef struct { __int64 one; } one_long_long_struct; /* * struct two_long_long_struct (16 bytes) */ typedef struct { __int64 one; __int64 two; } two_long_long_struct; /* * struct four_int_struct (16 bytes) */ typedef struct { int one; int two; int three; int four; } four_int_struct; /* * struct four_long_long_struct (32 bytes) */ typedef struct { __int64 one; __int64 two; __int64 three; __int64 four; } four_long_long_struct; /* * struct one_float_struct (4 bytes) */ typedef struct { float one; } one_float_struct; /* * struct two_float_struct (8 bytes) */ typedef struct { float one; float two; } two_float_struct; /* * struct one_double_struct (8 bytes) */ typedef struct { double one; } one_double_struct; /* * struct two_double_struct (16 bytes) */ typedef struct { double one; double two; } two_double_struct; /* * struct three_double_struct (24 bytes) */ typedef struct { double one; double two; double three; } three_double_struct; /* * struct four_float_struct (16 bytes) */ typedef struct { float one; float two; float three; float four; } four_float_struct; /* * struct four_double_struct (32 bytes) */ typedef struct { double one; double two; double three; double four; } four_double_struct; /* * struct eight_byte_struct (8 bytes) */ typedef struct { char one; char two; char three; char four; char five; char six; char seven; char eight; } eight_byte_struct; /* * struct sixteen_byte_struct (8 bytes) */ typedef struct { char one; char two; char three; char four; char five; char six; char seven; char eight; char nine; char ten; char eleven; char twelve; char thirteen; char fourteen; char fifteen; char sixteen; } sixteen_byte_struct; /* Tests */ DLLEXPORT int _cdecl test_passing_ints(int count, ...) { va_list ap; int index, sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, int); } va_end(ap); return sum; } DLLEXPORT __int64 _cdecl test_passing_longs(int count, ...) { va_list ap; int index; __int64 sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, __int64); } va_end(ap); return sum; } DLLEXPORT float _cdecl test_passing_floats(int count, ...) { va_list ap; int index; double sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, double); } va_end(ap); return (float)sum; } DLLEXPORT double _cdecl test_passing_doubles(int count, ...) { va_list ap; int index; double sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, double); } va_end(ap); return sum; } DLLEXPORT __int64 _cdecl test_passing_int_and_longs(int int_count, int long_count, ...) 
{ va_list ap; int index, count; __int64 sum; count = int_count + long_count; va_start(ap, long_count); sum = 0; for (index = 0; index < int_count; ++index) { sum += va_arg(ap, int); } for (index = 0; index < long_count; ++index) { sum += va_arg(ap, __int64); } va_end(ap); return sum; } DLLEXPORT double _cdecl test_passing_floats_and_doubles(int float_count, int double_count, ...) { va_list ap; int index, count; double sum; count = float_count + double_count; va_start(ap, double_count); sum = 0; for (index = 0; index < float_count; ++index) { // Read a double, C ABI defines reading a float as undefined, or // an error on unix. However, the managed side will correctly pass a // float. sum += va_arg(ap, double); } for (index = 0; index < double_count; ++index) { sum += va_arg(ap, double); } va_end(ap); return sum; } /* Args: expected_value (double) : expected sum int : first value double : second value int : third value double : fourth value int : fifth value double : sixth value */ DLLEXPORT double _cdecl test_passing_int_and_double(double expected_value, ...) { va_list ap; int index, count; double sum; count = 6; va_start(ap, expected_value); sum = 0; for (index = 0; index < 6; ++index) { if (index % 2 == 0) { sum += va_arg(ap, int); } else { sum += va_arg(ap, double); } } va_end(ap); return sum; } /* Args: expected_value (double) : expected sum __int64 : first value double : second value __int64 : third value double : fourth value __int64 : fifth value double : sixth value */ DLLEXPORT double _cdecl test_passing_long_and_double(double expected_value, ...) { va_list ap; int index, count; double sum; count = 6; va_start(ap, expected_value); sum = 0; for (index = 0; index < 6; ++index) { if (index % 2 == 0) { sum += va_arg(ap, __int64); } else { sum += va_arg(ap, double); } } va_end(ap); return sum; } /* Args: count (int) : count of args is_int_structs(int) : first value is_float_value(int) : second value is_mixed (int) : third value byte_count (int) : fourth value struct_count (int) : fifth value */ DLLEXPORT int _cdecl check_passing_struct(int count, ...) { va_list ap; int is_b, is_floating, is_mixed, byte_count, struct_count; int expected_value_i; __int64 expected_value_l; double expected_value_f; double expected_value_d; int passed = 0; va_start(ap, count); is_b = va_arg(ap, int); is_floating = va_arg(ap, int); is_mixed = va_arg(ap, int); byte_count = va_arg(ap, int); struct_count = va_arg(ap, int); if (!is_floating) { if (byte_count == 8) { // Eight byte structs. if (is_b) { // This is one_long_long_struct one_long_long_struct s; __int64 sum; expected_value_l = va_arg(ap, __int64); sum = 0; while (struct_count--) { s = va_arg(ap, one_long_long_struct); sum += s.one; } if (sum != expected_value_l) passed = 1; } else { // This is two_int_struct two_int_struct s; int sum; expected_value_i = va_arg(ap, int); sum = 0; while (struct_count--) { s = va_arg(ap, two_int_struct); sum += s.one + s.two; } if (sum != expected_value_i) passed = 1; } } else if (byte_count == 16) { // 16 byte structs. 
if (is_b) { // This is four_int_struct four_int_struct s; int sum; expected_value_i = va_arg(ap, int); sum = 0; while (struct_count--) { s = va_arg(ap, four_int_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_i) passed = 1; } else { // This is two_long_long_struct two_long_long_struct s; __int64 sum; expected_value_l = va_arg(ap, __int64); sum = 0; while (struct_count--) { s = va_arg(ap, two_long_long_struct); sum += s.one + s.two; } if (sum != expected_value_l) passed = 1; } } else if (byte_count == 32) { // This is sixteen_byte_struct four_long_long_struct s; __int64 sum; expected_value_l = va_arg(ap, __int64); sum = 0; while (struct_count--) { s = va_arg(ap, four_long_long_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_l) passed = 1; } } else { if (byte_count == 8) { // Eight byte structs. if (is_b) { // This is one_double_struct one_double_struct s; double sum; expected_value_d = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, one_double_struct); sum += s.one; } if (sum != expected_value_d) passed = 1; } else { // This is two_float_struct two_float_struct s; float sum; expected_value_f = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, two_float_struct); sum += s.one + s.two; } if (sum != expected_value_f) passed = 1; } } else if (byte_count == 16) { // 16 byte structs. if (is_b) { // This is four_float_struct four_float_struct s; float sum; expected_value_f = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, four_float_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_f) passed = 1; } else { // This is two_double_struct two_double_struct s; double sum; expected_value_d = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, two_double_struct); sum += s.one + s.two; } if (sum != expected_value_d) passed = 1; } } else if (byte_count == 32) { // This is four_double_struct four_double_struct s; double sum; expected_value_d = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, four_double_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_d) passed = 1; } } va_end(ap); return passed; } DLLEXPORT double _cdecl check_passing_four_three_double_struct(three_double_struct one, three_double_struct two, three_double_struct three, three_double_struct four, ...) { double sum; sum = 0; sum += one.one + one.two + one.three; sum += two.one + two.two + two.three; sum += three.one + three.two + three.three; sum += four.one + four.two + four.three; return sum; } /* Args: count (int) : count of args two_long_long_struct : first value two_long_long_struct : second value two_long_long_struct : third value two_long_long_struct : fourth value */ DLLEXPORT int _cdecl check_passing_four_sixteen_byte_structs(int count, ...) { va_list ap; int passed, index; two_long_long_struct s; __int64 expected_value, calculated_value; passed = 0; calculated_value = 0; va_start(ap, count); expected_value = va_arg(ap, __int64); for (index = 0; index < 4; ++index) { s = va_arg(ap, two_long_long_struct); calculated_value += s.one + s.two; } va_end(ap); passed = expected_value == calculated_value ? 0 : 1; return passed; } DLLEXPORT char _cdecl echo_byte(char arg, ...) { return arg; } DLLEXPORT char _cdecl echo_char(char arg, ...) { return arg; } DLLEXPORT __int16 _cdecl echo_short(__int16 arg, ...) { return arg; } DLLEXPORT __int32 _cdecl echo_int(__int32 arg, ...) 
{ return arg; } DLLEXPORT __int64 _cdecl echo_int64(__int64 arg, ...) { return arg; } DLLEXPORT float _cdecl echo_float(float arg, ...) { return arg; } DLLEXPORT double _cdecl echo_double(double arg, ...) { return arg; } DLLEXPORT one_int_struct _cdecl echo_one_int_struct(one_int_struct arg, ...) { return arg; } DLLEXPORT two_int_struct _cdecl echo_two_int_struct(two_int_struct arg, ...) { return arg; } DLLEXPORT one_long_long_struct _cdecl echo_one_long_struct(one_long_long_struct arg, ...) { return arg; } DLLEXPORT two_long_long_struct _cdecl echo_two_long_struct(two_long_long_struct arg, ...) { return arg; } DLLEXPORT four_long_long_struct _cdecl echo_four_long_struct(four_long_long_struct arg) { return arg; } DLLEXPORT four_long_long_struct _cdecl echo_four_long_struct_with_vararg(four_long_long_struct arg, ...) { return arg; } DLLEXPORT eight_byte_struct _cdecl echo_eight_byte_struct(eight_byte_struct arg, ...) { return arg; } DLLEXPORT four_int_struct _cdecl echo_four_int_struct(four_int_struct arg, ...) { return arg; } DLLEXPORT sixteen_byte_struct _cdecl echo_sixteen_byte_struct(sixteen_byte_struct arg, ...) { return arg; } DLLEXPORT one_float_struct _cdecl echo_one_float_struct(one_float_struct arg, ...) { return arg; } DLLEXPORT two_float_struct _cdecl echo_two_float_struct(two_float_struct arg, ...) { return arg; } DLLEXPORT one_double_struct _cdecl echo_one_double_struct(one_double_struct arg, ...) { return arg; } DLLEXPORT two_double_struct _cdecl echo_two_double_struct(two_double_struct arg, ...) { return arg; } DLLEXPORT three_double_struct _cdecl echo_three_double_struct(three_double_struct arg, ...) { return arg; } DLLEXPORT four_float_struct _cdecl echo_four_float_struct(four_float_struct arg, ...) { return arg; } DLLEXPORT four_double_struct _cdecl echo_four_double_struct(four_double_struct arg, ...) { return arg; } DLLEXPORT __int8 _cdecl short_in_byte_out(__int16 arg, ...) { return (__int8)arg; } DLLEXPORT __int16 _cdecl byte_in_short_out(__int8 arg, ...) { return (__int16)arg; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdarg.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #ifdef _MSC_VER #define DLLEXPORT __declspec(dllexport) #else #define DLLEXPORT __attribute__((visibility("default"))) #if __i386__ #define _cdecl __attribute__((cdecl)) #else #define _cdecl #endif #define __int32 int #define __int16 short int #define __int8 char // assumes char is signed #ifdef HOST_64BIT #define __int64 long #else // HOST_64BIT #define __int64 long long #endif // HOST_64BIT #endif // !_MSC_VER /* Structures */ /* * struct one_byte_struct (4 bytes) */ typedef struct { int one; } one_int_struct; /* * struct two_int_struct (8 bytes) */ typedef struct { int one; int two; } two_int_struct; /* * struct one_long_long_struct (8 bytes) */ typedef struct { __int64 one; } one_long_long_struct; /* * struct two_long_long_struct (16 bytes) */ typedef struct { __int64 one; __int64 two; } two_long_long_struct; /* * struct four_int_struct (16 bytes) */ typedef struct { int one; int two; int three; int four; } four_int_struct; /* * struct four_long_long_struct (32 bytes) */ typedef struct { __int64 one; __int64 two; __int64 three; __int64 four; } four_long_long_struct; /* * struct one_float_struct (4 bytes) */ typedef struct { float one; } one_float_struct; /* * struct two_float_struct (8 bytes) */ typedef struct { float one; float two; } two_float_struct; /* * struct one_double_struct (8 bytes) */ typedef struct { double one; } one_double_struct; /* * struct two_double_struct (16 bytes) */ typedef struct { double one; double two; } two_double_struct; /* * struct three_double_struct (24 bytes) */ typedef struct { double one; double two; double three; } three_double_struct; /* * struct four_float_struct (16 bytes) */ typedef struct { float one; float two; float three; float four; } four_float_struct; /* * struct four_double_struct (32 bytes) */ typedef struct { double one; double two; double three; double four; } four_double_struct; /* * struct eight_byte_struct (8 bytes) */ typedef struct { char one; char two; char three; char four; char five; char six; char seven; char eight; } eight_byte_struct; /* * struct sixteen_byte_struct (8 bytes) */ typedef struct { char one; char two; char three; char four; char five; char six; char seven; char eight; char nine; char ten; char eleven; char twelve; char thirteen; char fourteen; char fifteen; char sixteen; } sixteen_byte_struct; /* Tests */ DLLEXPORT int _cdecl test_passing_ints(int count, ...) { va_list ap; int index, sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, int); } va_end(ap); return sum; } DLLEXPORT __int64 _cdecl test_passing_longs(int count, ...) { va_list ap; int index; __int64 sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, __int64); } va_end(ap); return sum; } DLLEXPORT float _cdecl test_passing_floats(int count, ...) { va_list ap; int index; double sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, double); } va_end(ap); return (float)sum; } DLLEXPORT double _cdecl test_passing_doubles(int count, ...) { va_list ap; int index; double sum; va_start(ap, count); sum = 0; for (index = 0; index < count; ++index) { sum += va_arg(ap, double); } va_end(ap); return sum; } DLLEXPORT __int64 _cdecl test_passing_int_and_longs(int int_count, int long_count, ...) 
{ va_list ap; int index, count; __int64 sum; count = int_count + long_count; va_start(ap, long_count); sum = 0; for (index = 0; index < int_count; ++index) { sum += va_arg(ap, int); } for (index = 0; index < long_count; ++index) { sum += va_arg(ap, __int64); } va_end(ap); return sum; } DLLEXPORT double _cdecl test_passing_floats_and_doubles(int float_count, int double_count, ...) { va_list ap; int index, count; double sum; count = float_count + double_count; va_start(ap, double_count); sum = 0; for (index = 0; index < float_count; ++index) { // Read a double, C ABI defines reading a float as undefined, or // an error on unix. However, the managed side will correctly pass a // float. sum += va_arg(ap, double); } for (index = 0; index < double_count; ++index) { sum += va_arg(ap, double); } va_end(ap); return sum; } /* Args: expected_value (double) : expected sum int : first value double : second value int : third value double : fourth value int : fifth value double : sixth value */ DLLEXPORT double _cdecl test_passing_int_and_double(double expected_value, ...) { va_list ap; int index, count; double sum; count = 6; va_start(ap, expected_value); sum = 0; for (index = 0; index < 6; ++index) { if (index % 2 == 0) { sum += va_arg(ap, int); } else { sum += va_arg(ap, double); } } va_end(ap); return sum; } /* Args: expected_value (double) : expected sum __int64 : first value double : second value __int64 : third value double : fourth value __int64 : fifth value double : sixth value */ DLLEXPORT double _cdecl test_passing_long_and_double(double expected_value, ...) { va_list ap; int index, count; double sum; count = 6; va_start(ap, expected_value); sum = 0; for (index = 0; index < 6; ++index) { if (index % 2 == 0) { sum += va_arg(ap, __int64); } else { sum += va_arg(ap, double); } } va_end(ap); return sum; } /* Args: count (int) : count of args is_int_structs(int) : first value is_float_value(int) : second value is_mixed (int) : third value byte_count (int) : fourth value struct_count (int) : fifth value */ DLLEXPORT int _cdecl check_passing_struct(int count, ...) { va_list ap; int is_b, is_floating, is_mixed, byte_count, struct_count; int expected_value_i; __int64 expected_value_l; double expected_value_f; double expected_value_d; int passed = 0; va_start(ap, count); is_b = va_arg(ap, int); is_floating = va_arg(ap, int); is_mixed = va_arg(ap, int); byte_count = va_arg(ap, int); struct_count = va_arg(ap, int); if (!is_floating) { if (byte_count == 8) { // Eight byte structs. if (is_b) { // This is one_long_long_struct one_long_long_struct s; __int64 sum; expected_value_l = va_arg(ap, __int64); sum = 0; while (struct_count--) { s = va_arg(ap, one_long_long_struct); sum += s.one; } if (sum != expected_value_l) passed = 1; } else { // This is two_int_struct two_int_struct s; int sum; expected_value_i = va_arg(ap, int); sum = 0; while (struct_count--) { s = va_arg(ap, two_int_struct); sum += s.one + s.two; } if (sum != expected_value_i) passed = 1; } } else if (byte_count == 16) { // 16 byte structs. 
if (is_b) { // This is four_int_struct four_int_struct s; int sum; expected_value_i = va_arg(ap, int); sum = 0; while (struct_count--) { s = va_arg(ap, four_int_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_i) passed = 1; } else { // This is two_long_long_struct two_long_long_struct s; __int64 sum; expected_value_l = va_arg(ap, __int64); sum = 0; while (struct_count--) { s = va_arg(ap, two_long_long_struct); sum += s.one + s.two; } if (sum != expected_value_l) passed = 1; } } else if (byte_count == 32) { // This is sixteen_byte_struct four_long_long_struct s; __int64 sum; expected_value_l = va_arg(ap, __int64); sum = 0; while (struct_count--) { s = va_arg(ap, four_long_long_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_l) passed = 1; } } else { if (byte_count == 8) { // Eight byte structs. if (is_b) { // This is one_double_struct one_double_struct s; double sum; expected_value_d = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, one_double_struct); sum += s.one; } if (sum != expected_value_d) passed = 1; } else { // This is two_float_struct two_float_struct s; float sum; expected_value_f = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, two_float_struct); sum += s.one + s.two; } if (sum != expected_value_f) passed = 1; } } else if (byte_count == 16) { // 16 byte structs. if (is_b) { // This is four_float_struct four_float_struct s; float sum; expected_value_f = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, four_float_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_f) passed = 1; } else { // This is two_double_struct two_double_struct s; double sum; expected_value_d = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, two_double_struct); sum += s.one + s.two; } if (sum != expected_value_d) passed = 1; } } else if (byte_count == 32) { // This is four_double_struct four_double_struct s; double sum; expected_value_d = va_arg(ap, double); sum = 0; while (struct_count--) { s = va_arg(ap, four_double_struct); sum += s.one + s.two + s.three + s.four; } if (sum != expected_value_d) passed = 1; } } va_end(ap); return passed; } DLLEXPORT double _cdecl check_passing_four_three_double_struct(three_double_struct one, three_double_struct two, three_double_struct three, three_double_struct four, ...) { double sum; sum = 0; sum += one.one + one.two + one.three; sum += two.one + two.two + two.three; sum += three.one + three.two + three.three; sum += four.one + four.two + four.three; return sum; } /* Args: count (int) : count of args two_long_long_struct : first value two_long_long_struct : second value two_long_long_struct : third value two_long_long_struct : fourth value */ DLLEXPORT int _cdecl check_passing_four_sixteen_byte_structs(int count, ...) { va_list ap; int passed, index; two_long_long_struct s; __int64 expected_value, calculated_value; passed = 0; calculated_value = 0; va_start(ap, count); expected_value = va_arg(ap, __int64); for (index = 0; index < 4; ++index) { s = va_arg(ap, two_long_long_struct); calculated_value += s.one + s.two; } va_end(ap); passed = expected_value == calculated_value ? 0 : 1; return passed; } DLLEXPORT char _cdecl echo_byte(char arg, ...) { return arg; } DLLEXPORT char _cdecl echo_char(char arg, ...) { return arg; } DLLEXPORT __int16 _cdecl echo_short(__int16 arg, ...) { return arg; } DLLEXPORT __int32 _cdecl echo_int(__int32 arg, ...) 
{ return arg; } DLLEXPORT __int64 _cdecl echo_int64(__int64 arg, ...) { return arg; } DLLEXPORT float _cdecl echo_float(float arg, ...) { return arg; } DLLEXPORT double _cdecl echo_double(double arg, ...) { return arg; } DLLEXPORT one_int_struct _cdecl echo_one_int_struct(one_int_struct arg, ...) { return arg; } DLLEXPORT two_int_struct _cdecl echo_two_int_struct(two_int_struct arg, ...) { return arg; } DLLEXPORT one_long_long_struct _cdecl echo_one_long_struct(one_long_long_struct arg, ...) { return arg; } DLLEXPORT two_long_long_struct _cdecl echo_two_long_struct(two_long_long_struct arg, ...) { return arg; } DLLEXPORT four_long_long_struct _cdecl echo_four_long_struct(four_long_long_struct arg) { return arg; } DLLEXPORT four_long_long_struct _cdecl echo_four_long_struct_with_vararg(four_long_long_struct arg, ...) { return arg; } DLLEXPORT eight_byte_struct _cdecl echo_eight_byte_struct(eight_byte_struct arg, ...) { return arg; } DLLEXPORT four_int_struct _cdecl echo_four_int_struct(four_int_struct arg, ...) { return arg; } DLLEXPORT sixteen_byte_struct _cdecl echo_sixteen_byte_struct(sixteen_byte_struct arg, ...) { return arg; } DLLEXPORT one_float_struct _cdecl echo_one_float_struct(one_float_struct arg, ...) { return arg; } DLLEXPORT two_float_struct _cdecl echo_two_float_struct(two_float_struct arg, ...) { return arg; } DLLEXPORT one_double_struct _cdecl echo_one_double_struct(one_double_struct arg, ...) { return arg; } DLLEXPORT two_double_struct _cdecl echo_two_double_struct(two_double_struct arg, ...) { return arg; } DLLEXPORT three_double_struct _cdecl echo_three_double_struct(three_double_struct arg, ...) { return arg; } DLLEXPORT four_float_struct _cdecl echo_four_float_struct(four_float_struct arg, ...) { return arg; } DLLEXPORT four_double_struct _cdecl echo_four_double_struct(four_double_struct arg, ...) { return arg; } DLLEXPORT __int8 _cdecl short_in_byte_out(__int16 arg, ...) { return (__int8)arg; } DLLEXPORT __int16 _cdecl byte_in_short_out(__int8 arg, ...) { return (__int16)arg; }
-1
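The comment inside test_passing_floats_and_doubles in the record above leans on C's default argument promotions: a float passed through "..." is promoted to double, so the callee must read it with va_arg(ap, double). A minimal standalone sketch of that rule (not part of the test library):

#include <stdarg.h>
#include <stdio.h>

static double sum_floats(int count, ...)
{
    va_list ap;
    double sum = 0.0;
    va_start(ap, count);
    for (int i = 0; i < count; ++i)
        sum += va_arg(ap, double); /* va_arg(ap, float) would be undefined */
    va_end(ap);
    return sum;
}

int main(void)
{
    /* 1.5f and 2.5f are promoted to double at the call site. */
    printf("%f\n", sum_floats(2, 1.5f, 2.5f)); /* prints 4.000000 */
    return 0;
}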
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/tests/Gperf-simple.c
/* libunwind - a platform-independent unwind library Copyright (C) 2003-2004 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <string.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <libunwind.h> #include "compiler.h" #include <sys/resource.h> #include <sys/time.h> #define panic(args...) \ do { fprintf (stderr, args); exit (-1); } while (0) long dummy; static long iterations = 10000; static int maxlevel = 100; #define KB 1024 #define MB (1024*1024) static char big[64*MB]; /* should be >> max. cache size */ static inline double gettime (void) { struct timeval tv; gettimeofday (&tv, NULL); return tv.tv_sec + 1e-6*tv.tv_usec; } static int NOINLINE measure_unwind (int maxlevel, double *step) { double stop, start; unw_cursor_t cursor; unw_context_t uc; int ret, level = 0; unw_getcontext (&uc); if (unw_init_local (&cursor, &uc) < 0) panic ("unw_init_local() failed\n"); start = gettime (); do { ret = unw_step (&cursor); if (ret < 0) panic ("unw_step() failed\n"); ++level; } while (ret > 0); stop = gettime (); if (level <= maxlevel) panic ("Unwound only %d levels, expected at least %d levels\n", level, maxlevel); *step = (stop - start) / (double) level; return 0; } static int f1 (int, int, double *); static int NOINLINE g1 (int level, int maxlevel, double *step) { if (level == maxlevel) return measure_unwind (maxlevel, step); else /* defeat last-call/sibcall optimization */ return f1 (level + 1, maxlevel, step) + level; } static int NOINLINE f1 (int level, int maxlevel, double *step) { if (level == maxlevel) return measure_unwind (maxlevel, step); else /* defeat last-call/sibcall optimization */ return g1 (level + 1, maxlevel, step) + level; } static void doit (const char *label) { double step, min_step, first_step, sum_step; int i; sum_step = first_step = 0.0; min_step = 1e99; for (i = 0; i < iterations; ++i) { f1 (0, maxlevel, &step); sum_step += step; if (step < min_step) min_step = step; if (i == 0) first_step = step; } printf ("%s: unw_step : 1st=%9.3f min=%9.3f avg=%9.3f nsec\n", label, 1e9*first_step, 1e9*min_step, 1e9*sum_step/iterations); } static long sum (void *buf, size_t size) { long s = 0; char *cp = buf; size_t i; for (i = 0; i < size; i += 8) s += cp[i]; return s; } static void measure_init (void) { # define N 100 # define M 10 /* must be at least 2 to get steady-state */ double stop, start, get_cold, get_warm, init_cold, init_warm, delta; struct { unw_cursor_t c; char padding[1024]; /* should be > 2 * max. 
cacheline size */ } cursor[N]; struct { unw_context_t uc; char padding[1024]; /* should be > 2 * max. cacheline size */ } uc[N]; int i, j; /* Run each test M times and take the minimum to filter out noise such dynamic linker resolving overhead, context-switches, page-in, cache, and TLB effects. */ get_cold = 1e99; for (j = 0; j < M; ++j) { dummy += sum (big, sizeof (big)); /* flush the cache */ for (i = 0; i < N; ++i) uc[i].padding[511] = i; /* warm up the TLB */ start = gettime (); for (i = 0; i < N; ++i) unw_getcontext (&uc[i].uc); stop = gettime (); delta = (stop - start) / N; if (delta < get_cold) get_cold = delta; } init_cold = 1e99; for (j = 0; j < M; ++j) { dummy += sum (big, sizeof (big)); /* flush cache */ for (i = 0; i < N; ++i) uc[i].padding[511] = i; /* warm up the TLB */ start = gettime (); for (i = 0; i < N; ++i) unw_init_local (&cursor[i].c, &uc[i].uc); stop = gettime (); delta = (stop - start) / N; if (delta < init_cold) init_cold = delta; } get_warm = 1e99; for (j = 0; j < M; ++j) { start = gettime (); for (i = 0; i < N; ++i) unw_getcontext (&uc[0].uc); stop = gettime (); delta = (stop - start) / N; if (delta < get_warm) get_warm = delta; } init_warm = 1e99; for (j = 0; j < M; ++j) { start = gettime (); for (i = 0; i < N; ++i) unw_init_local (&cursor[0].c, &uc[0].uc); stop = gettime (); delta = (stop - start) / N; if (delta < init_warm) init_warm = delta; } printf ("unw_getcontext : cold avg=%9.3f nsec, warm avg=%9.3f nsec\n", 1e9 * get_cold, 1e9 * get_warm); printf ("unw_init_local : cold avg=%9.3f nsec, warm avg=%9.3f nsec\n", 1e9 * init_cold, 1e9 * init_warm); } int main (int argc, char **argv) { struct rlimit rlim; rlim.rlim_cur = RLIM_INFINITY; rlim.rlim_max = RLIM_INFINITY; setrlimit (RLIMIT_STACK, &rlim); memset (big, 0xaa, sizeof (big)); if (argc > 1) { maxlevel = atol (argv[1]); if (argc > 2) iterations = atol (argv[2]); } measure_init (); doit ("default "); unw_set_caching_policy (unw_local_addr_space, UNW_CACHE_NONE); doit ("no cache "); unw_set_caching_policy (unw_local_addr_space, UNW_CACHE_GLOBAL); doit ("global cache "); unw_set_caching_policy (unw_local_addr_space, UNW_CACHE_PER_THREAD); doit ("per-thread cache"); return 0; }
/* libunwind - a platform-independent unwind library Copyright (C) 2003-2004 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <string.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <libunwind.h> #include "compiler.h" #include <sys/resource.h> #include <sys/time.h> #define panic(args...) \ do { fprintf (stderr, args); exit (-1); } while (0) long dummy; static long iterations = 10000; static int maxlevel = 100; #define KB 1024 #define MB (1024*1024) static char big[64*MB]; /* should be >> max. cache size */ static inline double gettime (void) { struct timeval tv; gettimeofday (&tv, NULL); return tv.tv_sec + 1e-6*tv.tv_usec; } static int NOINLINE measure_unwind (int maxlevel, double *step) { double stop, start; unw_cursor_t cursor; unw_context_t uc; int ret, level = 0; unw_getcontext (&uc); if (unw_init_local (&cursor, &uc) < 0) panic ("unw_init_local() failed\n"); start = gettime (); do { ret = unw_step (&cursor); if (ret < 0) panic ("unw_step() failed\n"); ++level; } while (ret > 0); stop = gettime (); if (level <= maxlevel) panic ("Unwound only %d levels, expected at least %d levels\n", level, maxlevel); *step = (stop - start) / (double) level; return 0; } static int f1 (int, int, double *); static int NOINLINE g1 (int level, int maxlevel, double *step) { if (level == maxlevel) return measure_unwind (maxlevel, step); else /* defeat last-call/sibcall optimization */ return f1 (level + 1, maxlevel, step) + level; } static int NOINLINE f1 (int level, int maxlevel, double *step) { if (level == maxlevel) return measure_unwind (maxlevel, step); else /* defeat last-call/sibcall optimization */ return g1 (level + 1, maxlevel, step) + level; } static void doit (const char *label) { double step, min_step, first_step, sum_step; int i; sum_step = first_step = 0.0; min_step = 1e99; for (i = 0; i < iterations; ++i) { f1 (0, maxlevel, &step); sum_step += step; if (step < min_step) min_step = step; if (i == 0) first_step = step; } printf ("%s: unw_step : 1st=%9.3f min=%9.3f avg=%9.3f nsec\n", label, 1e9*first_step, 1e9*min_step, 1e9*sum_step/iterations); } static long sum (void *buf, size_t size) { long s = 0; char *cp = buf; size_t i; for (i = 0; i < size; i += 8) s += cp[i]; return s; } static void measure_init (void) { # define N 100 # define M 10 /* must be at least 2 to get steady-state */ double stop, start, get_cold, get_warm, init_cold, init_warm, delta; struct { unw_cursor_t c; char padding[1024]; /* should be > 2 * max. 
cacheline size */ } cursor[N]; struct { unw_context_t uc; char padding[1024]; /* should be > 2 * max. cacheline size */ } uc[N]; int i, j; /* Run each test M times and take the minimum to filter out noise such dynamic linker resolving overhead, context-switches, page-in, cache, and TLB effects. */ get_cold = 1e99; for (j = 0; j < M; ++j) { dummy += sum (big, sizeof (big)); /* flush the cache */ for (i = 0; i < N; ++i) uc[i].padding[511] = i; /* warm up the TLB */ start = gettime (); for (i = 0; i < N; ++i) unw_getcontext (&uc[i].uc); stop = gettime (); delta = (stop - start) / N; if (delta < get_cold) get_cold = delta; } init_cold = 1e99; for (j = 0; j < M; ++j) { dummy += sum (big, sizeof (big)); /* flush cache */ for (i = 0; i < N; ++i) uc[i].padding[511] = i; /* warm up the TLB */ start = gettime (); for (i = 0; i < N; ++i) unw_init_local (&cursor[i].c, &uc[i].uc); stop = gettime (); delta = (stop - start) / N; if (delta < init_cold) init_cold = delta; } get_warm = 1e99; for (j = 0; j < M; ++j) { start = gettime (); for (i = 0; i < N; ++i) unw_getcontext (&uc[0].uc); stop = gettime (); delta = (stop - start) / N; if (delta < get_warm) get_warm = delta; } init_warm = 1e99; for (j = 0; j < M; ++j) { start = gettime (); for (i = 0; i < N; ++i) unw_init_local (&cursor[0].c, &uc[0].uc); stop = gettime (); delta = (stop - start) / N; if (delta < init_warm) init_warm = delta; } printf ("unw_getcontext : cold avg=%9.3f nsec, warm avg=%9.3f nsec\n", 1e9 * get_cold, 1e9 * get_warm); printf ("unw_init_local : cold avg=%9.3f nsec, warm avg=%9.3f nsec\n", 1e9 * init_cold, 1e9 * init_warm); } int main (int argc, char **argv) { struct rlimit rlim; rlim.rlim_cur = RLIM_INFINITY; rlim.rlim_max = RLIM_INFINITY; setrlimit (RLIMIT_STACK, &rlim); memset (big, 0xaa, sizeof (big)); if (argc > 1) { maxlevel = atol (argv[1]); if (argc > 2) iterations = atol (argv[2]); } measure_init (); doit ("default "); unw_set_caching_policy (unw_local_addr_space, UNW_CACHE_NONE); doit ("no cache "); unw_set_caching_policy (unw_local_addr_space, UNW_CACHE_GLOBAL); doit ("global cache "); unw_set_caching_policy (unw_local_addr_space, UNW_CACHE_PER_THREAD); doit ("per-thread cache"); return 0; }
-1
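The measure_init routine in Gperf-simple.c above times each operation M times and keeps the minimum to filter out one-off noise (lazy symbol resolution, context switches, paging, cold caches and TLB). A standalone sketch of that min-of-M idiom, using the same gettimeofday-based clock; workload() is a hypothetical stand-in for the code under test:

#include <stdio.h>
#include <sys/time.h>

static double gettime(void)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + 1e-6 * tv.tv_usec;
}

static void workload(void) /* hypothetical stand-in for the measured code */
{
    volatile long s = 0;
    for (long i = 0; i < 1000000; ++i)
        s += i;
}

int main(void)
{
    double best = 1e99;
    for (int j = 0; j < 10; ++j) { /* M = 10 runs; keep the minimum */
        double start = gettime();
        workload();
        double delta = gettime() - start;
        if (delta < best)
            best = delta;
    }
    printf("best run: %.3f usec\n", 1e6 * best);
    return 0;
}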
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/native/eventpipe/ep-buffer.c
#include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #if !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES) #define EP_IMPL_BUFFER_GETTER_SETTER #include "ep.h" #include "ep-buffer.h" #include "ep-event.h" #include "ep-event-instance.h" #include "ep-event-payload.h" #include "ep-session.h" /* * EventPipeBuffer. */ EventPipeBuffer * ep_buffer_alloc ( uint32_t buffer_size, EventPipeThread *writer_thread, uint32_t event_sequence_number) { EventPipeBuffer *instance = ep_rt_object_alloc (EventPipeBuffer); ep_raise_error_if_nok (instance != NULL); instance->writer_thread = writer_thread; instance->event_sequence_number = event_sequence_number; instance->buffer = ep_rt_valloc0 (buffer_size); ep_raise_error_if_nok (instance->buffer); instance->limit = instance->buffer + buffer_size; instance->current = ep_buffer_get_next_aligned_address (instance, instance->buffer); instance->creation_timestamp = ep_perf_timestamp_get (); EP_ASSERT (instance->creation_timestamp > 0); instance->current_read_event = NULL; instance->prev_buffer = NULL; instance->next_buffer = NULL; ep_rt_volatile_store_uint32_t (&instance->state, (uint32_t)EP_BUFFER_STATE_WRITABLE); ep_on_exit: return instance; ep_on_error: ep_buffer_free (instance); instance = NULL; ep_exit_error_handler (); } void ep_buffer_free (EventPipeBuffer *buffer) { ep_return_void_if_nok (buffer != NULL); // We should never be deleting a buffer that a writer thread might still try to write to EP_ASSERT (ep_rt_volatile_load_uint32_t (&buffer->state) == (uint32_t)EP_BUFFER_STATE_READ_ONLY); ep_rt_vfree (buffer->buffer, buffer->limit - buffer->buffer); ep_rt_object_free (buffer); } bool ep_buffer_write_event ( EventPipeBuffer *buffer, ep_rt_thread_handle_t thread, EventPipeSession *session, EventPipeEvent *ep_event, EventPipeEventPayload *payload, const uint8_t *activity_id, const uint8_t *related_activity_id, EventPipeStackContents *stack) { EP_ASSERT (buffer != NULL); EP_ASSERT (payload != NULL); EP_ASSERT (((size_t)buffer->current % EP_BUFFER_ALIGNMENT_SIZE) == 0); // We should never try to write to a buffer that isn't expecting to be written to. EP_ASSERT ((EventPipeBufferState)buffer->state == EP_BUFFER_STATE_WRITABLE); bool success = true; // Calculate the size of the event. uint32_t event_size = sizeof (EventPipeEventInstance) + ep_event_payload_get_size (payload); // Make sure we have enough space to write the event. if(buffer->current + event_size > buffer->limit) ep_raise_error (); // Calculate the location of the data payload. uint8_t *data_dest; data_dest = (ep_event_payload_get_size (payload) == 0 ? NULL : buffer->current + sizeof(EventPipeEventInstance)); EventPipeStackContents stack_contents; EventPipeStackContents *current_stack_contents; current_stack_contents = ep_stack_contents_init (&stack_contents); if (stack == NULL && ep_event_get_need_stack (ep_event) && !ep_session_get_rundown_enabled (session)) { ep_walk_managed_stack_for_current_thread (current_stack_contents); stack = current_stack_contents; } uint32_t proc_number; proc_number = ep_rt_current_processor_get_number (); EventPipeEventInstance *instance; instance = ep_event_instance_init ( (EventPipeEventInstance *)buffer->current, ep_event, proc_number, ep_rt_thread_id_t_to_uint64_t((thread == NULL) ? ep_rt_current_thread_get_id () : ep_rt_thread_get_id (thread)), data_dest, ep_event_payload_get_size (payload), (thread == NULL) ? 
NULL : activity_id, related_activity_id); ep_raise_error_if_nok (instance != NULL); // Copy the stack if a separate stack trace was provided. if (stack != NULL) ep_stack_contents_copyto (stack, ep_event_instance_get_stack_contents_ref (instance)); // Write the event payload data to the buffer. if (ep_event_payload_get_size (payload) > 0) ep_event_payload_copy_data (payload, data_dest); EP_ASSERT (success); // Advance the current pointer past the event. buffer->current = ep_buffer_get_next_aligned_address (buffer, buffer->current + event_size); ep_on_exit: return success; ep_on_error: success = false; ep_exit_error_handler (); } void ep_buffer_move_next_read_event (EventPipeBuffer *buffer) { EP_ASSERT (buffer != NULL); EP_ASSERT (ep_rt_volatile_load_uint32_t (&buffer->state) == (uint32_t)EP_BUFFER_STATE_READ_ONLY); // If current_read_event is NULL we've reached the end of the events if (buffer->current_read_event != NULL) { // Confirm that current_read_event is within the used range of the buffer. if (((uint8_t *)buffer->current_read_event < buffer->buffer) || ((uint8_t *)buffer->current_read_event >= buffer->current)) { EP_ASSERT (!"Input pointer is out of range."); buffer->current_read_event = NULL; } else { if (ep_event_instance_get_data (buffer->current_read_event)) // We have a pointer within the bounds of the buffer. // Find the next event by skipping the current event with it's data payload immediately after the instance. buffer->current_read_event = (EventPipeEventInstance *)ep_buffer_get_next_aligned_address (buffer, (uint8_t *)(ep_event_instance_get_data (buffer->current_read_event) + ep_event_instance_get_data_len (buffer->current_read_event))); else // In case we do not have a payload, the next instance is right after the current instance buffer->current_read_event = (EventPipeEventInstance *)ep_buffer_get_next_aligned_address (buffer, (uint8_t *)(buffer->current_read_event + 1)); // this may roll over and that is fine buffer->event_sequence_number++; // Check to see if we've reached the end of the written portion of the buffer. if ((uint8_t *)buffer->current_read_event >= buffer->current) buffer->current_read_event = NULL; } } // Ensure that the timestamp is valid. The buffer is zero'd before use, so a zero timestamp is invalid. #ifdef EP_CHECKED_BUILD if (buffer->current_read_event != NULL) { ep_timestamp_t next_timestamp = ep_event_instance_get_timestamp (buffer->current_read_event); EP_ASSERT (next_timestamp != 0); } #endif } EventPipeEventInstance * ep_buffer_get_current_read_event (const EventPipeBuffer *buffer) { EP_ASSERT (buffer != NULL); EP_ASSERT (ep_rt_volatile_load_uint32_t (&buffer->state) == (uint32_t)EP_BUFFER_STATE_READ_ONLY); return buffer->current_read_event; } uint32_t ep_buffer_get_current_sequence_number (const EventPipeBuffer *buffer) { EP_ASSERT (buffer != NULL); EP_ASSERT (ep_rt_volatile_load_uint32_t (&buffer->state) == (uint32_t)EP_BUFFER_STATE_READ_ONLY); return buffer->event_sequence_number; } EventPipeBufferState ep_buffer_get_volatile_state (const EventPipeBuffer *buffer) { EP_ASSERT (buffer != NULL); return (EventPipeBufferState)ep_rt_volatile_load_uint32_t (&buffer->state); } void ep_buffer_convert_to_read_only (EventPipeBuffer *buffer) { EP_ASSERT (buffer != NULL); EP_ASSERT (buffer->current_read_event == NULL); ep_thread_requires_lock_held (buffer->writer_thread); ep_rt_volatile_store_uint32_t (&buffer->state, (uint32_t)EP_BUFFER_STATE_READ_ONLY); // If this buffer contains an event, select it. 
uint8_t *first_aligned_instance = ep_buffer_get_next_aligned_address (buffer, buffer->buffer); if (buffer->current > first_aligned_instance) buffer->current_read_event = (EventPipeEventInstance*)first_aligned_instance; else buffer->current_read_event = NULL; } #ifdef EP_CHECKED_BUILD bool ep_buffer_ensure_consistency (const EventPipeBuffer *buffer) { EP_ASSERT (buffer != NULL); uint8_t *ptr = ep_buffer_get_next_aligned_address (buffer, buffer->buffer); // Check to see if the buffer is empty. if (ptr == buffer->current) // Make sure that the buffer size is greater than zero. EP_ASSERT (buffer->buffer != buffer->limit); // Validate the contents of the filled portion of the buffer. while (ptr < buffer->current) { // Validate the event. EventPipeEventInstance *instance = (EventPipeEventInstance *)ptr; EP_ASSERT (ep_event_instance_ensure_consistency (instance)); // Validate that payload and length match. EP_ASSERT ( (ep_event_instance_get_data (instance) != NULL && ep_event_instance_get_data_len (instance) > 0) || (ep_event_instance_get_data (instance) == NULL && ep_event_instance_get_data_len (instance) == 0) ); // Skip the event. ptr = ep_buffer_get_next_aligned_address (buffer, ptr + sizeof (EventPipeEventInstance) + ep_event_instance_get_data_len (instance)); } // When we're done walking the filled portion of the buffer, // ptr should be the same as m_pCurrent. EP_ASSERT (ptr == buffer->current); // Walk the rest of the buffer, making sure it is properly zeroed. while (ptr < buffer->limit) { EP_ASSERT (*ptr == 0); ptr++; } return true; } #endif #endif /* !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES) */ #endif /* ENABLE_PERFTRACING */ #ifndef EP_INCLUDE_SOURCE_FILES extern const char quiet_linker_empty_file_warning_eventpipe_buffer; const char quiet_linker_empty_file_warning_eventpipe_buffer = 0; #endif
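For orientation, here is a minimal lifecycle sketch (ours, not part of ep-buffer.c) showing how the functions above compose into a buffer's write/read lifecycle. Creation of the writer thread, session, event, and payload objects is assumed to happen elsewhere in the runtime and is elided here.

```c
// Hypothetical lifecycle sketch; uses only functions defined in this file.
void
sketch_buffer_lifecycle (
	EventPipeThread *writer_thread,
	EventPipeSession *session,
	EventPipeEvent *ep_event,
	EventPipeEventPayload *payload)
{
	// Writer side: allocate a buffer and append events to it.
	EventPipeBuffer *buffer = ep_buffer_alloc (64 * 1024, writer_thread, 0);
	if (buffer == NULL)
		return;

	// Returns false once an event no longer fits in the remaining space.
	bool ok = ep_buffer_write_event (buffer, NULL, session, ep_event, payload, NULL, NULL, NULL);
	(void)ok;

	// Hand-off to the reader; per the assert above this requires the
	// writer thread's lock to be held.
	ep_buffer_convert_to_read_only (buffer);

	// Reader side: iterate the event instances in write order.
	EventPipeEventInstance *instance = ep_buffer_get_current_read_event (buffer);
	while (instance != NULL) {
		// ... consume the instance ...
		ep_buffer_move_next_read_event (buffer);
		instance = ep_buffer_get_current_read_event (buffer);
	}

	// Freeing is only legal once the buffer is read-only.
	ep_buffer_free (buffer);
}
```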
#include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #if !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES) #define EP_IMPL_BUFFER_GETTER_SETTER #include "ep.h" #include "ep-buffer.h" #include "ep-event.h" #include "ep-event-instance.h" #include "ep-event-payload.h" #include "ep-session.h" /* * EventPipeBuffer. */ EventPipeBuffer * ep_buffer_alloc ( uint32_t buffer_size, EventPipeThread *writer_thread, uint32_t event_sequence_number) { EventPipeBuffer *instance = ep_rt_object_alloc (EventPipeBuffer); ep_raise_error_if_nok (instance != NULL); instance->writer_thread = writer_thread; instance->event_sequence_number = event_sequence_number; instance->buffer = ep_rt_valloc0 (buffer_size); ep_raise_error_if_nok (instance->buffer); instance->limit = instance->buffer + buffer_size; instance->current = ep_buffer_get_next_aligned_address (instance, instance->buffer); instance->creation_timestamp = ep_perf_timestamp_get (); EP_ASSERT (instance->creation_timestamp > 0); instance->current_read_event = NULL; instance->prev_buffer = NULL; instance->next_buffer = NULL; ep_rt_volatile_store_uint32_t (&instance->state, (uint32_t)EP_BUFFER_STATE_WRITABLE); ep_on_exit: return instance; ep_on_error: ep_buffer_free (instance); instance = NULL; ep_exit_error_handler (); } void ep_buffer_free (EventPipeBuffer *buffer) { ep_return_void_if_nok (buffer != NULL); // We should never be deleting a buffer that a writer thread might still try to write to EP_ASSERT (ep_rt_volatile_load_uint32_t (&buffer->state) == (uint32_t)EP_BUFFER_STATE_READ_ONLY); ep_rt_vfree (buffer->buffer, buffer->limit - buffer->buffer); ep_rt_object_free (buffer); } bool ep_buffer_write_event ( EventPipeBuffer *buffer, ep_rt_thread_handle_t thread, EventPipeSession *session, EventPipeEvent *ep_event, EventPipeEventPayload *payload, const uint8_t *activity_id, const uint8_t *related_activity_id, EventPipeStackContents *stack) { EP_ASSERT (buffer != NULL); EP_ASSERT (payload != NULL); EP_ASSERT (((size_t)buffer->current % EP_BUFFER_ALIGNMENT_SIZE) == 0); // We should never try to write to a buffer that isn't expecting to be written to. EP_ASSERT ((EventPipeBufferState)buffer->state == EP_BUFFER_STATE_WRITABLE); bool success = true; // Calculate the size of the event. uint32_t event_size = sizeof (EventPipeEventInstance) + ep_event_payload_get_size (payload); // Make sure we have enough space to write the event. if(buffer->current + event_size > buffer->limit) ep_raise_error (); // Calculate the location of the data payload. uint8_t *data_dest; data_dest = (ep_event_payload_get_size (payload) == 0 ? NULL : buffer->current + sizeof(EventPipeEventInstance)); EventPipeStackContents stack_contents; EventPipeStackContents *current_stack_contents; current_stack_contents = ep_stack_contents_init (&stack_contents); if (stack == NULL && ep_event_get_need_stack (ep_event) && !ep_session_get_rundown_enabled (session)) { ep_walk_managed_stack_for_current_thread (current_stack_contents); stack = current_stack_contents; } uint32_t proc_number; proc_number = ep_rt_current_processor_get_number (); EventPipeEventInstance *instance; instance = ep_event_instance_init ( (EventPipeEventInstance *)buffer->current, ep_event, proc_number, ep_rt_thread_id_t_to_uint64_t((thread == NULL) ? ep_rt_current_thread_get_id () : ep_rt_thread_get_id (thread)), data_dest, ep_event_payload_get_size (payload), (thread == NULL) ? 
NULL : activity_id, related_activity_id); ep_raise_error_if_nok (instance != NULL); // Copy the stack if a separate stack trace was provided. if (stack != NULL) ep_stack_contents_copyto (stack, ep_event_instance_get_stack_contents_ref (instance)); // Write the event payload data to the buffer. if (ep_event_payload_get_size (payload) > 0) ep_event_payload_copy_data (payload, data_dest); EP_ASSERT (success); // Advance the current pointer past the event. buffer->current = ep_buffer_get_next_aligned_address (buffer, buffer->current + event_size); ep_on_exit: return success; ep_on_error: success = false; ep_exit_error_handler (); } void ep_buffer_move_next_read_event (EventPipeBuffer *buffer) { EP_ASSERT (buffer != NULL); EP_ASSERT (ep_rt_volatile_load_uint32_t (&buffer->state) == (uint32_t)EP_BUFFER_STATE_READ_ONLY); // If current_read_event is NULL we've reached the end of the events if (buffer->current_read_event != NULL) { // Confirm that current_read_event is within the used range of the buffer. if (((uint8_t *)buffer->current_read_event < buffer->buffer) || ((uint8_t *)buffer->current_read_event >= buffer->current)) { EP_ASSERT (!"Input pointer is out of range."); buffer->current_read_event = NULL; } else { if (ep_event_instance_get_data (buffer->current_read_event)) // We have a pointer within the bounds of the buffer. // Find the next event by skipping the current event with it's data payload immediately after the instance. buffer->current_read_event = (EventPipeEventInstance *)ep_buffer_get_next_aligned_address (buffer, (uint8_t *)(ep_event_instance_get_data (buffer->current_read_event) + ep_event_instance_get_data_len (buffer->current_read_event))); else // In case we do not have a payload, the next instance is right after the current instance buffer->current_read_event = (EventPipeEventInstance *)ep_buffer_get_next_aligned_address (buffer, (uint8_t *)(buffer->current_read_event + 1)); // this may roll over and that is fine buffer->event_sequence_number++; // Check to see if we've reached the end of the written portion of the buffer. if ((uint8_t *)buffer->current_read_event >= buffer->current) buffer->current_read_event = NULL; } } // Ensure that the timestamp is valid. The buffer is zero'd before use, so a zero timestamp is invalid. #ifdef EP_CHECKED_BUILD if (buffer->current_read_event != NULL) { ep_timestamp_t next_timestamp = ep_event_instance_get_timestamp (buffer->current_read_event); EP_ASSERT (next_timestamp != 0); } #endif } EventPipeEventInstance * ep_buffer_get_current_read_event (const EventPipeBuffer *buffer) { EP_ASSERT (buffer != NULL); EP_ASSERT (ep_rt_volatile_load_uint32_t (&buffer->state) == (uint32_t)EP_BUFFER_STATE_READ_ONLY); return buffer->current_read_event; } uint32_t ep_buffer_get_current_sequence_number (const EventPipeBuffer *buffer) { EP_ASSERT (buffer != NULL); EP_ASSERT (ep_rt_volatile_load_uint32_t (&buffer->state) == (uint32_t)EP_BUFFER_STATE_READ_ONLY); return buffer->event_sequence_number; } EventPipeBufferState ep_buffer_get_volatile_state (const EventPipeBuffer *buffer) { EP_ASSERT (buffer != NULL); return (EventPipeBufferState)ep_rt_volatile_load_uint32_t (&buffer->state); } void ep_buffer_convert_to_read_only (EventPipeBuffer *buffer) { EP_ASSERT (buffer != NULL); EP_ASSERT (buffer->current_read_event == NULL); ep_thread_requires_lock_held (buffer->writer_thread); ep_rt_volatile_store_uint32_t (&buffer->state, (uint32_t)EP_BUFFER_STATE_READ_ONLY); // If this buffer contains an event, select it. 
uint8_t *first_aligned_instance = ep_buffer_get_next_aligned_address (buffer, buffer->buffer); if (buffer->current > first_aligned_instance) buffer->current_read_event = (EventPipeEventInstance*)first_aligned_instance; else buffer->current_read_event = NULL; } #ifdef EP_CHECKED_BUILD bool ep_buffer_ensure_consistency (const EventPipeBuffer *buffer) { EP_ASSERT (buffer != NULL); uint8_t *ptr = ep_buffer_get_next_aligned_address (buffer, buffer->buffer); // Check to see if the buffer is empty. if (ptr == buffer->current) // Make sure that the buffer size is greater than zero. EP_ASSERT (buffer->buffer != buffer->limit); // Validate the contents of the filled portion of the buffer. while (ptr < buffer->current) { // Validate the event. EventPipeEventInstance *instance = (EventPipeEventInstance *)ptr; EP_ASSERT (ep_event_instance_ensure_consistency (instance)); // Validate that payload and length match. EP_ASSERT ( (ep_event_instance_get_data (instance) != NULL && ep_event_instance_get_data_len (instance) > 0) || (ep_event_instance_get_data (instance) == NULL && ep_event_instance_get_data_len (instance) == 0) ); // Skip the event. ptr = ep_buffer_get_next_aligned_address (buffer, ptr + sizeof (EventPipeEventInstance) + ep_event_instance_get_data_len (instance)); } // When we're done walking the filled portion of the buffer, // ptr should be the same as m_pCurrent. EP_ASSERT (ptr == buffer->current); // Walk the rest of the buffer, making sure it is properly zeroed. while (ptr < buffer->limit) { EP_ASSERT (*ptr == 0); ptr++; } return true; } #endif #endif /* !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES) */ #endif /* ENABLE_PERFTRACING */ #ifndef EP_INCLUDE_SOURCE_FILES extern const char quiet_linker_empty_file_warning_eventpipe_buffer; const char quiet_linker_empty_file_warning_eventpipe_buffer = 0; #endif
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/dwarf/Lfind_proc_info-lsb.c
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY)
#include "Gfind_proc_info-lsb.c"
#endif
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, convert them to TO_I4/TO_I8 in the front end.
./docs/design/coreclr/jit/GuardedDevirtualization.md
# Guarded Devirtualization

## Overview

Guarded devirtualization is a proposed new optimization for the JIT in .NET Core 3.0. This document describes the motivation and initial design sketch, and highlights various issues needing further investigation.

## Motivation

The .NET Core JIT is able to do a limited amount of devirtualization for virtual and interface calls. This ability was added in .NET Core 2.0. To devirtualize, the JIT must be able to demonstrate one of two things: either that it knows the type of some reference exactly (say because it has seen a `newobj`) or that the declared type of the reference is a `final` class (aka `sealed`). For virtual calls the JIT can also devirtualize if it can prove the method is marked as `final`.

However, most of the time the JIT is unable to determine exactness or finalness and so devirtualization fails. Statistics show that currently only around 15% of virtual call sites can be devirtualized. Results are even more pessimistic for interface calls, where success rates are around 5%.

There are a variety of reasons for this. The JIT analysis is somewhat weak. Historically all the JIT cared about was whether some location held **a** reference type, not a specific reference type. So the current type propagation has been retrofitted and there are places where types just get lost. The JIT analysis happens quite early (during importation) and there is only minimal ability to do data flow analysis at this stage. So for current devirtualization the source of the type information and the consumption must be fairly close in the code. A more detailed accounting of some of the shortcomings can be found in [#7541](https://github.com/dotnet/runtime/issues/7541).

Resolution of these issues will improve the ability of the JIT to devirtualize, but even the best analysis possible will still miss out on many cases. Some call sites are truly polymorphic. Some others are truly monomorphic but proving this would require sophisticated interprocedural analyses that are not practical in the JIT or in a system as dynamic as the CLR. And some sites are monomorphic in practice but potentially polymorphic.

As an alternative, when devirtualization fails, the JIT can perform *guarded devirtualization*. Here the JIT creates an `if-then-else` block set in place of a virtual or interface call and inserts a runtime type test (or similar) into the `if` -- the "guard". If the guard test succeeds the JIT knows the type of the reference, so the `then` block can directly invoke the method corresponding to that type. If the test fails then the `else` block is executed and this contains the original virtual or interface call. The upshot is that the JIT conditionally gains the benefit of devirtualization at the expense of increased code size, longer JIT times, and slightly longer code paths around the call. So long as the JIT's guess at the type is somewhat reasonable, this optimization can improve performance.
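To make the shape of the transformation concrete, here is a source-level sketch (illustrative only: the JIT performs this rewrite on its own IR, not on C#, and `Base` and `Derived` are hypothetical types standing in for the JIT's guess):

```C#
// Original virtual call site, where 'b' has static type Base:
//     int r = b.F();
//
// Guarded expansion, guessing that 'b' is most likely a Derived:
int r;
if (b.GetType() == typeof(Derived))   // the "guard": a runtime type test
    r = ((Derived)b).F();             // "then": direct call, now inlineable
else
    r = b.F();                        // "else": the original virtual call
```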
## Opportunity

One might imagine that the JIT's guess about the type of the reference has to be pretty good for devirtualization to pay off. Somewhat surprisingly, at least based on our initial results, that is not the case.

### Virtual Calls: The Two-Class Case

Given these class declarations:

```C#
class B
{
    public virtual int F() { return 33; }
}

class D : B
{
    public override int F() { return 44; }
}
```

Suppose we have an array `B[]` that is randomly filled with instances of `B` and `D` and each element is class `B` with probability `p`.
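A minimal sketch of the kind of harness these measurements imply (our own illustration; `Fill` and `Measure` are hypothetical names, and the actual benchmark code is not shown in this document):

```C#
// Build the array: each element is a B with probability p, else a D.
static B[] Fill(int n, double p, System.Random rng)
{
    var items = new B[n];
    for (int i = 0; i < n; i++)
        items[i] = (rng.NextDouble() < p) ? new B() : new D();
    return items;
}

// Time the virtual calls; the JIT cannot statically devirtualize item.F().
static long Measure(B[] items)
{
    var sw = System.Diagnostics.Stopwatch.StartNew();
    int sum = 0;
    foreach (B item in items)
        sum += item.F();
    sw.Stop();
    return sw.ElapsedTicks;
}
```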
We time how long it takes to invoke `F` on each member of the array (note the JIT will not ever be able to devirtualize these calls), and plot the times as a function of `p`. The result is something like the following:

![two classes baseline perf](images/TwoClassesBaseline.JPG)

Modern hardware includes an indirect branch target predictor and we can see it in action here. When the array element type is predictable (`p` very close to zero or very close to 1) performance is better. When the element type is unpredictable (`p` near 0.5) performance is quite a bit worse.

From this we can see that a correctly predicted virtual call requires about 19 time units and worst case incorrect prediction around 55 time units. There is some timing overhead here too so the real costs are a bit lower.

Now imagine we update the JIT to do guarded devirtualization and check if the element is indeed type `B`. If so the JIT can call `B.F` directly and in our prototype the JIT will also inline the call. So we would expect that if the element types are mostly `B`s (that is if `p` is near 1.0) we'd see very good performance, and if the element type is mostly `D` (that is `p` near 0.0) performance should perhaps be slightly worse than the un-optimized case as there is now extra code to run the check before the call.

![two classes devirt perf](images/TwoClassesDevirt.JPG)

However as you can see the performance of the devirtualized case (blue line) is as good or better than the un-optimized case for all values of `p`. This is perhaps unexpected and deserves some explanation.

Recall that modern hardware also includes a branch predictor. For small or large values of `p` this predictor will correctly guess whether the test added by the JIT will resolve to the `then` or `else` case. For small values of `p` the JIT guess will be wrong and control will flow to the `else` block. But unlike the original example, the indirect call here will only see instances of type `D` and so the indirect branch predictor will work extremely well. So the overhead for the small `p` case is similar to the well-predicted indirect case without guarded devirtualization.

As `p` increases the branch predictor starts to mispredict and that costs some cycles. But when it mispredicts control reaches the `then` block which executes the inlined call. So the cost of misprediction is offset by the faster execution and the cost stays relatively flat.

As `p` passes 0.5 the branch predictor flips its prediction to prefer the `then` case. As before mispredicts are costly and send us down the `else` path but there we still execute a correctly predicted indirect call. And as `p` approaches 1.0 the cost falls as the branch predictor is almost always correct and so the cost is simply that of the inlined call.

So oddly enough the guarded devirtualization case shown here does not require any sort of perf tradeoff. The JIT is better off guessing the more likely case but even guessing the less likely case can pay off and doesn't hurt performance.

One might suspect at this point that the two class case is a special case and that the results do not hold up in more complex cases. More on that shortly.

Before moving on, we should point out that virtual calls in the current CLR are a bit more expensive than in C++, because the CLR uses a two-level method table. That is, the indirect call sequence is something like:

```asm
000095 mov      rax, qword ptr [rcx]              ; fetch method table
000098 mov      rax, qword ptr [rax+72]           ; fetch proper chunk
00009C call     qword ptr [rax+32]B:F():int:this  ; call indirect
```

This is a chain of 3 dependent loads and so best-case will require at least 3x the best cache latency (plus any indirect prediction overhead). So the virtual call costs for the CLR are high. The chunked method table design was adopted to save space (chunks can be shared by different classes) at the expense of some performance. And this apparently makes guarded devirtualization pay off over a wider range of class distributions than one might expect.

And for completeness, the full guarded `if-then-else` sequence measured above is:

```asm
00007A mov      rcx, gword ptr [rsi+8*rcx+16]     ; fetch array element
00007F mov      rax, 0x7FFC9CFB4A90               ; B's method table
000089 cmp      qword ptr [rcx], rax              ; method table test
00008C jne      SHORT G_M30756_IG06               ; jump if class is not B
00008E mov      eax, 33                           ; inlined B.F
000093 jmp      SHORT G_M30756_IG07
G_M30756_IG06:
000095 mov      rax, qword ptr [rcx]              ; fetch method table
000098 mov      rax, qword ptr [rax+72]           ; fetch proper chunk
00009C call     qword ptr [rax+32]B:F():int:this  ; call indirect
G_M30756_IG07:
```

Note there is a redundant load of the method table (hidden in the `cmp`) that could be eliminated with a bit more work on the prototype. So guarded devirtualization perf could potentially be even better than is shown above, especially for smaller values of `p`.
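A rough way to summarize the flat-cost behavior (a back-of-envelope model of ours, not taken from the measurements): let `c_inl` be the cost of the inlined direct call, `c_ind` the cost of a well-predicted indirect call, and `c_mp` the branch mispredict penalty. If the guard mispredicts at a rate on the order of `min(p, 1-p)`, the expected per-call cost is roughly `p*c_inl + (1-p)*c_ind + min(p, 1-p)*c_mp`. The mispredict term peaks at `p = 0.5`, but as `p` grows it is increasingly offset by replacing `c_ind` with the cheaper `c_inl`, which matches the relatively flat measured curve.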
### Virtual Calls: The Three-Class Case

Now to return to the question we asked above: is there something about the two class case that made guarded devirtualization especially attractive? Read on.

Suppose we introduce a third class into the mix and repeat the above measurement. There are now two probabilities in play: `p`, the probability that the element has class `B`, and `p1`, the probability that the element has class `D`, and there is a third class `E`. To avoid introducing a 3D plot we'll first simply average the results for the various values of `p1` and plot performance as a function of `p`:

![three classes devirt perf](images/ThreeClassesDevirt.JPG)

The right-hand side (`p` near 1.0) looks a lot like the previous chart. This is not surprising as there are relatively few instances of that third class. But the middle and left hand side differ and are more costly.

For the un-optimized case (orange) the difference is directly attributable to the performance of the indirect branch predictor. Even when `p` is small there are still two viable branch targets (on average) and some degree of indirect misprediction.

For the optimized case we now see that guarded devirtualization performs worse than no optimization if the JIT's guess is completely wrong. The penalty is not that bad because the JIT-introduced branch is predictable. But even at very modest values of `p` guarded devirtualization starts to win out.

Because we've averaged over `p1` you might suspect that we're hiding something. The following chart shows the min and max values as well as the average, and also shows the two-class result (dashed lines).

![three classes devirt perf ranges](images/ThreeClassesDevirtFull.JPG)

You can see the minimum values are very similar to the two class case; these are cases where `p1` is close to 0 or close to 1. And that makes sense because if there really are only two classes despite the potential of there being three then we'd expect to see similar results as in the case where there can only be two classes. And as noted above, if `p` is high enough then the curves also converge to the two class case, as the relative mixture of `D` and `E` doesn't matter: the predominance of `B` wins out.

For low values of `p` the actual class at the call site is some mixture of `D` and `E`. Here's some detail (the x axis now shows `p1` and `p` as upper and lower values respectively).

![three classes devirt perf detail](images/ThreeClassesDevirtDetail.JPG)

The worst case for perf for both is when the mixture of `D` and `E` is unpredictably 50-50 and there are no `B`s. Once we mix in just 10% of `B` then guarded devirt performs better no matter what distribution we have for the other two classes. Worst case overhead -- where the JIT guesses a class that never appears, and the other classes are evenly distributed -- is around 20%.

So it seems reasonable to say that so long as the JIT can make a credible guess about the possible class -- say a guess that is right at least 10% of the time -- then there is quite likely a performance benefit to guarded devirtualization for virtual calls. We'll need to verify this with more scenarios, but these initial results are certainly encouraging.

### Virtual Calls: Testing for Multiple Cases

One might deduce from the above that if there are two likely candidates the JIT should test for each. This is certainly a possibility and in C++ compilers that do indirect call profiling there are cases where multiple tests are considered a good idea. But there's also additional code size and another branch. This is something we'll look into further.
### Interface Calls: The Two Class Case

Interface calls on the CLR are implemented via [Virtual Stub Dispatch](https://github.com/dotnet/runtime/blob/main/docs/design/coreclr/botr/virtual-stub-dispatch.md) (aka VSD). Calls are made through an indirection cell that initially points at a lookup stub. On the first call, the interface target is identified from the object's method table and the lookup stub is replaced with a dispatch stub that checks for that specific method table in a manner quite similar to guarded devirtualization. If the method table check fails a counter is incremented, and once the counter reaches a threshold the dispatch stub is replaced with a resolve stub that looks up the right target in a process-wide hash table.

For interface call sites that are monomorphic, the VSD mechanism (via the dispatch stub) executes the following code sequence (here for x64):

```asm
; JIT-produced code
;
; set up R11 with interface target info
mov   R11, ...         ; additional VSD info for call
mov   RCX, ...         ; dispatch target object
cmp   [rcx], rcx       ; null check (unnecessary)
call  [addr]           ; call indirect through indir cell

; dispatch stub
cmp   [RCX], targetMT  ; check for right method table
jne   DISPATCH-FAIL    ; bail to resolve stub if check fails (uses R11 info)
jmp   targetCode       ; else "tail call" the right method
```

At first glance it might appear that adding guarded devirtualization on top of VSD may not provide much benefit for monomorphic sites. However the guarded devirtualization test doesn't use an indirection cell and doesn't require R11 setup, may be able to optimize away the null check, and opens the door for inlining. So it should be slightly cheaper on average and significantly cheaper in some cases. (Note [#9027](https://github.com/dotnet/runtime/issues/9027) indicates we should be able to optimize away the null check in any case).

If the guarded test fails, we've filtered out one method table and the dispatch cell now works well even if a call site alternates between two classes. So we'd expect the combination of guarded devirtualization and VSD to perform well on the two class test and only show limitations when faced with mixtures of three or more classes.

If the guard test always fails we have the up-front cost for the vtable fetch (which should amortize pretty well with the subsequent fetch in the stub) plus the predicted not taken branch. So we'd expect the cost for the two-class cases where the JIT's prediction is always wrong to be a bit higher.

The graph below shows the measured results. To make sure we're not overly impacted by residual VSD state we use a fresh call site for each value of p. The solid orange line is the current cost. The dashed orange line is the corresponding cost for a virtual call with the same value of p. The solid blue line is the cost with an up-front guarded test. As noted there is some slowdown when the JIT always guesses the wrong class, but the break-even point (not shown) is at a relatively small probability of a correct guess.

![two classes interface devirt](images/TwoClassesInterface.JPG)

### Interface Calls: The Three Class Case

As with virtual calls you may strongly suspect the two class case for interface calls is special. And you'd be right. If we mix a third class in as we did above, we see similar changes in the performance mix for interface calls, as seen below. But also as with virtual calls the JIT's guess doesn't have to be all that good to see payoffs. At around 10% correct, guessing wins on average, and around 30% correct guessing is always a perf win.

![three classes interface devirt](images/ThreeClassesInterface.JPG)

### Delegate Speculation

While we have been discussing this topic in the context of virtual calls, the method is general and can be applied to indirect calls as well. Here the guard test may just test for a particular function rather than a type. `Delegate.Invoke` is a special method that can eventually turn into an indirect call. The JIT could speculate about the possible target of this call. Choosing a good target here would require some kind of indirect call profiling.
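As a source-level illustration (ours, not from the prototype; `Apply`, `KnownTarget`, and `s_likelyTarget` are hypothetical names), a delegate guard might look like:

```C#
static readonly System.Func<int, int> s_likelyTarget = KnownTarget;  // the guessed target

static int Apply(System.Func<int, int> f, int x)
{
    if (f == s_likelyTarget)    // guard: test for one particular function
        return KnownTarget(x);  // direct call, now inlineable
    return f(x);                // fallback: the original indirect invoke
}

static int KnownTarget(int x) => x + 1;
```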
### Calli Speculation

Indirect calls also arise via the `calli` opcode. As with delegates, choosing a target here likely requires specialized profiling.

### Costs

Given the optimistic take on performance, it is important to remember that there are also some costs involved in guarded devirtualization: increased code size and increased JIT time. There may also be some second-order effects on the local code generation as we've introduced control flow into the method where it didn't exist previously.

A naive implementation that aggressively performs guarded devirtualization increases code size overall by about 5% as measured by PMI. JIT time increase was not measured but should be in that same ballpark. Some assemblies see code size increasing by as much as 12%. However the guarded devirtualization only kicks in for about 15% of the methods. So the average relative size increase in a method with virtual calls is probably more like 33%.

There may be some inefficiencies in the current prototype that can be fixed to reduce the code size impact. Aside from the extra method table fetch noted above, the duplicated calls have the same sets of arguments and so we might be able to amortize argument evaluation costs better. And there are some complexities around handling return values (especially for implicit by-reference structures) that likewise might be able to be tightened up.

Nevertheless, blindly optimizing all virtual calls with guarded devirtualization is not likely the right approach. Something more selective is almost certainly needed. However we have done code-expanding optimizations somewhat blindly before, and we could contain the size growth risk by restricting this optimization to Tier1. Also PMI can overstate size impact seen in real scenarios as it may over-count the impact of changes in methods that are always inlined. So we should look at size increases from some actual scenarios. And perhaps I'll look at the size impact of loop cloning as a precedent.

## Implementation Considerations

To get the data above and a better feel for the challenges involved we have implemented a prototype. It is currently located on this branch: [GuardedDevirtFoundations](https://github.com/AndyAyersMS/coreclr/tree/GuardedDevirtFoundations).

The prototype can introduce guarded devirtualization for some virtual and interface calls. It supports inlining of the directly invoked method. It uses the JIT's "best known type" as the class to predict. It also anticipates being able to query the runtime for implementing classes of an interface.

### Phase Ordering

For the most part, devirtualization is done very early on in the JIT, during importation. This allows devirtualized calls to subsequently be inlined, and for devirtualization of call sites in inlinees to take advantage of type information propagating down into the inlinee from inlined arguments. We want those same properties to hold for guarded devirtualization candidates. So conceptually the transformation should happen in the same place.

However it is not possible to introduce new control flow in the importer (ignoring for the moment the possibility of using question ops). So the actual transformation must be deferred until sometime after the importer runs and before the inliner runs.

This deferral is a bit problematic as some key bits of importer state are needed to query the runtime about the properties of a call target. So if we defer the transformation we need to somehow capture the data needed for these queries and make it available later. The current prototype uses (abuses?) the inline candidate information for this. As part of this we require that all speculative devirtualization sites be treated as inline candidates, at least initially. This has the side effect of hoisting the call to be a top level (statement) expression and introducing a return value placeholder.

We currently already have a similar transformation in the JIT, the "fat calli" transformation needed on CoreRT. This transformation runs at the right time -- after the importer and before the inliner -- and introduces the right kind of `if-then-else` control flow structure. So the thought is to generalize this to handle guarded devirtualization as well.

### Recognition

In the prototype, candidates are recognized during the initial importer-driven call to `impDevirtualizeCall`. If the only reason devirtualization fails is lack of exactness, then the call is marked as a guarded devirtualization candidate.
### Devirtualization

To produce the direct call the prototype updates the `this` passed in the `then` version of the call so it has the exact predicted type. It then re-invokes `impDevirtualizeCall` which should now succeed as the type is now exactly known. The benefit of reuse here is that certain special cases of devirtualization are now more likely to be handled.

### Inline Candidacy

The prototype currently sets up all virtual and interface calls as potential inline candidates. One open question is whether it is worth doing guarded devirtualization simply to introduce a direct call. As an alternative we could insist that the directly called method also be something that is potentially inlineable. One can argue that call overhead matters much more for small methods that are also likely good inline candidates.

The inline candidate info is based on the apparent method invoked at the virtual site. This is the base method, the one that introduces the virtual slot. So if we speculatively check for some class and that class overrides the method, we need to somehow update the inline info. How to best do this is unclear.

### Return Values

Because the candidate calls are handled as inline candidates, the JIT hoists the call to a top level expression (which is good) during importation and introduces a return value placeholder into the place the call occupied in its original tree. (Oddly we introduce return value placeholders for some calls that don't return a value -- we should fix this). The placeholder points back at the call. When we split the call into two calls we can't keep this structure intact as there needs to be a 1-1 relationship between call and placeholder. So the prototype needs to save the return value in a new local and then update the placeholder to refer to that local. This can be tricky because in some cases we haven't yet settled on what the actual type of the return value is.

The handling of return values in the early stages of the JIT (arguably, in the entire JIT) is quite messy. The ABI details bleed through quite early and do so somewhat unevenly. This mostly impacts methods that return structures as different ABIs have quite different conventions, and the IR is transformed to reflect those conventions at different times for un-inlined calls, inlineable calls that end up not getting inlined, and for calls that get inlined. In particular, structures that are small enough to be returned by value (in a register or set of registers) need careful handling. The prototype skips over such by-value-returning struct methods today. Some of the logic found in `fgUpdateInlineReturnExpressionPlaceHolder` needs to be pulled in to properly type the call return value so we can properly type the temp. Or perhaps we could leverage some of the importer-time transformations that are done for the fat calli cases.

For larger structs we should arrange so that the call(s) write their return values directly into the new temp, instead of copying the value from wherever they return it into a temp, to avoid one level of struct copy. Doing so may require upstream zero init of the return value struct and this should only happen in one place.
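Schematically, the placeholder fix-up described above looks like the following (our pseudocode, not actual JIT IR):

```
// Before splitting: one call, one placeholder pointing back at it.
//     ... RET_PLACEHOLDER(call obj->F()) ...
//
// After splitting: two calls cannot share one placeholder, so the return
// value is spilled to a new local and the placeholder is redirected to it.
//     if (obj->methodTable == predictedMT)
//         tmp = Derived::F(obj)    // direct call, inline candidate
//     else
//         tmp = obj->F()           // residual virtual call
//     ... RET_PLACEHOLDER -> tmp ...
```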
## Open Issues

Here are some of the issues that need to be looked into more carefully.

### Policy

- what is the best mechanism for guessing which class to test for?
  - instrument Tier0 code?
  - look at types of arguments?
  - ask runtime for set of known classes?
  - harvest info from runtime caches (VSD)?
  - add instrumenting Tier1 to collect data and Tier2 to optimize?
- is there some efficient way to test for class ranges? Currently the JIT is doing an exact type test. But we really care more about what method is going to be invoked. So if there is a range of types `D1...DN` that all will invoke some particular method, can we test for them all somehow?
- or should we test the method after the method lookup (possibly worse tradeoff because of the chunked method table arrangement, also tricky as a method can have multiple addresses over time). Since many types can share a chunk this might allow devirtualization over a wider set of classes (good) but we'd lose knowledge of exact types (bad). Not clear how these tradeoffs play out.
- interaction of guarded devirt with VSD? For interface calls we are sort of inlining the first level of the VSD into the JITted code.
- revocation or reworking of the guard if the JIT's prediction turns out to be bad?
- improve regular devirtualization to reduce the need for guarded devirtualization.
- should we enable this for preJITted code? In preJITted code the target method table is not a JIT-time constant and must be looked up.
- in the prototype, guarded devirtualization and late devirtualization sometimes conflict. Say we fail to devirtualize a site, and so expand via guarded devirtualization guessing some class X. The residual virtual call then may be optimizable via late devirtualization, and this may discover the actual class. In that case the guarded devirtualization is not needed. But currently it can't be undone.
- we probably don't want to bother with guarded devirtualization if we can't also inline. But it takes us several evaluation steps to determine if a call can be inlined, some of these happening *after* we've done the guarded expansion. Again this expansion can't be undone.
- so perhaps we need to build an undo capability for the cases where guarded devirtualization doesn't lead to inlining and/or where late devirtualization also applies.

### Implementation

- avoid re-fetching method table for latent virtual call (should reduce code size and improve overall perf win)
- look at how effectively we are sharing argument setup (might reduce code size and JIT time impact) -- perhaps implement head merging?
- handle return values in full generality
- IL offsets
- flag residual calls as not needing null checks
- properly establish inline candidacy
- decide if the refactoring of `InlineCandidateInfo` is the right way to pass information from importer to the indirect transform phase

### Futures

- can we cover multiple calls with one test? This can happen already if the subsequent call is introduced via inlining of the directly called method, as we know the exact type along that path. But for back to back calls to virtual methods off of the same object it would be nice to do just one test.
- should we test for multiple types? Once we've peeled off the "most likely" case, if the conditional probability of the next most likely case is high it is probably worth testing for it too. I believe the C++ compiler will test up to 3 candidates this way... but that's a lot of code expansion.
# Guarded Devirtualization ## Overview Guarded devirtualization is a proposed new optimization for the JIT in .NET Core 3.0. This document describes the motivation, initial design sketch, and highlights various issues needing further investigation. ## Motivation The .NET Core JIT is able to do a limited amount of devirtualization for virtual and interface calls. This ability was added in .NET Core 2.0. To devirtualize the JIT must be able to demonstrate one of two things: either that it knows the type of some reference exactly (say because it has seen a `newobj`) or that the declared type of the reference is a `final` class (aka `sealed`). For virtual calls the JIT can also devirtualize if it can prove the method is marked as `final`. However, most of the time the JIT is unable to determine exactness or finalness and so devirtualization fails. Statistics show that currently only around 15% of virtual call sites can be devirtualized. Result are even more pessimistic for interface calls, where success rates are around 5%. There are a variety of reasons for this. The JIT analysis is somewhat weak. Historically all the JIT cared about was whether some location held **a** reference type, not a specific reference type. So the current type propagation has been retrofitted and there are places where types just get lost. The JIT analysis happens quite early (during importation) and there is only minimal ability to do data flow analysis at this stage. So for current devirtualization the source of the type information and the consumption must be fairly close in the code. A more detailed accounting of some of the shortcomings can be found in [#7541](https://github.com/dotnet/runtime/issues/7541). Resolution of these issues will improve the ability of the JIT to devirtualize, but even the best analysis possible will still miss out on many cases. Some call sites are truly polymorphic. Some others are truly monomorphic but proving this would require sophisticated interprocedural analyses that are not practical in the JIT or in a system as dynamic as the CLR. And some sites are monomorphic in practice but potentially polymorphic. As an alternative, when devirtualization fails, the JIT can perform *guarded devirtualization*. Here the JIT creates an `if-then-else` block set in place of a virtual or interface call and inserts a runtime type test (or similar) into the `if` -- the "guard". If the guard test succeeds the JIT knows the type of the reference, so the `then` block can directly invoke the method corresponding to that type. If the test fails then the `else` block is executed and this contains the original virtual or interface call. The upshot is that the JIT conditionally gains the benefit of devirtualization at the expense of increased code size, longer JIT times, and slightly longer code paths around the call. So long as the JIT's guess at the type is somewhat reasonable, this optimization can improve performance. ## Opportunity One might imagine that the JIT's guess about the type of the reference has to be pretty good for devirtualization to pay off. Somewhat surprisingly, at least based on our initial results, that is not the case. ### Virtual Calls: The Two-Class Case Given these class declarations: ```C# class B { public virtual int F() { return 33; } } class D : B { public override int F() { return 44; } } ``` Suppose we have an array `B[]` that is randomly filled with instances of `B` and `D` and each element is class `B` with probability `p`. 
We time how long it takes to invoke `F` on each member of the array (note the JIT will not ever be able to devirtualize these calls), and plot the times as a function of `p`. The result is something like the following: ![two classes baseline perf](images/TwoClassesBaseline.JPG) Modern hardware includes an indirect branch target predictor and we can see it in action here. When the array element type is predictable (`p` very close to zero or very close to 1) performance is better. When the element type is unpredictable (`p` near 0.5) performance is quite a bit worse. From this we can see that a correctly predicted virtual call requires about 19 time units and worst case incorrect prediction around 55 time units. There is some timing overhead here too so the real costs are a bit lower. Now imagine we update the JIT to do guarded devirtualization and check if the element is indeed type `B`. If so the JIT can call `B.F` directly and in our prototype the JIT will also inline the call. So we would expect that if the element types are mostly `B`s (that is if `p` is near 1.0) we'd see very good performance, and if the element type is mostly `D` (that is `p` near 0.0) performance should perhaps slightly worse than the un-optimized case as there is now extra code to run check before the call. ![two classes devirt perf](images/TwoClassesDevirt.JPG) However as you can see the performance of devirtualized case (blue line) is as good or better than the un-optimized case for all values of `p`. This is perhaps unexpected and deserves some explanation. Recall that modern hardware also includes a branch predictor. For small or large values of `p` this predictor will correctly guess whether the test added by the JIT will resolve to the `then` or `else` case. For small values of `p` the JIT guess will be wrong and control will flow to the `else` block. But unlike the original example, the indirect call here will only see instances of type `D` and so the indirect branch predictor will work extremely well. So the overhead for the small `p` case is similar to the well-predicted indirect case without guarded devirtualization. As `p` increases the branch predictor starts to mispredict and that costs some cycles. But when it mispredicts control reaches the `then` block which executes the inlined call. So the cost of misprediction is offset by the faster execution and the cost stays relatively flat. As `p` passes 0.5 the branch predictor flips its prediction to prefer the `then` case. As before mispredicts are costly and send us down the `else` path but there we still execute a correctly predicted indirect call. And as `p` approaches 1.0 the cost falls as the branch predictor is almost always correct and so the cost is simply that of the inlined call. So oddly enough the guarded devirtualization case shown here does not require any sort of perf tradeoff. The JIT is better off guessing the more likely case but even guessing the less likely case can pay off and doesn't hurt performance. One might suspect at this point that the two class case is a special case and that the results do not hold up in more complex cases. More on that shortly. Before moving on, we should point out that virtual calls in the current CLR are a bit more expensive than in C++, because the CLR uses a two-level method table. 
That is, the indirect call sequence is something like: ```asm 000095 mov rax, qword ptr [rcx] ; fetch method table 000098 mov rax, qword ptr [rax+72] ; fetch proper chunk 00009C call qword ptr [rax+32]B:F():int:this ; call indirect ``` This is a chain of 3 dependent loads and so best-case will require at least 3x the best cache latency (plus any indirect prediction overhead). So the virtual call costs for the CLR are high. The chunked method table design was adopted to save space (chunks can be shared by different classes) at the expense of some performance. And this apparently makes guarded devirtualization pay off over a wider range of class distributions than one might expect. And for completeness, the full guarded `if-then-else` sequence measured above is: ```asm 00007A mov rcx, gword ptr [rsi+8*rcx+16] ; fetch array element 00007F mov rax, 0x7FFC9CFB4A90 ; B's method table 000089 cmp qword ptr [rcx], rax ; method table test 00008C jne SHORT G_M30756_IG06 ; jump if class is not B 00008E mov eax, 33 ; inlined B.F 000093 jmp SHORT G_M30756_IG07 G_M30756_IG06: 000095 mov rax, qword ptr [rcx] ; fetch method table 000098 mov rax, qword ptr [rax+72] ; fetch proper chunk 00009C call qword ptr [rax+32]B:F():int:this ; call indirect G_M30756_IG07: ``` Note there is a redundant load of the method table (hidden in the `cmp`) that could be eliminated with a bit more work on the prototype. So guarded devirtualization perf could potentially be even better than is shown above, especially for smaller values of `p`. ### Virtual Calls: The Three-Class Case Now to return to the question we asked above: is there something about the two class case that made guarded devirtualization especially attractive? Read on. Suppose we introduce a third class into the mix and repeat the above measurement. There are now two probabilities in play: `p`, the probability that the element has class `B`, and `p1`, the probability that the element has class `D`, and there is a third class `E`. To avoid introducing a 3D plot we'll first simply average the results for the various values of `p1` and plot performance as a function of `p`: ![three classes devirt perf](images/ThreeClassesDevirt.JPG) The right-hand side (`p` near 1.0) looks a lot like the previous chart. This is not surprising as there are relatively few instances of that third class. But the middle and left hand side differ and are more costly. For the un-optimized case (orange) the difference is directly attributable to the performance of the indirect ranch predictor. Even when `p` is small there are still two viable branch targets (on average) and some some degree of indirect misprediction. For the optimized case we now see that guarded devirtualization performs worse than no optimization if the JIT's guess is completely wrong. The penalty is not that bad because the JIT-introduced branch is predictable. But even at very modest values of `p` guarded devirtualization starts to win out. Because we've averaged over `p1` you might suspect that we're hiding something. The following chart shows the min and max values as well as the average, and also shows the two-class result (dashed lines). ![three classes devirt perf ranges](images/ThreeClassesDevirtFull.JPG) You can see the minimum values are very similar to the two class case; these are cases where the `p1` is close to 0 or close to 1. 
And that makes sense because if there really are only two classes despite the potential of there being three then we'd expect to see similar results as in the case where there only can be two classes. And as noted above, if `p` is high enough then the curves also converge to the two class case, as the relative mixture of `D` and `E` is doesn't matter: the predominance of `B` wins out. For low values of `p` the actual class at the call site is some mixture of `D` and `E`. Here's some detail (the x axis now shows `p1` and `p` as upper and lower values respectively). ![three classes devirt perf detail](images/ThreeClassesDevirtDetail.JPG) The worst case for perf for both is when the mixture of `D` and `E` is unpredictably 50-50 and there are no `B`s. Once we mix in just 10% of `B` then guarded devirt performs better no matter what distribution we have for the other two classes. Worst case overhead -- where the JIT guesses a class that never appears, and the other classes are evenly distributed -- is around 20%. So it seems reasonable to say that so long as the JIT can make a credible guess about the possible class -- say a guess that is right at least 10% of the time -- then there is quite likely a performance benefit to guarded devirtualization for virtual calls. We'll need to verify this with more scenarios, but these initial results are certainly encouraging. ### Virtual Calls: Testing for Multiple Cases One might deduce from the above that if there are two likely candidates the JIT should test for each. This is certainly a possibility and in C++ compilers that do indirect call profiling there are cases where multiple tests are considered a good idea. But there's also additional code size and another branch. This is something we'll look into further. ### Interface Calls: The Two Class Case Interface calls on the CLR are implemented via [Virtual Stub Dispatch]( https://github.com/dotnet/runtime/blob/main/docs/design/coreclr/botr/virtual-stub-dispatch.md ) (aka VSD). Calls are made through an indirection cell that initially points at a lookup stub. On the first call, the interface target is identified from the object's method table and the lookup stub is replaced with a dispatch stub that checks for that specific method table in a manner quite similar to guarded devirtualization. If the method table check fails a counter is incremented, and once the counter reaches a threshold the dispatch stub is replaced with a resolve stub that looks up the right target in a process-wide hash table. For interface call sites that are monomorphic, the VSD mechanism (via the dispatch stub) executes the following code sequence (here for x64) ```asm ; JIT-produced code ; ; set up R11 with interface target info mov R11, ... ; additional VSD info for call mov RCX, ... ; dispatch target object cmp [rcx], rcx ; null check (unnecessary) call [addr] ; call indirect through indir cell ; dispatch stub cmp [RCX], targetMT ; check for right method table jne DISPATCH-FAIL ; bail to resolve stub if check fails (uses R11 info) jmp targetCode ; else "tail call" the right method ``` At first glance it might appear that adding guarded devirtualization on top of VSD may not provide much benefit for monomorphic sites. However the guarded devirtualization test doesn't use an indirection cell and doesn't require R11 setup, may be able to optimize away the null check, and opens the door for inlining. So it should be slightly cheaper on average and significantly cheaper in some cases. 
(Note [#9027](https://github.com/dotnet/runtime/issues/9027) indicates we should be able to optimize away the null check in any case). If the guarded tests fails we've filtered out one method table the dispatch cell now works well even if a call site alternates between two classes. So we'd expect the combination of guarded devirtualization and VSD to perform well on the two class test and only show limitations when faced with mixtures of three or more classes. If the guard test always fails we have the up-front cost for the vtable fetch (which should amortize pretty well with the subsequent fetch in the) stub plus the predicted not taken branch. So we'd expect the cost for the two-class cases where the JIT's prediction is always wrong to be a bit higher). The graph below shows the measured results. To makes sure we're not overly impacted by residual VSD state we use a fresh call site for each value of p. The solid orange line is the current cost. The dashed orange line is the corresponding cost for a virtual call with the same value of p. The solid blue line is the cost with an up-front guarded test. As noted there is some slowdown when the JIT always guesses the wrong class, but the break-even point (not shown) is at a relatively small probability of a correct guess. ![two classes interface devirt](images/TwoClassesInterface.JPG) ### Interface Calls: The Three Class Case As with virtual calls you may strongly suspect the two class case for interface calls is special. And you'd be right. If we mix a third class in as we did above, we see similar changes in the performance mix for interface calls, as seen below. But also as with virtual calls the JIT's guess doesn't have to be all that good to see payoffs. At around 10% correct, guessing wins on average, and around 30% correct guessing is always a perf win. ![three classes interface devirt](images/ThreeClassesInterface.JPG) ### Delegate Speculation While we have been discussing this topic in the context of virtual calls, the method is general and can be applied to indirect calls as well. Here the guard test may just test for a particular function rather than a type. `Delegate.Invoke` is a special method that can eventually turns into an indirect call. The JIT could speculate about the possible target of this call. Choosing a good target here would require some kind of indirect call profiling. ### Calli Speculation Indirect calls also arise via the `calli` opcode. As with delegates, choosing a target here likely requires specialized profiling. ### Costs Given the optimistic take on performance, it is important to remember that there are also some costs involved to guarded devirtualization: increased code size and increased JIT time. There may also be some second-order effects on the local code generation as we've introduced control flow into the method where it didn't exist previously. A naive implementation that aggressively performs guarded devirtualization increases code size overall by about 5% as measured by PMI. JIT time increase was not measured but should be in that same ballpark. Some assemblies see code size increasing by as much as 12%. However the guarded devirtualization only kicks in for about 15% of the methods. So the average relative size increase in a method with virtual calls is probably more like 33%. There may be some inefficiencies in the current prototype that can be fixed to reduce the code size impact. 
Aside from the extra method table fetch noted above the duplicated calls have the same sets of arguments and so we might be able to amortize argument evaluation costs better. And there are some complexities around handling return values (especially for implicit by-reference structures) that likewise might be able to be tightened up. Nevertheless, blindly optimizing all virtual calls with guarded devirtualization is not likely the right approach. Something more selective is almost certainly needed. However we have done code-expanding optimizations somewhat blindly before, and we could contain the size growth risk by restricting this optimization to Tier1. Also PMI can overstate size impact seen in real scenarios as it may over-count the impact of changes in methods that are always inlined. So we should look at size increases from some actual scenarios. And perhaps I'll look at the size impact of loop cloning as a precedent. ## Implementation Considerations To get the data above and a better feel for the challenges involved we have implemented a prototype. It is currently located on this branch: [GuardedDevirtFoundations](https://github.com/AndyAyersMS/coreclr/tree/GuardedDevirtFoundations). The prototype can introduce guarded devirtualization for some virtual and interface calls. It supports inlining of the directly invoked method. It uses the JIT's "best known type" as the class to predict. It also anticipates being able to query the runtime for implementing classes of an interface. ### Phase Ordering For the most part, devirtualization is done very early on in the JIT, during importation. This allows devirtualized calls to subsequently be inlined, and for devirtualization of call sites in inlinees to take advantage of type information propagating down into the inlinee from inlined arguments. We want those same properties to hold for guarded devirtualization candidates. So conceptually the transformation should happen in the same place. However it is not possible to introduce new control flow in the importer (ignoring for the moment the possibility of using question ops). So the actual transformation must be deferred until sometime after the importer runs and before the inliner runs. This deferral is a bit problematic as some key bits of importer state are needed to query the runtime about the properties of a call target. So if we defer the transformation we need to somehow capture the data needed for these queries and make it available later. The current prototype uses (abuses?) the inline candidate information for this. As part of this we require that all speculative devirtualization sites be treated as inline candidates, at least initially. This has the side effect of hoisting the call to be a top level (statement) expression and introduces a return value placeholder. We currently already have a similar transformation in the JIT, the "fat calli" transformation needed on CoreRT. This transformation runs at the right time -- after the importer and before the inliner -- and introduces the right kind of `if-then-else` control flow structure. So the thought is to generalize this to handle guarded devirtualization as well. ### Recognition In the prototype, candidates are recognized during the initial importer driven call to `impDevirtualizeCall`. If the only reason devirtualization fails is lack of exactness, then the call is marked as a guarded devirtualization candidate. 
### Devirtualization

To produce the direct call, the prototype updates the `this` passed in the `then` version of the call so it has the exact predicted type. It then re-invokes `impDevirtualizeCall`, which should now succeed as the type is exactly known. The benefit of reuse here is that certain special cases of devirtualization are more likely to be handled.

### Inline Candidacy

The prototype currently sets up all virtual and interface calls as potential inline candidates. One open question is whether it is worth doing guarded devirtualization simply to introduce a direct call. As an alternative, we could insist that the directly called method also be something that is potentially inlineable. One can argue that call overhead matters much more for small methods that are also likely good inline candidates.

The inline candidate info is based on the apparent method invoked at the virtual site. This is the base method, the one that introduces the virtual slot. So if we speculatively check for some class and that class overrides the method, we need to somehow update the inline info. How best to do this is unclear.

### Return Values

Because the candidate calls are handled as inline candidates, the JIT hoists the call to a top-level expression (which is good) during importation and introduces a return value placeholder into the place the call occupied in its original tree. (Oddly, we introduce return value placeholders for some calls that don't return a value -- we should fix this.) The placeholder points back at the call. When we split the call into two calls we can't keep this structure intact, as there needs to be a 1-1 relationship between call and placeholder. So the prototype needs to save the return value in a new local and then update the placeholder to refer to that local. A sketch of this rewrite follows below.

This can be tricky because in some cases we haven't yet settled on what the actual type of the return value is. The handling of return values in the early stages of the JIT (arguably, in the entire JIT) is quite messy. The ABI details bleed through quite early and do so somewhat unevenly. This mostly impacts methods that return structures, as different ABIs have quite different conventions, and the IR is transformed to reflect those conventions at different times for un-inlined calls, inlineable calls that end up not getting inlined, and for calls that get inlined. In particular, structures that are small enough to be returned by value (in a register or set of registers) need careful handling. The prototype skips over such by-value-returning struct methods today.

Some of the logic found in `fgUpdateInlineReturnExpressionPlaceHolder` needs to be pulled in to properly type the call return value so we can properly type the temp. Or perhaps we could leverage some of the importer-time transformations that are done for the fat calli cases.

For larger structs we should arrange for the call(s) to write their return values directly into the new temp, instead of copying the value from wherever they return it into a temp, to avoid one level of struct copy. Doing so may require upstream zero init of the return value struct, and this should only happen in one place.
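A rough C#-like sketch of the return value rewrite, reusing the illustrative `I`/`B` types from the earlier sketch (the actual transformation happens on JIT IR, not source):

```csharp
// Before the split: one call, one placeholder referring back to it.
//
//     ... = <placeholder for i.F(x)> ...
//
// After the split: both calls store into a single new local, and the
// placeholder is updated to read that local instead of a particular call.
int tmp;
if (i.GetType() == typeof(B))
    tmp = ((B)i).F(x);   // direct call writes the temp
else
    tmp = i.F(x);        // residual call writes the same temp
// ... = tmp ...         // placeholder now refers to the local
```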
## Open Issues

Here are some of the issues that need to be looked into more carefully.

### Policy

- what is the best mechanism for guessing which class to test for?
  - instrument Tier0 code?
  - look at types of arguments?
  - ask runtime for set of known classes?
  - harvest info from runtime caches (VSD)?
  - add an instrumenting Tier1 to collect data and a Tier2 to optimize?
- is there some efficient way to test for class ranges? Currently the JIT is doing an exact type test. But we really care more about what method is going to be invoked. So if there is a range of types `D1...DN` that all will invoke some particular method, can we test for them all somehow?
- or should we test the method after the method lookup (possibly a worse tradeoff because of the chunked method table arrangement; also tricky as a method can have multiple addresses over time)? Since many types can share a chunk, this might allow devirtualization over a wider set of classes (good) but we'd lose knowledge of exact types (bad). It's not clear how these tradeoffs play out.
- interaction of guarded devirtualization with VSD? For interface calls we are sort of inlining the first level of the VSD into the JITted code.
- revocation or reworking of the guard if the JIT's prediction turns out to be bad?
- improve regular devirtualization to reduce the need for guarded devirtualization.
- should we enable this for preJITted code? In preJITted code the target method table is not a JIT-time constant and must be looked up.
- in the prototype, guarded devirtualization and late devirtualization sometimes conflict. Say we fail to devirtualize a site, and so expand via guarded devirtualization, guessing some class X. The residual virtual call may then be optimizable via late devirtualization, and this may discover the actual class. In that case the guarded devirtualization is not needed. But currently it can't be undone.
- we probably don't want to bother with guarded devirtualization if we can't also inline. But it takes us several evaluation steps to determine if a call can be inlined, some of them happening *after* we've done the guarded expansion. Again, this expansion can't be undone.
- so perhaps we need to build an undo capability for the cases where guarded devirtualization doesn't lead to inlining and/or where late devirtualization also applies.

### Implementation

- avoid re-fetching the method table for the latent virtual call (should reduce code size and improve the overall perf win)
- look at how effectively we are sharing argument setup (might reduce code size and JIT time impact) -- perhaps implement head merging?
- handle return values in full generality
- IL offsets
- flag residual calls as not needing null checks
- properly establish inline candidacy
- decide if the refactoring of `InlineCandidateInfo` is the right way to pass information from the importer to the indirect transform phase

### Futures

- can we cover multiple calls with one test? This can happen already if the subsequent call is introduced via inlining of the directly called method, as we know the exact type along that path. But for back-to-back calls to virtual methods off of the same object it would be nice to do just one test.
- should we test for multiple types? Once we've peeled off the "most likely" case, if the conditional probability of the next most likely case is high, it is probably worth testing for it too (see the sketch below). I believe the C++ compiler will test up to 3 candidates this way... but that's a lot of code expansion.
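A hedged C# sketch of such a multi-type test, assuming illustrative classes `D1` and `D2` (not from any measured benchmark):

```csharp
class Base { public virtual void M() { } }
sealed class D1 : Base { public override void M() { } }
sealed class D2 : Base { public override void M() { } }

static class Cascade
{
    static void CascadedGuards(Base o)
    {
        // Guards ordered by estimated likelihood. Each extra guard trades a
        // compare-and-branch plus a duplicated call for another chance to
        // devirtualize and inline.
        if (o.GetType() == typeof(D1))
            ((D1)o).M();          // most likely class
        else if (o.GetType() == typeof(D2))
            ((D2)o).M();          // next most likely class
        else
            o.M();                // residual virtual call
    }
}
```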
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/ia64/Lget_proc_info.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gget_proc_info.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gget_proc_info.c" #endif
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/ppc64/get_func_addr.c
/* libunwind - a platform-independent unwind library Copyright (C) 2006-2007 IBM Contributed by Corey Ashford <[email protected]> Jose Flavio Aguilar Paulino <[email protected]> <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind_i.h" int tdep_get_func_addr (unw_addr_space_t as, unw_word_t addr, unw_word_t *entry_point) { if (as->abi == UNW_PPC64_ABI_ELFv1) { unw_accessors_t *a; int ret; a = unw_get_accessors_int (as); /* Entry-point is stored in the 1st word of the function descriptor. In case that changes in the future, we'd have to update the line below and read the word at addr + offset: */ ret = (*a->access_mem) (as, addr, entry_point, 0, NULL); if (ret < 0) return ret; } else *entry_point = addr; return 0; }
/* libunwind - a platform-independent unwind library Copyright (C) 2006-2007 IBM Contributed by Corey Ashford <[email protected]> Jose Flavio Aguilar Paulino <[email protected]> <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind_i.h" int tdep_get_func_addr (unw_addr_space_t as, unw_word_t addr, unw_word_t *entry_point) { if (as->abi == UNW_PPC64_ABI_ELFv1) { unw_accessors_t *a; int ret; a = unw_get_accessors_int (as); /* Entry-point is stored in the 1st word of the function descriptor. In case that changes in the future, we'd have to update the line below and read the word at addr + offset: */ ret = (*a->access_mem) (as, addr, entry_point, 0, NULL); if (ret < 0) return ret; } else *entry_point = addr; return 0; }
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/unwind/Backtrace.c
/* libunwind - a platform-independent unwind library Copyright (C) 2003-2004 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind-internal.h" _Unwind_Reason_Code _Unwind_Backtrace (_Unwind_Trace_Fn trace, void *trace_parameter) { struct _Unwind_Context context; unw_context_t uc; int ret; if (_Unwind_InitContext (&context, &uc) < 0) return _URC_FATAL_PHASE1_ERROR; /* Phase 1 (search phase) */ while (1) { if ((ret = unw_step (&context.cursor)) <= 0) { if (ret == 0) return _URC_END_OF_STACK; else return _URC_FATAL_PHASE1_ERROR; } if ((*trace) (&context, trace_parameter) != _URC_NO_REASON) return _URC_FATAL_PHASE1_ERROR; } } _Unwind_Reason_Code __libunwind_Unwind_Backtrace (_Unwind_Trace_Fn, void *) ALIAS (_Unwind_Backtrace);
/* libunwind - a platform-independent unwind library Copyright (C) 2003-2004 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind-internal.h" _Unwind_Reason_Code _Unwind_Backtrace (_Unwind_Trace_Fn trace, void *trace_parameter) { struct _Unwind_Context context; unw_context_t uc; int ret; if (_Unwind_InitContext (&context, &uc) < 0) return _URC_FATAL_PHASE1_ERROR; /* Phase 1 (search phase) */ while (1) { if ((ret = unw_step (&context.cursor)) <= 0) { if (ret == 0) return _URC_END_OF_STACK; else return _URC_FATAL_PHASE1_ERROR; } if ((*trace) (&context, trace_parameter) != _URC_NO_REASON) return _URC_FATAL_PHASE1_ERROR; } } _Unwind_Reason_Code __libunwind_Unwind_Backtrace (_Unwind_Trace_Fn, void *) ALIAS (_Unwind_Backtrace);
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/metadata/assembly.c
/** * \file * Routines for loading assemblies. * * Author: * Miguel de Icaza ([email protected]) * * Copyright 2001-2003 Ximian, Inc (http://www.ximian.com) * Copyright 2004-2009 Novell, Inc (http://www.novell.com) * Copyright 2011 Xamarin, Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include <config.h> #include <stdio.h> #include <glib.h> #include <errno.h> #include <string.h> #include <stdlib.h> #include <mono/metadata/assembly.h> #include "assembly-internals.h" #include <mono/metadata/image.h> #include "image-internals.h" #include "object-internals.h" #include <mono/metadata/loader.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/custom-attrs-internals.h> #include <mono/metadata/metadata-internals.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/class-internals.h> #include <mono/metadata/domain-internals.h> #include <mono/metadata/exception-internals.h> #include <mono/metadata/reflection-internals.h> #include <mono/metadata/mono-endian.h> #include <mono/metadata/mono-debug.h> #include <mono/utils/mono-uri.h> #include <mono/metadata/mono-config.h> #include <mono/metadata/mono-config-internals.h> #include <mono/metadata/mono-config-dirs.h> #include <mono/utils/mono-digest.h> #include <mono/utils/mono-logger-internals.h> #include <mono/utils/mono-path.h> #include <mono/utils/mono-proclib.h> #include <mono/metadata/reflection.h> #include <mono/metadata/coree.h> #include <mono/metadata/cil-coff.h> #include <mono/utils/atomic.h> #include <mono/utils/mono-os-mutex.h> #include <mono/metadata/mono-private-unstable.h> #include <minipal/getexepath.h> #ifndef HOST_WIN32 #include <sys/types.h> #include <unistd.h> #include <sys/stat.h> #endif #ifdef HOST_DARWIN #include <mach-o/dyld.h> #endif /* the default search path is empty, the first slot is replaced with the computed value */ static char* default_path [] = { NULL, NULL, NULL }; /* Contains the list of directories to be searched for assemblies (MONO_PATH) */ static char **assemblies_path = NULL; /* keeps track of loaded assemblies, excluding dynamic ones */ static GList *loaded_assemblies = NULL; static guint32 loaded_assembly_count = 0; static MonoAssembly *corlib; static char* unquote (const char *str); // This protects loaded_assemblies static mono_mutex_t assemblies_mutex; static inline void mono_assemblies_lock (void) { mono_os_mutex_lock (&assemblies_mutex); } static inline void mono_assemblies_unlock (void) { mono_os_mutex_unlock (&assemblies_mutex); } /* If defined, points to the bundled assembly information */ static const MonoBundledAssembly **bundles; static const MonoBundledSatelliteAssembly **satellite_bundles; /* Class lazy loading functions */ static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute") static GENERATE_TRY_GET_CLASS_WITH_CACHE (internals_visible, "System.Runtime.CompilerServices", "InternalsVisibleToAttribute") static MonoAssembly* mono_assembly_invoke_search_hook_internal (MonoAssemblyLoadContext *alc, MonoAssembly *requesting, MonoAssemblyName *aname, gboolean postload); static MonoAssembly * invoke_assembly_preload_hook (MonoAssemblyLoadContext *alc, MonoAssemblyName *aname, gchar **apath); static gchar* encode_public_tok (const guchar *token, gint32 len) { const static gchar allowed [] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; gchar *res; int i; res = (gchar *)g_malloc (len * 2 + 1); for (i = 0; 
i < len; i++) { res [i * 2] = allowed [token [i] >> 4]; res [i * 2 + 1] = allowed [token [i] & 0xF]; } res [len * 2] = 0; return res; } /** * mono_public_tokens_are_equal: * \param pubt1 first public key token * \param pubt2 second public key token * * Compare two public key tokens and return TRUE is they are equal and FALSE * otherwise. */ gboolean mono_public_tokens_are_equal (const unsigned char *pubt1, const unsigned char *pubt2) { return g_ascii_strncasecmp ((const char*) pubt1, (const char*) pubt2, 16) == 0; } /** * mono_set_assemblies_path: * \param path list of paths that contain directories where Mono will look for assemblies * * Use this method to override the standard assembly lookup system and * override any assemblies coming from the GAC. This is the method * that supports the \c MONO_PATH variable. * * Notice that \c MONO_PATH and this method are really a very bad idea as * it prevents the GAC from working and it prevents the standard * resolution mechanisms from working. Nonetheless, for some debugging * situations and bootstrapping setups, this is useful to have. */ void mono_set_assemblies_path (const char* path) { char **splitted, **dest; splitted = g_strsplit (path, G_SEARCHPATH_SEPARATOR_S, 1000); if (assemblies_path) g_strfreev (assemblies_path); assemblies_path = dest = splitted; while (*splitted) { char *tmp = *splitted; if (*tmp) *dest++ = mono_path_canonicalize (tmp); g_free (tmp); splitted++; } *dest = *splitted; if (g_hasenv ("MONO_DEBUG")) return; splitted = assemblies_path; while (*splitted) { if (**splitted && !g_file_test (*splitted, G_FILE_TEST_IS_DIR)) g_warning ("'%s' in MONO_PATH doesn't exist or has wrong permissions.", *splitted); splitted++; } } void mono_set_assemblies_path_direct (char **path) { g_strfreev (assemblies_path); assemblies_path = path; } static void check_path_env (void) { if (assemblies_path != NULL) return; char* path = g_getenv ("MONO_PATH"); if (!path) return; mono_set_assemblies_path(path); g_free (path); } static void mono_assembly_binding_info_free (MonoAssemblyBindingInfo *info) { if (!info) return; g_free (info->name); g_free (info->culture); } /** * mono_assembly_names_equal: * \param l first assembly * \param r second assembly. * * Compares two \c MonoAssemblyName instances and returns whether they are equal. * * This compares the names, the cultures, the release version and their * public tokens. * * \returns TRUE if both assembly names are equal. */ gboolean mono_assembly_names_equal (MonoAssemblyName *l, MonoAssemblyName *r) { return mono_assembly_names_equal_flags (l, r, MONO_ANAME_EQ_NONE); } /** * mono_assembly_names_equal_flags: * \param l first assembly name * \param r second assembly name * \param flags flags that affect what is compared. * * Compares two \c MonoAssemblyName instances and returns whether they are equal. * * This compares the simple names and cultures and optionally the versions and * public key tokens, depending on the \c flags. * * \returns TRUE if both assembly names are equal. 
*/ gboolean mono_assembly_names_equal_flags (MonoAssemblyName *l, MonoAssemblyName *r, MonoAssemblyNameEqFlags flags) { g_assert (l != NULL); g_assert (r != NULL); if (!l->name || !r->name) return FALSE; if ((flags & MONO_ANAME_EQ_IGNORE_CASE) != 0 && g_strcasecmp (l->name, r->name)) return FALSE; if ((flags & MONO_ANAME_EQ_IGNORE_CASE) == 0 && strcmp (l->name, r->name)) return FALSE; if (l->culture && r->culture && strcmp (l->culture, r->culture)) return FALSE; if ((l->major != r->major || l->minor != r->minor || l->build != r->build || l->revision != r->revision) && (flags & MONO_ANAME_EQ_IGNORE_VERSION) == 0) if (! ((l->major == 0 && l->minor == 0 && l->build == 0 && l->revision == 0) || (r->major == 0 && r->minor == 0 && r->build == 0 && r->revision == 0))) return FALSE; if (!l->public_key_token [0] || !r->public_key_token [0] || (flags & MONO_ANAME_EQ_IGNORE_PUBKEY) != 0) return TRUE; if (!mono_public_tokens_are_equal (l->public_key_token, r->public_key_token)) return FALSE; return TRUE; } /** * assembly_names_compare_versions: * \param l left assembly name * \param r right assembly name * \param maxcomps how many version components to compare, or -1 to compare all. * * \returns a negative if \p l is a lower version than \p r; a positive value * if \p r is a lower version than \p l, or zero if \p l and \p r are equal * versions (comparing upto \p maxcomps components). * * Components are \c major, \c minor, \c revision, and \c build. \p maxcomps 1 means just compare * majors. 2 means majors then minors. etc. */ static int assembly_names_compare_versions (MonoAssemblyName *l, MonoAssemblyName *r, int maxcomps) { int i = 0; if (maxcomps < 0) maxcomps = 4; #define CMP(field) do { \ if (l-> field < r-> field && i < maxcomps) return -1; \ if (l-> field > r-> field && i < maxcomps) return 1; \ } while (0) CMP (major); ++i; CMP (minor); ++i; CMP (revision); ++i; CMP (build); #undef CMP return 0; } /** * mono_assembly_request_prepare_load: * \param req the load request to be initialized * \param alc the AssemblyLoadContext in netcore * * Initialize an assembly loader request. Its state will be reset and the assembly context kind will be prefilled with \p asmctx. */ void mono_assembly_request_prepare_load (MonoAssemblyLoadRequest *req, MonoAssemblyLoadContext *alc) { memset (req, 0, sizeof (MonoAssemblyLoadRequest)); req->alc = alc; } /** * mono_assembly_request_prepare_open: * \param req the open request to be initialized * \param alc the AssemblyLoadContext in netcore * * Initialize an assembly loader request intended to be used for open operations. Its state will be reset and the assembly context kind will be prefilled with \p asmctx. */ void mono_assembly_request_prepare_open (MonoAssemblyOpenRequest *req, MonoAssemblyLoadContext *alc) { memset (req, 0, sizeof (MonoAssemblyOpenRequest)); req->request.alc = alc; } /** * mono_assembly_request_prepare_byname: * \param req the byname request to be initialized * \param alc the AssemblyLoadContext in netcore * * Initialize an assembly load by name request. Its state will be reset and the assembly context kind will be prefilled with \p asmctx. 
*/ void mono_assembly_request_prepare_byname (MonoAssemblyByNameRequest *req, MonoAssemblyLoadContext *alc) { memset (req, 0, sizeof (MonoAssemblyByNameRequest)); req->request.alc = alc; } static MonoAssembly * load_in_path (const char *basename, const char** search_path, const MonoAssemblyOpenRequest *req, MonoImageOpenStatus *status) { int i; char *fullpath; MonoAssembly *result; for (i = 0; search_path [i]; ++i) { fullpath = g_build_filename (search_path [i], basename, (const char*)NULL); result = mono_assembly_request_open (fullpath, req, status); g_free (fullpath); if (result) return result; } return NULL; } /** * mono_assembly_setrootdir: * \param root_dir The pathname of the root directory where we will locate assemblies * * This routine sets the internal default root directory for looking up * assemblies. * * This is used by Windows installations to compute dynamically the * place where the Mono assemblies are located. * */ void mono_assembly_setrootdir (const char *root_dir) { /* * Override the MONO_ASSEMBLIES directory configured at compile time. */ if (default_path [0]) g_free (default_path [0]); default_path [0] = g_strdup (root_dir); } /** * mono_assembly_getrootdir: * * Obtains the root directory used for looking up assemblies. * * Returns: a string with the directory, this string should not be freed. */ G_CONST_RETURN gchar * mono_assembly_getrootdir (void) { return default_path [0]; } /** * mono_native_getrootdir: * * Obtains the root directory used for looking up native libs (.so, .dylib). * * Returns: a string with the directory, this string should be freed by * the caller. */ gchar * mono_native_getrootdir (void) { gchar* fullpath = g_build_path (G_DIR_SEPARATOR_S, mono_assembly_getrootdir (), mono_config_get_reloc_lib_dir(), (const char*)NULL); return fullpath; } /** * mono_set_dirs: * \param assembly_dir the base directory for assemblies * \param config_dir the base directory for configuration files * * This routine is used internally and by developers embedding * the runtime into their own applications. * * There are a number of cases to consider: Mono as a system-installed * package that is available on the location preconfigured or Mono in * a relocated location. * * If you are using a system-installed Mono, you can pass NULL * to both parameters. If you are not, you should compute both * directory values and call this routine. * * The values for a given PREFIX are: * * assembly_dir: PREFIX/lib * config_dir: PREFIX/etc * * Notice that embedders that use Mono in a relocated way must * compute the location at runtime, as they will be in control * of where Mono is installed. 
*/ void mono_set_dirs (const char *assembly_dir, const char *config_dir) { if (assembly_dir == NULL) assembly_dir = mono_config_get_assemblies_dir (); if (config_dir == NULL) config_dir = mono_config_get_cfg_dir (); mono_assembly_setrootdir (assembly_dir); mono_set_config_dir (config_dir); } #ifndef HOST_WIN32 static char * compute_base (char *path) { char *p = strrchr (path, '/'); if (p == NULL) return NULL; /* Not a well known Mono executable, we are embedded, cant guess the base */ if (strcmp (p, "/mono") && strcmp (p, "/mono-boehm") && strcmp (p, "/mono-sgen") && strcmp (p, "/pedump") && strcmp (p, "/monodis")) return NULL; *p = 0; p = strrchr (path, '/'); if (p == NULL) return NULL; if (strcmp (p, "/bin") != 0) return NULL; *p = 0; return path; } static void fallback (void) { mono_set_dirs (mono_config_get_assemblies_dir (), mono_config_get_cfg_dir ()); } static G_GNUC_UNUSED void set_dirs (char *exe) { char *base; char *config, *lib, *mono; struct stat buf; const char *bindir; /* * Only /usr prefix is treated specially */ bindir = mono_config_get_bin_dir (); g_assert (bindir); if (strncmp (exe, bindir, strlen (bindir)) == 0 || (base = compute_base (exe)) == NULL){ fallback (); return; } config = g_build_filename (base, "etc", (const char*)NULL); lib = g_build_filename (base, "lib", (const char*)NULL); mono = g_build_filename (lib, "mono/4.5", (const char*)NULL); // FIXME: stop hardcoding 4.5 here if (stat (mono, &buf) == -1) fallback (); else { mono_set_dirs (lib, config); } g_free (config); g_free (lib); g_free (mono); } #endif /* HOST_WIN32 */ /** * mono_set_rootdir: * * Registers the root directory for the Mono runtime, for Linux and Solaris 10, * this auto-detects the prefix where Mono was installed. */ void mono_set_rootdir (void) { char *path = minipal_getexepath(); if (path == NULL) { #ifndef HOST_WIN32 fallback (); #endif return; } #if defined(HOST_WIN32) || (defined(HOST_DARWIN) && !defined(TARGET_ARM)) gchar *bindir, *installdir, *root, *config; bindir = g_path_get_dirname (path); installdir = g_path_get_dirname (bindir); root = g_build_path (G_DIR_SEPARATOR_S, installdir, "lib", (const char*)NULL); config = g_build_filename (root, "..", "etc", (const char*)NULL); #ifdef HOST_WIN32 mono_set_dirs (root, config); #else if (g_file_test (root, G_FILE_TEST_EXISTS) && g_file_test (config, G_FILE_TEST_EXISTS)) mono_set_dirs (root, config); else fallback (); #endif g_free (config); g_free (root); g_free (installdir); g_free (bindir); g_free (path); #elif defined(DISABLE_MONO_AUTODETECTION) fallback (); #else set_dirs (path); return; #endif } /** * mono_assemblies_init: * * Initialize global variables used by this module. */ void mono_assemblies_init (void) { /* * Initialize our internal paths if we have not been initialized yet. * This happens when embedders use Mono. 
*/ if (mono_assembly_getrootdir () == NULL) mono_set_rootdir (); check_path_env (); mono_os_mutex_init_recursive (&assemblies_mutex); } gboolean mono_assembly_fill_assembly_name_full (MonoImage *image, MonoAssemblyName *aname, gboolean copyBlobs) { MonoTableInfo *t = &image->tables [MONO_TABLE_ASSEMBLY]; guint32 cols [MONO_ASSEMBLY_SIZE]; gint32 machine, flags; if (!table_info_get_rows (t)) return FALSE; mono_metadata_decode_row (t, 0, cols, MONO_ASSEMBLY_SIZE); aname->hash_len = 0; aname->hash_value = NULL; aname->name = mono_metadata_string_heap (image, cols [MONO_ASSEMBLY_NAME]); if (copyBlobs) aname->name = g_strdup (aname->name); aname->culture = mono_metadata_string_heap (image, cols [MONO_ASSEMBLY_CULTURE]); if (copyBlobs) aname->culture = g_strdup (aname->culture); aname->flags = cols [MONO_ASSEMBLY_FLAGS]; aname->major = cols [MONO_ASSEMBLY_MAJOR_VERSION]; aname->minor = cols [MONO_ASSEMBLY_MINOR_VERSION]; aname->build = cols [MONO_ASSEMBLY_BUILD_NUMBER]; aname->revision = cols [MONO_ASSEMBLY_REV_NUMBER]; aname->hash_alg = cols [MONO_ASSEMBLY_HASH_ALG]; if (cols [MONO_ASSEMBLY_PUBLIC_KEY]) { guchar* token = (guchar *)g_malloc (8); gchar* encoded; const gchar* pkey; int len; pkey = mono_metadata_blob_heap (image, cols [MONO_ASSEMBLY_PUBLIC_KEY]); len = mono_metadata_decode_blob_size (pkey, &pkey); aname->public_key = (guchar*)pkey; mono_digest_get_public_token (token, aname->public_key, len); encoded = encode_public_tok (token, 8); g_strlcpy ((char*)aname->public_key_token, encoded, MONO_PUBLIC_KEY_TOKEN_LENGTH); g_free (encoded); g_free (token); } else { aname->public_key = NULL; memset (aname->public_key_token, 0, MONO_PUBLIC_KEY_TOKEN_LENGTH); } if (cols [MONO_ASSEMBLY_PUBLIC_KEY]) { aname->public_key = (guchar*)mono_metadata_blob_heap (image, cols [MONO_ASSEMBLY_PUBLIC_KEY]); if (copyBlobs) { const gchar *pkey_end; int len = mono_metadata_decode_blob_size ((const gchar*) aname->public_key, &pkey_end); pkey_end += len; /* move to end */ size_t size = pkey_end - (const gchar*)aname->public_key; guchar *tmp = g_new (guchar, size); memcpy (tmp, aname->public_key, size); aname->public_key = tmp; } } else aname->public_key = 0; machine = image->image_info->cli_header.coff.coff_machine; flags = image->image_info->cli_cli_header.ch_flags; switch (machine) { case COFF_MACHINE_I386: /* https://bugzilla.xamarin.com/show_bug.cgi?id=17632 */ if (flags & (CLI_FLAGS_32BITREQUIRED|CLI_FLAGS_PREFERRED32BIT)) aname->arch = MONO_PROCESSOR_ARCHITECTURE_X86; else if ((flags & 0x70) == 0x70) aname->arch = MONO_PROCESSOR_ARCHITECTURE_NONE; else aname->arch = MONO_PROCESSOR_ARCHITECTURE_MSIL; break; case COFF_MACHINE_IA64: aname->arch = MONO_PROCESSOR_ARCHITECTURE_IA64; break; case COFF_MACHINE_AMD64: aname->arch = MONO_PROCESSOR_ARCHITECTURE_AMD64; break; case COFF_MACHINE_ARM: aname->arch = MONO_PROCESSOR_ARCHITECTURE_ARM; break; default: break; } return TRUE; } /** * mono_assembly_fill_assembly_name: * \param image Image * \param aname Name * \returns TRUE if successful */ gboolean mono_assembly_fill_assembly_name (MonoImage *image, MonoAssemblyName *aname) { return mono_assembly_fill_assembly_name_full (image, aname, FALSE); } /** * mono_stringify_assembly_name: * \param aname the assembly name. * * Convert \p aname into its string format. The returned string is dynamically * allocated and should be freed by the caller. * * \returns a newly allocated string with a string representation of * the assembly name. 
*/ char* mono_stringify_assembly_name (MonoAssemblyName *aname) { const char *quote = (aname->name && g_ascii_isspace (aname->name [0])) ? "\"" : ""; GString *str; str = g_string_new (NULL); g_string_append_printf (str, "%s%s%s", quote, aname->name, quote); if (!aname->without_version) g_string_append_printf (str, ", Version=%d.%d.%d.%d", aname->major, aname->minor, aname->build, aname->revision); if (!aname->without_culture) { if (aname->culture && *aname->culture) g_string_append_printf (str, ", Culture=%s", aname->culture); else g_string_append_printf (str, ", Culture=%s", "neutral"); } if (!aname->without_public_key_token) { if (aname->public_key_token [0]) g_string_append_printf (str,", PublicKeyToken=%s%s", (char *)aname->public_key_token, (aname->flags & ASSEMBLYREF_RETARGETABLE_FLAG) ? ", Retargetable=Yes" : ""); else g_string_append_printf (str,", PublicKeyToken=%s%s", "null", (aname->flags & ASSEMBLYREF_RETARGETABLE_FLAG) ? ", Retargetable=Yes" : ""); } char *result = g_string_free (str, FALSE); // result is the final formatted string. return result; } static gchar* assemblyref_public_tok (MonoImage *image, guint32 key_index, guint32 flags) { const gchar *public_tok; int len; public_tok = mono_metadata_blob_heap (image, key_index); len = mono_metadata_decode_blob_size (public_tok, &public_tok); if (flags & ASSEMBLYREF_FULL_PUBLIC_KEY_FLAG) { guchar token [8]; mono_digest_get_public_token (token, (guchar*)public_tok, len); return encode_public_tok (token, 8); } return encode_public_tok ((guchar*)public_tok, len); } static gchar* assemblyref_public_tok_checked (MonoImage *image, guint32 key_index, guint32 flags, MonoError *error) { const gchar *public_tok; int len; public_tok = mono_metadata_blob_heap_checked (image, key_index, error); return_val_if_nok (error, NULL); if (!public_tok) { mono_error_set_bad_image (error, image, "expected public key token (index = %d) in assembly reference, but the Blob heap is NULL", key_index); return NULL; } len = mono_metadata_decode_blob_size (public_tok, &public_tok); if (flags & ASSEMBLYREF_FULL_PUBLIC_KEY_FLAG) { guchar token [8]; mono_digest_get_public_token (token, (guchar*)public_tok, len); return encode_public_tok (token, 8); } return encode_public_tok ((guchar*)public_tok, len); } /** * mono_assembly_addref: * \param assembly the assembly to reference * * This routine increments the reference count on a MonoAssembly. * The reference count is reduced every time the method mono_assembly_close() is * invoked. 
*/ gint32 mono_assembly_addref (MonoAssembly *assembly) { return mono_atomic_inc_i32 (&assembly->ref_count); } gint32 mono_assembly_decref (MonoAssembly *assembly) { return mono_atomic_dec_i32 (&assembly->ref_count); } /* * CAUTION: This table must be kept in sync with * ivkm/reflect/Fusion.cs */ #define SILVERLIGHT_KEY "7cec85d7bea7798e" #define WINFX_KEY "31bf3856ad364e35" #define ECMA_KEY "b77a5c561934e089" #define MSFINAL_KEY "b03f5f7f11d50a3a" #define COMPACTFRAMEWORK_KEY "969db8053d3322ac" typedef struct { const char *name; const char *from; const char *to; } KeyRemapEntry; static KeyRemapEntry key_remap_table[] = { { "CustomMarshalers", COMPACTFRAMEWORK_KEY, MSFINAL_KEY }, { "Microsoft.CSharp", WINFX_KEY, MSFINAL_KEY }, { "Microsoft.VisualBasic", COMPACTFRAMEWORK_KEY, MSFINAL_KEY }, { "System", SILVERLIGHT_KEY, ECMA_KEY }, { "System", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.ComponentModel.Composition", WINFX_KEY, ECMA_KEY }, { "System.ComponentModel.DataAnnotations", "ddd0da4d3e678217", WINFX_KEY }, { "System.Core", SILVERLIGHT_KEY, ECMA_KEY }, { "System.Core", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.Data", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.Data.DataSetExtensions", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.Drawing", COMPACTFRAMEWORK_KEY, MSFINAL_KEY }, { "System.Messaging", COMPACTFRAMEWORK_KEY, MSFINAL_KEY }, // FIXME: MS uses MSFINAL_KEY for .NET 4.5 { "System.Net", SILVERLIGHT_KEY, MSFINAL_KEY }, { "System.Numerics", WINFX_KEY, ECMA_KEY }, { "System.Runtime.Serialization", SILVERLIGHT_KEY, ECMA_KEY }, { "System.Runtime.Serialization", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.ServiceModel", WINFX_KEY, ECMA_KEY }, { "System.ServiceModel", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.ServiceModel.Web", SILVERLIGHT_KEY, WINFX_KEY }, { "System.Web.Services", COMPACTFRAMEWORK_KEY, MSFINAL_KEY }, { "System.Windows", SILVERLIGHT_KEY, MSFINAL_KEY }, { "System.Windows.Forms", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.Xml", SILVERLIGHT_KEY, ECMA_KEY }, { "System.Xml", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.Xml.Linq", WINFX_KEY, ECMA_KEY }, { "System.Xml.Linq", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.Xml.Serialization", WINFX_KEY, ECMA_KEY } }; static void remap_keys (MonoAssemblyName *aname) { int i; for (i = 0; i < G_N_ELEMENTS (key_remap_table); i++) { const KeyRemapEntry *entry = &key_remap_table [i]; if (strcmp (aname->name, entry->name) || !mono_public_tokens_are_equal (aname->public_key_token, (const unsigned char*) entry->from)) continue; memcpy (aname->public_key_token, entry->to, MONO_PUBLIC_KEY_TOKEN_LENGTH); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Remapped public key token of retargetable assembly %s from %s to %s", aname->name, entry->from, entry->to); return; } } static MonoAssemblyName * mono_assembly_remap_version (MonoAssemblyName *aname, MonoAssemblyName *dest_aname) { const MonoRuntimeInfo *current_runtime; if (aname->name == NULL) return aname; current_runtime = mono_get_runtime_info (); if (aname->flags & ASSEMBLYREF_RETARGETABLE_FLAG) { const AssemblyVersionSet* vset; /* Remap to current runtime */ vset = &current_runtime->version_sets [0]; memcpy (dest_aname, aname, sizeof(MonoAssemblyName)); dest_aname->major = vset->major; dest_aname->minor = vset->minor; dest_aname->build = vset->build; dest_aname->revision = vset->revision; dest_aname->flags &= ~ASSEMBLYREF_RETARGETABLE_FLAG; /* Remap assembly name */ if (!strcmp (aname->name, "System.Net")) dest_aname->name = g_strdup ("System"); remap_keys (dest_aname); mono_trace 
(G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "The request to load the retargetable assembly %s v%d.%d.%d.%d was remapped to %s v%d.%d.%d.%d", aname->name, aname->major, aname->minor, aname->build, aname->revision, dest_aname->name, vset->major, vset->minor, vset->build, vset->revision ); return dest_aname; } return aname; } /** * mono_assembly_get_assemblyref: * \param image pointer to the \c MonoImage to extract the information from. * \param index index to the assembly reference in the image. * \param aname pointer to a \c MonoAssemblyName that will hold the returned value. * * Fills out the \p aname with the assembly name of the \p index assembly reference in \p image. */ void mono_assembly_get_assemblyref (MonoImage *image, int index, MonoAssemblyName *aname) { MonoTableInfo *t; guint32 cols [MONO_ASSEMBLYREF_SIZE]; const char *hash; t = &image->tables [MONO_TABLE_ASSEMBLYREF]; mono_metadata_decode_row (t, index, cols, MONO_ASSEMBLYREF_SIZE); // ECMA-335: II.22.5 - AssemblyRef // HashValue can be null or non-null. If non-null it's an index into the blob heap // Sometimes ILasm can create an image without a Blob heap. hash = mono_metadata_blob_heap_null_ok (image, cols [MONO_ASSEMBLYREF_HASH_VALUE]); if (hash) { aname->hash_len = mono_metadata_decode_blob_size (hash, &hash); aname->hash_value = hash; } else { aname->hash_len = 0; aname->hash_value = NULL; } aname->name = mono_metadata_string_heap (image, cols [MONO_ASSEMBLYREF_NAME]); aname->culture = mono_metadata_string_heap (image, cols [MONO_ASSEMBLYREF_CULTURE]); aname->flags = cols [MONO_ASSEMBLYREF_FLAGS]; aname->major = cols [MONO_ASSEMBLYREF_MAJOR_VERSION]; aname->minor = cols [MONO_ASSEMBLYREF_MINOR_VERSION]; aname->build = cols [MONO_ASSEMBLYREF_BUILD_NUMBER]; aname->revision = cols [MONO_ASSEMBLYREF_REV_NUMBER]; if (cols [MONO_ASSEMBLYREF_PUBLIC_KEY]) { gchar *token = assemblyref_public_tok (image, cols [MONO_ASSEMBLYREF_PUBLIC_KEY], aname->flags); g_strlcpy ((char*)aname->public_key_token, token, MONO_PUBLIC_KEY_TOKEN_LENGTH); g_free (token); } else { memset (aname->public_key_token, 0, MONO_PUBLIC_KEY_TOKEN_LENGTH); } } static MonoAssembly * search_bundle_for_assembly (MonoAssemblyLoadContext *alc, MonoAssemblyName *aname) { if (bundles == NULL && satellite_bundles == NULL) return NULL; MonoImageOpenStatus status; MonoImage *image; MonoAssemblyLoadRequest req; image = mono_assembly_open_from_bundle (alc, aname->name, &status, aname->culture); if (!image && !g_str_has_suffix (aname->name, ".dll")) { char *name = g_strdup_printf ("%s.dll", aname->name); image = mono_assembly_open_from_bundle (alc, name, &status, aname->culture); } if (image) { mono_assembly_request_prepare_load (&req, alc); return mono_assembly_request_load_from (image, aname->name, &req, &status); } return NULL; } static MonoAssembly* netcore_load_reference (MonoAssemblyName *aname, MonoAssemblyLoadContext *alc, MonoAssembly *requesting, gboolean postload) { g_assert (alc != NULL); MonoAssemblyName mapped_aname; aname = mono_assembly_remap_version (aname, &mapped_aname); MonoAssembly *reference = NULL; gboolean is_satellite = !mono_assembly_name_culture_is_neutral (aname); gboolean is_default = mono_alc_is_default (alc); /* * Try these until one of them succeeds (by returning a non-NULL reference): * 1. Check if it's already loaded by the ALC. * * 2. If it's a non-default ALC, call the Load() method. * * 3. If the ALC is not the default and this is not a satellite request, * check if it's already loaded by the default ALC. * * 4. 
If we have a bundle registered and this is not a satellite request, * search the images for a matching name. * * 5. If we have a satellite bundle registered and this is a satellite request, * find the parent ALC and search the images for a matching name and culture. * * 6. If the ALC is the default or this is not a satellite request, * check the TPA list, APP_PATHS, and ApplicationBase. * * 7. If this is a satellite request, call the ALC ResolveSatelliteAssembly method. * * 8. Call the ALC Resolving event. If the ALC is not the default and this is not * a satellite request, call the Resolving event in the default ALC first. * * 9. Call the ALC AssemblyResolve event (except for corlib satellite assemblies). * * 10. Return NULL. */ reference = mono_assembly_loaded_internal (alc, aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly already loaded in the active ALC: '%s'.", aname->name); goto leave; } if (!is_default) { reference = mono_alc_invoke_resolve_using_load_nofail (alc, aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found using Load method: '%s'.", aname->name); goto leave; } } if (!is_default && !is_satellite) { reference = mono_assembly_loaded_internal (mono_alc_get_default (), aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly already loaded in the default ALC: '%s'.", aname->name); goto leave; } } if (bundles != NULL && !is_satellite) { reference = search_bundle_for_assembly (mono_alc_get_default (), aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found in the bundle: '%s'.", aname->name); goto leave; } } if (satellite_bundles != NULL && is_satellite) { // Satellite assembly byname requests should be loaded in the same ALC as their parent assembly size_t name_len = strlen (aname->name); char *parent_name = NULL; MonoAssemblyLoadContext *parent_alc = NULL; if (g_str_has_suffix (aname->name, MONO_ASSEMBLY_RESOURCE_SUFFIX)) parent_name = g_strdup_printf ("%s.dll", g_strndup (aname->name, name_len - strlen (MONO_ASSEMBLY_RESOURCE_SUFFIX))); if (parent_name) { MonoAssemblyOpenRequest req; mono_assembly_request_prepare_open (&req, alc); MonoAssembly *parent_assembly = mono_assembly_request_open (parent_name, &req, NULL); parent_alc = mono_assembly_get_alc (parent_assembly); } if (parent_alc) reference = search_bundle_for_assembly (parent_alc, aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found in the satellite bundle: '%s'.", aname->name); goto leave; } } if (is_default || !is_satellite) { reference = invoke_assembly_preload_hook (mono_alc_get_default (), aname, assemblies_path); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found with the filesystem probing logic: '%s'.", aname->name); goto leave; } } if (is_satellite) { reference = mono_alc_invoke_resolve_using_resolve_satellite_nofail (alc, aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found with ResolveSatelliteAssembly method: '%s'.", aname->name); goto leave; } } // For compatibility with CoreCLR, invoke the Resolving event in the default ALC first whenever loading // a non-satellite assembly into a non-default ALC. 
See: https://github.com/dotnet/runtime/issues/54814 if (!is_default && !is_satellite) { reference = mono_alc_invoke_resolve_using_resolving_event_nofail (mono_alc_get_default (), aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found with the Resolving event (default ALC): '%s'.", aname->name); goto leave; } } reference = mono_alc_invoke_resolve_using_resolving_event_nofail (alc, aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found with the Resolving event: '%s'.", aname->name); goto leave; } // Looking up corlib resources here can cause an infinite loop // See: https://github.com/dotnet/coreclr/blob/0a762eb2f3a299489c459da1ddeb69e042008f07/src/vm/appdomain.cpp#L5178-L5239 if (!(strcmp (aname->name, MONO_ASSEMBLY_CORLIB_RESOURCE_NAME) == 0 && is_satellite) && postload) { reference = mono_assembly_invoke_search_hook_internal (alc, requesting, aname, TRUE); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found with AssemblyResolve event: '%s'.", aname->name); goto leave; } } leave: return reference; } /** * mono_assembly_get_assemblyref_checked: * \param image pointer to the \c MonoImage to extract the information from. * \param index index to the assembly reference in the image. * \param aname pointer to a \c MonoAssemblyName that will hold the returned value. * \param error set on error * * Fills out the \p aname with the assembly name of the \p index assembly reference in \p image. * * \returns TRUE on success, otherwise sets \p error and returns FALSE */ gboolean mono_assembly_get_assemblyref_checked (MonoImage *image, int index, MonoAssemblyName *aname, MonoError *error) { guint32 cols [MONO_ASSEMBLYREF_SIZE]; const char *hash; if (image_is_dynamic (image)) { MonoDynamicTable *t = &(((MonoDynamicImage*) image)->tables [MONO_TABLE_ASSEMBLYREF]); if (!mono_metadata_decode_row_dynamic_checked ((MonoDynamicImage*)image, t, index, cols, MONO_ASSEMBLYREF_SIZE, error)) return FALSE; } else { MonoTableInfo *t = &image->tables [MONO_TABLE_ASSEMBLYREF]; if (!mono_metadata_decode_row_checked (image, t, index, cols, MONO_ASSEMBLYREF_SIZE, error)) return FALSE; } // ECMA-335: II.22.5 - AssemblyRef // HashValue can be null or non-null. If non-null it's an index into the blob heap // Sometimes ILasm can create an image without a Blob heap. 
hash = mono_metadata_blob_heap_checked (image, cols [MONO_ASSEMBLYREF_HASH_VALUE], error); return_val_if_nok (error, FALSE); if (hash) { aname->hash_len = mono_metadata_decode_blob_size (hash, &hash); aname->hash_value = hash; } else { aname->hash_len = 0; aname->hash_value = NULL; } aname->name = mono_metadata_string_heap_checked (image, cols [MONO_ASSEMBLYREF_NAME], error); return_val_if_nok (error, FALSE); aname->culture = mono_metadata_string_heap_checked (image, cols [MONO_ASSEMBLYREF_CULTURE], error); return_val_if_nok (error, FALSE); aname->flags = cols [MONO_ASSEMBLYREF_FLAGS]; aname->major = cols [MONO_ASSEMBLYREF_MAJOR_VERSION]; aname->minor = cols [MONO_ASSEMBLYREF_MINOR_VERSION]; aname->build = cols [MONO_ASSEMBLYREF_BUILD_NUMBER]; aname->revision = cols [MONO_ASSEMBLYREF_REV_NUMBER]; if (cols [MONO_ASSEMBLYREF_PUBLIC_KEY]) { gchar *token = assemblyref_public_tok_checked (image, cols [MONO_ASSEMBLYREF_PUBLIC_KEY], aname->flags, error); return_val_if_nok (error, FALSE); g_strlcpy ((char*)aname->public_key_token, token, MONO_PUBLIC_KEY_TOKEN_LENGTH); g_free (token); } else { memset (aname->public_key_token, 0, MONO_PUBLIC_KEY_TOKEN_LENGTH); } return TRUE; } /** * mono_assembly_load_reference: */ void mono_assembly_load_reference (MonoImage *image, int index) { MonoAssembly *reference; MonoAssemblyName aname; MonoImageOpenStatus status = MONO_IMAGE_OK; memset (&aname, 0, sizeof (MonoAssemblyName)); /* * image->references is shared between threads, so we need to access * it inside a critical section. */ mono_image_lock (image); if (!image->references) { MonoTableInfo *t = &image->tables [MONO_TABLE_ASSEMBLYREF]; int n = table_info_get_rows (t); image->references = g_new0 (MonoAssembly *, n + 1); image->nreferences = n; } reference = image->references [index]; mono_image_unlock (image); if (reference) return; mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Requesting loading reference %d (of %d) of %s", index, image->nreferences, image->name); ERROR_DECL (local_error); mono_assembly_get_assemblyref_checked (image, index, &aname, local_error); if (!is_ok (local_error)) { mono_trace (G_LOG_LEVEL_WARNING, MONO_TRACE_ASSEMBLY, "Decoding assembly reference %d (of %d) of %s failed due to: %s", index, image->nreferences, image->name, mono_error_get_message (local_error)); mono_error_cleanup (local_error); goto commit_reference; } if (image->assembly) { if (mono_trace_is_traced (G_LOG_LEVEL_INFO, MONO_TRACE_ASSEMBLY)) { char *aname_str = mono_stringify_assembly_name (&aname); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Loading reference %d of %s (%s), looking for %s", index, image->name, mono_alc_is_default (mono_image_get_alc (image)) ? "default ALC" : "custom ALC" , aname_str); g_free (aname_str); } MonoAssemblyByNameRequest req; mono_assembly_request_prepare_byname (&req, mono_image_get_alc (image)); req.requesting_assembly = image->assembly; //req.no_postload_search = TRUE; // FIXME: should this be set? reference = mono_assembly_request_byname (&aname, &req, NULL); } else { g_assertf (image->assembly, "While loading reference %d MonoImage %s doesn't have a MonoAssembly", index, image->name); } if (reference == NULL){ char *extra_msg; if (status == MONO_IMAGE_ERROR_ERRNO && errno == ENOENT) { extra_msg = g_strdup_printf ("The assembly was not found in the Global Assembly Cache, a path listed in the MONO_PATH environment variable, or in the location of the executing assembly (%s).\n", image->assembly != NULL ? 
image->assembly->basedir : "" ); } else if (status == MONO_IMAGE_ERROR_ERRNO) { extra_msg = g_strdup_printf ("System error: %s\n", strerror (errno)); } else if (status == MONO_IMAGE_MISSING_ASSEMBLYREF) { extra_msg = g_strdup ("Cannot find an assembly referenced from this one.\n"); } else if (status == MONO_IMAGE_IMAGE_INVALID) { extra_msg = g_strdup ("The file exists but is not a valid assembly.\n"); } else { extra_msg = g_strdup (""); } mono_trace (G_LOG_LEVEL_WARNING, MONO_TRACE_ASSEMBLY, "The following assembly referenced from %s could not be loaded:\n" " Assembly: %s (assemblyref_index=%d)\n" " Version: %d.%d.%d.%d\n" " Public Key: %s\n%s", image->name, aname.name, index, aname.major, aname.minor, aname.build, aname.revision, strlen ((char*)aname.public_key_token) == 0 ? "(none)" : (char*)aname.public_key_token, extra_msg); g_free (extra_msg); } commit_reference: mono_image_lock (image); if (reference == NULL) { /* Flag as not found */ reference = (MonoAssembly *)REFERENCE_MISSING; } if (!image->references [index]) { if (reference != REFERENCE_MISSING){ mono_assembly_addref (reference); if (image->assembly) mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly Ref addref %s[%p] -> %s[%p]: %d", image->assembly->aname.name, image->assembly, reference->aname.name, reference, reference->ref_count); } else { if (image->assembly) mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_ASSEMBLY, "Failed to load assembly %s[%p].", image->assembly->aname.name, image->assembly); } image->references [index] = reference; } mono_image_unlock (image); if (image->references [index] != reference) { /* Somebody loaded it before us */ mono_assembly_close (reference); } } /** * mono_assembly_load_references: * \param image * \param status * \deprecated There is no reason to use this method anymore, it does nothing * * This method is now a no-op, it does nothing other than setting the \p status to \c MONO_IMAGE_OK */ void mono_assembly_load_references (MonoImage *image, MonoImageOpenStatus *status) { /* This is a no-op now but it is part of the embedding API so we can't remove it */ if (status) *status = MONO_IMAGE_OK; } typedef struct AssemblyLoadHook AssemblyLoadHook; struct AssemblyLoadHook { AssemblyLoadHook *next; union { MonoAssemblyLoadFunc v1; MonoAssemblyLoadFuncV2 v2; } func; int version; gpointer user_data; }; static AssemblyLoadHook *assembly_load_hook = NULL; void mono_assembly_invoke_load_hook_internal (MonoAssemblyLoadContext *alc, MonoAssembly *ass) { AssemblyLoadHook *hook; for (hook = assembly_load_hook; hook; hook = hook->next) { if (hook->version == 1) { hook->func.v1 (ass, hook->user_data); } else { ERROR_DECL (hook_error); g_assert (hook->version == 2); hook->func.v2 (alc, ass, hook->user_data, hook_error); mono_error_assert_ok (hook_error); /* FIXME: proper error handling */ } } } /** * mono_assembly_invoke_load_hook: */ void mono_assembly_invoke_load_hook (MonoAssembly *ass) { mono_assembly_invoke_load_hook_internal (mono_alc_get_default (), ass); } static void mono_install_assembly_load_hook_v1 (MonoAssemblyLoadFunc func, gpointer user_data) { AssemblyLoadHook *hook; g_return_if_fail (func != NULL); hook = g_new0 (AssemblyLoadHook, 1); hook->version = 1; hook->func.v1 = func; hook->user_data = user_data; hook->next = assembly_load_hook; assembly_load_hook = hook; } void mono_install_assembly_load_hook_v2 (MonoAssemblyLoadFuncV2 func, gpointer user_data, gboolean append) { g_return_if_fail (func != NULL); AssemblyLoadHook *hook = g_new0 (AssemblyLoadHook, 1); hook->version = 2; 
hook->func.v2 = func; hook->user_data = user_data; if (append && assembly_load_hook != NULL) { // If we don't have any installed hooks, append vs prepend is irrelevant AssemblyLoadHook *old = assembly_load_hook; while (old->next != NULL) old = old->next; old->next = hook; } else { hook->next = assembly_load_hook; assembly_load_hook = hook; } } /** * mono_install_assembly_load_hook: */ void mono_install_assembly_load_hook (MonoAssemblyLoadFunc func, gpointer user_data) { mono_install_assembly_load_hook_v1 (func, user_data); } typedef struct AssemblySearchHook AssemblySearchHook; struct AssemblySearchHook { AssemblySearchHook *next; union { MonoAssemblySearchFunc v1; MonoAssemblySearchFuncV2 v2; } func; gboolean postload; int version; gpointer user_data; }; static AssemblySearchHook *assembly_search_hook = NULL; static MonoAssembly* mono_assembly_invoke_search_hook_internal (MonoAssemblyLoadContext *alc, MonoAssembly *requesting, MonoAssemblyName *aname, gboolean postload) { AssemblySearchHook *hook; for (hook = assembly_search_hook; hook; hook = hook->next) { if (hook->postload == postload) { MonoAssembly *ass; if (hook->version == 1) { ass = hook->func.v1 (aname, hook->user_data); } else { ERROR_DECL (hook_error); g_assert (hook->version == 2); ass = hook->func.v2 (alc, requesting, aname, postload, hook->user_data, hook_error); mono_error_assert_ok (hook_error); /* FIXME: proper error handling */ } if (ass) return ass; } } return NULL; } /** * mono_assembly_invoke_search_hook: */ MonoAssembly* mono_assembly_invoke_search_hook (MonoAssemblyName *aname) { return mono_assembly_invoke_search_hook_internal (NULL, NULL, aname, FALSE); } static void mono_install_assembly_search_hook_internal_v1 (MonoAssemblySearchFunc func, gpointer user_data, gboolean postload) { AssemblySearchHook *hook; g_return_if_fail (func != NULL); hook = g_new0 (AssemblySearchHook, 1); hook->version = 1; hook->func.v1 = func; hook->user_data = user_data; hook->postload = postload; hook->next = assembly_search_hook; assembly_search_hook = hook; } void mono_install_assembly_search_hook_v2 (MonoAssemblySearchFuncV2 func, gpointer user_data, gboolean postload, gboolean append) { if (func == NULL) return; AssemblySearchHook *hook = g_new0 (AssemblySearchHook, 1); hook->version = 2; hook->func.v2 = func; hook->user_data = user_data; hook->postload = postload; if (append && assembly_search_hook != NULL) { // If we don't have any installed hooks, append vs prepend is irrelevant AssemblySearchHook *old = assembly_search_hook; while (old->next != NULL) old = old->next; old->next = hook; } else { hook->next = assembly_search_hook; assembly_search_hook = hook; } } /** * mono_install_assembly_search_hook: */ void mono_install_assembly_search_hook (MonoAssemblySearchFunc func, gpointer user_data) { mono_install_assembly_search_hook_internal_v1 (func, user_data, FALSE); } /** * mono_install_assembly_refonly_search_hook: */ void mono_install_assembly_refonly_search_hook (MonoAssemblySearchFunc func, gpointer user_data) { /* Ignore refonly hooks, they will never flre */ } /** * mono_install_assembly_postload_search_hook: */ void mono_install_assembly_postload_search_hook (MonoAssemblySearchFunc func, gpointer user_data) { mono_install_assembly_search_hook_internal_v1 (func, user_data, TRUE); } void mono_install_assembly_postload_refonly_search_hook (MonoAssemblySearchFunc func, gpointer user_data) { /* Ignore refonly hooks, they will never flre */ } typedef struct AssemblyPreLoadHook AssemblyPreLoadHook; struct AssemblyPreLoadHook { 
AssemblyPreLoadHook *next; union { MonoAssemblyPreLoadFunc v1; // legacy internal use MonoAssemblyPreLoadFuncV2 v2; // current internal use MonoAssemblyPreLoadFuncV3 v3; // netcore external use } func; gpointer user_data; gint32 version; }; static AssemblyPreLoadHook *assembly_preload_hook = NULL; static MonoAssembly * invoke_assembly_preload_hook (MonoAssemblyLoadContext *alc, MonoAssemblyName *aname, gchar **apath) { AssemblyPreLoadHook *hook; MonoAssembly *assembly; for (hook = assembly_preload_hook; hook; hook = hook->next) { if (hook->version == 1) assembly = hook->func.v1 (aname, apath, hook->user_data); else { ERROR_DECL (error); g_assert (hook->version == 2 || hook->version == 3); if (hook->version == 2) assembly = hook->func.v2 (alc, aname, apath, hook->user_data, error); else { // v3 /* * For the default ALC, pass the globally known gchandle (since it's never collectible, it's always a strong handle). * For other ALCs, make a new strong handle that is passed to the caller. * Early at startup the default ALC exists, but its managed object doesn't, so the default ALC gchandle points to null. */ gboolean needs_free = TRUE; MonoGCHandle strong_gchandle; if (mono_alc_is_default (alc)) { needs_free = FALSE; strong_gchandle = alc->gchandle; } else strong_gchandle = mono_gchandle_from_handle (mono_gchandle_get_target_handle (alc->gchandle), TRUE); assembly = hook->func.v3 (strong_gchandle, aname, apath, hook->user_data, error); if (needs_free) mono_gchandle_free_internal (strong_gchandle); } /* TODO: propagate error out to callers */ mono_error_assert_ok (error); } if (assembly != NULL) return assembly; } return NULL; } /** * mono_install_assembly_preload_hook: */ void mono_install_assembly_preload_hook (MonoAssemblyPreLoadFunc func, gpointer user_data) { AssemblyPreLoadHook *hook; g_return_if_fail (func != NULL); hook = g_new0 (AssemblyPreLoadHook, 1); hook->version = 1; hook->func.v1 = func; hook->user_data = user_data; hook->next = assembly_preload_hook; assembly_preload_hook = hook; } /** * mono_install_assembly_refonly_preload_hook: */ void mono_install_assembly_refonly_preload_hook (MonoAssemblyPreLoadFunc func, gpointer user_data) { /* Ignore refonly hooks, they never fire */ } void mono_install_assembly_preload_hook_v2 (MonoAssemblyPreLoadFuncV2 func, gpointer user_data, gboolean append) { AssemblyPreLoadHook *hook; g_return_if_fail (func != NULL); AssemblyPreLoadHook **hooks = &assembly_preload_hook; hook = g_new0 (AssemblyPreLoadHook, 1); hook->version = 2; hook->func.v2 = func; hook->user_data = user_data; if (append && *hooks != NULL) { // If we don't have any installed hooks, append vs prepend is irrelevant AssemblyPreLoadHook *old = *hooks; while (old->next != NULL) old = old->next; old->next = hook; } else { hook->next = *hooks; *hooks = hook; } } void mono_install_assembly_preload_hook_v3 (MonoAssemblyPreLoadFuncV3 func, gpointer user_data, gboolean append) { AssemblyPreLoadHook *hook; g_return_if_fail (func != NULL); hook = g_new0 (AssemblyPreLoadHook, 1); hook->version = 3; hook->func.v3 = func; hook->user_data = user_data; if (append && assembly_preload_hook != NULL) { AssemblyPreLoadHook *old = assembly_preload_hook; while (old->next != NULL) old = old->next; old->next = hook; } else { hook->next = assembly_preload_hook; assembly_preload_hook = hook; } } static gchar * absolute_dir (const gchar *filename) { gchar *cwd; gchar *mixed; gchar **parts; gchar *part; GList *list, *tmp; GString *result; gchar *res; gint i; if (g_path_is_absolute (filename)) { part =
g_path_get_dirname (filename); res = g_strconcat (part, G_DIR_SEPARATOR_S, (const char*)NULL); g_free (part); return res; } cwd = g_get_current_dir (); mixed = g_build_filename (cwd, filename, (const char*)NULL); parts = g_strsplit (mixed, G_DIR_SEPARATOR_S, 0); g_free (mixed); g_free (cwd); list = NULL; for (i = 0; (part = parts [i]) != NULL; i++) { if (!strcmp (part, ".")) continue; if (!strcmp (part, "..")) { if (list && list->next) /* Don't remove root */ list = g_list_delete_link (list, list); } else { list = g_list_prepend (list, part); } } result = g_string_new (""); list = g_list_reverse (list); /* Ignores last data pointer, which should be the filename */ for (tmp = list; tmp && tmp->next != NULL; tmp = tmp->next){ if (tmp->data) g_string_append_printf (result, "%s%c", (char *) tmp->data, G_DIR_SEPARATOR); } res = result->str; g_string_free (result, FALSE); g_list_free (list); g_strfreev (parts); if (*res == '\0') { g_free (res); return g_strdup ("."); } return res; } static MonoImage * open_from_bundle_internal (MonoAssemblyLoadContext *alc, const char *filename, MonoImageOpenStatus *status, gboolean is_satellite) { if (!bundles) return NULL; MonoImage *image = NULL; char *name = is_satellite ? g_strdup (filename) : g_path_get_basename (filename); for (int i = 0; !image && bundles [i]; ++i) { if (strcmp (bundles [i]->name, name) == 0) { // Since bundled images don't exist on disk, don't give them a legit filename image = mono_image_open_from_data_internal (alc, (char*)bundles [i]->data, bundles [i]->size, FALSE, status, FALSE, name, NULL); break; } } g_free (name); return image; } static MonoImage * open_from_satellite_bundle (MonoAssemblyLoadContext *alc, const char *filename, MonoImageOpenStatus *status, const char *culture) { if (!satellite_bundles) return NULL; MonoImage *image = NULL; char *name = g_strdup (filename); for (int i = 0; !image && satellite_bundles [i]; ++i) { if (strcmp (satellite_bundles [i]->name, name) == 0 && strcmp (satellite_bundles [i]->culture, culture) == 0) { char *bundle_name = g_strconcat (culture, "/", name, (const char *)NULL); image = mono_image_open_from_data_internal (alc, (char *)satellite_bundles [i]->data, satellite_bundles [i]->size, FALSE, status, FALSE, bundle_name, NULL); g_free (bundle_name); break; } } g_free (name); return image; } /** * mono_assembly_open_from_bundle: * \param filename Filename requested * \param status return status code * * This routine tries to open the assembly specified by \p filename from the * defined bundles, if found, returns the MonoImage for it, if not found * returns NULL */ MonoImage * mono_assembly_open_from_bundle (MonoAssemblyLoadContext *alc, const char *filename, MonoImageOpenStatus *status, const char *culture) { /* * we do a very simple search for bundled assemblies: it's not a general * purpose assembly loading mechanism. */ MonoImage *image = NULL; gboolean is_satellite = culture && culture [0] != 0; if (is_satellite) image = open_from_satellite_bundle (alc, filename, status, culture); else image = open_from_bundle_internal (alc, filename, status, FALSE); if (image) { mono_image_addref (image); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly Loader loaded assembly from bundle: '%s'.", filename); } return image; } /** * mono_assembly_open_full: * \param filename the file to load * \param status return status code * \param refonly Whether this assembly is being opened in "reflection-only" mode. * * This loads an assembly from the specified \p filename. 
The \p filename allows * a local URL (starting with a \c file:// prefix). If a file prefix is used, the * filename is interpreted as a URL, and the filename is URL-decoded. Otherwise the file * is treated as a local path. * * First, an attempt is made to load the assembly from the bundled executable (for those * deployments that have been done with the \c mkbundle tool or for scenarios where the * assembly has been registered as an embedded assembly). If this is not the case, then * the assembly is loaded from disk using `api:mono_image_open_full`. * * If \p refonly is set to true, then the assembly is loaded purely for inspection with * the \c System.Reflection API. * * \returns NULL on error, with the \p status set to an error code, or a pointer * to the assembly. */ MonoAssembly * mono_assembly_open_full (const char *filename, MonoImageOpenStatus *status, gboolean refonly) { if (refonly) { if (status) *status = MONO_IMAGE_IMAGE_INVALID; return NULL; } MonoAssembly *res; MONO_ENTER_GC_UNSAFE; MonoAssemblyOpenRequest req; mono_assembly_request_prepare_open (&req, mono_alc_get_default ()); res = mono_assembly_request_open (filename, &req, status); MONO_EXIT_GC_UNSAFE; return res; } MonoAssembly * mono_assembly_request_open (const char *filename, const MonoAssemblyOpenRequest *open_req, MonoImageOpenStatus *status) { MonoImage *image; MonoAssembly *ass; MonoImageOpenStatus def_status; gchar *fname; gboolean loaded_from_bundle; MonoAssemblyLoadRequest load_req; /* we will be overwriting the load request's asmctx.*/ memcpy (&load_req, &open_req->request, sizeof (load_req)); g_return_val_if_fail (filename != NULL, NULL); if (!status) status = &def_status; *status = MONO_IMAGE_OK; fname = g_strdup (filename); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly Loader probing location: '%s'.", fname); image = NULL; // If VM built with mkbundle loaded_from_bundle = FALSE; if (bundles != NULL || satellite_bundles != NULL) { /* We don't know the culture of the filename we're loading here, so this call is not culture aware. */ image = mono_assembly_open_from_bundle (load_req.alc, fname, status, NULL); loaded_from_bundle = image != NULL; } if (!image) image = mono_image_open_a_lot (load_req.alc, fname, status); if (!image){ if (*status == MONO_IMAGE_OK) *status = MONO_IMAGE_ERROR_ERRNO; g_free (fname); return NULL; } if (image->assembly) { /* We want to return the MonoAssembly that's already loaded, * but if we're using the strict assembly loader, we also need * to check that the previously loaded assembly matches the * predicate. It could be that we previously loaded a * different version that happens to have the filename that * we're currently probing. 
*/ if (mono_loader_get_strict_assembly_name_check () && load_req.predicate && !load_req.predicate (image->assembly, load_req.predicate_ud)) { mono_image_close (image); g_free (fname); return NULL; } else { /* Already loaded by another appdomain */ mono_assembly_invoke_load_hook_internal (load_req.alc, image->assembly); mono_image_close (image); g_free (fname); return image->assembly; } } ass = mono_assembly_request_load_from (image, fname, &load_req, status); if (ass) { if (!loaded_from_bundle) mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly Loader loaded assembly from location: '%s'.", filename); } /* Clear the reference added by mono_image_open */ mono_image_close (image); g_free (fname); return ass; } static void free_assembly_name_item (gpointer val, gpointer user_data) { mono_assembly_name_free_internal ((MonoAssemblyName *)val); g_free (val); } /** * mono_assembly_load_friends: * \param ass an assembly * * Load the list of friend assemblies that are allowed to access * the assembly's internal types and members. They are stored as assembly * names in custom attributes. * * This is an internal method; we need it because when we load mscorlib * we do not have the internals visible cattr loaded yet, * so we need to load these after we initialize the runtime. * * LOCKING: Acquires the assemblies lock plus the loader lock. */ void mono_assembly_load_friends (MonoAssembly* ass) { ERROR_DECL (error); int i; MonoCustomAttrInfo* attrs; if (ass->friend_assembly_names_inited) return; attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, error); mono_error_assert_ok (error); if (!attrs) { mono_assemblies_lock (); ass->friend_assembly_names_inited = TRUE; mono_assemblies_unlock (); return; } mono_assemblies_lock (); if (ass->friend_assembly_names_inited) { mono_assemblies_unlock (); return; } mono_assemblies_unlock (); GSList *visible_list = NULL; GSList *ignores_list = NULL; /* * We build the list outside the assemblies lock; the worst that can happen * is that we'll need to free the allocated list. */ for (i = 0; i < attrs->num_attrs; ++i) { MonoCustomAttrEntry *attr = &attrs->attrs [i]; MonoAssemblyName *aname; const gchar *data; uint32_t data_length; gchar *data_with_terminator; /* Do some sanity checking */ if (!attr->ctor) continue; gboolean has_visible = FALSE; gboolean has_ignores = FALSE; has_visible = attr->ctor->klass == mono_class_try_get_internals_visible_class (); /* IgnoresAccessChecksToAttribute is dynamically generated, so it's not necessarily in CoreLib */ /* FIXME: should we only check for it in dynamic modules?
*/ has_ignores = (!strcmp ("IgnoresAccessChecksToAttribute", m_class_get_name (attr->ctor->klass)) && !strcmp ("System.Runtime.CompilerServices", m_class_get_name_space (attr->ctor->klass))); if (!has_visible && !has_ignores) continue; if (attr->data_size < 4) continue; data = (const char*)attr->data; /* 0xFF means null string, see custom attr format */ if (data [0] != 1 || data [1] != 0 || (data [2] & 0xFF) == 0xFF) continue; data_length = mono_metadata_decode_value (data + 2, &data); data_with_terminator = (char *)g_memdup (data, data_length + 1); data_with_terminator[data_length] = 0; aname = g_new0 (MonoAssemblyName, 1); /*g_print ("friend ass: %s\n", data);*/ if (mono_assembly_name_parse_full (data_with_terminator, aname, TRUE, NULL, NULL)) { if (has_visible) visible_list = g_slist_prepend (visible_list, aname); if (has_ignores) ignores_list = g_slist_prepend (ignores_list, aname); } else { g_free (aname); } g_free (data_with_terminator); } mono_custom_attrs_free (attrs); mono_assemblies_lock (); if (ass->friend_assembly_names_inited) { mono_assemblies_unlock (); g_slist_foreach (visible_list, free_assembly_name_item, NULL); g_slist_free (visible_list); g_slist_foreach (ignores_list, free_assembly_name_item, NULL); g_slist_free (ignores_list); return; } ass->friend_assembly_names = visible_list; ass->ignores_checks_assembly_names = ignores_list; /* Because of the double checked locking pattern above */ mono_memory_barrier (); ass->friend_assembly_names_inited = TRUE; mono_assemblies_unlock (); } struct HasReferenceAssemblyAttributeIterData { gboolean has_attr; }; static gboolean has_reference_assembly_attribute_iterator (MonoImage *image, guint32 typeref_scope_token, const char *nspace, const char *name, guint32 method_token, gpointer user_data) { gboolean stop_scanning = FALSE; struct HasReferenceAssemblyAttributeIterData *iter_data = (struct HasReferenceAssemblyAttributeIterData*)user_data; if (!strcmp (name, "ReferenceAssemblyAttribute") && !strcmp (nspace, "System.Runtime.CompilerServices")) { /* Note we don't check the assembly name, same as coreCLR. */ iter_data->has_attr = TRUE; stop_scanning = TRUE; } return stop_scanning; } /** * mono_assembly_has_reference_assembly_attribute: * \param assembly a MonoAssembly * \param error set on error. * * \returns TRUE if \p assembly has the \c System.Runtime.CompilerServices.ReferenceAssemblyAttribute set. * On error returns FALSE and sets \p error. */ gboolean mono_assembly_has_reference_assembly_attribute (MonoAssembly *assembly, MonoError *error) { g_assert (assembly && assembly->image); /* .NET Framework appears to ignore the attribute on dynamic * assemblies, so don't call this function for dynamic assemblies. */ g_assert (!image_is_dynamic (assembly->image)); error_init (error); /* * This might be called during assembly loading, so do everything using the low-level * metadata APIs. */ struct HasReferenceAssemblyAttributeIterData iter_data = { FALSE }; mono_assembly_metadata_foreach_custom_attr (assembly, &has_reference_assembly_attribute_iterator, &iter_data); return iter_data.has_attr; } /** * mono_assembly_open: * \param filename Opens the assembly pointed out by this name * \param status return status code * * This loads an assembly from the specified \p filename. The \p filename allows * a local URL (starting with a \c file:// prefix). If a file prefix is used, the * filename is interpreted as a URL, and the filename is URL-decoded. Otherwise the file * is treated as a local path. 
* * First, an attempt is made to load the assembly from the bundled executable (for those * deployments that have been done with the \c mkbundle tool or for scenarios where the * assembly has been registered as an embedded assembly). If this is not the case, then * the assembly is loaded from disk using `api:mono_image_open_full`. * * \returns a pointer to the \c MonoAssembly if \p filename contains a valid * assembly or NULL on error. Details about the error are stored in the * \p status variable. */ MonoAssembly * mono_assembly_open (const char *filename, MonoImageOpenStatus *status) { MonoAssembly *res; MONO_ENTER_GC_UNSAFE; MonoAssemblyOpenRequest req; mono_assembly_request_prepare_open (&req, mono_alc_get_default ()); res = mono_assembly_request_open (filename, &req, status); MONO_EXIT_GC_UNSAFE; return res; } /** * mono_assembly_load_from_full: * \param image Image to load the assembly from * \param fname assembly name to associate with the assembly * \param status returns the status condition * \param refonly Whether this assembly is being opened in "reflection-only" mode. * * If the provided \p image has an assembly reference, it will process the given * image as an assembly with the given name. * * Most likely you want to use the `api:mono_assembly_load_full` method instead. * * Returns: A valid pointer to a \c MonoAssembly* on success and the \p status will be * set to \c MONO_IMAGE_OK; or NULL on error. * * If there is an error loading the assembly the \p status will indicate the * reason with \p status being set to \c MONO_IMAGE_INVALID if the * image did not contain an assembly reference table. */ MonoAssembly * mono_assembly_load_from_full (MonoImage *image, const char*fname, MonoImageOpenStatus *status, gboolean refonly) { if (refonly) { if (status) *status = MONO_IMAGE_IMAGE_INVALID; return NULL; } MonoAssembly *res; MONO_ENTER_GC_UNSAFE; MonoAssemblyLoadRequest req; MonoImageOpenStatus def_status; if (!status) status = &def_status; mono_assembly_request_prepare_load (&req, mono_alc_get_default ()); res = mono_assembly_request_load_from (image, fname, &req, status); MONO_EXIT_GC_UNSAFE; return res; } MonoAssembly * mono_assembly_request_load_from (MonoImage *image, const char *fname, const MonoAssemblyLoadRequest *req, MonoImageOpenStatus *status) { MonoAssemblyCandidatePredicate predicate; gpointer user_data; MonoAssembly *ass, *ass2; char *base_dir; g_assert (status != NULL); predicate = req->predicate; user_data = req->predicate_ud; if (!table_info_get_rows (&image->tables [MONO_TABLE_ASSEMBLY])) { /* 'image' doesn't have a manifest -- maybe someone is trying to Assembly.Load a .netmodule */ *status = MONO_IMAGE_IMAGE_INVALID; return NULL; } #if defined (HOST_WIN32) { gchar *tmp_fn; int i; tmp_fn = g_strdup (fname); for (i = strlen (tmp_fn) - 1; i >= 0; i--) { if (tmp_fn [i] == '/') tmp_fn [i] = '\\'; } base_dir = absolute_dir (tmp_fn); g_free (tmp_fn); } #else base_dir = absolute_dir (fname); #endif /* * Create assembly struct, and enter it into the assembly cache */ ass = g_new0 (MonoAssembly, 1); ass->basedir = base_dir; ass->context.no_managed_load_event = req->no_managed_load_event; ass->image = image; MONO_PROFILER_RAISE (assembly_loading, (ass)); mono_assembly_fill_assembly_name (image, &ass->aname); if (mono_defaults.corlib && strcmp (ass->aname.name, MONO_ASSEMBLY_CORLIB_NAME) == 0) { // MS.NET doesn't support loading other mscorlibs g_free (ass); g_free (base_dir); mono_image_addref (mono_defaults.corlib); *status = MONO_IMAGE_OK; return 
mono_defaults.corlib->assembly; } /* Add a non-temporary reference because of ass->image */ mono_image_addref (image); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Image addref %s[%p] (%s) -> %s[%p]: %d", ass->aname.name, ass, mono_alc_is_default (mono_image_get_alc (image)) ? "default ALC" : "custom ALC", image->name, image, image->ref_count); /* * The load hooks might take locks so we can't call them while holding the * assemblies lock. */ if (ass->aname.name && !req->no_invoke_search_hook) { /* FIXME: I think individual context should probably also look for an existing MonoAssembly here, we just need to pass the asmctx to the search hook so that it does a filename match (I guess?) */ ass2 = mono_assembly_invoke_search_hook_internal (req->alc, NULL, &ass->aname, FALSE); if (ass2) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Image %s[%p] reusing existing assembly %s[%p]", ass->aname.name, ass, ass2->aname.name, ass2); g_free (ass); g_free (base_dir); mono_image_close (image); *status = MONO_IMAGE_OK; return ass2; } } /* We need to check for ReferenceAssemblyAttribute before we * mark the assembly as loaded and before we fire the load * hook. Otherwise mono_domain_fire_assembly_load () in * appdomain.c will cache a mapping from the assembly name to * this image and we won't be able to look for a different * candidate. */ { ERROR_DECL (refasm_error); if (mono_assembly_has_reference_assembly_attribute (ass, refasm_error)) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Image for assembly '%s' (%s) has ReferenceAssemblyAttribute, skipping", ass->aname.name, image->name); g_free (ass); g_free (base_dir); mono_image_close (image); *status = MONO_IMAGE_IMAGE_INVALID; return NULL; } mono_error_cleanup (refasm_error); } if (predicate && !predicate (ass, user_data)) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Predicate returned FALSE, skipping '%s' (%s)\n", ass->aname.name, image->name); g_free (ass); g_free (base_dir); mono_image_close (image); *status = MONO_IMAGE_IMAGE_INVALID; return NULL; } mono_assemblies_lock (); /* If an assembly is loaded into an individual context, always return a * new MonoAssembly, even if another assembly with the same name has * already been loaded. */ if (image->assembly && !req->no_invoke_search_hook) { /* * This means another thread has already loaded the assembly, but not yet * called the load hooks so the search hook can't find the assembly. */ mono_assemblies_unlock (); ass2 = image->assembly; g_free (ass); g_free (base_dir); mono_image_close (image); *status = MONO_IMAGE_OK; return ass2; } mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Prepared to set up assembly '%s' (%s)", ass->aname.name, image->name); /* If asmctx is INDIVIDUAL, image->assembly might not be NULL, so don't * overwrite it. */ if (image->assembly == NULL) image->assembly = ass; loaded_assemblies = g_list_prepend (loaded_assemblies, ass); loaded_assembly_count++; mono_assemblies_unlock (); #ifdef HOST_WIN32 if (m_image_is_module_handle (image)) mono_image_fixup_vtable (image); #endif mono_assembly_invoke_load_hook_internal (req->alc, ass); MONO_PROFILER_RAISE (assembly_loaded, (ass)); return ass; } /** * mono_assembly_load_from: * \param image Image to load the assembly from * \param fname assembly name to associate with the assembly * \param status return status code * * If the provided \p image has an assembly reference, it will process the given * image as an assembly with the given name. 
* * Most likely you want to use the `api:mono_assembly_load_full` method instead. * * This is equivalent to calling `api:mono_assembly_load_from_full` with the * \p refonly parameter set to FALSE. * \returns A valid pointer to a \c MonoAssembly* on success and then \p status will be * set to \c MONO_IMAGE_OK; or NULL on error. * * If there is an error loading the assembly the \p status will indicate the * reason with \p status being set to \c MONO_IMAGE_INVALID if the * image did not contain an assembly reference table. */ MonoAssembly * mono_assembly_load_from (MonoImage *image, const char *fname, MonoImageOpenStatus *status) { MonoAssembly *res; MONO_ENTER_GC_UNSAFE; MonoAssemblyLoadRequest req; MonoImageOpenStatus def_status; if (!status) status = &def_status; mono_assembly_request_prepare_load (&req, mono_alc_get_default ()); res = mono_assembly_request_load_from (image, fname, &req, status); MONO_EXIT_GC_UNSAFE; return res; } /** * mono_assembly_name_free_internal: * \param aname assembly name to free * * Frees the provided assembly name object. * (it does not free the object itself, only the name members). */ void mono_assembly_name_free_internal (MonoAssemblyName *aname) { MONO_REQ_GC_UNSAFE_MODE; if (aname == NULL) return; g_free ((void *) aname->name); g_free ((void *) aname->culture); g_free ((void *) aname->hash_value); g_free ((guint8*) aname->public_key); } static gboolean parse_public_key (const gchar *key, gchar** pubkey, gboolean *is_ecma) { const gchar *pkey; gchar header [16], val, *arr, *endp; gint i, j, offset, bitlen, keylen, pkeylen; //both pubkey and is_ecma are required arguments g_assert (pubkey && is_ecma); keylen = strlen (key) >> 1; if (keylen < 1) return FALSE; /* allow the ECMA standard key */ if (strcmp (key, "00000000000000000400000000000000") == 0) { *pubkey = NULL; *is_ecma = TRUE; return TRUE; } *is_ecma = FALSE; val = g_ascii_xdigit_value (key [0]) << 4; val |= g_ascii_xdigit_value (key [1]); switch (val) { case 0x00: if (keylen < 13) return FALSE; val = g_ascii_xdigit_value (key [24]); val |= g_ascii_xdigit_value (key [25]); if (val != 0x06) return FALSE; pkey = key + 24; break; case 0x06: pkey = key; break; default: return FALSE; } /* We need the first 16 bytes * to check whether this key is valid or not */ pkeylen = strlen (pkey) >> 1; if (pkeylen < 16) return FALSE; for (i = 0, j = 0; i < 16; i++) { header [i] = g_ascii_xdigit_value (pkey [j++]) << 4; header [i] |= g_ascii_xdigit_value (pkey [j++]); } if (header [0] != 0x06 || /* PUBLICKEYBLOB (0x06) */ header [1] != 0x02 || /* Version (0x02) */ header [2] != 0x00 || /* Reserved (word) */ header [3] != 0x00 || (guint)(read32 (header + 8)) != 0x31415352) /* DWORD magic = RSA1 */ return FALSE; /* Based on this length, we _should_ be able to know if the length is right */ bitlen = read32 (header + 12) >> 3; if ((bitlen + 16 + 4) != pkeylen) return FALSE; arr = (gchar *)g_malloc (keylen + 4); /* Encode the size of the blob */ mono_metadata_encode_value (keylen, &arr[0], &endp); offset = (gint)(endp-arr); for (i = offset, j = 0; i < keylen + offset; i++) { arr [i] = g_ascii_xdigit_value (key [j++]) << 4; arr [i] |= g_ascii_xdigit_value (key [j++]); } *pubkey = arr; return TRUE; } static gboolean build_assembly_name (const char *name, const char *version, const char *culture, const char *token, const char *key, guint32 flags, guint32 arch, MonoAssemblyName *aname, gboolean save_public_key) { gint len; gint version_parts; gchar *pkeyptr, *encoded, tok [8]; memset (aname, 0, sizeof (MonoAssemblyName)); if
(version) { int parts [4]; int i; int part_len; parts [2] = -1; parts [3] = -1; const char *s = version; version_parts = 0; for (i = 0; i < 4; ++i) { int n = sscanf (s, "%u%n", &parts [i], &part_len); if (n != 1) return FALSE; if (parts [i] < 0 || parts [i] > 65535) return FALSE; if (i < 2 && parts [i] == 65535) return FALSE; version_parts ++; s += part_len; if (s [0] == '\0') break; if (i < 3) { if (s [0] != '.') return FALSE; s ++; } } if (s [0] != '\0') return FALSE; if (version_parts < 2 || version_parts > 4) return FALSE; aname->major = parts [0]; aname->minor = parts [1]; if (version_parts >= 3) aname->build = parts [2]; else aname->build = -1; if (version_parts == 4) aname->revision = parts [3]; else aname->revision = -1; } aname->flags = flags; aname->arch = arch; aname->name = g_strdup (name); if (culture) { if (g_ascii_strcasecmp (culture, "neutral") == 0) aname->culture = g_strdup (""); else aname->culture = g_strdup (culture); } if (token && strncmp (token, "null", 4) != 0) { char *lower; /* the constant includes the ending NULL, hence the -1 */ if (strlen (token) != (MONO_PUBLIC_KEY_TOKEN_LENGTH - 1)) { mono_assembly_name_free_internal (aname); return FALSE; } lower = g_ascii_strdown (token, MONO_PUBLIC_KEY_TOKEN_LENGTH); g_strlcpy ((char*)aname->public_key_token, lower, MONO_PUBLIC_KEY_TOKEN_LENGTH); g_free (lower); } if (key) { gboolean is_ecma = FALSE; gchar *pkey = NULL; if (strcmp (key, "null") == 0 || !parse_public_key (key, &pkey, &is_ecma)) { mono_assembly_name_free_internal (aname); return FALSE; } if (is_ecma) { g_assert (pkey == NULL); aname->public_key = NULL; g_strlcpy ((gchar*)aname->public_key_token, "b77a5c561934e089", MONO_PUBLIC_KEY_TOKEN_LENGTH); return TRUE; } len = mono_metadata_decode_blob_size ((const gchar *) pkey, (const gchar **) &pkeyptr); // We also need to generate the key token mono_digest_get_public_token ((guchar*) tok, (guint8*) pkeyptr, len); encoded = encode_public_tok ((guchar*) tok, 8); g_strlcpy ((gchar*)aname->public_key_token, encoded, MONO_PUBLIC_KEY_TOKEN_LENGTH); g_free (encoded); if (save_public_key) aname->public_key = (guint8*) pkey; else g_free (pkey); } return TRUE; } static gboolean split_key_value (const gchar *pair, gchar **key, guint32 *keylen, gchar **value) { char *eqsign = (char*)strchr (pair, '='); if (!eqsign) { *key = NULL; *keylen = 0; *value = NULL; return FALSE; } *key = (gchar*)pair; *keylen = eqsign - *key; while (*keylen > 0 && g_ascii_isspace ((*key) [*keylen - 1])) (*keylen)--; *value = g_strstrip (eqsign + 1); return TRUE; } gboolean mono_assembly_name_parse_full (const char *name, MonoAssemblyName *aname, gboolean save_public_key, gboolean *is_version_defined, gboolean *is_token_defined) { gchar *dllname; gchar *dllname_uq; gchar *version = NULL; gchar *version_uq; gchar *culture = NULL; gchar *culture_uq; gchar *token = NULL; gchar *token_uq; gchar *key = NULL; gchar *key_uq; gchar *retargetable = NULL; gchar *retargetable_uq; gchar *procarch = NULL; gchar *procarch_uq; gboolean res; gchar *value, *part_name; guint32 part_name_len; gchar **parts; gchar **tmp; gboolean version_defined; gboolean token_defined; guint32 flags = 0; guint32 arch = MONO_PROCESSOR_ARCHITECTURE_NONE; if (!is_version_defined) is_version_defined = &version_defined; *is_version_defined = FALSE; if (!is_token_defined) is_token_defined = &token_defined; *is_token_defined = FALSE; parts = tmp = g_strsplit (name, ",", 6); if (!tmp || !*tmp) { goto cleanup_and_fail; } dllname = g_strstrip (*tmp); // Simple name cannot be empty if (!*dllname) 
{ goto cleanup_and_fail; } // Characters /, :, and \ not allowed in simple names while (*dllname) { gchar tmp_char = *dllname; if (tmp_char == '/' || tmp_char == ':' || tmp_char == '\\') goto cleanup_and_fail; dllname++; } dllname = *tmp; tmp++; while (*tmp) { if (!split_key_value (g_strstrip (*tmp), &part_name, &part_name_len, &value)) goto cleanup_and_fail; if (part_name_len == 7 && !g_ascii_strncasecmp (part_name, "Version", part_name_len)) { *is_version_defined = TRUE; if (version != NULL || strlen (value) == 0) { goto cleanup_and_fail; } version = value; tmp++; continue; } if (part_name_len == 7 && !g_ascii_strncasecmp (part_name, "Culture", part_name_len)) { if (culture != NULL || strlen (value) == 0) { goto cleanup_and_fail; } culture = value; tmp++; continue; } if (part_name_len == 14 && !g_ascii_strncasecmp (part_name, "PublicKeyToken", part_name_len)) { *is_token_defined = TRUE; if (token != NULL || key != NULL || strlen (value) == 0) { goto cleanup_and_fail; } token = value; tmp++; continue; } if (part_name_len == 9 && !g_ascii_strncasecmp (part_name, "PublicKey", part_name_len)) { if (token != NULL || key != NULL || strlen (value) == 0) { goto cleanup_and_fail; } key = value; tmp++; continue; } if (part_name_len == 12 && !g_ascii_strncasecmp (part_name, "Retargetable", part_name_len)) { if (retargetable != NULL) { goto cleanup_and_fail; } retargetable = value; retargetable_uq = unquote (retargetable); if (retargetable_uq != NULL) retargetable = retargetable_uq; if (!g_ascii_strcasecmp (retargetable, "yes")) { flags |= ASSEMBLYREF_RETARGETABLE_FLAG; } else if (g_ascii_strcasecmp (retargetable, "no")) { g_free (retargetable_uq); goto cleanup_and_fail; } g_free (retargetable_uq); tmp++; continue; } if (part_name_len == 21 && !g_ascii_strncasecmp (part_name, "ProcessorArchitecture", part_name_len)) { if (procarch != NULL) { goto cleanup_and_fail; } procarch = value; procarch_uq = unquote (procarch); if (procarch_uq != NULL) procarch = procarch_uq; if (!g_ascii_strcasecmp (procarch, "MSIL")) arch = MONO_PROCESSOR_ARCHITECTURE_MSIL; else if (!g_ascii_strcasecmp (procarch, "X86")) arch = MONO_PROCESSOR_ARCHITECTURE_X86; else if (!g_ascii_strcasecmp (procarch, "IA64")) arch = MONO_PROCESSOR_ARCHITECTURE_IA64; else if (!g_ascii_strcasecmp (procarch, "AMD64")) arch = MONO_PROCESSOR_ARCHITECTURE_AMD64; else if (!g_ascii_strcasecmp (procarch, "ARM")) arch = MONO_PROCESSOR_ARCHITECTURE_ARM; else { g_free (procarch_uq); goto cleanup_and_fail; } flags |= arch << 4; g_free (procarch_uq); tmp++; continue; } // compat: If we got here, the attribute name is unknown to us. Ignore it. tmp++; } /* if retargetable flag is set, then we must have a fully qualified name */ if (retargetable != NULL && (version == NULL || culture == NULL || (key == NULL && token == NULL))) { goto cleanup_and_fail; } dllname_uq = unquote (dllname); version_uq = unquote (version); culture_uq = unquote (culture); token_uq = unquote (token); key_uq = unquote (key); res = build_assembly_name ( dllname_uq == NULL ? dllname : dllname_uq, version_uq == NULL ? version : version_uq, culture_uq == NULL ? culture : culture_uq, token_uq == NULL ? token : token_uq, key_uq == NULL ? 
key : key_uq, flags, arch, aname, save_public_key); g_free (dllname_uq); g_free (version_uq); g_free (culture_uq); g_free (token_uq); g_free (key_uq); g_strfreev (parts); return res; cleanup_and_fail: g_strfreev (parts); return FALSE; } static char* unquote (const char *str) { gint slen; const char *end; if (str == NULL) return NULL; slen = strlen (str); if (slen < 2) return NULL; if (*str != '\'' && *str != '\"') return NULL; end = str + slen - 1; if (*str != *end) return NULL; return g_strndup (str + 1, slen - 2); } /** * mono_assembly_name_parse: * \param name name to parse * \param aname the destination assembly name * * Parses an assembly qualified type name and assigns the name, * version, culture and token to the provided assembly name object. * * \returns TRUE if the name could be parsed. */ gboolean mono_assembly_name_parse (const char *name, MonoAssemblyName *aname) { return mono_assembly_name_parse_full (name, aname, FALSE, NULL, NULL); } /** * mono_assembly_name_new: * \param name name to parse * * Allocate a new \c MonoAssemblyName and fill its values from the * passed \p name. * * \returns a newly allocated structure or NULL if there was any failure. */ MonoAssemblyName* mono_assembly_name_new (const char *name) { MonoAssemblyName *result = NULL; MONO_ENTER_GC_UNSAFE; MonoAssemblyName *aname = g_new0 (MonoAssemblyName, 1); if (mono_assembly_name_parse (name, aname)) result = aname; else g_free (aname); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_assembly_name_get_name: */ const char* mono_assembly_name_get_name (MonoAssemblyName *aname) { const char *result = NULL; MONO_ENTER_GC_UNSAFE; result = aname->name; MONO_EXIT_GC_UNSAFE; return result; } /** * mono_assembly_name_get_culture: */ const char* mono_assembly_name_get_culture (MonoAssemblyName *aname) { const char *result = NULL; MONO_ENTER_GC_UNSAFE; result = aname->culture; MONO_EXIT_GC_UNSAFE; return result; } /** * mono_assembly_name_get_pubkeytoken: */ mono_byte* mono_assembly_name_get_pubkeytoken (MonoAssemblyName *aname) { if (aname->public_key_token [0]) return aname->public_key_token; return NULL; } /** * mono_assembly_name_get_version: */ uint16_t mono_assembly_name_get_version (MonoAssemblyName *aname, uint16_t *minor, uint16_t *build, uint16_t *revision) { if (minor) *minor = aname->minor; if (build) *build = aname->build; if (revision) *revision = aname->revision; return aname->major; } gboolean mono_assembly_name_culture_is_neutral (const MonoAssemblyName *aname) { return (!aname->culture || aname->culture [0] == 0); } /** * mono_assembly_load_with_partial_name: * \param name an assembly name that is then parsed by `api:mono_assembly_name_parse`. * \param status return status code * * Loads a \c MonoAssembly from a name. The name is parsed using `api:mono_assembly_name_parse`, * so it might contain a qualified type name, version, culture and token. * * This will load the assembly from the file whose name is derived from the assembly name * by appending the \c .dll extension. * * The assembly is loaded from either one of the extra Global Assembly Caches specified * by the extra GAC paths (specified by the \c MONO_GAC_PREFIX environment variable) or * if that fails from the GAC. * * \returns NULL on failure, or a pointer to a \c MonoAssembly on success. 
*/ MonoAssembly* mono_assembly_load_with_partial_name (const char *name, MonoImageOpenStatus *status) { MonoAssembly *result; MONO_ENTER_GC_UNSAFE; MonoImageOpenStatus def_status; if (!status) status = &def_status; result = mono_assembly_load_with_partial_name_internal (name, mono_alc_get_default (), status); MONO_EXIT_GC_UNSAFE; return result; } MonoAssembly* mono_assembly_load_with_partial_name_internal (const char *name, MonoAssemblyLoadContext *alc, MonoImageOpenStatus *status) { ERROR_DECL (error); MonoAssembly *res; MonoAssemblyName *aname, base_name; MonoAssemblyName mapped_aname; MONO_REQ_GC_UNSAFE_MODE; g_assert (status != NULL); memset (&base_name, 0, sizeof (MonoAssemblyName)); aname = &base_name; if (!mono_assembly_name_parse (name, aname)) return NULL; /* * If no specific version has been requested, make sure we load the * correct version for system assemblies. */ if ((aname->major | aname->minor | aname->build | aname->revision) == 0) aname = mono_assembly_remap_version (aname, &mapped_aname); res = mono_assembly_loaded_internal (alc, aname); if (res) { mono_assembly_name_free_internal (aname); return res; } res = invoke_assembly_preload_hook (alc, aname, assemblies_path); if (res) { mono_assembly_name_free_internal (aname); return res; } mono_assembly_name_free_internal (aname); if (!res) { res = mono_try_assembly_resolve (alc, name, NULL, error); if (!is_ok (error)) { mono_error_cleanup (error); if (*status == MONO_IMAGE_OK) *status = MONO_IMAGE_IMAGE_INVALID; } } return res; } MonoAssembly* mono_assembly_load_corlib (MonoImageOpenStatus *status) { MonoAssemblyName *aname; MonoAssemblyOpenRequest req; mono_assembly_request_prepare_open (&req, mono_alc_get_default ()); if (corlib) { /* g_print ("corlib already loaded\n"); */ return corlib; } aname = mono_assembly_name_new (MONO_ASSEMBLY_CORLIB_NAME); corlib = invoke_assembly_preload_hook (req.request.alc, aname, NULL); /* MonoCore preload hook should know how to find it */ /* FIXME: AOT compiler comes here without an installed hook. 
*/ if (!corlib) { if (assemblies_path) { // Custom assemblies path set via MONO_PATH or mono_set_assemblies_path char *corlib_name = g_strdup_printf ("%s.dll", MONO_ASSEMBLY_CORLIB_NAME); corlib = load_in_path (corlib_name, (const char**)assemblies_path, &req, status); } } if (!corlib) { /* Maybe its in a bundle */ char *corlib_name = g_strdup_printf ("%s.dll", MONO_ASSEMBLY_CORLIB_NAME); corlib = mono_assembly_request_open (corlib_name, &req, status); } g_assert (corlib); return corlib; } gboolean mono_assembly_candidate_predicate_sn_same_name (MonoAssembly *candidate, gpointer ud) { MonoAssemblyName *wanted_name = (MonoAssemblyName*)ud; MonoAssemblyName *candidate_name = &candidate->aname; g_assert (wanted_name != NULL); g_assert (candidate_name != NULL); if (mono_trace_is_traced (G_LOG_LEVEL_INFO, MONO_TRACE_ASSEMBLY)) { char * s = mono_stringify_assembly_name (wanted_name); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Predicate: wanted = %s", s); g_free (s); s = mono_stringify_assembly_name (candidate_name); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Predicate: candidate = %s", s); g_free (s); } return mono_assembly_check_name_match (wanted_name, candidate_name); } gboolean mono_assembly_check_name_match (MonoAssemblyName *wanted_name, MonoAssemblyName *candidate_name) { gboolean result = mono_assembly_names_equal_flags (wanted_name, candidate_name, MONO_ANAME_EQ_IGNORE_VERSION | MONO_ANAME_EQ_IGNORE_PUBKEY); if (result && assembly_names_compare_versions (wanted_name, candidate_name, -1) > 0) result = FALSE; mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Predicate: candidate and wanted names %s", result ? "match, returning TRUE" : "don't match, returning FALSE"); return result; } MonoAssembly* mono_assembly_request_byname (MonoAssemblyName *aname, const MonoAssemblyByNameRequest *req, MonoImageOpenStatus *status) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Request to load %s in alc %p", aname->name, (gpointer)req->request.alc); MonoAssembly *result; if (status) *status = MONO_IMAGE_OK; result = netcore_load_reference (aname, req->request.alc, req->requesting_assembly, !req->no_postload_search); return result; } MonoAssembly * mono_assembly_load_full_alc (MonoGCHandle alc_gchandle, MonoAssemblyName *aname, const char *basedir, MonoImageOpenStatus *status) { MonoAssembly *res; MONO_ENTER_GC_UNSAFE; MonoAssemblyByNameRequest req; MonoAssemblyLoadContext *alc = mono_alc_from_gchandle (alc_gchandle); mono_assembly_request_prepare_byname (&req, alc); req.requesting_assembly = NULL; req.basedir = basedir; res = mono_assembly_request_byname (aname, &req, status); MONO_EXIT_GC_UNSAFE; return res; } /** * mono_assembly_load_full: * \param aname A MonoAssemblyName with the assembly name to load. * \param basedir A directory to look up the assembly at. * \param status a pointer to a MonoImageOpenStatus to return the status of the load operation * \param refonly Whether this assembly is being opened in "reflection-only" mode. * * Loads the assembly referenced by \p aname, if the value of \p basedir is not NULL, it * attempts to load the assembly from that directory before probing the standard locations. * * If the assembly is being opened in reflection-only mode (\p refonly set to TRUE) then no * assembly binding takes place. * * \returns the assembly referenced by \p aname loaded or NULL on error. On error the * value pointed by \p status is updated with an error code. 
*/ MonoAssembly* mono_assembly_load_full (MonoAssemblyName *aname, const char *basedir, MonoImageOpenStatus *status, gboolean refonly) { if (refonly) { if (status) *status = MONO_IMAGE_IMAGE_INVALID; return NULL; } MonoAssembly *res; MONO_ENTER_GC_UNSAFE; MonoAssemblyByNameRequest req; mono_assembly_request_prepare_byname (&req, mono_alc_get_default ()); req.requesting_assembly = NULL; req.basedir = basedir; res = mono_assembly_request_byname (aname, &req, status); MONO_EXIT_GC_UNSAFE; return res; } /** * mono_assembly_load: * \param aname A MonoAssemblyName with the assembly name to load. * \param basedir A directory to look up the assembly at. * \param status a pointer to a MonoImageOpenStatus to return the status of the load operation * * Loads the assembly referenced by \p aname, if the value of \p basedir is not NULL, it * attempts to load the assembly from that directory before probing the standard locations. * * \returns the assembly referenced by \p aname loaded or NULL on error. On error the * value pointed by \p status is updated with an error code. */ MonoAssembly* mono_assembly_load (MonoAssemblyName *aname, const char *basedir, MonoImageOpenStatus *status) { MonoAssemblyByNameRequest req; mono_assembly_request_prepare_byname (&req, mono_alc_get_default ()); req.requesting_assembly = NULL; req.basedir = basedir; return mono_assembly_request_byname (aname, &req, status); } /** * mono_assembly_loaded_full: * \param aname an assembly to look for. * \param refonly Whether this assembly is being opened in "reflection-only" mode. * * This is used to determine if the specified assembly has been loaded. * \returns NULL if the given \p aname assembly has not been loaded, or a pointer to * a \c MonoAssembly that matches the \c MonoAssemblyName specified. */ MonoAssembly* mono_assembly_loaded_full (MonoAssemblyName *aname, gboolean refonly) { if (refonly) return NULL; MonoAssemblyLoadContext *alc = mono_alc_get_default (); return mono_assembly_loaded_internal (alc, aname); } MonoAssembly * mono_assembly_loaded_internal (MonoAssemblyLoadContext *alc, MonoAssemblyName *aname) { MonoAssembly *res; MonoAssemblyName mapped_aname; aname = mono_assembly_remap_version (aname, &mapped_aname); res = mono_assembly_invoke_search_hook_internal (alc, NULL, aname, FALSE); return res; } /** * mono_assembly_loaded: * \param aname an assembly to look for. * * This is used to determine if the specified assembly has been loaded. * \returns NULL if the given \p aname assembly has not been loaded, or a pointer to * a \c MonoAssembly that matches the \c MonoAssemblyName specified. */ MonoAssembly* mono_assembly_loaded (MonoAssemblyName *aname) { MonoAssembly *res; MONO_ENTER_GC_UNSAFE; res = mono_assembly_loaded_internal (mono_alc_get_default (), aname); MONO_EXIT_GC_UNSAFE; return res; } void mono_assembly_release_gc_roots (MonoAssembly *assembly) { if (assembly == NULL || assembly == REFERENCE_MISSING) return; if (assembly_is_dynamic (assembly)) { int i; MonoDynamicImage *dynimg = (MonoDynamicImage *)assembly->image; for (i = 0; i < dynimg->image.module_count; ++i) mono_dynamic_image_release_gc_roots ((MonoDynamicImage *)dynimg->image.modules [i]); mono_dynamic_image_release_gc_roots (dynimg); } } /* * Returns whether mono_assembly_close_finish() must be called as * well. See comment for mono_image_close_except_pools() for why we * unload in two steps.
*/ gboolean mono_assembly_close_except_image_pools (MonoAssembly *assembly) { g_return_val_if_fail (assembly != NULL, FALSE); if (assembly == REFERENCE_MISSING) return FALSE; /* Might be 0 already */ if (mono_assembly_decref (assembly) > 0) return FALSE; MONO_PROFILER_RAISE (assembly_unloading, (assembly)); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Unloading assembly %s [%p].", assembly->aname.name, assembly); mono_debug_close_image (assembly->image); mono_assemblies_lock (); loaded_assemblies = g_list_remove (loaded_assemblies, assembly); loaded_assembly_count--; mono_assemblies_unlock (); assembly->image->assembly = NULL; if (!mono_image_close_except_pools (assembly->image)) assembly->image = NULL; g_slist_foreach (assembly->friend_assembly_names, free_assembly_name_item, NULL); g_slist_foreach (assembly->ignores_checks_assembly_names, free_assembly_name_item, NULL); g_slist_free (assembly->friend_assembly_names); g_slist_free (assembly->ignores_checks_assembly_names); g_free (assembly->basedir); MONO_PROFILER_RAISE (assembly_unloaded, (assembly)); return TRUE; } void mono_assembly_close_finish (MonoAssembly *assembly) { g_assert (assembly && assembly != REFERENCE_MISSING); if (assembly->image) mono_image_close_finish (assembly->image); if (assembly_is_dynamic (assembly)) { g_free ((char*)assembly->aname.culture); } else { g_free (assembly); } } /** * mono_assembly_close: * \param assembly the assembly to release. * * This method releases a reference to the \p assembly. The assembly is * only released when all the outstanding references to it are released. */ void mono_assembly_close (MonoAssembly *assembly) { if (mono_assembly_close_except_image_pools (assembly)) mono_assembly_close_finish (assembly); } /** * mono_assembly_load_module: */ MonoImage* mono_assembly_load_module (MonoAssembly *assembly, guint32 idx) { ERROR_DECL (error); MonoImage *result = mono_assembly_load_module_checked (assembly, idx, error); mono_error_assert_ok (error); return result; } MONO_API MonoImage* mono_assembly_load_module_checked (MonoAssembly *assembly, uint32_t idx, MonoError *error) { return mono_image_load_file_for_image_checked (assembly->image, idx, error); } /** * mono_assembly_foreach: * \param func function to invoke for each assembly loaded * \param user_data data passed to the callback * * Invokes the provided \p func callback for each assembly loaded into * the runtime. The first parameter passed to the callback is the * \c MonoAssembly*, and the second parameter is the \p user_data. * * This is done for all assemblies loaded in the runtime, not just * those loaded in the current application domain. */ void mono_assembly_foreach (GFunc func, gpointer user_data) { GList *copy; /* * We make a copy of the list to avoid calling the callback inside the * lock, which could lead to deadlocks. */ mono_assemblies_lock (); copy = g_list_copy (loaded_assemblies); mono_assemblies_unlock (); /* Iterate the snapshot, not the live list, which can change concurrently. */ g_list_foreach (copy, func, user_data); g_list_free (copy); } /** * mono_assemblies_cleanup: * * Free all resources used by this module.
*/ void mono_assemblies_cleanup (void) { } /* * Holds the assembly of the application, for * System.Diagnostics.Process::MainModule */ static MonoAssembly *main_assembly=NULL; /** * mono_assembly_set_main: */ void mono_assembly_set_main (MonoAssembly *assembly) { main_assembly = assembly; } /** * mono_assembly_get_main: * * Returns: the assembly for the application, the first assembly that is loaded by the VM */ MonoAssembly * mono_assembly_get_main (void) { return (main_assembly); } /** * mono_assembly_get_image: * \param assembly The assembly to retrieve the image from * * \returns the \c MonoImage associated with this assembly. */ MonoImage* mono_assembly_get_image (MonoAssembly *assembly) { MonoImage *res; MONO_ENTER_GC_UNSAFE; res = mono_assembly_get_image_internal (assembly); MONO_EXIT_GC_UNSAFE; return res; } MonoImage* mono_assembly_get_image_internal (MonoAssembly *assembly) { MONO_REQ_GC_UNSAFE_MODE; return assembly->image; } /** * mono_assembly_get_name: * \param assembly The assembly to retrieve the name from * * The returned name's lifetime is the same as \p assembly's. * * \returns the \c MonoAssemblyName associated with this assembly. */ MonoAssemblyName * mono_assembly_get_name (MonoAssembly *assembly) { MonoAssemblyName *res; MONO_ENTER_GC_UNSAFE; res = mono_assembly_get_name_internal (assembly); MONO_EXIT_GC_UNSAFE; return res; } MonoAssemblyName * mono_assembly_get_name_internal (MonoAssembly *assembly) { MONO_REQ_GC_UNSAFE_MODE; return &assembly->aname; } /** * mono_register_bundled_assemblies: */ void mono_register_bundled_assemblies (const MonoBundledAssembly **assemblies) { bundles = assemblies; } /** * mono_create_new_bundled_satellite_assembly: */ MonoBundledSatelliteAssembly * mono_create_new_bundled_satellite_assembly (const char *name, const char *culture, const unsigned char *data, unsigned int size) { MonoBundledSatelliteAssembly *satellite_assembly = g_new0 (MonoBundledSatelliteAssembly, 1); satellite_assembly->name = strdup (name); satellite_assembly->culture = strdup (culture); satellite_assembly->data = data; satellite_assembly->size = size; return satellite_assembly; } /** * mono_register_bundled_satellite_assemblies: */ void mono_register_bundled_satellite_assemblies (const MonoBundledSatelliteAssembly **assemblies) { satellite_bundles = assemblies; } /** * mono_assembly_is_jit_optimizer_disabled: * * \param assm the assembly * * Returns TRUE if the System.Diagnostics.DebuggableAttribute has the * DebuggingModes.DisableOptimizations bit set. * */ gboolean mono_assembly_is_jit_optimizer_disabled (MonoAssembly *ass) { ERROR_DECL (error); g_assert (ass); if (ass->jit_optimizer_disabled_inited) return ass->jit_optimizer_disabled; MonoClass *klass = mono_class_try_get_debuggable_attribute_class (); if (!klass) { /* Linked away */ ass->jit_optimizer_disabled = FALSE; mono_memory_barrier (); ass->jit_optimizer_disabled_inited = TRUE; return FALSE; } gboolean disable_opts = FALSE; MonoCustomAttrInfo* attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, error); mono_error_cleanup (error); /* FIXME don't swallow the error */ if (attrs) { for (int i = 0; i < attrs->num_attrs; ++i) { MonoCustomAttrEntry *attr = &attrs->attrs [i]; const gchar *p; MonoMethodSignature *sig; if (!attr->ctor || attr->ctor->klass != klass) continue; /* Decode the attribute. 
See reflection.c */ p = (const char*)attr->data; g_assert (read16 (p) == 0x0001); p += 2; // FIXME: Support named parameters sig = mono_method_signature_internal (attr->ctor); MonoClass *param_class; if (sig->param_count == 2 && sig->params [0]->type == MONO_TYPE_BOOLEAN && sig->params [1]->type == MONO_TYPE_BOOLEAN) { /* Two boolean arguments */ p ++; disable_opts = *p; } else if (sig->param_count == 1 && sig->params[0]->type == MONO_TYPE_VALUETYPE && (param_class = mono_class_from_mono_type_internal (sig->params[0])) != NULL && m_class_is_enumtype (param_class) && !strcmp (m_class_get_name (param_class), "DebuggingModes")) { /* System.Diagnostics.DebuggableAttribute+DebuggingModes */ int32_t flags = read32 (p); p += 4; disable_opts = (flags & 0x0100) != 0; } } mono_custom_attrs_free (attrs); } ass->jit_optimizer_disabled = disable_opts; mono_memory_barrier (); ass->jit_optimizer_disabled_inited = TRUE; return disable_opts; } guint32 mono_assembly_get_count (void) { return loaded_assembly_count; }
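/*
 * A minimal embedding sketch (illustrative only, not part of assembly.c):
 * it ties the public loader API above together -- parse a display name,
 * check whether that assembly is already loaded, open it from disk
 * otherwise, and enumerate everything that is loaded. The domain name,
 * assembly name and file path are hypothetical placeholders; name cleanup
 * is omitted for brevity.
 */
#include <stdio.h>
#include <mono/jit/jit.h>
#include <mono/metadata/assembly.h>

static void
print_assembly_name (gpointer data, gpointer user_data)
{
	MonoAssembly *assembly = (MonoAssembly *)data;
	/* The returned name shares the assembly's lifetime; do not free it. */
	printf ("loaded: %s\n", mono_assembly_name_get_name (mono_assembly_get_name (assembly)));
}

int
main (void)
{
	MonoDomain *domain = mono_jit_init ("sketch"); /* boot the runtime */

	/* Version/Culture/PublicKeyToken components are optional in the display name. */
	MonoAssemblyName *aname = mono_assembly_name_new ("MyLib, Version=1.0.0.0");
	if (aname && !mono_assembly_loaded (aname)) {
		MonoImageOpenStatus status;
		/* Registered bundles are consulted before the path is probed on disk. */
		MonoAssembly *assembly = mono_assembly_open ("MyLib.dll", &status);
		if (!assembly)
			fprintf (stderr, "open failed, status=%d\n", (int)status);
	}

	mono_assembly_foreach (print_assembly_name, NULL);

	mono_jit_cleanup (domain);
	return 0;
}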
/** * \file * Routines for loading assemblies. * * Author: * Miguel de Icaza ([email protected]) * * Copyright 2001-2003 Ximian, Inc (http://www.ximian.com) * Copyright 2004-2009 Novell, Inc (http://www.novell.com) * Copyright 2011 Xamarin, Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include <config.h> #include <stdio.h> #include <glib.h> #include <errno.h> #include <string.h> #include <stdlib.h> #include <mono/metadata/assembly.h> #include "assembly-internals.h" #include <mono/metadata/image.h> #include "image-internals.h" #include "object-internals.h" #include <mono/metadata/loader.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/custom-attrs-internals.h> #include <mono/metadata/metadata-internals.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/class-internals.h> #include <mono/metadata/domain-internals.h> #include <mono/metadata/exception-internals.h> #include <mono/metadata/reflection-internals.h> #include <mono/metadata/mono-endian.h> #include <mono/metadata/mono-debug.h> #include <mono/utils/mono-uri.h> #include <mono/metadata/mono-config.h> #include <mono/metadata/mono-config-internals.h> #include <mono/metadata/mono-config-dirs.h> #include <mono/utils/mono-digest.h> #include <mono/utils/mono-logger-internals.h> #include <mono/utils/mono-path.h> #include <mono/utils/mono-proclib.h> #include <mono/metadata/reflection.h> #include <mono/metadata/coree.h> #include <mono/metadata/cil-coff.h> #include <mono/utils/atomic.h> #include <mono/utils/mono-os-mutex.h> #include <mono/metadata/mono-private-unstable.h> #include <minipal/getexepath.h> #ifndef HOST_WIN32 #include <sys/types.h> #include <unistd.h> #include <sys/stat.h> #endif #ifdef HOST_DARWIN #include <mach-o/dyld.h> #endif /* the default search path is empty, the first slot is replaced with the computed value */ static char* default_path [] = { NULL, NULL, NULL }; /* Contains the list of directories to be searched for assemblies (MONO_PATH) */ static char **assemblies_path = NULL; /* keeps track of loaded assemblies, excluding dynamic ones */ static GList *loaded_assemblies = NULL; static guint32 loaded_assembly_count = 0; static MonoAssembly *corlib; static char* unquote (const char *str); // This protects loaded_assemblies static mono_mutex_t assemblies_mutex; static inline void mono_assemblies_lock (void) { mono_os_mutex_lock (&assemblies_mutex); } static inline void mono_assemblies_unlock (void) { mono_os_mutex_unlock (&assemblies_mutex); } /* If defined, points to the bundled assembly information */ static const MonoBundledAssembly **bundles; static const MonoBundledSatelliteAssembly **satellite_bundles; /* Class lazy loading functions */ static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute") static GENERATE_TRY_GET_CLASS_WITH_CACHE (internals_visible, "System.Runtime.CompilerServices", "InternalsVisibleToAttribute") static MonoAssembly* mono_assembly_invoke_search_hook_internal (MonoAssemblyLoadContext *alc, MonoAssembly *requesting, MonoAssemblyName *aname, gboolean postload); static MonoAssembly * invoke_assembly_preload_hook (MonoAssemblyLoadContext *alc, MonoAssemblyName *aname, gchar **apath); static gchar* encode_public_tok (const guchar *token, gint32 len) { const static gchar allowed [] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; gchar *res; int i; res = (gchar *)g_malloc (len * 2 + 1); for (i = 0; 
i < len; i++) { res [i * 2] = allowed [token [i] >> 4]; res [i * 2 + 1] = allowed [token [i] & 0xF]; } res [len * 2] = 0; return res; } /** * mono_public_tokens_are_equal: * \param pubt1 first public key token * \param pubt2 second public key token * * Compare two public key tokens and return TRUE if they are equal and FALSE * otherwise. */ gboolean mono_public_tokens_are_equal (const unsigned char *pubt1, const unsigned char *pubt2) { return g_ascii_strncasecmp ((const char*) pubt1, (const char*) pubt2, 16) == 0; } /** * mono_set_assemblies_path: * \param path list of paths that contain directories where Mono will look for assemblies * * Use this method to override the standard assembly lookup system and * override any assemblies coming from the GAC. This is the method * that supports the \c MONO_PATH variable. * * Notice that \c MONO_PATH and this method are really a very bad idea as * they prevent the GAC from working and prevent the standard * resolution mechanisms from working. Nonetheless, for some debugging * situations and bootstrapping setups, this is useful to have. */ void mono_set_assemblies_path (const char* path) { char **splitted, **dest; splitted = g_strsplit (path, G_SEARCHPATH_SEPARATOR_S, 1000); if (assemblies_path) g_strfreev (assemblies_path); assemblies_path = dest = splitted; while (*splitted) { char *tmp = *splitted; if (*tmp) *dest++ = mono_path_canonicalize (tmp); g_free (tmp); splitted++; } *dest = *splitted; if (g_hasenv ("MONO_DEBUG")) return; splitted = assemblies_path; while (*splitted) { if (**splitted && !g_file_test (*splitted, G_FILE_TEST_IS_DIR)) g_warning ("'%s' in MONO_PATH doesn't exist or has wrong permissions.", *splitted); splitted++; } } void mono_set_assemblies_path_direct (char **path) { g_strfreev (assemblies_path); assemblies_path = path; } static void check_path_env (void) { if (assemblies_path != NULL) return; char* path = g_getenv ("MONO_PATH"); if (!path) return; mono_set_assemblies_path(path); g_free (path); } static void mono_assembly_binding_info_free (MonoAssemblyBindingInfo *info) { if (!info) return; g_free (info->name); g_free (info->culture); } /** * mono_assembly_names_equal: * \param l first assembly * \param r second assembly. * * Compares two \c MonoAssemblyName instances and returns whether they are equal. * * This compares the names, the cultures, the release version and their * public tokens. * * \returns TRUE if both assembly names are equal. */ gboolean mono_assembly_names_equal (MonoAssemblyName *l, MonoAssemblyName *r) { return mono_assembly_names_equal_flags (l, r, MONO_ANAME_EQ_NONE); } /** * mono_assembly_names_equal_flags: * \param l first assembly name * \param r second assembly name * \param flags flags that affect what is compared. * * Compares two \c MonoAssemblyName instances and returns whether they are equal. * * This compares the simple names and cultures and optionally the versions and * public key tokens, depending on the \c flags. * * \returns TRUE if both assembly names are equal.
 */
gboolean
mono_assembly_names_equal_flags (MonoAssemblyName *l, MonoAssemblyName *r, MonoAssemblyNameEqFlags flags)
{
	g_assert (l != NULL);
	g_assert (r != NULL);

	if (!l->name || !r->name)
		return FALSE;

	if ((flags & MONO_ANAME_EQ_IGNORE_CASE) != 0 && g_strcasecmp (l->name, r->name))
		return FALSE;

	if ((flags & MONO_ANAME_EQ_IGNORE_CASE) == 0 && strcmp (l->name, r->name))
		return FALSE;

	if (l->culture && r->culture && strcmp (l->culture, r->culture))
		return FALSE;

	if ((l->major != r->major || l->minor != r->minor ||
	     l->build != r->build || l->revision != r->revision) &&
	    (flags & MONO_ANAME_EQ_IGNORE_VERSION) == 0)
		if (! ((l->major == 0 && l->minor == 0 && l->build == 0 && l->revision == 0) ||
		       (r->major == 0 && r->minor == 0 && r->build == 0 && r->revision == 0)))
			return FALSE;

	if (!l->public_key_token [0] || !r->public_key_token [0] || (flags & MONO_ANAME_EQ_IGNORE_PUBKEY) != 0)
		return TRUE;

	if (!mono_public_tokens_are_equal (l->public_key_token, r->public_key_token))
		return FALSE;

	return TRUE;
}

/**
 * assembly_names_compare_versions:
 * \param l left assembly name
 * \param r right assembly name
 * \param maxcomps how many version components to compare, or -1 to compare all.
 *
 * \returns a negative value if \p l is a lower version than \p r; a positive value
 * if \p r is a lower version than \p l, or zero if \p l and \p r are equal
 * versions (comparing up to \p maxcomps components).
 *
 * Components are \c major, \c minor, \c revision, and \c build. \p maxcomps 1 means just compare
 * majors. 2 means majors then minors. etc.
 */
static int
assembly_names_compare_versions (MonoAssemblyName *l, MonoAssemblyName *r, int maxcomps)
{
	int i = 0;
	if (maxcomps < 0) maxcomps = 4;
#define CMP(field) do {				\
		if (l-> field < r-> field && i < maxcomps) \
			return -1;			\
		if (l-> field > r-> field && i < maxcomps) \
			return 1;			\
	} while (0)
	CMP (major);
	++i;
	CMP (minor);
	++i;
	CMP (revision);
	++i;
	CMP (build);
#undef CMP
	return 0;
}

/**
 * mono_assembly_request_prepare_load:
 * \param req the load request to be initialized
 * \param alc the AssemblyLoadContext in netcore
 *
 * Initialize an assembly loader request. Its state will be reset and the AssemblyLoadContext will be prefilled with \p alc.
 */
void
mono_assembly_request_prepare_load (MonoAssemblyLoadRequest *req, MonoAssemblyLoadContext *alc)
{
	memset (req, 0, sizeof (MonoAssemblyLoadRequest));
	req->alc = alc;
}

/**
 * mono_assembly_request_prepare_open:
 * \param req the open request to be initialized
 * \param alc the AssemblyLoadContext in netcore
 *
 * Initialize an assembly loader request intended to be used for open operations. Its state will be reset and the AssemblyLoadContext will be prefilled with \p alc.
 */
void
mono_assembly_request_prepare_open (MonoAssemblyOpenRequest *req, MonoAssemblyLoadContext *alc)
{
	memset (req, 0, sizeof (MonoAssemblyOpenRequest));
	req->request.alc = alc;
}

/**
 * mono_assembly_request_prepare_byname:
 * \param req the byname request to be initialized
 * \param alc the AssemblyLoadContext in netcore
 *
 * Initialize an assembly load by name request. Its state will be reset and the AssemblyLoadContext will be prefilled with \p alc.
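 *
 * Typical use, as in \c mono_assembly_load_reference later in this file
 * (sketch):
 *
 *   MonoAssemblyByNameRequest req;
 *   mono_assembly_request_prepare_byname (&req, mono_image_get_alc (image));
 *   req.requesting_assembly = image->assembly;
 *   reference = mono_assembly_request_byname (&aname, &req, NULL);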
*/ void mono_assembly_request_prepare_byname (MonoAssemblyByNameRequest *req, MonoAssemblyLoadContext *alc) { memset (req, 0, sizeof (MonoAssemblyByNameRequest)); req->request.alc = alc; } static MonoAssembly * load_in_path (const char *basename, const char** search_path, const MonoAssemblyOpenRequest *req, MonoImageOpenStatus *status) { int i; char *fullpath; MonoAssembly *result; for (i = 0; search_path [i]; ++i) { fullpath = g_build_filename (search_path [i], basename, (const char*)NULL); result = mono_assembly_request_open (fullpath, req, status); g_free (fullpath); if (result) return result; } return NULL; } /** * mono_assembly_setrootdir: * \param root_dir The pathname of the root directory where we will locate assemblies * * This routine sets the internal default root directory for looking up * assemblies. * * This is used by Windows installations to compute dynamically the * place where the Mono assemblies are located. * */ void mono_assembly_setrootdir (const char *root_dir) { /* * Override the MONO_ASSEMBLIES directory configured at compile time. */ if (default_path [0]) g_free (default_path [0]); default_path [0] = g_strdup (root_dir); } /** * mono_assembly_getrootdir: * * Obtains the root directory used for looking up assemblies. * * Returns: a string with the directory, this string should not be freed. */ G_CONST_RETURN gchar * mono_assembly_getrootdir (void) { return default_path [0]; } /** * mono_native_getrootdir: * * Obtains the root directory used for looking up native libs (.so, .dylib). * * Returns: a string with the directory, this string should be freed by * the caller. */ gchar * mono_native_getrootdir (void) { gchar* fullpath = g_build_path (G_DIR_SEPARATOR_S, mono_assembly_getrootdir (), mono_config_get_reloc_lib_dir(), (const char*)NULL); return fullpath; } /** * mono_set_dirs: * \param assembly_dir the base directory for assemblies * \param config_dir the base directory for configuration files * * This routine is used internally and by developers embedding * the runtime into their own applications. * * There are a number of cases to consider: Mono as a system-installed * package that is available on the location preconfigured or Mono in * a relocated location. * * If you are using a system-installed Mono, you can pass NULL * to both parameters. If you are not, you should compute both * directory values and call this routine. * * The values for a given PREFIX are: * * assembly_dir: PREFIX/lib * config_dir: PREFIX/etc * * Notice that embedders that use Mono in a relocated way must * compute the location at runtime, as they will be in control * of where Mono is installed. 
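 *
 * For example, for a hypothetical relocated install under \c /opt/mono:
 *
 *   mono_set_dirs ("/opt/mono/lib", "/opt/mono/etc");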
 */
void
mono_set_dirs (const char *assembly_dir, const char *config_dir)
{
	if (assembly_dir == NULL)
		assembly_dir = mono_config_get_assemblies_dir ();
	if (config_dir == NULL)
		config_dir = mono_config_get_cfg_dir ();
	mono_assembly_setrootdir (assembly_dir);
	mono_set_config_dir (config_dir);
}

#ifndef HOST_WIN32

static char *
compute_base (char *path)
{
	char *p = strrchr (path, '/');
	if (p == NULL)
		return NULL;

	/* Not a well known Mono executable, we are embedded, can't guess the base */
	if (strcmp (p, "/mono") && strcmp (p, "/mono-boehm") && strcmp (p, "/mono-sgen") && strcmp (p, "/pedump") && strcmp (p, "/monodis"))
		return NULL;

	*p = 0;
	p = strrchr (path, '/');
	if (p == NULL)
		return NULL;

	if (strcmp (p, "/bin") != 0)
		return NULL;
	*p = 0;
	return path;
}

static void
fallback (void)
{
	mono_set_dirs (mono_config_get_assemblies_dir (), mono_config_get_cfg_dir ());
}

static G_GNUC_UNUSED void
set_dirs (char *exe)
{
	char *base;
	char *config, *lib, *mono;
	struct stat buf;
	const char *bindir;

	/*
	 * Only /usr prefix is treated specially
	 */
	bindir = mono_config_get_bin_dir ();
	g_assert (bindir);
	if (strncmp (exe, bindir, strlen (bindir)) == 0 || (base = compute_base (exe)) == NULL){
		fallback ();
		return;
	}

	config = g_build_filename (base, "etc", (const char*)NULL);
	lib = g_build_filename (base, "lib", (const char*)NULL);
	mono = g_build_filename (lib, "mono/4.5", (const char*)NULL);  // FIXME: stop hardcoding 4.5 here
	if (stat (mono, &buf) == -1)
		fallback ();
	else {
		mono_set_dirs (lib, config);
	}

	g_free (config);
	g_free (lib);
	g_free (mono);
}

#endif /* HOST_WIN32 */

/**
 * mono_set_rootdir:
 *
 * Registers the root directory for the Mono runtime. On Linux and Solaris 10
 * this auto-detects the prefix where Mono was installed.
 */
void
mono_set_rootdir (void)
{
	char *path = minipal_getexepath();
	if (path == NULL) {
#ifndef HOST_WIN32
		fallback ();
#endif
		return;
	}

#if defined(HOST_WIN32) || (defined(HOST_DARWIN) && !defined(TARGET_ARM))
	gchar *bindir, *installdir, *root, *config;

	bindir = g_path_get_dirname (path);
	installdir = g_path_get_dirname (bindir);
	root = g_build_path (G_DIR_SEPARATOR_S, installdir, "lib", (const char*)NULL);

	config = g_build_filename (root, "..", "etc", (const char*)NULL);
#ifdef HOST_WIN32
	mono_set_dirs (root, config);
#else
	if (g_file_test (root, G_FILE_TEST_EXISTS) && g_file_test (config, G_FILE_TEST_EXISTS))
		mono_set_dirs (root, config);
	else
		fallback ();
#endif

	g_free (config);
	g_free (root);
	g_free (installdir);
	g_free (bindir);
	g_free (path);
#elif defined(DISABLE_MONO_AUTODETECTION)
	fallback ();
#else
	set_dirs (path);
	return;
#endif
}

/**
 * mono_assemblies_init:
 *
 * Initialize global variables used by this module.
 */
void
mono_assemblies_init (void)
{
	/*
	 * Initialize our internal paths if we have not been initialized yet.
	 * This happens when embedders use Mono.
*/ if (mono_assembly_getrootdir () == NULL) mono_set_rootdir (); check_path_env (); mono_os_mutex_init_recursive (&assemblies_mutex); } gboolean mono_assembly_fill_assembly_name_full (MonoImage *image, MonoAssemblyName *aname, gboolean copyBlobs) { MonoTableInfo *t = &image->tables [MONO_TABLE_ASSEMBLY]; guint32 cols [MONO_ASSEMBLY_SIZE]; gint32 machine, flags; if (!table_info_get_rows (t)) return FALSE; mono_metadata_decode_row (t, 0, cols, MONO_ASSEMBLY_SIZE); aname->hash_len = 0; aname->hash_value = NULL; aname->name = mono_metadata_string_heap (image, cols [MONO_ASSEMBLY_NAME]); if (copyBlobs) aname->name = g_strdup (aname->name); aname->culture = mono_metadata_string_heap (image, cols [MONO_ASSEMBLY_CULTURE]); if (copyBlobs) aname->culture = g_strdup (aname->culture); aname->flags = cols [MONO_ASSEMBLY_FLAGS]; aname->major = cols [MONO_ASSEMBLY_MAJOR_VERSION]; aname->minor = cols [MONO_ASSEMBLY_MINOR_VERSION]; aname->build = cols [MONO_ASSEMBLY_BUILD_NUMBER]; aname->revision = cols [MONO_ASSEMBLY_REV_NUMBER]; aname->hash_alg = cols [MONO_ASSEMBLY_HASH_ALG]; if (cols [MONO_ASSEMBLY_PUBLIC_KEY]) { guchar* token = (guchar *)g_malloc (8); gchar* encoded; const gchar* pkey; int len; pkey = mono_metadata_blob_heap (image, cols [MONO_ASSEMBLY_PUBLIC_KEY]); len = mono_metadata_decode_blob_size (pkey, &pkey); aname->public_key = (guchar*)pkey; mono_digest_get_public_token (token, aname->public_key, len); encoded = encode_public_tok (token, 8); g_strlcpy ((char*)aname->public_key_token, encoded, MONO_PUBLIC_KEY_TOKEN_LENGTH); g_free (encoded); g_free (token); } else { aname->public_key = NULL; memset (aname->public_key_token, 0, MONO_PUBLIC_KEY_TOKEN_LENGTH); } if (cols [MONO_ASSEMBLY_PUBLIC_KEY]) { aname->public_key = (guchar*)mono_metadata_blob_heap (image, cols [MONO_ASSEMBLY_PUBLIC_KEY]); if (copyBlobs) { const gchar *pkey_end; int len = mono_metadata_decode_blob_size ((const gchar*) aname->public_key, &pkey_end); pkey_end += len; /* move to end */ size_t size = pkey_end - (const gchar*)aname->public_key; guchar *tmp = g_new (guchar, size); memcpy (tmp, aname->public_key, size); aname->public_key = tmp; } } else aname->public_key = 0; machine = image->image_info->cli_header.coff.coff_machine; flags = image->image_info->cli_cli_header.ch_flags; switch (machine) { case COFF_MACHINE_I386: /* https://bugzilla.xamarin.com/show_bug.cgi?id=17632 */ if (flags & (CLI_FLAGS_32BITREQUIRED|CLI_FLAGS_PREFERRED32BIT)) aname->arch = MONO_PROCESSOR_ARCHITECTURE_X86; else if ((flags & 0x70) == 0x70) aname->arch = MONO_PROCESSOR_ARCHITECTURE_NONE; else aname->arch = MONO_PROCESSOR_ARCHITECTURE_MSIL; break; case COFF_MACHINE_IA64: aname->arch = MONO_PROCESSOR_ARCHITECTURE_IA64; break; case COFF_MACHINE_AMD64: aname->arch = MONO_PROCESSOR_ARCHITECTURE_AMD64; break; case COFF_MACHINE_ARM: aname->arch = MONO_PROCESSOR_ARCHITECTURE_ARM; break; default: break; } return TRUE; } /** * mono_assembly_fill_assembly_name: * \param image Image * \param aname Name * \returns TRUE if successful */ gboolean mono_assembly_fill_assembly_name (MonoImage *image, MonoAssemblyName *aname) { return mono_assembly_fill_assembly_name_full (image, aname, FALSE); } /** * mono_stringify_assembly_name: * \param aname the assembly name. * * Convert \p aname into its string format. The returned string is dynamically * allocated and should be freed by the caller. * * \returns a newly allocated string with a string representation of * the assembly name. 
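 *
 * For a typical name the output looks like (illustrative):
 *
 *   "MyAssembly, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null"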
*/ char* mono_stringify_assembly_name (MonoAssemblyName *aname) { const char *quote = (aname->name && g_ascii_isspace (aname->name [0])) ? "\"" : ""; GString *str; str = g_string_new (NULL); g_string_append_printf (str, "%s%s%s", quote, aname->name, quote); if (!aname->without_version) g_string_append_printf (str, ", Version=%d.%d.%d.%d", aname->major, aname->minor, aname->build, aname->revision); if (!aname->without_culture) { if (aname->culture && *aname->culture) g_string_append_printf (str, ", Culture=%s", aname->culture); else g_string_append_printf (str, ", Culture=%s", "neutral"); } if (!aname->without_public_key_token) { if (aname->public_key_token [0]) g_string_append_printf (str,", PublicKeyToken=%s%s", (char *)aname->public_key_token, (aname->flags & ASSEMBLYREF_RETARGETABLE_FLAG) ? ", Retargetable=Yes" : ""); else g_string_append_printf (str,", PublicKeyToken=%s%s", "null", (aname->flags & ASSEMBLYREF_RETARGETABLE_FLAG) ? ", Retargetable=Yes" : ""); } char *result = g_string_free (str, FALSE); // result is the final formatted string. return result; } static gchar* assemblyref_public_tok (MonoImage *image, guint32 key_index, guint32 flags) { const gchar *public_tok; int len; public_tok = mono_metadata_blob_heap (image, key_index); len = mono_metadata_decode_blob_size (public_tok, &public_tok); if (flags & ASSEMBLYREF_FULL_PUBLIC_KEY_FLAG) { guchar token [8]; mono_digest_get_public_token (token, (guchar*)public_tok, len); return encode_public_tok (token, 8); } return encode_public_tok ((guchar*)public_tok, len); } static gchar* assemblyref_public_tok_checked (MonoImage *image, guint32 key_index, guint32 flags, MonoError *error) { const gchar *public_tok; int len; public_tok = mono_metadata_blob_heap_checked (image, key_index, error); return_val_if_nok (error, NULL); if (!public_tok) { mono_error_set_bad_image (error, image, "expected public key token (index = %d) in assembly reference, but the Blob heap is NULL", key_index); return NULL; } len = mono_metadata_decode_blob_size (public_tok, &public_tok); if (flags & ASSEMBLYREF_FULL_PUBLIC_KEY_FLAG) { guchar token [8]; mono_digest_get_public_token (token, (guchar*)public_tok, len); return encode_public_tok (token, 8); } return encode_public_tok ((guchar*)public_tok, len); } /** * mono_assembly_addref: * \param assembly the assembly to reference * * This routine increments the reference count on a MonoAssembly. * The reference count is reduced every time the method mono_assembly_close() is * invoked. 
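 *
 * A minimal usage sketch (assuming \c assembly is a valid, loaded assembly):
 *
 *   mono_assembly_addref (assembly);
 *   // ... use the assembly ...
 *   mono_assembly_close (assembly);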
*/ gint32 mono_assembly_addref (MonoAssembly *assembly) { return mono_atomic_inc_i32 (&assembly->ref_count); } gint32 mono_assembly_decref (MonoAssembly *assembly) { return mono_atomic_dec_i32 (&assembly->ref_count); } /* * CAUTION: This table must be kept in sync with * ivkm/reflect/Fusion.cs */ #define SILVERLIGHT_KEY "7cec85d7bea7798e" #define WINFX_KEY "31bf3856ad364e35" #define ECMA_KEY "b77a5c561934e089" #define MSFINAL_KEY "b03f5f7f11d50a3a" #define COMPACTFRAMEWORK_KEY "969db8053d3322ac" typedef struct { const char *name; const char *from; const char *to; } KeyRemapEntry; static KeyRemapEntry key_remap_table[] = { { "CustomMarshalers", COMPACTFRAMEWORK_KEY, MSFINAL_KEY }, { "Microsoft.CSharp", WINFX_KEY, MSFINAL_KEY }, { "Microsoft.VisualBasic", COMPACTFRAMEWORK_KEY, MSFINAL_KEY }, { "System", SILVERLIGHT_KEY, ECMA_KEY }, { "System", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.ComponentModel.Composition", WINFX_KEY, ECMA_KEY }, { "System.ComponentModel.DataAnnotations", "ddd0da4d3e678217", WINFX_KEY }, { "System.Core", SILVERLIGHT_KEY, ECMA_KEY }, { "System.Core", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.Data", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.Data.DataSetExtensions", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.Drawing", COMPACTFRAMEWORK_KEY, MSFINAL_KEY }, { "System.Messaging", COMPACTFRAMEWORK_KEY, MSFINAL_KEY }, // FIXME: MS uses MSFINAL_KEY for .NET 4.5 { "System.Net", SILVERLIGHT_KEY, MSFINAL_KEY }, { "System.Numerics", WINFX_KEY, ECMA_KEY }, { "System.Runtime.Serialization", SILVERLIGHT_KEY, ECMA_KEY }, { "System.Runtime.Serialization", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.ServiceModel", WINFX_KEY, ECMA_KEY }, { "System.ServiceModel", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.ServiceModel.Web", SILVERLIGHT_KEY, WINFX_KEY }, { "System.Web.Services", COMPACTFRAMEWORK_KEY, MSFINAL_KEY }, { "System.Windows", SILVERLIGHT_KEY, MSFINAL_KEY }, { "System.Windows.Forms", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.Xml", SILVERLIGHT_KEY, ECMA_KEY }, { "System.Xml", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.Xml.Linq", WINFX_KEY, ECMA_KEY }, { "System.Xml.Linq", COMPACTFRAMEWORK_KEY, ECMA_KEY }, { "System.Xml.Serialization", WINFX_KEY, ECMA_KEY } }; static void remap_keys (MonoAssemblyName *aname) { int i; for (i = 0; i < G_N_ELEMENTS (key_remap_table); i++) { const KeyRemapEntry *entry = &key_remap_table [i]; if (strcmp (aname->name, entry->name) || !mono_public_tokens_are_equal (aname->public_key_token, (const unsigned char*) entry->from)) continue; memcpy (aname->public_key_token, entry->to, MONO_PUBLIC_KEY_TOKEN_LENGTH); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Remapped public key token of retargetable assembly %s from %s to %s", aname->name, entry->from, entry->to); return; } } static MonoAssemblyName * mono_assembly_remap_version (MonoAssemblyName *aname, MonoAssemblyName *dest_aname) { const MonoRuntimeInfo *current_runtime; if (aname->name == NULL) return aname; current_runtime = mono_get_runtime_info (); if (aname->flags & ASSEMBLYREF_RETARGETABLE_FLAG) { const AssemblyVersionSet* vset; /* Remap to current runtime */ vset = &current_runtime->version_sets [0]; memcpy (dest_aname, aname, sizeof(MonoAssemblyName)); dest_aname->major = vset->major; dest_aname->minor = vset->minor; dest_aname->build = vset->build; dest_aname->revision = vset->revision; dest_aname->flags &= ~ASSEMBLYREF_RETARGETABLE_FLAG; /* Remap assembly name */ if (!strcmp (aname->name, "System.Net")) dest_aname->name = g_strdup ("System"); remap_keys (dest_aname); mono_trace 
(G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "The request to load the retargetable assembly %s v%d.%d.%d.%d was remapped to %s v%d.%d.%d.%d", aname->name, aname->major, aname->minor, aname->build, aname->revision, dest_aname->name, vset->major, vset->minor, vset->build, vset->revision ); return dest_aname; } return aname; } /** * mono_assembly_get_assemblyref: * \param image pointer to the \c MonoImage to extract the information from. * \param index index to the assembly reference in the image. * \param aname pointer to a \c MonoAssemblyName that will hold the returned value. * * Fills out the \p aname with the assembly name of the \p index assembly reference in \p image. */ void mono_assembly_get_assemblyref (MonoImage *image, int index, MonoAssemblyName *aname) { MonoTableInfo *t; guint32 cols [MONO_ASSEMBLYREF_SIZE]; const char *hash; t = &image->tables [MONO_TABLE_ASSEMBLYREF]; mono_metadata_decode_row (t, index, cols, MONO_ASSEMBLYREF_SIZE); // ECMA-335: II.22.5 - AssemblyRef // HashValue can be null or non-null. If non-null it's an index into the blob heap // Sometimes ILasm can create an image without a Blob heap. hash = mono_metadata_blob_heap_null_ok (image, cols [MONO_ASSEMBLYREF_HASH_VALUE]); if (hash) { aname->hash_len = mono_metadata_decode_blob_size (hash, &hash); aname->hash_value = hash; } else { aname->hash_len = 0; aname->hash_value = NULL; } aname->name = mono_metadata_string_heap (image, cols [MONO_ASSEMBLYREF_NAME]); aname->culture = mono_metadata_string_heap (image, cols [MONO_ASSEMBLYREF_CULTURE]); aname->flags = cols [MONO_ASSEMBLYREF_FLAGS]; aname->major = cols [MONO_ASSEMBLYREF_MAJOR_VERSION]; aname->minor = cols [MONO_ASSEMBLYREF_MINOR_VERSION]; aname->build = cols [MONO_ASSEMBLYREF_BUILD_NUMBER]; aname->revision = cols [MONO_ASSEMBLYREF_REV_NUMBER]; if (cols [MONO_ASSEMBLYREF_PUBLIC_KEY]) { gchar *token = assemblyref_public_tok (image, cols [MONO_ASSEMBLYREF_PUBLIC_KEY], aname->flags); g_strlcpy ((char*)aname->public_key_token, token, MONO_PUBLIC_KEY_TOKEN_LENGTH); g_free (token); } else { memset (aname->public_key_token, 0, MONO_PUBLIC_KEY_TOKEN_LENGTH); } } static MonoAssembly * search_bundle_for_assembly (MonoAssemblyLoadContext *alc, MonoAssemblyName *aname) { if (bundles == NULL && satellite_bundles == NULL) return NULL; MonoImageOpenStatus status; MonoImage *image; MonoAssemblyLoadRequest req; image = mono_assembly_open_from_bundle (alc, aname->name, &status, aname->culture); if (!image && !g_str_has_suffix (aname->name, ".dll")) { char *name = g_strdup_printf ("%s.dll", aname->name); image = mono_assembly_open_from_bundle (alc, name, &status, aname->culture); } if (image) { mono_assembly_request_prepare_load (&req, alc); return mono_assembly_request_load_from (image, aname->name, &req, &status); } return NULL; } static MonoAssembly* netcore_load_reference (MonoAssemblyName *aname, MonoAssemblyLoadContext *alc, MonoAssembly *requesting, gboolean postload) { g_assert (alc != NULL); MonoAssemblyName mapped_aname; aname = mono_assembly_remap_version (aname, &mapped_aname); MonoAssembly *reference = NULL; gboolean is_satellite = !mono_assembly_name_culture_is_neutral (aname); gboolean is_default = mono_alc_is_default (alc); /* * Try these until one of them succeeds (by returning a non-NULL reference): * 1. Check if it's already loaded by the ALC. * * 2. If it's a non-default ALC, call the Load() method. * * 3. If the ALC is not the default and this is not a satellite request, * check if it's already loaded by the default ALC. * * 4. 
If we have a bundle registered and this is not a satellite request, * search the images for a matching name. * * 5. If we have a satellite bundle registered and this is a satellite request, * find the parent ALC and search the images for a matching name and culture. * * 6. If the ALC is the default or this is not a satellite request, * check the TPA list, APP_PATHS, and ApplicationBase. * * 7. If this is a satellite request, call the ALC ResolveSatelliteAssembly method. * * 8. Call the ALC Resolving event. If the ALC is not the default and this is not * a satellite request, call the Resolving event in the default ALC first. * * 9. Call the ALC AssemblyResolve event (except for corlib satellite assemblies). * * 10. Return NULL. */ reference = mono_assembly_loaded_internal (alc, aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly already loaded in the active ALC: '%s'.", aname->name); goto leave; } if (!is_default) { reference = mono_alc_invoke_resolve_using_load_nofail (alc, aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found using Load method: '%s'.", aname->name); goto leave; } } if (!is_default && !is_satellite) { reference = mono_assembly_loaded_internal (mono_alc_get_default (), aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly already loaded in the default ALC: '%s'.", aname->name); goto leave; } } if (bundles != NULL && !is_satellite) { reference = search_bundle_for_assembly (mono_alc_get_default (), aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found in the bundle: '%s'.", aname->name); goto leave; } } if (satellite_bundles != NULL && is_satellite) { // Satellite assembly byname requests should be loaded in the same ALC as their parent assembly size_t name_len = strlen (aname->name); char *parent_name = NULL; MonoAssemblyLoadContext *parent_alc = NULL; if (g_str_has_suffix (aname->name, MONO_ASSEMBLY_RESOURCE_SUFFIX)) parent_name = g_strdup_printf ("%s.dll", g_strndup (aname->name, name_len - strlen (MONO_ASSEMBLY_RESOURCE_SUFFIX))); if (parent_name) { MonoAssemblyOpenRequest req; mono_assembly_request_prepare_open (&req, alc); MonoAssembly *parent_assembly = mono_assembly_request_open (parent_name, &req, NULL); parent_alc = mono_assembly_get_alc (parent_assembly); } if (parent_alc) reference = search_bundle_for_assembly (parent_alc, aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found in the satellite bundle: '%s'.", aname->name); goto leave; } } if (is_default || !is_satellite) { reference = invoke_assembly_preload_hook (mono_alc_get_default (), aname, assemblies_path); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found with the filesystem probing logic: '%s'.", aname->name); goto leave; } } if (is_satellite) { reference = mono_alc_invoke_resolve_using_resolve_satellite_nofail (alc, aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found with ResolveSatelliteAssembly method: '%s'.", aname->name); goto leave; } } // For compatibility with CoreCLR, invoke the Resolving event in the default ALC first whenever loading // a non-satellite assembly into a non-default ALC. 
See: https://github.com/dotnet/runtime/issues/54814 if (!is_default && !is_satellite) { reference = mono_alc_invoke_resolve_using_resolving_event_nofail (mono_alc_get_default (), aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found with the Resolving event (default ALC): '%s'.", aname->name); goto leave; } } reference = mono_alc_invoke_resolve_using_resolving_event_nofail (alc, aname); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found with the Resolving event: '%s'.", aname->name); goto leave; } // Looking up corlib resources here can cause an infinite loop // See: https://github.com/dotnet/coreclr/blob/0a762eb2f3a299489c459da1ddeb69e042008f07/src/vm/appdomain.cpp#L5178-L5239 if (!(strcmp (aname->name, MONO_ASSEMBLY_CORLIB_RESOURCE_NAME) == 0 && is_satellite) && postload) { reference = mono_assembly_invoke_search_hook_internal (alc, requesting, aname, TRUE); if (reference) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly found with AssemblyResolve event: '%s'.", aname->name); goto leave; } } leave: return reference; } /** * mono_assembly_get_assemblyref_checked: * \param image pointer to the \c MonoImage to extract the information from. * \param index index to the assembly reference in the image. * \param aname pointer to a \c MonoAssemblyName that will hold the returned value. * \param error set on error * * Fills out the \p aname with the assembly name of the \p index assembly reference in \p image. * * \returns TRUE on success, otherwise sets \p error and returns FALSE */ gboolean mono_assembly_get_assemblyref_checked (MonoImage *image, int index, MonoAssemblyName *aname, MonoError *error) { guint32 cols [MONO_ASSEMBLYREF_SIZE]; const char *hash; if (image_is_dynamic (image)) { MonoDynamicTable *t = &(((MonoDynamicImage*) image)->tables [MONO_TABLE_ASSEMBLYREF]); if (!mono_metadata_decode_row_dynamic_checked ((MonoDynamicImage*)image, t, index, cols, MONO_ASSEMBLYREF_SIZE, error)) return FALSE; } else { MonoTableInfo *t = &image->tables [MONO_TABLE_ASSEMBLYREF]; if (!mono_metadata_decode_row_checked (image, t, index, cols, MONO_ASSEMBLYREF_SIZE, error)) return FALSE; } // ECMA-335: II.22.5 - AssemblyRef // HashValue can be null or non-null. If non-null it's an index into the blob heap // Sometimes ILasm can create an image without a Blob heap. 
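	// The _checked accessor reports a bad blob index through 'error' (checked
	// with return_val_if_nok below) instead of failing hard.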
hash = mono_metadata_blob_heap_checked (image, cols [MONO_ASSEMBLYREF_HASH_VALUE], error); return_val_if_nok (error, FALSE); if (hash) { aname->hash_len = mono_metadata_decode_blob_size (hash, &hash); aname->hash_value = hash; } else { aname->hash_len = 0; aname->hash_value = NULL; } aname->name = mono_metadata_string_heap_checked (image, cols [MONO_ASSEMBLYREF_NAME], error); return_val_if_nok (error, FALSE); aname->culture = mono_metadata_string_heap_checked (image, cols [MONO_ASSEMBLYREF_CULTURE], error); return_val_if_nok (error, FALSE); aname->flags = cols [MONO_ASSEMBLYREF_FLAGS]; aname->major = cols [MONO_ASSEMBLYREF_MAJOR_VERSION]; aname->minor = cols [MONO_ASSEMBLYREF_MINOR_VERSION]; aname->build = cols [MONO_ASSEMBLYREF_BUILD_NUMBER]; aname->revision = cols [MONO_ASSEMBLYREF_REV_NUMBER]; if (cols [MONO_ASSEMBLYREF_PUBLIC_KEY]) { gchar *token = assemblyref_public_tok_checked (image, cols [MONO_ASSEMBLYREF_PUBLIC_KEY], aname->flags, error); return_val_if_nok (error, FALSE); g_strlcpy ((char*)aname->public_key_token, token, MONO_PUBLIC_KEY_TOKEN_LENGTH); g_free (token); } else { memset (aname->public_key_token, 0, MONO_PUBLIC_KEY_TOKEN_LENGTH); } return TRUE; } /** * mono_assembly_load_reference: */ void mono_assembly_load_reference (MonoImage *image, int index) { MonoAssembly *reference; MonoAssemblyName aname; MonoImageOpenStatus status = MONO_IMAGE_OK; memset (&aname, 0, sizeof (MonoAssemblyName)); /* * image->references is shared between threads, so we need to access * it inside a critical section. */ mono_image_lock (image); if (!image->references) { MonoTableInfo *t = &image->tables [MONO_TABLE_ASSEMBLYREF]; int n = table_info_get_rows (t); image->references = g_new0 (MonoAssembly *, n + 1); image->nreferences = n; } reference = image->references [index]; mono_image_unlock (image); if (reference) return; mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Requesting loading reference %d (of %d) of %s", index, image->nreferences, image->name); ERROR_DECL (local_error); mono_assembly_get_assemblyref_checked (image, index, &aname, local_error); if (!is_ok (local_error)) { mono_trace (G_LOG_LEVEL_WARNING, MONO_TRACE_ASSEMBLY, "Decoding assembly reference %d (of %d) of %s failed due to: %s", index, image->nreferences, image->name, mono_error_get_message (local_error)); mono_error_cleanup (local_error); goto commit_reference; } if (image->assembly) { if (mono_trace_is_traced (G_LOG_LEVEL_INFO, MONO_TRACE_ASSEMBLY)) { char *aname_str = mono_stringify_assembly_name (&aname); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Loading reference %d of %s (%s), looking for %s", index, image->name, mono_alc_is_default (mono_image_get_alc (image)) ? "default ALC" : "custom ALC" , aname_str); g_free (aname_str); } MonoAssemblyByNameRequest req; mono_assembly_request_prepare_byname (&req, mono_image_get_alc (image)); req.requesting_assembly = image->assembly; //req.no_postload_search = TRUE; // FIXME: should this be set? reference = mono_assembly_request_byname (&aname, &req, NULL); } else { g_assertf (image->assembly, "While loading reference %d MonoImage %s doesn't have a MonoAssembly", index, image->name); } if (reference == NULL){ char *extra_msg; if (status == MONO_IMAGE_ERROR_ERRNO && errno == ENOENT) { extra_msg = g_strdup_printf ("The assembly was not found in the Global Assembly Cache, a path listed in the MONO_PATH environment variable, or in the location of the executing assembly (%s).\n", image->assembly != NULL ? 
image->assembly->basedir : "" ); } else if (status == MONO_IMAGE_ERROR_ERRNO) { extra_msg = g_strdup_printf ("System error: %s\n", strerror (errno)); } else if (status == MONO_IMAGE_MISSING_ASSEMBLYREF) { extra_msg = g_strdup ("Cannot find an assembly referenced from this one.\n"); } else if (status == MONO_IMAGE_IMAGE_INVALID) { extra_msg = g_strdup ("The file exists but is not a valid assembly.\n"); } else { extra_msg = g_strdup (""); } mono_trace (G_LOG_LEVEL_WARNING, MONO_TRACE_ASSEMBLY, "The following assembly referenced from %s could not be loaded:\n" " Assembly: %s (assemblyref_index=%d)\n" " Version: %d.%d.%d.%d\n" " Public Key: %s\n%s", image->name, aname.name, index, aname.major, aname.minor, aname.build, aname.revision, strlen ((char*)aname.public_key_token) == 0 ? "(none)" : (char*)aname.public_key_token, extra_msg); g_free (extra_msg); } commit_reference: mono_image_lock (image); if (reference == NULL) { /* Flag as not found */ reference = (MonoAssembly *)REFERENCE_MISSING; } if (!image->references [index]) { if (reference != REFERENCE_MISSING){ mono_assembly_addref (reference); if (image->assembly) mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly Ref addref %s[%p] -> %s[%p]: %d", image->assembly->aname.name, image->assembly, reference->aname.name, reference, reference->ref_count); } else { if (image->assembly) mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_ASSEMBLY, "Failed to load assembly %s[%p].", image->assembly->aname.name, image->assembly); } image->references [index] = reference; } mono_image_unlock (image); if (image->references [index] != reference) { /* Somebody loaded it before us */ mono_assembly_close (reference); } } /** * mono_assembly_load_references: * \param image * \param status * \deprecated There is no reason to use this method anymore, it does nothing * * This method is now a no-op, it does nothing other than setting the \p status to \c MONO_IMAGE_OK */ void mono_assembly_load_references (MonoImage *image, MonoImageOpenStatus *status) { /* This is a no-op now but it is part of the embedding API so we can't remove it */ if (status) *status = MONO_IMAGE_OK; } typedef struct AssemblyLoadHook AssemblyLoadHook; struct AssemblyLoadHook { AssemblyLoadHook *next; union { MonoAssemblyLoadFunc v1; MonoAssemblyLoadFuncV2 v2; } func; int version; gpointer user_data; }; static AssemblyLoadHook *assembly_load_hook = NULL; void mono_assembly_invoke_load_hook_internal (MonoAssemblyLoadContext *alc, MonoAssembly *ass) { AssemblyLoadHook *hook; for (hook = assembly_load_hook; hook; hook = hook->next) { if (hook->version == 1) { hook->func.v1 (ass, hook->user_data); } else { ERROR_DECL (hook_error); g_assert (hook->version == 2); hook->func.v2 (alc, ass, hook->user_data, hook_error); mono_error_assert_ok (hook_error); /* FIXME: proper error handling */ } } } /** * mono_assembly_invoke_load_hook: */ void mono_assembly_invoke_load_hook (MonoAssembly *ass) { mono_assembly_invoke_load_hook_internal (mono_alc_get_default (), ass); } static void mono_install_assembly_load_hook_v1 (MonoAssemblyLoadFunc func, gpointer user_data) { AssemblyLoadHook *hook; g_return_if_fail (func != NULL); hook = g_new0 (AssemblyLoadHook, 1); hook->version = 1; hook->func.v1 = func; hook->user_data = user_data; hook->next = assembly_load_hook; assembly_load_hook = hook; } void mono_install_assembly_load_hook_v2 (MonoAssemblyLoadFuncV2 func, gpointer user_data, gboolean append) { g_return_if_fail (func != NULL); AssemblyLoadHook *hook = g_new0 (AssemblyLoadHook, 1); hook->version = 2; 
	hook->func.v2 = func;
	hook->user_data = user_data;

	if (append && assembly_load_hook != NULL) { // If we don't have any installed hooks, append vs prepend is irrelevant
		AssemblyLoadHook *old = assembly_load_hook;
		while (old->next != NULL)
			old = old->next;
		old->next = hook;
	} else {
		hook->next = assembly_load_hook;
		assembly_load_hook = hook;
	}
}

/**
 * mono_install_assembly_load_hook:
 */
void
mono_install_assembly_load_hook (MonoAssemblyLoadFunc func, gpointer user_data)
{
	mono_install_assembly_load_hook_v1 (func, user_data);
}

typedef struct AssemblySearchHook AssemblySearchHook;
struct AssemblySearchHook {
	AssemblySearchHook *next;
	union {
		MonoAssemblySearchFunc v1;
		MonoAssemblySearchFuncV2 v2;
	} func;
	gboolean postload;
	int version;
	gpointer user_data;
};

static AssemblySearchHook *assembly_search_hook = NULL;

static MonoAssembly*
mono_assembly_invoke_search_hook_internal (MonoAssemblyLoadContext *alc, MonoAssembly *requesting, MonoAssemblyName *aname, gboolean postload)
{
	AssemblySearchHook *hook;

	for (hook = assembly_search_hook; hook; hook = hook->next) {
		if (hook->postload == postload) {
			MonoAssembly *ass;
			if (hook->version == 1) {
				ass = hook->func.v1 (aname, hook->user_data);
			} else {
				ERROR_DECL (hook_error);
				g_assert (hook->version == 2);
				ass = hook->func.v2 (alc, requesting, aname, postload, hook->user_data, hook_error);
				mono_error_assert_ok (hook_error); /* FIXME: proper error handling */
			}
			if (ass)
				return ass;
		}
	}

	return NULL;
}

/**
 * mono_assembly_invoke_search_hook:
 */
MonoAssembly*
mono_assembly_invoke_search_hook (MonoAssemblyName *aname)
{
	return mono_assembly_invoke_search_hook_internal (NULL, NULL, aname, FALSE);
}

static void
mono_install_assembly_search_hook_internal_v1 (MonoAssemblySearchFunc func, gpointer user_data, gboolean postload)
{
	AssemblySearchHook *hook;

	g_return_if_fail (func != NULL);

	hook = g_new0 (AssemblySearchHook, 1);
	hook->version = 1;
	hook->func.v1 = func;
	hook->user_data = user_data;
	hook->postload = postload;
	hook->next = assembly_search_hook;
	assembly_search_hook = hook;
}

void
mono_install_assembly_search_hook_v2 (MonoAssemblySearchFuncV2 func, gpointer user_data, gboolean postload, gboolean append)
{
	if (func == NULL)
		return;

	AssemblySearchHook *hook = g_new0 (AssemblySearchHook, 1);
	hook->version = 2;
	hook->func.v2 = func;
	hook->user_data = user_data;
	hook->postload = postload;

	if (append && assembly_search_hook != NULL) { // If we don't have any installed hooks, append vs prepend is irrelevant
		AssemblySearchHook *old = assembly_search_hook;
		while (old->next != NULL)
			old = old->next;
		old->next = hook;
	} else {
		hook->next = assembly_search_hook;
		assembly_search_hook = hook;
	}
}

/**
 * mono_install_assembly_search_hook:
 */
void
mono_install_assembly_search_hook (MonoAssemblySearchFunc func, gpointer user_data)
{
	mono_install_assembly_search_hook_internal_v1 (func, user_data, FALSE);
}

/**
 * mono_install_assembly_refonly_search_hook:
 */
void
mono_install_assembly_refonly_search_hook (MonoAssemblySearchFunc func, gpointer user_data)
{
	/* Ignore refonly hooks, they will never fire */
}

/**
 * mono_install_assembly_postload_search_hook:
 */
void
mono_install_assembly_postload_search_hook (MonoAssemblySearchFunc func, gpointer user_data)
{
	mono_install_assembly_search_hook_internal_v1 (func, user_data, TRUE);
}

void
mono_install_assembly_postload_refonly_search_hook (MonoAssemblySearchFunc func, gpointer user_data)
{
	/* Ignore refonly hooks, they will never fire */
}

typedef struct AssemblyPreLoadHook AssemblyPreLoadHook;
struct AssemblyPreLoadHook {
	AssemblyPreLoadHook *next;
	union {
		MonoAssemblyPreLoadFunc v1; // legacy internal use
		MonoAssemblyPreLoadFuncV2 v2; // current internal use
		MonoAssemblyPreLoadFuncV3 v3; // netcore external use
	} func;
	gpointer user_data;
	gint32 version;
};

static AssemblyPreLoadHook *assembly_preload_hook = NULL;

static MonoAssembly *
invoke_assembly_preload_hook (MonoAssemblyLoadContext *alc, MonoAssemblyName *aname, gchar **apath)
{
	AssemblyPreLoadHook *hook;
	MonoAssembly *assembly;

	for (hook = assembly_preload_hook; hook; hook = hook->next) {
		if (hook->version == 1)
			assembly = hook->func.v1 (aname, apath, hook->user_data);
		else {
			ERROR_DECL (error);
			g_assert (hook->version == 2 || hook->version == 3);
			if (hook->version == 2)
				assembly = hook->func.v2 (alc, aname, apath, hook->user_data, error);
			else { // v3
				/*
				 * For the default ALC, pass the globally known gchandle (since it's never collectible, it's always a strong handle).
				 * For other ALCs, make a new strong handle that is passed to the caller.
				 * Note that early at startup the default ALC exists but its managed object doesn't yet,
				 * so the default ALC gchandle can still point to null.
				 */
				gboolean needs_free = TRUE;
				MonoGCHandle strong_gchandle;
				if (mono_alc_is_default (alc)) {
					needs_free = FALSE;
					strong_gchandle = alc->gchandle;
				} else
					strong_gchandle = mono_gchandle_from_handle (mono_gchandle_get_target_handle (alc->gchandle), TRUE);
				assembly = hook->func.v3 (strong_gchandle, aname, apath, hook->user_data, error);
				if (needs_free)
					mono_gchandle_free_internal (strong_gchandle);
			}
			/* TODO: propagate error out to callers */
			mono_error_assert_ok (error);
		}
		if (assembly != NULL)
			return assembly;
	}

	return NULL;
}

/**
 * mono_install_assembly_preload_hook:
 */
void
mono_install_assembly_preload_hook (MonoAssemblyPreLoadFunc func, gpointer user_data)
{
	AssemblyPreLoadHook *hook;

	g_return_if_fail (func != NULL);

	hook = g_new0 (AssemblyPreLoadHook, 1);
	hook->version = 1;
	hook->func.v1 = func;
	hook->user_data = user_data;
	hook->next = assembly_preload_hook;
	assembly_preload_hook = hook;
}

/**
 * mono_install_assembly_refonly_preload_hook:
 */
void
mono_install_assembly_refonly_preload_hook (MonoAssemblyPreLoadFunc func, gpointer user_data)
{
	/* Ignore refonly hooks, they never fire */
}

void
mono_install_assembly_preload_hook_v2 (MonoAssemblyPreLoadFuncV2 func, gpointer user_data, gboolean append)
{
	AssemblyPreLoadHook *hook;

	g_return_if_fail (func != NULL);

	AssemblyPreLoadHook **hooks = &assembly_preload_hook;

	hook = g_new0 (AssemblyPreLoadHook, 1);
	hook->version = 2;
	hook->func.v2 = func;
	hook->user_data = user_data;

	if (append && *hooks != NULL) { // If we don't have any installed hooks, append vs prepend is irrelevant
		AssemblyPreLoadHook *old = *hooks;
		while (old->next != NULL)
			old = old->next;
		old->next = hook;
	} else {
		hook->next = *hooks;
		*hooks = hook;
	}
}

void
mono_install_assembly_preload_hook_v3 (MonoAssemblyPreLoadFuncV3 func, gpointer user_data, gboolean append)
{
	AssemblyPreLoadHook *hook;

	g_return_if_fail (func != NULL);

	hook = g_new0 (AssemblyPreLoadHook, 1);
	hook->version = 3;
	hook->func.v3 = func;
	hook->user_data = user_data;

	if (append && assembly_preload_hook != NULL) {
		AssemblyPreLoadHook *old = assembly_preload_hook;
		while (old->next != NULL)
			old = old->next;
		old->next = hook;
	} else {
		hook->next = assembly_preload_hook;
		assembly_preload_hook = hook;
	}
}

static gchar *
absolute_dir (const gchar *filename)
{
	gchar *cwd;
	gchar *mixed;
	gchar **parts;
	gchar *part;
	GList *list, *tmp;
	GString *result;
	gchar *res;
	gint i;

	if (g_path_is_absolute (filename)) {
		part =
g_path_get_dirname (filename); res = g_strconcat (part, G_DIR_SEPARATOR_S, (const char*)NULL); g_free (part); return res; } cwd = g_get_current_dir (); mixed = g_build_filename (cwd, filename, (const char*)NULL); parts = g_strsplit (mixed, G_DIR_SEPARATOR_S, 0); g_free (mixed); g_free (cwd); list = NULL; for (i = 0; (part = parts [i]) != NULL; i++) { if (!strcmp (part, ".")) continue; if (!strcmp (part, "..")) { if (list && list->next) /* Don't remove root */ list = g_list_delete_link (list, list); } else { list = g_list_prepend (list, part); } } result = g_string_new (""); list = g_list_reverse (list); /* Ignores last data pointer, which should be the filename */ for (tmp = list; tmp && tmp->next != NULL; tmp = tmp->next){ if (tmp->data) g_string_append_printf (result, "%s%c", (char *) tmp->data, G_DIR_SEPARATOR); } res = result->str; g_string_free (result, FALSE); g_list_free (list); g_strfreev (parts); if (*res == '\0') { g_free (res); return g_strdup ("."); } return res; } static MonoImage * open_from_bundle_internal (MonoAssemblyLoadContext *alc, const char *filename, MonoImageOpenStatus *status, gboolean is_satellite) { if (!bundles) return NULL; MonoImage *image = NULL; char *name = is_satellite ? g_strdup (filename) : g_path_get_basename (filename); for (int i = 0; !image && bundles [i]; ++i) { if (strcmp (bundles [i]->name, name) == 0) { // Since bundled images don't exist on disk, don't give them a legit filename image = mono_image_open_from_data_internal (alc, (char*)bundles [i]->data, bundles [i]->size, FALSE, status, FALSE, name, NULL); break; } } g_free (name); return image; } static MonoImage * open_from_satellite_bundle (MonoAssemblyLoadContext *alc, const char *filename, MonoImageOpenStatus *status, const char *culture) { if (!satellite_bundles) return NULL; MonoImage *image = NULL; char *name = g_strdup (filename); for (int i = 0; !image && satellite_bundles [i]; ++i) { if (strcmp (satellite_bundles [i]->name, name) == 0 && strcmp (satellite_bundles [i]->culture, culture) == 0) { char *bundle_name = g_strconcat (culture, "/", name, (const char *)NULL); image = mono_image_open_from_data_internal (alc, (char *)satellite_bundles [i]->data, satellite_bundles [i]->size, FALSE, status, FALSE, bundle_name, NULL); g_free (bundle_name); break; } } g_free (name); return image; } /** * mono_assembly_open_from_bundle: * \param filename Filename requested * \param status return status code * * This routine tries to open the assembly specified by \p filename from the * defined bundles, if found, returns the MonoImage for it, if not found * returns NULL */ MonoImage * mono_assembly_open_from_bundle (MonoAssemblyLoadContext *alc, const char *filename, MonoImageOpenStatus *status, const char *culture) { /* * we do a very simple search for bundled assemblies: it's not a general * purpose assembly loading mechanism. */ MonoImage *image = NULL; gboolean is_satellite = culture && culture [0] != 0; if (is_satellite) image = open_from_satellite_bundle (alc, filename, status, culture); else image = open_from_bundle_internal (alc, filename, status, FALSE); if (image) { mono_image_addref (image); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly Loader loaded assembly from bundle: '%s'.", filename); } return image; } /** * mono_assembly_open_full: * \param filename the file to load * \param status return status code * \param refonly Whether this assembly is being opened in "reflection-only" mode. * * This loads an assembly from the specified \p filename. 
The \p filename allows * a local URL (starting with a \c file:// prefix). If a file prefix is used, the * filename is interpreted as a URL, and the filename is URL-decoded. Otherwise the file * is treated as a local path. * * First, an attempt is made to load the assembly from the bundled executable (for those * deployments that have been done with the \c mkbundle tool or for scenarios where the * assembly has been registered as an embedded assembly). If this is not the case, then * the assembly is loaded from disk using `api:mono_image_open_full`. * * If \p refonly is set to true, then the assembly is loaded purely for inspection with * the \c System.Reflection API. * * \returns NULL on error, with the \p status set to an error code, or a pointer * to the assembly. */ MonoAssembly * mono_assembly_open_full (const char *filename, MonoImageOpenStatus *status, gboolean refonly) { if (refonly) { if (status) *status = MONO_IMAGE_IMAGE_INVALID; return NULL; } MonoAssembly *res; MONO_ENTER_GC_UNSAFE; MonoAssemblyOpenRequest req; mono_assembly_request_prepare_open (&req, mono_alc_get_default ()); res = mono_assembly_request_open (filename, &req, status); MONO_EXIT_GC_UNSAFE; return res; } MonoAssembly * mono_assembly_request_open (const char *filename, const MonoAssemblyOpenRequest *open_req, MonoImageOpenStatus *status) { MonoImage *image; MonoAssembly *ass; MonoImageOpenStatus def_status; gchar *fname; gboolean loaded_from_bundle; MonoAssemblyLoadRequest load_req; /* we will be overwriting the load request's asmctx.*/ memcpy (&load_req, &open_req->request, sizeof (load_req)); g_return_val_if_fail (filename != NULL, NULL); if (!status) status = &def_status; *status = MONO_IMAGE_OK; fname = g_strdup (filename); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Assembly Loader probing location: '%s'.", fname); image = NULL; // If VM built with mkbundle loaded_from_bundle = FALSE; if (bundles != NULL || satellite_bundles != NULL) { /* We don't know the culture of the filename we're loading here, so this call is not culture aware. */ image = mono_assembly_open_from_bundle (load_req.alc, fname, status, NULL); loaded_from_bundle = image != NULL; } if (!image) image = mono_image_open_a_lot (load_req.alc, fname, status); if (!image){ if (*status == MONO_IMAGE_OK) *status = MONO_IMAGE_ERROR_ERRNO; g_free (fname); return NULL; } if (image->assembly) { /* We want to return the MonoAssembly that's already loaded, * but if we're using the strict assembly loader, we also need * to check that the previously loaded assembly matches the * predicate. It could be that we previously loaded a * different version that happens to have the filename that * we're currently probing. 
	 */
	if (mono_loader_get_strict_assembly_name_check () &&
		    load_req.predicate && !load_req.predicate (image->assembly, load_req.predicate_ud)) {
			mono_image_close (image);
			g_free (fname);
			return NULL;
		} else {
			/* Already loaded by another appdomain */
			mono_assembly_invoke_load_hook_internal (load_req.alc, image->assembly);
			mono_image_close (image);
			g_free (fname);
			return image->assembly;
		}
	}

	ass = mono_assembly_request_load_from (image, fname, &load_req, status);

	if (ass) {
		if (!loaded_from_bundle)
			mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY,
				"Assembly Loader loaded assembly from location: '%s'.", filename);
	}

	/* Clear the reference added by mono_image_open */
	mono_image_close (image);

	g_free (fname);

	return ass;
}

static void
free_assembly_name_item (gpointer val, gpointer user_data)
{
	mono_assembly_name_free_internal ((MonoAssemblyName *)val);
	g_free (val);
}

/**
 * mono_assembly_load_friends:
 * \param ass an assembly
 *
 * Load the list of friend assemblies that are allowed to access
 * the assembly's internal types and members. They are stored as assembly
 * names in custom attributes.
 *
 * This is an internal method, we need this because when we load mscorlib
 * we do not have the internals visible cattr loaded yet,
 * so we need to load these after we initialize the runtime.
 *
 * LOCKING: Acquires the assemblies lock plus the loader lock.
 */
void
mono_assembly_load_friends (MonoAssembly* ass)
{
	ERROR_DECL (error);
	int i;
	MonoCustomAttrInfo* attrs;

	if (ass->friend_assembly_names_inited)
		return;

	attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, error);
	mono_error_assert_ok (error);
	if (!attrs) {
		mono_assemblies_lock ();
		ass->friend_assembly_names_inited = TRUE;
		mono_assemblies_unlock ();
		return;
	}

	mono_assemblies_lock ();
	if (ass->friend_assembly_names_inited) {
		mono_assemblies_unlock ();
		return;
	}
	mono_assemblies_unlock ();

	GSList *visible_list = NULL;
	GSList *ignores_list = NULL;

	/*
	 * We build the list outside the assemblies lock; the worst that can happen
	 * is that we'll need to free the allocated list.
	 */
	for (i = 0; i < attrs->num_attrs; ++i) {
		MonoCustomAttrEntry *attr = &attrs->attrs [i];
		MonoAssemblyName *aname;
		const gchar *data;
		uint32_t data_length;
		gchar *data_with_terminator;
		/* Do some sanity checking */
		if (!attr->ctor)
			continue;

		gboolean has_visible = FALSE;
		gboolean has_ignores = FALSE;
		has_visible = attr->ctor->klass == mono_class_try_get_internals_visible_class ();
		/* IgnoresAccessChecksToAttribute is dynamically generated, so it's not necessarily in CoreLib */
		/* FIXME: should we only check for it in dynamic modules?
*/ has_ignores = (!strcmp ("IgnoresAccessChecksToAttribute", m_class_get_name (attr->ctor->klass)) && !strcmp ("System.Runtime.CompilerServices", m_class_get_name_space (attr->ctor->klass))); if (!has_visible && !has_ignores) continue; if (attr->data_size < 4) continue; data = (const char*)attr->data; /* 0xFF means null string, see custom attr format */ if (data [0] != 1 || data [1] != 0 || (data [2] & 0xFF) == 0xFF) continue; data_length = mono_metadata_decode_value (data + 2, &data); data_with_terminator = (char *)g_memdup (data, data_length + 1); data_with_terminator[data_length] = 0; aname = g_new0 (MonoAssemblyName, 1); /*g_print ("friend ass: %s\n", data);*/ if (mono_assembly_name_parse_full (data_with_terminator, aname, TRUE, NULL, NULL)) { if (has_visible) visible_list = g_slist_prepend (visible_list, aname); if (has_ignores) ignores_list = g_slist_prepend (ignores_list, aname); } else { g_free (aname); } g_free (data_with_terminator); } mono_custom_attrs_free (attrs); mono_assemblies_lock (); if (ass->friend_assembly_names_inited) { mono_assemblies_unlock (); g_slist_foreach (visible_list, free_assembly_name_item, NULL); g_slist_free (visible_list); g_slist_foreach (ignores_list, free_assembly_name_item, NULL); g_slist_free (ignores_list); return; } ass->friend_assembly_names = visible_list; ass->ignores_checks_assembly_names = ignores_list; /* Because of the double checked locking pattern above */ mono_memory_barrier (); ass->friend_assembly_names_inited = TRUE; mono_assemblies_unlock (); } struct HasReferenceAssemblyAttributeIterData { gboolean has_attr; }; static gboolean has_reference_assembly_attribute_iterator (MonoImage *image, guint32 typeref_scope_token, const char *nspace, const char *name, guint32 method_token, gpointer user_data) { gboolean stop_scanning = FALSE; struct HasReferenceAssemblyAttributeIterData *iter_data = (struct HasReferenceAssemblyAttributeIterData*)user_data; if (!strcmp (name, "ReferenceAssemblyAttribute") && !strcmp (nspace, "System.Runtime.CompilerServices")) { /* Note we don't check the assembly name, same as coreCLR. */ iter_data->has_attr = TRUE; stop_scanning = TRUE; } return stop_scanning; } /** * mono_assembly_has_reference_assembly_attribute: * \param assembly a MonoAssembly * \param error set on error. * * \returns TRUE if \p assembly has the \c System.Runtime.CompilerServices.ReferenceAssemblyAttribute set. * On error returns FALSE and sets \p error. */ gboolean mono_assembly_has_reference_assembly_attribute (MonoAssembly *assembly, MonoError *error) { g_assert (assembly && assembly->image); /* .NET Framework appears to ignore the attribute on dynamic * assemblies, so don't call this function for dynamic assemblies. */ g_assert (!image_is_dynamic (assembly->image)); error_init (error); /* * This might be called during assembly loading, so do everything using the low-level * metadata APIs. */ struct HasReferenceAssemblyAttributeIterData iter_data = { FALSE }; mono_assembly_metadata_foreach_custom_attr (assembly, &has_reference_assembly_attribute_iterator, &iter_data); return iter_data.has_attr; } /** * mono_assembly_open: * \param filename Opens the assembly pointed out by this name * \param status return status code * * This loads an assembly from the specified \p filename. The \p filename allows * a local URL (starting with a \c file:// prefix). If a file prefix is used, the * filename is interpreted as a URL, and the filename is URL-decoded. Otherwise the file * is treated as a local path. 
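 *
 * For example, both of these open the same file (illustrative paths):
 *
 *   mono_assembly_open ("/usr/lib/mono/MyApp.dll", &status);
 *   mono_assembly_open ("file:///usr/lib/mono/MyApp.dll", &status);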
* * First, an attempt is made to load the assembly from the bundled executable (for those * deployments that have been done with the \c mkbundle tool or for scenarios where the * assembly has been registered as an embedded assembly). If this is not the case, then * the assembly is loaded from disk using `api:mono_image_open_full`. * * \returns a pointer to the \c MonoAssembly if \p filename contains a valid * assembly or NULL on error. Details about the error are stored in the * \p status variable. */ MonoAssembly * mono_assembly_open (const char *filename, MonoImageOpenStatus *status) { MonoAssembly *res; MONO_ENTER_GC_UNSAFE; MonoAssemblyOpenRequest req; mono_assembly_request_prepare_open (&req, mono_alc_get_default ()); res = mono_assembly_request_open (filename, &req, status); MONO_EXIT_GC_UNSAFE; return res; } /** * mono_assembly_load_from_full: * \param image Image to load the assembly from * \param fname assembly name to associate with the assembly * \param status returns the status condition * \param refonly Whether this assembly is being opened in "reflection-only" mode. * * If the provided \p image has an assembly reference, it will process the given * image as an assembly with the given name. * * Most likely you want to use the `api:mono_assembly_load_full` method instead. * * Returns: A valid pointer to a \c MonoAssembly* on success and the \p status will be * set to \c MONO_IMAGE_OK; or NULL on error. * * If there is an error loading the assembly the \p status will indicate the * reason with \p status being set to \c MONO_IMAGE_INVALID if the * image did not contain an assembly reference table. */ MonoAssembly * mono_assembly_load_from_full (MonoImage *image, const char*fname, MonoImageOpenStatus *status, gboolean refonly) { if (refonly) { if (status) *status = MONO_IMAGE_IMAGE_INVALID; return NULL; } MonoAssembly *res; MONO_ENTER_GC_UNSAFE; MonoAssemblyLoadRequest req; MonoImageOpenStatus def_status; if (!status) status = &def_status; mono_assembly_request_prepare_load (&req, mono_alc_get_default ()); res = mono_assembly_request_load_from (image, fname, &req, status); MONO_EXIT_GC_UNSAFE; return res; } MonoAssembly * mono_assembly_request_load_from (MonoImage *image, const char *fname, const MonoAssemblyLoadRequest *req, MonoImageOpenStatus *status) { MonoAssemblyCandidatePredicate predicate; gpointer user_data; MonoAssembly *ass, *ass2; char *base_dir; g_assert (status != NULL); predicate = req->predicate; user_data = req->predicate_ud; if (!table_info_get_rows (&image->tables [MONO_TABLE_ASSEMBLY])) { /* 'image' doesn't have a manifest -- maybe someone is trying to Assembly.Load a .netmodule */ *status = MONO_IMAGE_IMAGE_INVALID; return NULL; } #if defined (HOST_WIN32) { gchar *tmp_fn; int i; tmp_fn = g_strdup (fname); for (i = strlen (tmp_fn) - 1; i >= 0; i--) { if (tmp_fn [i] == '/') tmp_fn [i] = '\\'; } base_dir = absolute_dir (tmp_fn); g_free (tmp_fn); } #else base_dir = absolute_dir (fname); #endif /* * Create assembly struct, and enter it into the assembly cache */ ass = g_new0 (MonoAssembly, 1); ass->basedir = base_dir; ass->context.no_managed_load_event = req->no_managed_load_event; ass->image = image; MONO_PROFILER_RAISE (assembly_loading, (ass)); mono_assembly_fill_assembly_name (image, &ass->aname); if (mono_defaults.corlib && strcmp (ass->aname.name, MONO_ASSEMBLY_CORLIB_NAME) == 0) { // MS.NET doesn't support loading other mscorlibs g_free (ass); g_free (base_dir); mono_image_addref (mono_defaults.corlib); *status = MONO_IMAGE_OK; return 
mono_defaults.corlib->assembly; } /* Add a non-temporary reference because of ass->image */ mono_image_addref (image); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Image addref %s[%p] (%s) -> %s[%p]: %d", ass->aname.name, ass, mono_alc_is_default (mono_image_get_alc (image)) ? "default ALC" : "custom ALC", image->name, image, image->ref_count); /* * The load hooks might take locks so we can't call them while holding the * assemblies lock. */ if (ass->aname.name && !req->no_invoke_search_hook) { /* FIXME: I think individual context should probably also look for an existing MonoAssembly here, we just need to pass the asmctx to the search hook so that it does a filename match (I guess?) */ ass2 = mono_assembly_invoke_search_hook_internal (req->alc, NULL, &ass->aname, FALSE); if (ass2) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Image %s[%p] reusing existing assembly %s[%p]", ass->aname.name, ass, ass2->aname.name, ass2); g_free (ass); g_free (base_dir); mono_image_close (image); *status = MONO_IMAGE_OK; return ass2; } } /* We need to check for ReferenceAssemblyAttribute before we * mark the assembly as loaded and before we fire the load * hook. Otherwise mono_domain_fire_assembly_load () in * appdomain.c will cache a mapping from the assembly name to * this image and we won't be able to look for a different * candidate. */ { ERROR_DECL (refasm_error); if (mono_assembly_has_reference_assembly_attribute (ass, refasm_error)) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Image for assembly '%s' (%s) has ReferenceAssemblyAttribute, skipping", ass->aname.name, image->name); g_free (ass); g_free (base_dir); mono_image_close (image); *status = MONO_IMAGE_IMAGE_INVALID; return NULL; } mono_error_cleanup (refasm_error); } if (predicate && !predicate (ass, user_data)) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Predicate returned FALSE, skipping '%s' (%s)\n", ass->aname.name, image->name); g_free (ass); g_free (base_dir); mono_image_close (image); *status = MONO_IMAGE_IMAGE_INVALID; return NULL; } mono_assemblies_lock (); /* If an assembly is loaded into an individual context, always return a * new MonoAssembly, even if another assembly with the same name has * already been loaded. */ if (image->assembly && !req->no_invoke_search_hook) { /* * This means another thread has already loaded the assembly, but not yet * called the load hooks so the search hook can't find the assembly. */ mono_assemblies_unlock (); ass2 = image->assembly; g_free (ass); g_free (base_dir); mono_image_close (image); *status = MONO_IMAGE_OK; return ass2; } mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Prepared to set up assembly '%s' (%s)", ass->aname.name, image->name); /* If asmctx is INDIVIDUAL, image->assembly might not be NULL, so don't * overwrite it. */ if (image->assembly == NULL) image->assembly = ass; loaded_assemblies = g_list_prepend (loaded_assemblies, ass); loaded_assembly_count++; mono_assemblies_unlock (); #ifdef HOST_WIN32 if (m_image_is_module_handle (image)) mono_image_fixup_vtable (image); #endif mono_assembly_invoke_load_hook_internal (req->alc, ass); MONO_PROFILER_RAISE (assembly_loaded, (ass)); return ass; } /** * mono_assembly_load_from: * \param image Image to load the assembly from * \param fname assembly name to associate with the assembly * \param status return status code * * If the provided \p image has an assembly reference, it will process the given * image as an assembly with the given name. 
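* * A sketch of pairing this with `api:mono_image_open` (the file name is illustrative; error handling elided): MonoImageOpenStatus status; MonoImage *image = mono_image_open ("plugin.dll", &status); MonoAssembly *assembly = image ? mono_assembly_load_from (image, "plugin.dll", &status) : NULL;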
* * Most likely you want to use the `api:mono_assembly_load_full` method instead. * * This is equivalent to calling `api:mono_assembly_load_from_full` with the * \p refonly parameter set to FALSE. * \returns A valid pointer to a \c MonoAssembly* on success and then \p status will be * set to \c MONO_IMAGE_OK; or NULL on error. * * If there is an error loading the assembly the \p status will indicate the * reason with \p status being set to \c MONO_IMAGE_INVALID if the * image did not contain an assembly reference table. */ MonoAssembly * mono_assembly_load_from (MonoImage *image, const char *fname, MonoImageOpenStatus *status) { MonoAssembly *res; MONO_ENTER_GC_UNSAFE; MonoAssemblyLoadRequest req; MonoImageOpenStatus def_status; if (!status) status = &def_status; mono_assembly_request_prepare_load (&req, mono_alc_get_default ()); res = mono_assembly_request_load_from (image, fname, &req, status); MONO_EXIT_GC_UNSAFE; return res; } /** * mono_assembly_name_free_internal: * \param aname assembly name to free * * Frees the provided assembly name object. * (it does not free the object itself, only the name members). */ void mono_assembly_name_free_internal (MonoAssemblyName *aname) { MONO_REQ_GC_UNSAFE_MODE; if (aname == NULL) return; g_free ((void *) aname->name); g_free ((void *) aname->culture); g_free ((void *) aname->hash_value); g_free ((guint8*) aname->public_key); } static gboolean parse_public_key (const gchar *key, gchar** pubkey, gboolean *is_ecma) { const gchar *pkey; gchar header [16], val, *arr, *endp; gint i, j, offset, bitlen, keylen, pkeylen; //both pubkey and is_ecma are required arguments g_assert (pubkey && is_ecma); keylen = strlen (key) >> 1; if (keylen < 1) return FALSE; /* allow the ECMA standard key */ if (strcmp (key, "00000000000000000400000000000000") == 0) { *pubkey = NULL; *is_ecma = TRUE; return TRUE; } *is_ecma = FALSE; val = g_ascii_xdigit_value (key [0]) << 4; val |= g_ascii_xdigit_value (key [1]); switch (val) { case 0x00: if (keylen < 13) return FALSE; val = g_ascii_xdigit_value (key [24]); val |= g_ascii_xdigit_value (key [25]); if (val != 0x06) return FALSE; pkey = key + 24; break; case 0x06: pkey = key; break; default: return FALSE; } /* We need the first 16 bytes * to check whether this key is valid or not */ pkeylen = strlen (pkey) >> 1; if (pkeylen < 16) return FALSE; for (i = 0, j = 0; i < 16; i++) { header [i] = g_ascii_xdigit_value (pkey [j++]) << 4; header [i] |= g_ascii_xdigit_value (pkey [j++]); } if (header [0] != 0x06 || /* PUBLICKEYBLOB (0x06) */ header [1] != 0x02 || /* Version (0x02) */ header [2] != 0x00 || /* Reserved (word) */ header [3] != 0x00 || (guint)(read32 (header + 8)) != 0x31415352) /* DWORD magic = RSA1 */ return FALSE; /* Based on this length, we _should_ be able to know if the length is right */ bitlen = read32 (header + 12) >> 3; if ((bitlen + 16 + 4) != pkeylen) return FALSE; arr = (gchar *)g_malloc (keylen + 4); /* Encode the size of the blob */ mono_metadata_encode_value (keylen, &arr[0], &endp); offset = (gint)(endp-arr); for (i = offset, j = 0; i < keylen + offset; i++) { arr [i] = g_ascii_xdigit_value (key [j++]) << 4; arr [i] |= g_ascii_xdigit_value (key [j++]); } *pubkey = arr; return TRUE; } static gboolean build_assembly_name (const char *name, const char *version, const char *culture, const char *token, const char *key, guint32 flags, guint32 arch, MonoAssemblyName *aname, gboolean save_public_key) { gint len; gint version_parts; gchar *pkeyptr, *encoded, tok [8]; memset (aname, 0, sizeof (MonoAssemblyName)); if
(version) { int parts [4]; int i; int part_len; parts [2] = -1; parts [3] = -1; const char *s = version; version_parts = 0; for (i = 0; i < 4; ++i) { int n = sscanf (s, "%u%n", &parts [i], &part_len); if (n != 1) return FALSE; if (parts [i] < 0 || parts [i] > 65535) return FALSE; if (i < 2 && parts [i] == 65535) return FALSE; version_parts ++; s += part_len; if (s [0] == '\0') break; if (i < 3) { if (s [0] != '.') return FALSE; s ++; } } if (s [0] != '\0') return FALSE; if (version_parts < 2 || version_parts > 4) return FALSE; aname->major = parts [0]; aname->minor = parts [1]; if (version_parts >= 3) aname->build = parts [2]; else aname->build = -1; if (version_parts == 4) aname->revision = parts [3]; else aname->revision = -1; } aname->flags = flags; aname->arch = arch; aname->name = g_strdup (name); if (culture) { if (g_ascii_strcasecmp (culture, "neutral") == 0) aname->culture = g_strdup (""); else aname->culture = g_strdup (culture); } if (token && strncmp (token, "null", 4) != 0) { char *lower; /* the constant includes the ending NULL, hence the -1 */ if (strlen (token) != (MONO_PUBLIC_KEY_TOKEN_LENGTH - 1)) { mono_assembly_name_free_internal (aname); return FALSE; } lower = g_ascii_strdown (token, MONO_PUBLIC_KEY_TOKEN_LENGTH); g_strlcpy ((char*)aname->public_key_token, lower, MONO_PUBLIC_KEY_TOKEN_LENGTH); g_free (lower); } if (key) { gboolean is_ecma = FALSE; gchar *pkey = NULL; if (strcmp (key, "null") == 0 || !parse_public_key (key, &pkey, &is_ecma)) { mono_assembly_name_free_internal (aname); return FALSE; } if (is_ecma) { g_assert (pkey == NULL); aname->public_key = NULL; g_strlcpy ((gchar*)aname->public_key_token, "b77a5c561934e089", MONO_PUBLIC_KEY_TOKEN_LENGTH); return TRUE; } len = mono_metadata_decode_blob_size ((const gchar *) pkey, (const gchar **) &pkeyptr); // We also need to generate the key token mono_digest_get_public_token ((guchar*) tok, (guint8*) pkeyptr, len); encoded = encode_public_tok ((guchar*) tok, 8); g_strlcpy ((gchar*)aname->public_key_token, encoded, MONO_PUBLIC_KEY_TOKEN_LENGTH); g_free (encoded); if (save_public_key) aname->public_key = (guint8*) pkey; else g_free (pkey); } return TRUE; } static gboolean split_key_value (const gchar *pair, gchar **key, guint32 *keylen, gchar **value) { char *eqsign = (char*)strchr (pair, '='); if (!eqsign) { *key = NULL; *keylen = 0; *value = NULL; return FALSE; } *key = (gchar*)pair; *keylen = eqsign - *key; while (*keylen > 0 && g_ascii_isspace ((*key) [*keylen - 1])) (*keylen)--; *value = g_strstrip (eqsign + 1); return TRUE; } gboolean mono_assembly_name_parse_full (const char *name, MonoAssemblyName *aname, gboolean save_public_key, gboolean *is_version_defined, gboolean *is_token_defined) { gchar *dllname; gchar *dllname_uq; gchar *version = NULL; gchar *version_uq; gchar *culture = NULL; gchar *culture_uq; gchar *token = NULL; gchar *token_uq; gchar *key = NULL; gchar *key_uq; gchar *retargetable = NULL; gchar *retargetable_uq; gchar *procarch = NULL; gchar *procarch_uq; gboolean res; gchar *value, *part_name; guint32 part_name_len; gchar **parts; gchar **tmp; gboolean version_defined; gboolean token_defined; guint32 flags = 0; guint32 arch = MONO_PROCESSOR_ARCHITECTURE_NONE; if (!is_version_defined) is_version_defined = &version_defined; *is_version_defined = FALSE; if (!is_token_defined) is_token_defined = &token_defined; *is_token_defined = FALSE; parts = tmp = g_strsplit (name, ",", 6); if (!tmp || !*tmp) { goto cleanup_and_fail; } dllname = g_strstrip (*tmp); // Simple name cannot be empty if (!*dllname) 
{ goto cleanup_and_fail; } // Characters /, :, and \ not allowed in simple names while (*dllname) { gchar tmp_char = *dllname; if (tmp_char == '/' || tmp_char == ':' || tmp_char == '\\') goto cleanup_and_fail; dllname++; } dllname = *tmp; tmp++; while (*tmp) { if (!split_key_value (g_strstrip (*tmp), &part_name, &part_name_len, &value)) goto cleanup_and_fail; if (part_name_len == 7 && !g_ascii_strncasecmp (part_name, "Version", part_name_len)) { *is_version_defined = TRUE; if (version != NULL || strlen (value) == 0) { goto cleanup_and_fail; } version = value; tmp++; continue; } if (part_name_len == 7 && !g_ascii_strncasecmp (part_name, "Culture", part_name_len)) { if (culture != NULL || strlen (value) == 0) { goto cleanup_and_fail; } culture = value; tmp++; continue; } if (part_name_len == 14 && !g_ascii_strncasecmp (part_name, "PublicKeyToken", part_name_len)) { *is_token_defined = TRUE; if (token != NULL || key != NULL || strlen (value) == 0) { goto cleanup_and_fail; } token = value; tmp++; continue; } if (part_name_len == 9 && !g_ascii_strncasecmp (part_name, "PublicKey", part_name_len)) { if (token != NULL || key != NULL || strlen (value) == 0) { goto cleanup_and_fail; } key = value; tmp++; continue; } if (part_name_len == 12 && !g_ascii_strncasecmp (part_name, "Retargetable", part_name_len)) { if (retargetable != NULL) { goto cleanup_and_fail; } retargetable = value; retargetable_uq = unquote (retargetable); if (retargetable_uq != NULL) retargetable = retargetable_uq; if (!g_ascii_strcasecmp (retargetable, "yes")) { flags |= ASSEMBLYREF_RETARGETABLE_FLAG; } else if (g_ascii_strcasecmp (retargetable, "no")) { g_free (retargetable_uq); goto cleanup_and_fail; } g_free (retargetable_uq); tmp++; continue; } if (part_name_len == 21 && !g_ascii_strncasecmp (part_name, "ProcessorArchitecture", part_name_len)) { if (procarch != NULL) { goto cleanup_and_fail; } procarch = value; procarch_uq = unquote (procarch); if (procarch_uq != NULL) procarch = procarch_uq; if (!g_ascii_strcasecmp (procarch, "MSIL")) arch = MONO_PROCESSOR_ARCHITECTURE_MSIL; else if (!g_ascii_strcasecmp (procarch, "X86")) arch = MONO_PROCESSOR_ARCHITECTURE_X86; else if (!g_ascii_strcasecmp (procarch, "IA64")) arch = MONO_PROCESSOR_ARCHITECTURE_IA64; else if (!g_ascii_strcasecmp (procarch, "AMD64")) arch = MONO_PROCESSOR_ARCHITECTURE_AMD64; else if (!g_ascii_strcasecmp (procarch, "ARM")) arch = MONO_PROCESSOR_ARCHITECTURE_ARM; else { g_free (procarch_uq); goto cleanup_and_fail; } flags |= arch << 4; g_free (procarch_uq); tmp++; continue; } // compat: If we got here, the attribute name is unknown to us. Ignore it. tmp++; } /* if retargetable flag is set, then we must have a fully qualified name */ if (retargetable != NULL && (version == NULL || culture == NULL || (key == NULL && token == NULL))) { goto cleanup_and_fail; } dllname_uq = unquote (dllname); version_uq = unquote (version); culture_uq = unquote (culture); token_uq = unquote (token); key_uq = unquote (key); res = build_assembly_name ( dllname_uq == NULL ? dllname : dllname_uq, version_uq == NULL ? version : version_uq, culture_uq == NULL ? culture : culture_uq, token_uq == NULL ? token : token_uq, key_uq == NULL ? 
key : key_uq, flags, arch, aname, save_public_key); g_free (dllname_uq); g_free (version_uq); g_free (culture_uq); g_free (token_uq); g_free (key_uq); g_strfreev (parts); return res; cleanup_and_fail: g_strfreev (parts); return FALSE; } static char* unquote (const char *str) { gint slen; const char *end; if (str == NULL) return NULL; slen = strlen (str); if (slen < 2) return NULL; if (*str != '\'' && *str != '\"') return NULL; end = str + slen - 1; if (*str != *end) return NULL; return g_strndup (str + 1, slen - 2); } /** * mono_assembly_name_parse: * \param name name to parse * \param aname the destination assembly name * * Parses an assembly qualified type name and assigns the name, * version, culture and token to the provided assembly name object. * * \returns TRUE if the name could be parsed. */ gboolean mono_assembly_name_parse (const char *name, MonoAssemblyName *aname) { return mono_assembly_name_parse_full (name, aname, FALSE, NULL, NULL); } /** * mono_assembly_name_new: * \param name name to parse * * Allocate a new \c MonoAssemblyName and fill its values from the * passed \p name. * * \returns a newly allocated structure or NULL if there was any failure. */ MonoAssemblyName* mono_assembly_name_new (const char *name) { MonoAssemblyName *result = NULL; MONO_ENTER_GC_UNSAFE; MonoAssemblyName *aname = g_new0 (MonoAssemblyName, 1); if (mono_assembly_name_parse (name, aname)) result = aname; else g_free (aname); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_assembly_name_get_name: */ const char* mono_assembly_name_get_name (MonoAssemblyName *aname) { const char *result = NULL; MONO_ENTER_GC_UNSAFE; result = aname->name; MONO_EXIT_GC_UNSAFE; return result; } /** * mono_assembly_name_get_culture: */ const char* mono_assembly_name_get_culture (MonoAssemblyName *aname) { const char *result = NULL; MONO_ENTER_GC_UNSAFE; result = aname->culture; MONO_EXIT_GC_UNSAFE; return result; } /** * mono_assembly_name_get_pubkeytoken: */ mono_byte* mono_assembly_name_get_pubkeytoken (MonoAssemblyName *aname) { if (aname->public_key_token [0]) return aname->public_key_token; return NULL; } /** * mono_assembly_name_get_version: */ uint16_t mono_assembly_name_get_version (MonoAssemblyName *aname, uint16_t *minor, uint16_t *build, uint16_t *revision) { if (minor) *minor = aname->minor; if (build) *build = aname->build; if (revision) *revision = aname->revision; return aname->major; } gboolean mono_assembly_name_culture_is_neutral (const MonoAssemblyName *aname) { return (!aname->culture || aname->culture [0] == 0); } /** * mono_assembly_load_with_partial_name: * \param name an assembly name that is then parsed by `api:mono_assembly_name_parse`. * \param status return status code * * Loads a \c MonoAssembly from a name. The name is parsed using `api:mono_assembly_name_parse`, * so it might contain a qualified type name, version, culture and token. * * This will load the assembly from the file whose name is derived from the assembly name * by appending the \c .dll extension. * * The assembly is loaded from either one of the extra Global Assembly Caches specified * by the extra GAC paths (specified by the \c MONO_GAC_PREFIX environment variable) or * if that fails from the GAC. * * \returns NULL on failure, or a pointer to a \c MonoAssembly on success. 
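* * Usage sketch (the partial name below is only an example): MonoImageOpenStatus status; MonoAssembly *assembly = mono_assembly_load_with_partial_name ("System.Xml", &status);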
*/ MonoAssembly* mono_assembly_load_with_partial_name (const char *name, MonoImageOpenStatus *status) { MonoAssembly *result; MONO_ENTER_GC_UNSAFE; MonoImageOpenStatus def_status; if (!status) status = &def_status; result = mono_assembly_load_with_partial_name_internal (name, mono_alc_get_default (), status); MONO_EXIT_GC_UNSAFE; return result; } MonoAssembly* mono_assembly_load_with_partial_name_internal (const char *name, MonoAssemblyLoadContext *alc, MonoImageOpenStatus *status) { ERROR_DECL (error); MonoAssembly *res; MonoAssemblyName *aname, base_name; MonoAssemblyName mapped_aname; MONO_REQ_GC_UNSAFE_MODE; g_assert (status != NULL); memset (&base_name, 0, sizeof (MonoAssemblyName)); aname = &base_name; if (!mono_assembly_name_parse (name, aname)) return NULL; /* * If no specific version has been requested, make sure we load the * correct version for system assemblies. */ if ((aname->major | aname->minor | aname->build | aname->revision) == 0) aname = mono_assembly_remap_version (aname, &mapped_aname); res = mono_assembly_loaded_internal (alc, aname); if (res) { mono_assembly_name_free_internal (aname); return res; } res = invoke_assembly_preload_hook (alc, aname, assemblies_path); if (res) { mono_assembly_name_free_internal (aname); return res; } mono_assembly_name_free_internal (aname); if (!res) { res = mono_try_assembly_resolve (alc, name, NULL, error); if (!is_ok (error)) { mono_error_cleanup (error); if (*status == MONO_IMAGE_OK) *status = MONO_IMAGE_IMAGE_INVALID; } } return res; } MonoAssembly* mono_assembly_load_corlib (MonoImageOpenStatus *status) { MonoAssemblyName *aname; MonoAssemblyOpenRequest req; mono_assembly_request_prepare_open (&req, mono_alc_get_default ()); if (corlib) { /* g_print ("corlib already loaded\n"); */ return corlib; } aname = mono_assembly_name_new (MONO_ASSEMBLY_CORLIB_NAME); corlib = invoke_assembly_preload_hook (req.request.alc, aname, NULL); /* MonoCore preload hook should know how to find it */ /* FIXME: AOT compiler comes here without an installed hook. 
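When that happens, the fallbacks below are tried in order: an explicit assemblies path (MONO_PATH / mono_set_assemblies_path), then a bundled corlib.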
*/ if (!corlib) { if (assemblies_path) { // Custom assemblies path set via MONO_PATH or mono_set_assemblies_path char *corlib_name = g_strdup_printf ("%s.dll", MONO_ASSEMBLY_CORLIB_NAME); corlib = load_in_path (corlib_name, (const char**)assemblies_path, &req, status); } } if (!corlib) { /* Maybe it's in a bundle */ char *corlib_name = g_strdup_printf ("%s.dll", MONO_ASSEMBLY_CORLIB_NAME); corlib = mono_assembly_request_open (corlib_name, &req, status); } g_assert (corlib); return corlib; } gboolean mono_assembly_candidate_predicate_sn_same_name (MonoAssembly *candidate, gpointer ud) { MonoAssemblyName *wanted_name = (MonoAssemblyName*)ud; MonoAssemblyName *candidate_name = &candidate->aname; g_assert (wanted_name != NULL); g_assert (candidate_name != NULL); if (mono_trace_is_traced (G_LOG_LEVEL_INFO, MONO_TRACE_ASSEMBLY)) { char * s = mono_stringify_assembly_name (wanted_name); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Predicate: wanted = %s", s); g_free (s); s = mono_stringify_assembly_name (candidate_name); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Predicate: candidate = %s", s); g_free (s); } return mono_assembly_check_name_match (wanted_name, candidate_name); } gboolean mono_assembly_check_name_match (MonoAssemblyName *wanted_name, MonoAssemblyName *candidate_name) { gboolean result = mono_assembly_names_equal_flags (wanted_name, candidate_name, MONO_ANAME_EQ_IGNORE_VERSION | MONO_ANAME_EQ_IGNORE_PUBKEY); if (result && assembly_names_compare_versions (wanted_name, candidate_name, -1) > 0) result = FALSE; mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Predicate: candidate and wanted names %s", result ? "match, returning TRUE" : "don't match, returning FALSE"); return result; } MonoAssembly* mono_assembly_request_byname (MonoAssemblyName *aname, const MonoAssemblyByNameRequest *req, MonoImageOpenStatus *status) { mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Request to load %s in alc %p", aname->name, (gpointer)req->request.alc); MonoAssembly *result; if (status) *status = MONO_IMAGE_OK; result = netcore_load_reference (aname, req->request.alc, req->requesting_assembly, !req->no_postload_search); return result; } MonoAssembly * mono_assembly_load_full_alc (MonoGCHandle alc_gchandle, MonoAssemblyName *aname, const char *basedir, MonoImageOpenStatus *status) { MonoAssembly *res; MONO_ENTER_GC_UNSAFE; MonoAssemblyByNameRequest req; MonoAssemblyLoadContext *alc = mono_alc_from_gchandle (alc_gchandle); mono_assembly_request_prepare_byname (&req, alc); req.requesting_assembly = NULL; req.basedir = basedir; res = mono_assembly_request_byname (aname, &req, status); MONO_EXIT_GC_UNSAFE; return res; } /** * mono_assembly_load_full: * \param aname A MonoAssemblyName with the assembly name to load. * \param basedir A directory to look up the assembly at. * \param status a pointer to a MonoImageOpenStatus to return the status of the load operation * \param refonly Whether this assembly is being opened in "reflection-only" mode. * * Loads the assembly referenced by \p aname, if the value of \p basedir is not NULL, it * attempts to load the assembly from that directory before probing the standard locations. * * If the assembly is being opened in reflection-only mode (\p refonly set to TRUE) then no * assembly binding takes place. * * \returns the assembly referenced by \p aname loaded or NULL on error. On error the * value pointed by \p status is updated with an error code. 
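* * Typical call sequence, as a sketch (the display name is illustrative, and the public mono_assembly_name_free is assumed for cleanup): MonoAssemblyName aname; if (mono_assembly_name_parse ("MyLib, Version=1.0.0.0, Culture=neutral", &aname)) { MonoImageOpenStatus status; MonoAssembly *assembly = mono_assembly_load_full (&aname, NULL, &status, FALSE); mono_assembly_name_free (&aname); }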
*/ MonoAssembly* mono_assembly_load_full (MonoAssemblyName *aname, const char *basedir, MonoImageOpenStatus *status, gboolean refonly) { if (refonly) { if (status) *status = MONO_IMAGE_IMAGE_INVALID; return NULL; } MonoAssembly *res; MONO_ENTER_GC_UNSAFE; MonoAssemblyByNameRequest req; mono_assembly_request_prepare_byname (&req, mono_alc_get_default ()); req.requesting_assembly = NULL; req.basedir = basedir; res = mono_assembly_request_byname (aname, &req, status); MONO_EXIT_GC_UNSAFE; return res; } /** * mono_assembly_load: * \param aname A MonoAssemblyName with the assembly name to load. * \param basedir A directory to look up the assembly at. * \param status a pointer to a MonoImageOpenStatus to return the status of the load operation * * Loads the assembly referenced by \p aname, if the value of \p basedir is not NULL, it * attempts to load the assembly from that directory before probing the standard locations. * * \returns the assembly referenced by \p aname loaded or NULL on error. On error the * value pointed by \p status is updated with an error code. */ MonoAssembly* mono_assembly_load (MonoAssemblyName *aname, const char *basedir, MonoImageOpenStatus *status) { MonoAssemblyByNameRequest req; mono_assembly_request_prepare_byname (&req, mono_alc_get_default ()); req.requesting_assembly = NULL; req.basedir = basedir; return mono_assembly_request_byname (aname, &req, status); } /** * mono_assembly_loaded_full: * \param aname an assembly to look for. * \param refonly Whether this assembly is being opened in "reflection-only" mode. * * This is used to determine if the specified assembly has been loaded * \returns NULL If the given \p aname assembly has not been loaded, or a pointer to * a \c MonoAssembly that matches the \c MonoAssemblyName specified. */ MonoAssembly* mono_assembly_loaded_full (MonoAssemblyName *aname, gboolean refonly) { if (refonly) return NULL; MonoAssemblyLoadContext *alc = mono_alc_get_default (); return mono_assembly_loaded_internal (alc, aname); } MonoAssembly * mono_assembly_loaded_internal (MonoAssemblyLoadContext *alc, MonoAssemblyName *aname) { MonoAssembly *res; MonoAssemblyName mapped_aname; aname = mono_assembly_remap_version (aname, &mapped_aname); res = mono_assembly_invoke_search_hook_internal (alc, NULL, aname, FALSE); return res; } /** * mono_assembly_loaded: * \param aname an assembly to look for. * * This is used to determine if the specified assembly has been loaded * \returns NULL If the given \p aname assembly has not been loaded, or a pointer to * a \c MonoAssembly that matches the \c MonoAssemblyName specified. */ MonoAssembly* mono_assembly_loaded (MonoAssemblyName *aname) { MonoAssembly *res; MONO_ENTER_GC_UNSAFE; res = mono_assembly_loaded_internal (mono_alc_get_default (), aname); MONO_EXIT_GC_UNSAFE; return res; } void mono_assembly_release_gc_roots (MonoAssembly *assembly) { if (assembly == NULL || assembly == REFERENCE_MISSING) return; if (assembly_is_dynamic (assembly)) { int i; MonoDynamicImage *dynimg = (MonoDynamicImage *)assembly->image; for (i = 0; i < dynimg->image.module_count; ++i) mono_dynamic_image_release_gc_roots ((MonoDynamicImage *)dynimg->image.modules [i]); mono_dynamic_image_release_gc_roots (dynimg); } } /* * Returns whether mono_assembly_close_finish() must be called as * well. See comment for mono_image_close_except_pools() for why we * unload in two steps. 
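* * The two steps, as composed by mono_assembly_close (): if (mono_assembly_close_except_image_pools (assembly)) mono_assembly_close_finish (assembly);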
*/ gboolean mono_assembly_close_except_image_pools (MonoAssembly *assembly) { g_return_val_if_fail (assembly != NULL, FALSE); if (assembly == REFERENCE_MISSING) return FALSE; /* Might be 0 already */ if (mono_assembly_decref (assembly) > 0) return FALSE; MONO_PROFILER_RAISE (assembly_unloading, (assembly)); mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_ASSEMBLY, "Unloading assembly %s [%p].", assembly->aname.name, assembly); mono_debug_close_image (assembly->image); mono_assemblies_lock (); loaded_assemblies = g_list_remove (loaded_assemblies, assembly); loaded_assembly_count--; mono_assemblies_unlock (); assembly->image->assembly = NULL; if (!mono_image_close_except_pools (assembly->image)) assembly->image = NULL; g_slist_foreach (assembly->friend_assembly_names, free_assembly_name_item, NULL); g_slist_foreach (assembly->ignores_checks_assembly_names, free_assembly_name_item, NULL); g_slist_free (assembly->friend_assembly_names); g_slist_free (assembly->ignores_checks_assembly_names); g_free (assembly->basedir); MONO_PROFILER_RAISE (assembly_unloaded, (assembly)); return TRUE; } void mono_assembly_close_finish (MonoAssembly *assembly) { g_assert (assembly && assembly != REFERENCE_MISSING); if (assembly->image) mono_image_close_finish (assembly->image); if (assembly_is_dynamic (assembly)) { g_free ((char*)assembly->aname.culture); } else { g_free (assembly); } } /** * mono_assembly_close: * \param assembly the assembly to release. * * This method releases a reference to the \p assembly. The assembly is * only released when all the outstanding references to it are released. */ void mono_assembly_close (MonoAssembly *assembly) { if (mono_assembly_close_except_image_pools (assembly)) mono_assembly_close_finish (assembly); } /** * mono_assembly_load_module: */ MonoImage* mono_assembly_load_module (MonoAssembly *assembly, guint32 idx) { ERROR_DECL (error); MonoImage *result = mono_assembly_load_module_checked (assembly, idx, error); mono_error_assert_ok (error); return result; } MONO_API MonoImage* mono_assembly_load_module_checked (MonoAssembly *assembly, uint32_t idx, MonoError *error) { return mono_image_load_file_for_image_checked (assembly->image, idx, error); } /** * mono_assembly_foreach: * \param func function to invoke for each assembly loaded * \param user_data data passed to the callback * * Invokes the provided \p func callback for each assembly loaded into * the runtime. The first parameter passed to the callback is the * \c MonoAssembly*, and the second parameter is the \p user_data. * * This is done for all assemblies loaded in the runtime, not just * those loaded in the current application domain. */ void mono_assembly_foreach (GFunc func, gpointer user_data) { GList *copy; /* * We make a copy of the list to avoid calling the callback inside the * lock, which could lead to deadlocks. */ mono_assemblies_lock (); copy = g_list_copy (loaded_assemblies); mono_assemblies_unlock (); g_list_foreach (copy, func, user_data); g_list_free (copy); } /** * mono_assemblies_cleanup: * * Free all resources used by this module. 
*/ void mono_assemblies_cleanup (void) { } /* * Holds the assembly of the application, for * System.Diagnostics.Process::MainModule */ static MonoAssembly *main_assembly=NULL; /** * mono_assembly_set_main: */ void mono_assembly_set_main (MonoAssembly *assembly) { main_assembly = assembly; } /** * mono_assembly_get_main: * * Returns: the assembly for the application, the first assembly that is loaded by the VM */ MonoAssembly * mono_assembly_get_main (void) { return (main_assembly); } /** * mono_assembly_get_image: * \param assembly The assembly to retrieve the image from * * \returns the \c MonoImage associated with this assembly. */ MonoImage* mono_assembly_get_image (MonoAssembly *assembly) { MonoImage *res; MONO_ENTER_GC_UNSAFE; res = mono_assembly_get_image_internal (assembly); MONO_EXIT_GC_UNSAFE; return res; } MonoImage* mono_assembly_get_image_internal (MonoAssembly *assembly) { MONO_REQ_GC_UNSAFE_MODE; return assembly->image; } /** * mono_assembly_get_name: * \param assembly The assembly to retrieve the name from * * The returned name's lifetime is the same as \p assembly's. * * \returns the \c MonoAssemblyName associated with this assembly. */ MonoAssemblyName * mono_assembly_get_name (MonoAssembly *assembly) { MonoAssemblyName *res; MONO_ENTER_GC_UNSAFE; res = mono_assembly_get_name_internal (assembly); MONO_EXIT_GC_UNSAFE; return res; } MonoAssemblyName * mono_assembly_get_name_internal (MonoAssembly *assembly) { MONO_REQ_GC_UNSAFE_MODE; return &assembly->aname; } /** * mono_register_bundled_assemblies: */ void mono_register_bundled_assemblies (const MonoBundledAssembly **assemblies) { bundles = assemblies; } /** * mono_create_new_bundled_satellite_assembly: */ MonoBundledSatelliteAssembly * mono_create_new_bundled_satellite_assembly (const char *name, const char *culture, const unsigned char *data, unsigned int size) { MonoBundledSatelliteAssembly *satellite_assembly = g_new0 (MonoBundledSatelliteAssembly, 1); satellite_assembly->name = strdup (name); satellite_assembly->culture = strdup (culture); satellite_assembly->data = data; satellite_assembly->size = size; return satellite_assembly; } /** * mono_register_bundled_satellite_assemblies: */ void mono_register_bundled_satellite_assemblies (const MonoBundledSatelliteAssembly **assemblies) { satellite_bundles = assemblies; } /** * mono_assembly_is_jit_optimizer_disabled: * * \param assm the assembly * * Returns TRUE if the System.Diagnostics.DebuggableAttribute has the * DebuggingModes.DisableOptimizations bit set. * */ gboolean mono_assembly_is_jit_optimizer_disabled (MonoAssembly *ass) { ERROR_DECL (error); g_assert (ass); if (ass->jit_optimizer_disabled_inited) return ass->jit_optimizer_disabled; MonoClass *klass = mono_class_try_get_debuggable_attribute_class (); if (!klass) { /* Linked away */ ass->jit_optimizer_disabled = FALSE; mono_memory_barrier (); ass->jit_optimizer_disabled_inited = TRUE; return FALSE; } gboolean disable_opts = FALSE; MonoCustomAttrInfo* attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, error); mono_error_cleanup (error); /* FIXME don't swallow the error */ if (attrs) { for (int i = 0; i < attrs->num_attrs; ++i) { MonoCustomAttrEntry *attr = &attrs->attrs [i]; const gchar *p; MonoMethodSignature *sig; if (!attr->ctor || attr->ctor->klass != klass) continue; /* Decode the attribute. 
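The blob layout follows the ECMA-335 custom attribute encoding (partition II.23.3): a 2-byte prolog of 0x0001, then the fixed constructor arguments in signature order, then a 2-byte count of named arguments.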
See reflection.c */ p = (const char*)attr->data; g_assert (read16 (p) == 0x0001); p += 2; // FIXME: Support named parameters sig = mono_method_signature_internal (attr->ctor); MonoClass *param_class; if (sig->param_count == 2 && sig->params [0]->type == MONO_TYPE_BOOLEAN && sig->params [1]->type == MONO_TYPE_BOOLEAN) { /* Two boolean arguments */ p ++; disable_opts = *p; } else if (sig->param_count == 1 && sig->params[0]->type == MONO_TYPE_VALUETYPE && (param_class = mono_class_from_mono_type_internal (sig->params[0])) != NULL && m_class_is_enumtype (param_class) && !strcmp (m_class_get_name (param_class), "DebuggingModes")) { /* System.Diagnostics.DebuggableAttribute+DebuggingModes */ int32_t flags = read32 (p); p += 4; disable_opts = (flags & 0x0100) != 0; } } mono_custom_attrs_free (attrs); } ass->jit_optimizer_disabled = disable_opts; mono_memory_barrier (); ass->jit_optimizer_disabled_inited = TRUE; return disable_opts; } guint32 mono_assembly_get_count (void) { return loaded_assembly_count; }
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/aarch64/Gis_signal_frame.c
/* libunwind - a platform-independent unwind library Copyright (C) 2012 Tommi Rantala <[email protected]> Copyright (C) 2013 Linaro Limited This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind_i.h" /* The restorer stub will always have the form: d2801168 movz x8, #0x8b d4000001 svc #0x0 */ int unw_is_signal_frame (unw_cursor_t *cursor) { #ifdef __linux__ struct cursor *c = (struct cursor *) cursor; unw_word_t w0, ip; unw_addr_space_t as; unw_accessors_t *a; void *arg; int ret; as = c->dwarf.as; a = unw_get_accessors_int (as); arg = c->dwarf.as_arg; ip = c->dwarf.ip; ret = (*a->access_mem) (as, ip, &w0, 0, arg); if (ret < 0) return ret; /* FIXME: distinguish 32bit insn vs 64bit registers. */ if (w0 != 0xd4000001d2801168) return 0; return 1; #else return -UNW_ENOINFO; #endif }
/* libunwind - a platform-independent unwind library Copyright (C) 2012 Tommi Rantala <[email protected]> Copyright (C) 2013 Linaro Limited This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind_i.h" /* The restorer stub will always have the form: d2801168 movz x8, #0x8b d4000001 svc #0x0 */ int unw_is_signal_frame (unw_cursor_t *cursor) { #ifdef __linux__ struct cursor *c = (struct cursor *) cursor; unw_word_t w0, ip; unw_addr_space_t as; unw_accessors_t *a; void *arg; int ret; as = c->dwarf.as; a = unw_get_accessors_int (as); arg = c->dwarf.as_arg; ip = c->dwarf.ip; ret = (*a->access_mem) (as, ip, &w0, 0, arg); if (ret < 0) return ret; /* FIXME: distinguish 32bit insn vs 64bit registers. */ if (w0 != 0xd4000001d2801168) return 0; return 1; #else return -UNW_ENOINFO; #endif }
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/libraries/Microsoft.NETCore.Platforms/readme.md
# Runtime IDs The package `Microsoft.NETCore.Platforms` defines the runtime identifiers (RIDs) used by .NET packages to represent runtime-specific assets in NuGet packages. ## What is a RID? A RID is an opaque string that identifies a platform. RIDs have relationships to other RIDs by "importing" the other RID. In that way a RID is a directed graph of compatible RIDs. ## How does NuGet use RIDs? When deciding which assets to use from a package and which packages to include, NuGet will consider a RID if the project.json lists a RID in its `runtimes` section. - NuGet chooses the best RID-specific asset, where best is determined by a breadth-first traversal of the RID graph. Breadth ordering is document order. - NuGet considers RID-specific assets for two asset types: lib and native. - NuGet never considers RID-specific assets for compile. ### Best RID Consider the partial RID-graph: ``` "any": {}, "win": { "#import": [ "any" ] }, "win-x86": { "#import": [ "win" ] }, "win-x64": { "#import": [ "win" ] }, "win7": { "#import": [ "win" ] }, "win7-x86": { "#import": [ "win7", "win-x86" ] }, "win7-x64": { "#import": [ "win7", "win-x64" ] } ``` This can be visualized as a directed graph, as follows: ``` win7-x64 win7-x86 | \ / | | win7 | | | | win-x64 | win-x86 \ | / win | any ``` As such, the best RID, when evaluating for win7-x64, would be: `win7-x64`, `win7`, `win-x64`, `win`, `any` Similarly, when evaluating for `win-x64`: `win-x64`, `win`, `any` Note that `win7` comes before `win-x64` due to the import for `win7` appearing before the import for `win-x64` in document order. ### RID-qualified assets are preferred NuGet will always prefer a RID-qualified asset over a RID-less asset. For example, if a package contains ``` lib/netcoreapp1.0/foo.dll runtimes/win/lib/netcoreapp1.0/foo.dll ``` When resolving for netstandard1.0/win7-x64, NuGet will choose `runtimes/win/lib/netcoreapp1.0/foo.dll`. Additionally, NuGet will always prefer a RID-qualified asset over a RID-less asset, even if the framework is less specific for the RID-qualified asset. ``` lib/netstandard1.5/foo.dll runtimes/win/lib/netstandard1.0/foo.dll ``` When resolving for netstandard1.5/win7-x64, NuGet will choose `runtimes/win/lib/netstandard1.0/foo.dll` over `lib/netstandard1.5/foo.dll` even though `netstandard1.5` is more specific than `netstandard1.0`. ### RID-qualified assets are never used for compile NuGet will select different compile assets than runtime assets. The compile assets can never be RID-qualified. Consider the package: ``` lib/netstandard1.5/foo.dll runtimes/win/lib/netstandard1.0/foo.dll ``` When resolving for netstandard1.5/win7-x64, NuGet will select `lib/netstandard1.5/foo.dll` for the compile asset and `runtimes/win/lib/netstandard1.0/foo.dll` for the runtime asset. ## Adding new RIDs ### Why do I need to add a new RID? NuGet's extensibility mechanism for platform-specific assets requires a RID be defined for any platform that needs assets specific to that platform. Unlike TFMs, which have a known relationship in NuGet (e.g. net4.5 is compatible with net4.0), RIDs are opaque strings which NuGet knows nothing about. The definition and relationship of RIDs comes solely from the `runtime.json` files within the root of the packages referenced by the project. As such, whenever we want to put a new RID in a project.json in order to get assets specific for that RID we have to define the RID in some package. Typically that package is `Microsoft.NETCore.Platforms` if the RID is "official". 
If you'd like to prototype, you can put the RID in any other package and, so long as that package is referenced, you can use that RID. ### Do I really need to add a new RID? If you're prototyping on a platform that is compatible with an existing platform then you can reuse the RID for that existing platform. New RIDs are only needed when an asset needs to be different on a particular platform. `Microsoft.NETCore.Platforms` attempts to define all RIDs that packages may need, and as such will define RIDs for platforms that we don't actually cross-compile for. This is to support higher-level packages, 3rd party packages, that may need to cross-compile for that RID. ### Adding a new OS Add a new `RuntimeGroup` item in `runtimeGroups.props`. For example: ```xml <RuntimeGroup Include="myLinuxDistro"> <Parent>linux</Parent> <Architectures>x86;x64;arm</Architectures> <Versions>42.0;43.0</Versions> </RuntimeGroup> ``` This will create a new RID for `myLinuxDistro` where `myLinuxDistro` should be the string used for the `ID=` value in the `/etc/os-release` file. Whenever modifying the `runtimeGroups.props` make sure to pack the project via the `dotnet pack` command and inspect whether the generated package contains the desired changes. RuntimeGroup items have the following format: - `Identity`: the base string for the RID, without version, architecture, or qualifiers. - `Parent`: the base string for the parent of this RID. This RID will be imported by the baseRID, architecture-specific, and qualifier-specific RIDs (with the latter two appending appropriate architecture and qualifiers). - `Versions`: A list of strings delimited by semi-colons that represent the versions for this RID. - `TreatVersionsAsCompatible`: Default is true. When true, version-specific RIDs will import the previous version-specific RID in the Versions list, with the first version importing the version-less RID. When false, all version-specific RIDs will import the version-less RID (bypassing previous version-specific RIDs). - `OmitVersionDelimiter`: Default is false. When true, no characters will separate the base RID and version (EG: win7). When false, a '.' will separate the base RID and version (EG: osx.10.12). - `ApplyVersionsToParent`: Default is false. When true, version-specific RIDs will import version-specific Parent RIDs, similar to what is done for architecture and qualifier (see Parent above). - `Architectures`: A list of strings delimited by semi-colons that represent the architectures for this RID. - `AdditionalQualifiers`: A list of strings delimited by semi-colons that represent the additional qualifiers for this RID. Additional qualifiers do not stack; each only applies to the qualifier-less RIDs (so as not to cause combinatorial exponential growth of RIDs). ### Adding a new version to an existing OS Find the existing `RuntimeGroup` in `runtimeGroups.props` and add the version to the list of `Versions`, separated by a semi-colon. If the version you are adding needs to be treated as not compatible with previous versions and the `RuntimeGroup` has not set `TreatVersionsAsCompatible`=`false`, then you may create a new `RuntimeGroup` to represent the new compatibility band. ### Checking your work After making a change to `runtimeGroups.props` you can examine the resulting changes in `runtime.json` and `runtime.compatibility.json`. `runtime.json` is the graph representation of the RIDs and is what ships in the package. 
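As a sketch (hand-written here, not generated output; the distro name is hypothetical), a `RuntimeGroup` with `Identity` `myLinuxDistro`, `Parent` `linux`, `Architectures` `x64`, and `Versions` `42.0` would produce `runtime.json` entries shaped roughly like: ``` "myLinuxDistro": { "#import": [ "linux" ] }, "myLinuxDistro-x64": { "#import": [ "myLinuxDistro", "linux-x64" ] }, "myLinuxDistro.42.0": { "#import": [ "myLinuxDistro" ] }, "myLinuxDistro.42.0-x64": { "#import": [ "myLinuxDistro.42.0", "myLinuxDistro-x64" ] } ```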
`runtime.compatibility.json` is a flattened version of the graph that shows the RID precedence for each RID in the graph. ### Version compatibility Version compatibility is represented through imports. If a platform is considered compatible with another version of the same platform, or a specific version of another platform, then it can import that platform. This permits packages to reuse assets that were built for the imported platform on the compatible platform. Compatibility here is a bit nebulous because inevitably different platforms will have observable differences that can cause compatibility problems. For the purposes of RIDs we'll try to represent compatibility as versions of a platform that are explicitly advertised as being compatible with a previous version and/or another platform and don't have any known broad breaking changes. It is usually better to opt to treat platforms as compatible since that enables the scenario of building an asset for a particular version and using that in future versions; otherwise you force people to cross-compile for all future versions the moment they target a specific version. ## Appendix: details of RID graph generation ### Naming convention We use the following convention in all newly-defined RIDs. Some RIDs (win7-x64, win8-x64) predate this convention and don't follow it, but all new RIDs should follow it. `[os name].[version]-[architecture]-[additional qualifiers]`, for example `osx.10.10-x64` or `ubuntu.14.04-x64` - `[os name]` can contain any characters other than `.` - `[version]` can contain any characters other than `-`. Typically a numeric version like 14.04 or 10.0. - `[architecture]` can contain any characters other than `-`. Typically: `x86`, `x64`, `arm`, `arm64` - `[additional qualifiers]` can be things like `aot`. Used to further differentiate different platforms. For all of these we strive to make them something that can be uniquely discovered at runtime, so that a RID may be computed from an executing application. As such these properties should be derivable from `/etc/os-release` or similar platform APIs / data. ### Import convention Imports should be used when the added RID is considered compatible with an existing RID. 1. Architecture-specific RIDs should first import the architecture-less RID. EG: `osx.10.11-x64` should first import `osx.10.11`. 2. Architecture-specific RIDs that are compatible with a previous version RID for the same OS should then import the previous version, architecture-specific RID. EG: `osx.10.11-x64` should then import `osx.10.10-x64`. If there is no earlier compatible/supported version, then a versionless RID should be imported. EG: `osx.10.10-x64` should import `osx-x64`. 3. Architecture-less RIDs that are compatible with a previous version RID for the same OS should then import the previous version, architecture-neutral RID. EG: `osx.10.11` should import `osx.10.10`. If there is no earlier compatible/supported version, then a versionless RID should be imported. EG: `osx.10.10` should import `osx`. 4. Version-less RIDs should import an OS category. EG: `osx-x64` should import `unix-x64`, `osx` should import `unix`. ### Advanced RuntimeGroup metadata The following options can be used under special circumstances but break the normal precedence rules we try to establish by generating the RID graph from common logic. These options make it possible to create a RID fallback chain that doesn't match the rest of the RIDs and therefore is hard for developers/package authors to reason about. 
Only use these options for cases where you know what you are doing and have carefully reviewed the resulting RID fallbacks using the CompatibilityMap. - `OmitRIDs`: A list of strings delimited by semi-colons that represent RIDs calculated from this RuntimeGroup that should be omitted from the RuntimeGraph. These RIDs will not be referenced nor defined. - `OmitRIDDefinitions`: A list of strings delimited by semi-colons that represent RIDs calculated from this RuntimeGroup that should be omitted from the RuntimeGraph. These RIDs will not be defined by this RuntimeGroup, but will be referenced: useful in case some other RuntimeGroup (or runtime.json template) defines them. - `OmitRIDReferences`: A list of strings delimited by semi-colons that represent RIDs calculated from this RuntimeGroup that should be omitted from the RuntimeGraph. These RIDs will be defined but not referenced by this RuntimeGroup.
# Runtime IDs The package `Microsoft.NETCore.Platforms` defines the runtime identifiers (RIDs) used by .NET packages to represent runtime-specific assets in NuGet packages. ## What is a RID? A RID is an opaque string that identifies a platform. RIDs have relationships to other RIDs by "importing" the other RID. In that way a RID is a directed graph of compatible RIDs. ## How does NuGet use RIDs? When deciding which assets to use from a package and which packages to include, NuGet will consider a RID if the project.json lists a RID in its `runtimes` section. - NuGet chooses the best RID-specific asset, where best is determined by a breadth-first traversal of the RID graph. Breadth ordering is document order. - NuGet considers RID-specific assets for two asset types: lib and native. - NuGet never considers RID-specific assets for compile. ### Best RID Consider the partial RID-graph: ``` "any": {}, "win": { "#import": [ "any" ] }, "win-x86": { "#import": [ "win" ] }, "win-x64": { "#import": [ "win" ] }, "win7": { "#import": [ "win" ] }, "win7-x86": { "#import": [ "win7", "win-x86" ] }, "win7-x64": { "#import": [ "win7", "win-x64" ] } ``` This can be visualized as a directed graph, as follows: ``` win7-x64 win7-x86 | \ / | | win7 | | | | win-x64 | win-x86 \ | / win | any ``` As such, the best RID, when evaluating for win7-x64, would be: `win7-x64`, `win7`, `win-x64`, `win`, `any` Similarly, when evaluating for `win-x64`: `win-x64`, `win`, `any` Note that `win7` comes before `win-x64` due to the import for `win7` appearing before the import for `win-x64` in document order. ### RID-qualified assets are preferred NuGet will always prefer a RID-qualified asset over a RID-less asset. For example, if a package contains ``` lib/netcoreapp1.0/foo.dll runtimes/win/lib/netcoreapp1.0/foo.dll ``` When resolving for netstandard1.0/win7-x64, NuGet will choose `runtimes/win/lib/netcoreapp1.0/foo.dll`. Additionally, NuGet will always prefer a RID-qualified asset over a RID-less asset, even if the framework is less specific for the RID-qualified asset. ``` lib/netstandard1.5/foo.dll runtimes/win/lib/netstandard1.0/foo.dll ``` When resolving for netstandard1.5/win7-x64, NuGet will choose `runtimes/win/lib/netstandard1.0/foo.dll` over `lib/netstandard1.5/foo.dll` even though `netstandard1.5` is more specific than `netstandard1.0`. ### RID-qualified assets are never used for compile NuGet will select different compile assets than runtime assets. The compile assets can never be RID-qualified. Consider the package: ``` lib/netstandard1.5/foo.dll runtimes/win/lib/netstandard1.0/foo.dll ``` When resolving for netstandard1.5/win7-x64, NuGet will select `lib/netstandard1.5/foo.dll` for the compile asset and `runtimes/win/lib/netstandard1.0/foo.dll` for the runtime asset. ## Adding new RIDs ### Why do I need to add a new RID? NuGet's extensibility mechanism for platform-specific assets requires a RID be defined for any platform that needs assets specific to that platform. Unlike TFMs, which have a known relationship in NuGet (e.g. net4.5 is compatible with net4.0), RIDs are opaque strings which NuGet knows nothing about. The definition and relationship of RIDs comes solely from the `runtime.json` files within the root of the packages referenced by the project. As such, whenever we want to put a new RID in a project.json in order to get assets specific for that RID we have to define the RID in some package. Typically that package is `Microsoft.NETCore.Platforms` if the RID is "official". 
If you'd like to prototype, you can put the RID in any other package and, so long as that package is referenced, you can use that RID. ### Do I really need to add a new RID? If you're prototyping on a platform that is compatible with an existing platform then you can reuse the RID for that existing platform. New RIDs are only needed when an asset needs to be different on a particular platform. `Microsoft.NETCore.Platforms` attempts to define all RIDs that packages may need, and as such will define RIDs for platforms that we don't actually cross-compile for. This is to support higher-level packages, 3rd party packages, that may need to cross-compile for that RID. ### Adding a new OS Add a new `RuntimeGroup` item in `runtimeGroups.props`. For example: ```xml <RuntimeGroup Include="myLinuxDistro"> <Parent>linux</Parent> <Architectures>x86;x64;arm</Architectures> <Versions>42.0;43.0</Versions> </RuntimeGroup> ``` This will create a new RID for `myLinuxDistro` where `myLinuxDistro` should be the string used for the `ID=` value in the `/etc/os-release` file. Whenever modifying the `runtimeGroups.props` make sure to pack the project via the `dotnet pack` command and inspect whether the generated package contains the desired changes. RuntimeGroup items have the following format: - `Identity`: the base string for the RID, without version, architecture, or qualifiers. - `Parent`: the base string for the parent of this RID. This RID will be imported by the baseRID, architecture-specific, and qualifier-specific RIDs (with the latter two appending appropriate architecture and qualifiers). - `Versions`: A list of strings delimited by semi-colons that represent the versions for this RID. - `TreatVersionsAsCompatible`: Default is true. When true, version-specific RIDs will import the previous version-specific RID in the Versions list, with the first version importing the version-less RID. When false, all version-specific RIDs will import the version-less RID (bypassing previous version-specific RIDs). - `OmitVersionDelimiter`: Default is false. When true, no characters will separate the base RID and version (EG: win7). When false, a '.' will separate the base RID and version (EG: osx.10.12). - `ApplyVersionsToParent`: Default is false. When true, version-specific RIDs will import version-specific Parent RIDs, similar to what is done for architecture and qualifier (see Parent above). - `Architectures`: A list of strings delimited by semi-colons that represent the architectures for this RID. - `AdditionalQualifiers`: A list of strings delimited by semi-colons that represent the additional qualifiers for this RID. Additional qualifiers do not stack; each only applies to the qualifier-less RIDs (so as not to cause combinatorial exponential growth of RIDs). ### Adding a new version to an existing OS Find the existing `RuntimeGroup` in `runtimeGroups.props` and add the version to the list of `Versions`, separated by a semi-colon. If the version you are adding needs to be treated as not compatible with previous versions and the `RuntimeGroup` has not set `TreatVersionsAsCompatible`=`false`, then you may create a new `RuntimeGroup` to represent the new compatibility band. ### Checking your work After making a change to `runtimeGroups.props` you can examine the resulting changes in `runtime.json` and `runtime.compatibility.json`. `runtime.json` is the graph representation of the RIDs and is what ships in the package. 
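As a sketch (hand-written here, not generated output; the distro name is hypothetical), a `RuntimeGroup` with `Identity` `myLinuxDistro`, `Parent` `linux`, `Architectures` `x64`, and `Versions` `42.0` would produce `runtime.json` entries shaped roughly like: ``` "myLinuxDistro": { "#import": [ "linux" ] }, "myLinuxDistro-x64": { "#import": [ "myLinuxDistro", "linux-x64" ] }, "myLinuxDistro.42.0": { "#import": [ "myLinuxDistro" ] }, "myLinuxDistro.42.0-x64": { "#import": [ "myLinuxDistro.42.0", "myLinuxDistro-x64" ] } ```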
`runtime.compatibility.json` is a flattened version of the graph that shows the RID precedence for each RID in the graph. ### Version compatibility Version compatibility is represented through imports. If a platform is considered compatible with another version of the same platform, or a specific version of another platform, then it can import that platform. This permits packages to reuse assets that were built for the imported platform on the compatible platform. Compatibility here is a bit nebulous because inevitably different platforms will have observable differences that can cause compatibility problems. For the purposes of RIDs we'll try to represent compatibility as versions of a platform that are explicitly advertised as being compatible with a previous version and/or another platform and don't have any known broad breaking changes. It is usually better to opt to treat platforms as compatible since that enables the scenario of building an asset for a particular version and using that in future versions; otherwise you force people to cross-compile for all future versions the moment they target a specific version. ## Appendix: details of RID graph generation ### Naming convention We use the following convention in all newly-defined RIDs. Some RIDs (win7-x64, win8-x64) predate this convention and don't follow it, but all new RIDs should follow it. `[os name].[version]-[architecture]-[additional qualifiers]`, for example `osx.10.10-x64` or `ubuntu.14.04-x64` - `[os name]` can contain any characters other than `.` - `[version]` can contain any characters other than `-`. Typically a numeric version like 14.04 or 10.0. - `[architecture]` can contain any characters other than `-`. Typically: `x86`, `x64`, `arm`, `arm64` - `[additional qualifiers]` can be things like `aot`. Used to further differentiate different platforms. For all of these we strive to make them something that can be uniquely discovered at runtime, so that a RID may be computed from an executing application. As such these properties should be derivable from `/etc/os-release` or similar platform APIs / data. ### Import convention Imports should be used when the added RID is considered compatible with an existing RID. 1. Architecture-specific RIDs should first import the architecture-less RID. EG: `osx.10.11-x64` should first import `osx.10.11`. 2. Architecture-specific RIDs that are compatible with a previous version RID for the same OS should then import the previous version, architecture-specific RID. EG: `osx.10.11-x64` should then import `osx.10.10-x64`. If there is no earlier compatible/supported version, then a versionless RID should be imported. EG: `osx.10.10-x64` should import `osx-x64`. 3. Architecture-less RIDs that are compatible with a previous version RID for the same OS should then import the previous version, architecture-neutral RID. EG: `osx.10.11` should import `osx.10.10`. If there is no earlier compatible/supported version, then a versionless RID should be imported. EG: `osx.10.10` should import `osx`. 4. Version-less RIDs should import an OS category. EG: `osx-x64` should import `unix-x64`, `osx` should import `unix`. ### Advanced RuntimeGroup metadata The following options can be used under special circumstances but break the normal precedence rules we try to establish by generating the RID graph from common logic. These options make it possible to create a RID fallback chain that doesn't match the rest of the RIDs and therefore is hard for developers/package authors to reason about. 
### Advanced RuntimeGroup metadata

The following options can be used under special circumstances, but they break the normal precedence rules we try to establish by generating the RID graph from common logic. They make it possible to create a RID fallback chain that doesn't match the rest of the RIDs and is therefore hard for developers/package authors to reason about. Only use these options for cases where you know what you are doing and have carefully reviewed the resulting RID fallbacks using the CompatibilityMap.

- `OmitRIDs`: a list of strings delimited by semi-colons that represent RIDs calculated from this RuntimeGroup that should be omitted from the RuntimeGraph. These RIDs will not be referenced nor defined.
- `OmitRIDDefinitions`: a list of strings delimited by semi-colons that represent RIDs calculated from this RuntimeGroup that should be omitted from the RuntimeGraph. These RIDs will not be defined by this RuntimeGroup, but will be referenced: useful in case some other RuntimeGroup (or the runtime.json template) defines them.
- `OmitRIDReferences`: a list of strings delimited by semi-colons that represent RIDs calculated from this RuntimeGroup that should be omitted from the RuntimeGraph. These RIDs will be defined but not referenced by this RuntimeGroup.
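As an illustration only (the group and the omitted RID are the made-up `myLinuxDistro` example from earlier, not a real entry in `runtimeGroups.props`), suppressing one generated RID might look like:

```xml
<RuntimeGroup Include="myLinuxDistro">
  <Parent>linux</Parent>
  <Architectures>x86;x64;arm</Architectures>
  <Versions>42.0;43.0</Versions>
  <!-- Hypothetical: drop the arm flavor of 43.0 entirely, so it is neither defined nor referenced. -->
  <OmitRIDs>myLinuxDistro.43.0-arm</OmitRIDs>
</RuntimeGroup>
```

After such a change, regenerate the package and inspect `runtime.json` and `runtime.compatibility.json` to confirm the omitted RID no longer appears where you expect.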
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/native/external/brotli/enc/utf8_util.c
/* Copyright 2013 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ /* Heuristics for deciding about the UTF8-ness of strings. */ #include "./utf8_util.h" #include <brotli/types.h> #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static size_t BrotliParseAsUTF8( int* symbol, const uint8_t* input, size_t size) { /* ASCII */ if ((input[0] & 0x80) == 0) { *symbol = input[0]; if (*symbol > 0) { return 1; } } /* 2-byte UTF8 */ if (size > 1u && (input[0] & 0xE0) == 0xC0 && (input[1] & 0xC0) == 0x80) { *symbol = (((input[0] & 0x1F) << 6) | (input[1] & 0x3F)); if (*symbol > 0x7F) { return 2; } } /* 3-byte UFT8 */ if (size > 2u && (input[0] & 0xF0) == 0xE0 && (input[1] & 0xC0) == 0x80 && (input[2] & 0xC0) == 0x80) { *symbol = (((input[0] & 0x0F) << 12) | ((input[1] & 0x3F) << 6) | (input[2] & 0x3F)); if (*symbol > 0x7FF) { return 3; } } /* 4-byte UFT8 */ if (size > 3u && (input[0] & 0xF8) == 0xF0 && (input[1] & 0xC0) == 0x80 && (input[2] & 0xC0) == 0x80 && (input[3] & 0xC0) == 0x80) { *symbol = (((input[0] & 0x07) << 18) | ((input[1] & 0x3F) << 12) | ((input[2] & 0x3F) << 6) | (input[3] & 0x3F)); if (*symbol > 0xFFFF && *symbol <= 0x10FFFF) { return 4; } } /* Not UTF8, emit a special symbol above the UTF8-code space */ *symbol = 0x110000 | input[0]; return 1; } /* Returns 1 if at least min_fraction of the data is UTF8-encoded.*/ BROTLI_BOOL BrotliIsMostlyUTF8( const uint8_t* data, const size_t pos, const size_t mask, const size_t length, const double min_fraction) { size_t size_utf8 = 0; size_t i = 0; while (i < length) { int symbol; size_t bytes_read = BrotliParseAsUTF8(&symbol, &data[(pos + i) & mask], length - i); i += bytes_read; if (symbol < 0x110000) size_utf8 += bytes_read; } return TO_BROTLI_BOOL((double)size_utf8 > min_fraction * (double)length); } #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif
/* Copyright 2013 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ /* Heuristics for deciding about the UTF8-ness of strings. */ #include "./utf8_util.h" #include <brotli/types.h> #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static size_t BrotliParseAsUTF8( int* symbol, const uint8_t* input, size_t size) { /* ASCII */ if ((input[0] & 0x80) == 0) { *symbol = input[0]; if (*symbol > 0) { return 1; } } /* 2-byte UTF8 */ if (size > 1u && (input[0] & 0xE0) == 0xC0 && (input[1] & 0xC0) == 0x80) { *symbol = (((input[0] & 0x1F) << 6) | (input[1] & 0x3F)); if (*symbol > 0x7F) { return 2; } } /* 3-byte UFT8 */ if (size > 2u && (input[0] & 0xF0) == 0xE0 && (input[1] & 0xC0) == 0x80 && (input[2] & 0xC0) == 0x80) { *symbol = (((input[0] & 0x0F) << 12) | ((input[1] & 0x3F) << 6) | (input[2] & 0x3F)); if (*symbol > 0x7FF) { return 3; } } /* 4-byte UFT8 */ if (size > 3u && (input[0] & 0xF8) == 0xF0 && (input[1] & 0xC0) == 0x80 && (input[2] & 0xC0) == 0x80 && (input[3] & 0xC0) == 0x80) { *symbol = (((input[0] & 0x07) << 18) | ((input[1] & 0x3F) << 12) | ((input[2] & 0x3F) << 6) | (input[3] & 0x3F)); if (*symbol > 0xFFFF && *symbol <= 0x10FFFF) { return 4; } } /* Not UTF8, emit a special symbol above the UTF8-code space */ *symbol = 0x110000 | input[0]; return 1; } /* Returns 1 if at least min_fraction of the data is UTF8-encoded.*/ BROTLI_BOOL BrotliIsMostlyUTF8( const uint8_t* data, const size_t pos, const size_t mask, const size_t length, const double min_fraction) { size_t size_utf8 = 0; size_t i = 0; while (i < length) { int symbol; size_t bytes_read = BrotliParseAsUTF8(&symbol, &data[(pos + i) & mask], length - i); i += bytes_read; if (symbol < 0x110000) size_utf8 += bytes_read; } return TO_BROTLI_BOOL((double)size_utf8 > min_fraction * (double)length); } #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/riscv/Gstep.c
/* libunwind - a platform-independent unwind library Copyright (C) 2008 CodeSourcery Copyright (C) 2021 Zhaofeng Li This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind_i.h" #include "offsets.h" static int riscv_handle_signal_frame (unw_cursor_t *cursor) { int ret, i; struct cursor *c = (struct cursor *) cursor; unw_word_t sp, sp_addr = c->dwarf.cfa; struct dwarf_loc sp_loc = DWARF_LOC (sp_addr, 0); if ((ret = dwarf_get (&c->dwarf, sp_loc, &sp)) < 0) return -UNW_EUNSPEC; if (!unw_is_signal_frame (cursor)) return -UNW_EUNSPEC; #ifdef __linux__ /* rt_sigframe contains the siginfo structure, the ucontext, and then the trampoline. We store the mcontext inside ucontext as sigcontext_addr. */ c->sigcontext_format = RISCV_SCF_LINUX_RT_SIGFRAME; c->sigcontext_addr = sp_addr + sizeof (siginfo_t) + UC_MCONTEXT_REGS_OFF; c->sigcontext_sp = sp_addr; c->sigcontext_pc = c->dwarf.ip; #else /* Not making any assumption at all - You need to implement this */ return -UNW_EUNSPEC; #endif /* Update the dwarf cursor. Set the location of the registers to the corresponding addresses of the uc_mcontext / sigcontext structure contents. */ #define SC_REG_OFFSET(X) (8 * X) /* The PC is stored in place of X0 in sigcontext */ c->dwarf.loc[UNW_TDEP_IP] = DWARF_LOC (c->sigcontext_addr + SC_REG_OFFSET(UNW_RISCV_X0), 0); for (i = UNW_RISCV_X1; i <= UNW_RISCV_F31; i++) { c->dwarf.loc[i] = DWARF_LOC (c->sigcontext_addr + SC_REG_OFFSET(i), 0); } /* Set SP/CFA and PC/IP. */ dwarf_get (&c->dwarf, c->dwarf.loc[UNW_TDEP_SP], &c->dwarf.cfa); dwarf_get (&c->dwarf, c->dwarf.loc[UNW_TDEP_IP], &c->dwarf.ip); return 1; } int unw_step (unw_cursor_t *cursor) { struct cursor *c = (struct cursor *) cursor; int validate = c->validate; int ret; Debug (1, "(cursor=%p, ip=0x%016lx, sp=0x%016lx)\n", c, c->dwarf.ip, c->dwarf.cfa); /* Validate all addresses before dereferencing. */ c->validate = 1; /* Special handling the signal frame. */ if (unw_is_signal_frame (cursor) > 0) return riscv_handle_signal_frame (cursor); /* Restore default memory validation state */ c->validate = validate; /* Try DWARF-based unwinding... */ ret = dwarf_step (&c->dwarf); if (unlikely (ret == -UNW_ESTOPUNWIND)) return ret; /* DWARF unwinding didn't work, let's tread carefully here */ if (unlikely (ret < 0)) { Debug (1, "DWARF unwinding failed (cursor=%p, ip=0x%016lx, sp=0x%016lx)\n", c, c->dwarf.ip, c->dwarf.cfa); /* Try RA/X1? 
*/ c->dwarf.loc[UNW_RISCV_PC] = c->dwarf.loc[UNW_RISCV_X1]; c->dwarf.loc[UNW_RISCV_X1] = DWARF_NULL_LOC; if (!DWARF_IS_NULL_LOC (c->dwarf.loc[UNW_RISCV_PC])) { ret = dwarf_get (&c->dwarf, c->dwarf.loc[UNW_RISCV_PC], &c->dwarf.ip); if (ret < 0) { Debug (2, "Failed to get PC from return address: %d\n", ret); return ret; } Debug (2, "ra= 0x%016lx\n", c->dwarf.ip); ret = 1; } else { c->dwarf.ip = 0; } } return (c->dwarf.ip == 0) ? 0 : 1; }
/* libunwind - a platform-independent unwind library Copyright (C) 2008 CodeSourcery Copyright (C) 2021 Zhaofeng Li This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind_i.h" #include "offsets.h" static int riscv_handle_signal_frame (unw_cursor_t *cursor) { int ret, i; struct cursor *c = (struct cursor *) cursor; unw_word_t sp, sp_addr = c->dwarf.cfa; struct dwarf_loc sp_loc = DWARF_LOC (sp_addr, 0); if ((ret = dwarf_get (&c->dwarf, sp_loc, &sp)) < 0) return -UNW_EUNSPEC; if (!unw_is_signal_frame (cursor)) return -UNW_EUNSPEC; #ifdef __linux__ /* rt_sigframe contains the siginfo structure, the ucontext, and then the trampoline. We store the mcontext inside ucontext as sigcontext_addr. */ c->sigcontext_format = RISCV_SCF_LINUX_RT_SIGFRAME; c->sigcontext_addr = sp_addr + sizeof (siginfo_t) + UC_MCONTEXT_REGS_OFF; c->sigcontext_sp = sp_addr; c->sigcontext_pc = c->dwarf.ip; #else /* Not making any assumption at all - You need to implement this */ return -UNW_EUNSPEC; #endif /* Update the dwarf cursor. Set the location of the registers to the corresponding addresses of the uc_mcontext / sigcontext structure contents. */ #define SC_REG_OFFSET(X) (8 * X) /* The PC is stored in place of X0 in sigcontext */ c->dwarf.loc[UNW_TDEP_IP] = DWARF_LOC (c->sigcontext_addr + SC_REG_OFFSET(UNW_RISCV_X0), 0); for (i = UNW_RISCV_X1; i <= UNW_RISCV_F31; i++) { c->dwarf.loc[i] = DWARF_LOC (c->sigcontext_addr + SC_REG_OFFSET(i), 0); } /* Set SP/CFA and PC/IP. */ dwarf_get (&c->dwarf, c->dwarf.loc[UNW_TDEP_SP], &c->dwarf.cfa); dwarf_get (&c->dwarf, c->dwarf.loc[UNW_TDEP_IP], &c->dwarf.ip); return 1; } int unw_step (unw_cursor_t *cursor) { struct cursor *c = (struct cursor *) cursor; int validate = c->validate; int ret; Debug (1, "(cursor=%p, ip=0x%016lx, sp=0x%016lx)\n", c, c->dwarf.ip, c->dwarf.cfa); /* Validate all addresses before dereferencing. */ c->validate = 1; /* Special handling the signal frame. */ if (unw_is_signal_frame (cursor) > 0) return riscv_handle_signal_frame (cursor); /* Restore default memory validation state */ c->validate = validate; /* Try DWARF-based unwinding... */ ret = dwarf_step (&c->dwarf); if (unlikely (ret == -UNW_ESTOPUNWIND)) return ret; /* DWARF unwinding didn't work, let's tread carefully here */ if (unlikely (ret < 0)) { Debug (1, "DWARF unwinding failed (cursor=%p, ip=0x%016lx, sp=0x%016lx)\n", c, c->dwarf.ip, c->dwarf.cfa); /* Try RA/X1? 
*/ c->dwarf.loc[UNW_RISCV_PC] = c->dwarf.loc[UNW_RISCV_X1]; c->dwarf.loc[UNW_RISCV_X1] = DWARF_NULL_LOC; if (!DWARF_IS_NULL_LOC (c->dwarf.loc[UNW_RISCV_PC])) { ret = dwarf_get (&c->dwarf, c->dwarf.loc[UNW_RISCV_PC], &c->dwarf.ip); if (ret < 0) { Debug (2, "Failed to get PC from return address: %d\n", ret); return ret; } Debug (2, "ra= 0x%016lx\n", c->dwarf.ip); ret = 1; } else { c->dwarf.ip = 0; } } return (c->dwarf.ip == 0) ? 0 : 1; }
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/sgen/sgen-pointer-queue.c
/** * \file * A pointer queue that can be sorted. * * Copyright (C) 2014 Xamarin Inc * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifdef HAVE_SGEN_GC #include <string.h> #include "mono/sgen/sgen-gc.h" #include "mono/sgen/sgen-pointer-queue.h" void sgen_pointer_queue_clear (SgenPointerQueue *queue) { queue->next_slot = 0; } void sgen_pointer_queue_init (SgenPointerQueue *queue, int mem_type) { queue->next_slot = 0; queue->size = 0; queue->data = NULL; queue->mem_type = mem_type; } static void realloc_queue (SgenPointerQueue *queue) { size_t new_size = queue->size ? queue->size + queue->size/2 : 1024; void **new_data = (void **)sgen_alloc_internal_dynamic (sizeof (void*) * new_size, queue->mem_type, TRUE); memcpy (new_data, queue->data, sizeof (void*) * queue->next_slot); sgen_free_internal_dynamic (queue->data, sizeof (void*) * queue->size, queue->mem_type); queue->data = new_data; queue->size = new_size; SGEN_LOG (4, "Reallocated pointer queue to size: %lu", (unsigned long)new_size); } gboolean sgen_pointer_queue_will_grow (SgenPointerQueue *queue) { return queue->next_slot >= queue->size; } void sgen_pointer_queue_add (SgenPointerQueue *queue, void *ptr) { if (sgen_pointer_queue_will_grow (queue)) realloc_queue (queue); queue->data [queue->next_slot++] = ptr; } void* sgen_pointer_queue_pop (SgenPointerQueue *queue) { g_assert (queue->next_slot); return queue->data [--queue->next_slot]; } size_t sgen_pointer_queue_search (SgenPointerQueue *queue, void *addr) { size_t first = 0, last = queue->next_slot; while (first < last) { size_t middle = first + ((last - first) >> 1); if (addr <= queue->data [middle]) last = middle; else first = middle + 1; } g_assert (first == last); return first; } /* * Removes all NULL pointers from the queue. */ void sgen_pointer_queue_remove_nulls (SgenPointerQueue *queue) { void **start, **cur, **end; start = cur = queue->data; end = queue->data + queue->next_slot; while (cur < end) { if (*cur) *start++ = *cur++; else ++cur; } queue->next_slot = start - queue->data; } /* * Sorts the pointers in the queue, then removes duplicates. */ void sgen_pointer_queue_sort_uniq (SgenPointerQueue *queue) { void **start, **cur, **end; /* sort and uniq pin_queue: we just sort and we let the rest discard multiple values */ /* it may be better to keep ranges of pinned memory instead of individually pinning objects */ SGEN_LOG (5, "Sorting pointer queue, size: %lu", (unsigned long)queue->next_slot); if (queue->next_slot > 1) sgen_sort_addresses (queue->data, queue->next_slot); start = cur = queue->data; end = queue->data + queue->next_slot; while (cur < end) { *start = *cur++; while (cur < end && *start == *cur) cur++; start++; }; queue->next_slot = start - queue->data; SGEN_LOG (5, "Pointer queue reduced to size: %lu", (unsigned long)queue->next_slot); } /* * Does a linear search through the pointer queue to find `ptr`. Returns the index if * found, otherwise (size_t)-1. */ size_t sgen_pointer_queue_find (SgenPointerQueue *queue, void *ptr) { size_t i; for (i = 0; i < queue->next_slot; ++i) if (queue->data [i] == ptr) return i; return (size_t)-1; } gboolean sgen_pointer_queue_is_empty (SgenPointerQueue *queue) { return !queue->next_slot; } void sgen_pointer_queue_free (SgenPointerQueue *queue) { sgen_free_internal_dynamic (queue->data, sizeof (void*) * queue->size, queue->mem_type); } #endif
/** * \file * A pointer queue that can be sorted. * * Copyright (C) 2014 Xamarin Inc * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifdef HAVE_SGEN_GC #include <string.h> #include "mono/sgen/sgen-gc.h" #include "mono/sgen/sgen-pointer-queue.h" void sgen_pointer_queue_clear (SgenPointerQueue *queue) { queue->next_slot = 0; } void sgen_pointer_queue_init (SgenPointerQueue *queue, int mem_type) { queue->next_slot = 0; queue->size = 0; queue->data = NULL; queue->mem_type = mem_type; } static void realloc_queue (SgenPointerQueue *queue) { size_t new_size = queue->size ? queue->size + queue->size/2 : 1024; void **new_data = (void **)sgen_alloc_internal_dynamic (sizeof (void*) * new_size, queue->mem_type, TRUE); memcpy (new_data, queue->data, sizeof (void*) * queue->next_slot); sgen_free_internal_dynamic (queue->data, sizeof (void*) * queue->size, queue->mem_type); queue->data = new_data; queue->size = new_size; SGEN_LOG (4, "Reallocated pointer queue to size: %lu", (unsigned long)new_size); } gboolean sgen_pointer_queue_will_grow (SgenPointerQueue *queue) { return queue->next_slot >= queue->size; } void sgen_pointer_queue_add (SgenPointerQueue *queue, void *ptr) { if (sgen_pointer_queue_will_grow (queue)) realloc_queue (queue); queue->data [queue->next_slot++] = ptr; } void* sgen_pointer_queue_pop (SgenPointerQueue *queue) { g_assert (queue->next_slot); return queue->data [--queue->next_slot]; } size_t sgen_pointer_queue_search (SgenPointerQueue *queue, void *addr) { size_t first = 0, last = queue->next_slot; while (first < last) { size_t middle = first + ((last - first) >> 1); if (addr <= queue->data [middle]) last = middle; else first = middle + 1; } g_assert (first == last); return first; } /* * Removes all NULL pointers from the queue. */ void sgen_pointer_queue_remove_nulls (SgenPointerQueue *queue) { void **start, **cur, **end; start = cur = queue->data; end = queue->data + queue->next_slot; while (cur < end) { if (*cur) *start++ = *cur++; else ++cur; } queue->next_slot = start - queue->data; } /* * Sorts the pointers in the queue, then removes duplicates. */ void sgen_pointer_queue_sort_uniq (SgenPointerQueue *queue) { void **start, **cur, **end; /* sort and uniq pin_queue: we just sort and we let the rest discard multiple values */ /* it may be better to keep ranges of pinned memory instead of individually pinning objects */ SGEN_LOG (5, "Sorting pointer queue, size: %lu", (unsigned long)queue->next_slot); if (queue->next_slot > 1) sgen_sort_addresses (queue->data, queue->next_slot); start = cur = queue->data; end = queue->data + queue->next_slot; while (cur < end) { *start = *cur++; while (cur < end && *start == *cur) cur++; start++; }; queue->next_slot = start - queue->data; SGEN_LOG (5, "Pointer queue reduced to size: %lu", (unsigned long)queue->next_slot); } /* * Does a linear search through the pointer queue to find `ptr`. Returns the index if * found, otherwise (size_t)-1. */ size_t sgen_pointer_queue_find (SgenPointerQueue *queue, void *ptr) { size_t i; for (i = 0; i < queue->next_slot; ++i) if (queue->data [i] == ptr) return i; return (size_t)-1; } gboolean sgen_pointer_queue_is_empty (SgenPointerQueue *queue) { return !queue->next_slot; } void sgen_pointer_queue_free (SgenPointerQueue *queue) { sgen_free_internal_dynamic (queue->data, sizeof (void*) * queue->size, queue->mem_type); } #endif
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/ppc/Ginit_local.c
/* libunwind - a platform-independent unwind library Copied from src/x86_64/, modified slightly (or made empty stubs) for building frysk successfully on ppc64, by Wu Zhou <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <libunwind_i.h> #ifdef UNW_TARGET_PPC64 #include "../ppc64/init.h" #else #include "../ppc32/init.h" #endif #ifdef UNW_REMOTE_ONLY int unw_init_local (unw_cursor_t *cursor, ucontext_t *uc) { /* XXX: empty stub. */ return -UNW_EINVAL; } #else /* !UNW_REMOTE_ONLY */ static int unw_init_local_common(unw_cursor_t *cursor, ucontext_t *uc, unsigned use_prev_instr) { struct cursor *c = (struct cursor *) cursor; if (!atomic_load(&tdep_init_done)) tdep_init (); Debug (1, "(cursor=%p)\n", c); c->dwarf.as = unw_local_addr_space; c->dwarf.as_arg = uc; #ifdef UNW_TARGET_PPC64 return common_init_ppc64 (c, use_prev_instr); #else return common_init_ppc32 (c, use_prev_instr); #endif } int unw_init_local(unw_cursor_t *cursor, ucontext_t *uc) { return unw_init_local_common(cursor, uc, 1); } int unw_init_local2 (unw_cursor_t *cursor, ucontext_t *uc, int flag) { if (!flag) { return unw_init_local_common(cursor, uc, 1); } else if (flag == UNW_INIT_SIGNAL_FRAME) { return unw_init_local_common(cursor, uc, 0); } else { return -UNW_EINVAL; } } #endif /* !UNW_REMOTE_ONLY */
/* libunwind - a platform-independent unwind library Copied from src/x86_64/, modified slightly (or made empty stubs) for building frysk successfully on ppc64, by Wu Zhou <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <libunwind_i.h> #ifdef UNW_TARGET_PPC64 #include "../ppc64/init.h" #else #include "../ppc32/init.h" #endif #ifdef UNW_REMOTE_ONLY int unw_init_local (unw_cursor_t *cursor, ucontext_t *uc) { /* XXX: empty stub. */ return -UNW_EINVAL; } #else /* !UNW_REMOTE_ONLY */ static int unw_init_local_common(unw_cursor_t *cursor, ucontext_t *uc, unsigned use_prev_instr) { struct cursor *c = (struct cursor *) cursor; if (!atomic_load(&tdep_init_done)) tdep_init (); Debug (1, "(cursor=%p)\n", c); c->dwarf.as = unw_local_addr_space; c->dwarf.as_arg = uc; #ifdef UNW_TARGET_PPC64 return common_init_ppc64 (c, use_prev_instr); #else return common_init_ppc32 (c, use_prev_instr); #endif } int unw_init_local(unw_cursor_t *cursor, ucontext_t *uc) { return unw_init_local_common(cursor, uc, 1); } int unw_init_local2 (unw_cursor_t *cursor, ucontext_t *uc, int flag) { if (!flag) { return unw_init_local_common(cursor, uc, 1); } else if (flag == UNW_INIT_SIGNAL_FRAME) { return unw_init_local_common(cursor, uc, 0); } else { return -UNW_EINVAL; } } #endif /* !UNW_REMOTE_ONLY */
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/x86/Linit_local.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Ginit_local.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Ginit_local.c" #endif
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/tasks/AndroidAppBuilder/Templates/monodroid.c
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <mono/utils/mono-publib.h> #include <mono/utils/mono-logger.h> #include <mono/metadata/assembly.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/mono-gc.h> #include <mono/metadata/exception.h> #include <mono/jit/jit.h> #include <mono/jit/mono-private-unstable.h> #include <sys/stat.h> #include <stdlib.h> #include <stdio.h> #include <fcntl.h> #include <errno.h> #include <string.h> #include <jni.h> #include <android/log.h> #include <sys/system_properties.h> #include <sys/mman.h> #include <assert.h> #include <unistd.h> static char *bundle_path; static char *executable; #define LOG_INFO(fmt, ...) __android_log_print(ANDROID_LOG_DEBUG, "DOTNET", fmt, ##__VA_ARGS__) #define LOG_ERROR(fmt, ...) __android_log_print(ANDROID_LOG_ERROR, "DOTNET", fmt, ##__VA_ARGS__) #if defined(__arm__) #define ANDROID_RUNTIME_IDENTIFIER "android-arm" #elif defined(__aarch64__) #define ANDROID_RUNTIME_IDENTIFIER "android-arm64" #elif defined(__i386__) #define ANDROID_RUNTIME_IDENTIFIER "android-x86" #elif defined(__x86_64__) #define ANDROID_RUNTIME_IDENTIFIER "android-x64" #else #error Unknown architecture #endif #define RUNTIMECONFIG_BIN_FILE "runtimeconfig.bin" static MonoAssembly* mono_droid_load_assembly (const char *name, const char *culture) { char filename [1024]; char path [1024]; int res; LOG_INFO ("assembly_preload_hook: %s %s %s\n", name, culture, bundle_path); int len = strlen (name); int has_extension = len > 3 && name [len - 4] == '.' && (!strcmp ("exe", name + (len - 3)) || !strcmp ("dll", name + (len - 3))); // add extensions if required. strlcpy (filename, name, sizeof (filename)); if (!has_extension) { strlcat (filename, ".dll", sizeof (filename)); } if (culture && strcmp (culture, "")) res = snprintf (path, sizeof (path) - 1, "%s/%s/%s", bundle_path, culture, filename); else res = snprintf (path, sizeof (path) - 1, "%s/%s", bundle_path, filename); assert (res > 0); struct stat buffer; if (stat (path, &buffer) == 0) { MonoAssembly *assembly = mono_assembly_open (path, NULL); assert (assembly); return assembly; } return NULL; } static MonoAssembly* mono_droid_assembly_preload_hook (MonoAssemblyName *aname, char **assemblies_path, void* user_data) { const char *name = mono_assembly_name_get_name (aname); const char *culture = mono_assembly_name_get_culture (aname); return mono_droid_load_assembly (name, culture); } static unsigned char * load_aot_data (MonoAssembly *assembly, int size, void *user_data, void **out_handle) { *out_handle = NULL; char path [1024]; int res; MonoAssemblyName *assembly_name = mono_assembly_get_name (assembly); const char *aname = mono_assembly_name_get_name (assembly_name); LOG_INFO ("Looking for aot data for assembly '%s'.", aname); res = snprintf (path, sizeof (path) - 1, "%s/%s.aotdata", bundle_path, aname); assert (res > 0); int fd = open (path, O_RDONLY); if (fd < 0) { LOG_INFO ("Could not load the aot data for %s from %s: %s\n", aname, path, strerror (errno)); return NULL; } void *ptr = mmap (NULL, size, PROT_READ, MAP_FILE | MAP_PRIVATE, fd, 0); if (ptr == MAP_FAILED) { LOG_INFO ("Could not map the aot file for %s: %s\n", aname, strerror (errno)); close (fd); return NULL; } close (fd); LOG_INFO ("Loaded aot data for %s.\n", aname); *out_handle = ptr; return (unsigned char *) ptr; } static void free_aot_data (MonoAssembly *assembly, int size, void *user_data, void *handle) { munmap (handle, size); } char * 
strdup_printf (const char *msg, ...) { va_list args; char *formatted = NULL; va_start (args, msg); vasprintf (&formatted, msg, args); va_end (args); return formatted; } static MonoObject * mono_droid_fetch_exception_property (MonoObject *obj, const char *name, bool is_virtual) { MonoMethod *get = NULL; MonoMethod *get_virt = NULL; MonoObject *exc = NULL; get = mono_class_get_method_from_name (mono_get_exception_class (), name, 0); if (get) { if (is_virtual) { get_virt = mono_object_get_virtual_method (obj, get); if (get_virt) get = get_virt; } return (MonoObject *) mono_runtime_invoke (get, obj, NULL, &exc); } else { printf ("Could not find the property System.Exception.%s", name); } return NULL; } static char * mono_droid_fetch_exception_property_string (MonoObject *obj, const char *name, bool is_virtual) { MonoString *str = (MonoString *) mono_droid_fetch_exception_property (obj, name, is_virtual); return str ? mono_string_to_utf8 (str) : NULL; } void unhandled_exception_handler (MonoObject *exc, void *user_data) { MonoClass *type = mono_object_get_class (exc); char *type_name = strdup_printf ("%s.%s", mono_class_get_namespace (type), mono_class_get_name (type)); char *trace = mono_droid_fetch_exception_property_string (exc, "get_StackTrace", true); char *message = mono_droid_fetch_exception_property_string (exc, "get_Message", true); LOG_ERROR("UnhandledException: %s %s %s", type_name, message, trace); free (trace); free (message); free (type_name); exit (1); } void log_callback (const char *log_domain, const char *log_level, const char *message, mono_bool fatal, void *user_data) { LOG_INFO ("(%s %s) %s", log_domain, log_level, message); if (fatal) { LOG_ERROR ("Exit code: %d.", 1); exit (1); } } #if defined(FORCE_AOT) && defined(STATIC_AOT) void register_aot_modules (void); #endif void cleanup_runtime_config (MonovmRuntimeConfigArguments *args, void *user_data) { free (args); free (user_data); } int mono_droid_runtime_init (const char* executable, int managed_argc, char* managed_argv[]) { // NOTE: these options can be set via command line args for adb or xharness, see AndroidSampleApp.csproj // uncomment for debug output: // //setenv ("XUNIT_VERBOSE", "true", true); //setenv ("MONO_LOG_LEVEL", "debug", true); //setenv ("MONO_LOG_MASK", "all", true); // build using DiagnosticPorts property in AndroidAppBuilder // or set DOTNET_DiagnosticPorts env via adb, xharness when undefined. 
// NOTE, using DOTNET_DiagnosticPorts requires app build using AndroidAppBuilder and RuntimeComponents=diagnostics_tracing #ifdef DIAGNOSTIC_PORTS setenv ("DOTNET_DiagnosticPorts", DIAGNOSTIC_PORTS, true); #endif bool wait_for_debugger = false; chdir (bundle_path); // TODO: set TRUSTED_PLATFORM_ASSEMBLIES, APP_PATHS and NATIVE_DLL_SEARCH_DIRECTORIES const char* appctx_keys[2]; appctx_keys[0] = "RUNTIME_IDENTIFIER"; appctx_keys[1] = "APP_CONTEXT_BASE_DIRECTORY"; const char* appctx_values[2]; appctx_values[0] = ANDROID_RUNTIME_IDENTIFIER; appctx_values[1] = bundle_path; char *file_name = RUNTIMECONFIG_BIN_FILE; int str_len = strlen (bundle_path) + strlen (file_name) + 1; // +1 is for the "/" char *file_path = (char *)malloc (sizeof (char) * (str_len +1)); // +1 is for the terminating null character int num_char = snprintf (file_path, (str_len + 1), "%s/%s", bundle_path, file_name); struct stat buffer; LOG_INFO ("file_path: %s\n", file_path); assert (num_char > 0 && num_char == str_len); if (stat (file_path, &buffer) == 0) { MonovmRuntimeConfigArguments *arg = (MonovmRuntimeConfigArguments *)malloc (sizeof (MonovmRuntimeConfigArguments)); arg->kind = 0; arg->runtimeconfig.name.path = file_path; monovm_runtimeconfig_initialize (arg, cleanup_runtime_config, file_path); } else { free (file_path); } monovm_initialize(2, appctx_keys, appctx_values); mono_debug_init (MONO_DEBUG_FORMAT_MONO); mono_install_assembly_preload_hook (mono_droid_assembly_preload_hook, NULL); mono_install_load_aot_data_hook (load_aot_data, free_aot_data, NULL); mono_install_unhandled_exception_hook (unhandled_exception_handler, NULL); mono_trace_set_log_handler (log_callback, NULL); mono_set_signal_chaining (true); mono_set_crash_chaining (true); if (wait_for_debugger) { char* options[] = { "--debugger-agent=transport=dt_socket,server=y,address=0.0.0.0:55555" }; mono_jit_parse_options (1, options); } #if FORCE_INTERPRETER LOG_INFO("Interp Enabled"); mono_jit_set_aot_mode(MONO_AOT_MODE_INTERP_ONLY); #elif FORCE_AOT LOG_INFO("AOT Enabled"); #if STATIC_AOT register_aot_modules(); #endif mono_jit_set_aot_mode(MONO_AOT_MODE_FULL); #endif MonoDomain *domain = mono_jit_init_version ("dotnet.android", "mobile"); assert (domain); MonoAssembly *assembly = mono_droid_load_assembly (executable, NULL); assert (assembly); LOG_INFO ("Executable: %s", executable); int res = mono_jit_exec (domain, assembly, managed_argc, managed_argv); LOG_INFO ("Exit code: %d.", res); mono_jit_cleanup (domain); return res; } static void strncpy_str (JNIEnv *env, char *buff, jstring str, int nbuff) { jboolean isCopy = 0; const char *copy_buff = (*env)->GetStringUTFChars (env, str, &isCopy); strncpy (buff, copy_buff, nbuff); if (isCopy) (*env)->ReleaseStringUTFChars (env, str, copy_buff); } void Java_net_dot_MonoRunner_setEnv (JNIEnv* env, jobject thiz, jstring j_key, jstring j_value) { const char *key = (*env)->GetStringUTFChars(env, j_key, 0); const char *val = (*env)->GetStringUTFChars(env, j_value, 0); setenv (key, val, true); (*env)->ReleaseStringUTFChars(env, j_key, key); (*env)->ReleaseStringUTFChars(env, j_value, val); } int Java_net_dot_MonoRunner_initRuntime (JNIEnv* env, jobject thiz, jstring j_files_dir, jstring j_cache_dir, jstring j_testresults_dir, jstring j_entryPointLibName, jobjectArray j_args) { char file_dir[2048]; char cache_dir[2048]; char testresults_dir[2048]; char entryPointLibName[2048]; strncpy_str (env, file_dir, j_files_dir, sizeof(file_dir)); strncpy_str (env, cache_dir, j_cache_dir, sizeof(cache_dir)); strncpy_str (env, 
testresults_dir, j_testresults_dir, sizeof(testresults_dir)); strncpy_str (env, entryPointLibName, j_entryPointLibName, sizeof(entryPointLibName)); bundle_path = file_dir; executable = entryPointLibName; setenv ("HOME", bundle_path, true); setenv ("TMPDIR", cache_dir, true); setenv ("TEST_RESULTS_DIR", testresults_dir, true); int args_len = (*env)->GetArrayLength(env, j_args); int managed_argc = args_len + 1; char** managed_argv = (char**)malloc(managed_argc * sizeof(char*)); managed_argv[0] = bundle_path; for (int i = 0; i < args_len; ++i) { jstring j_arg = (*env)->GetObjectArrayElement(env, j_args, i); managed_argv[i + 1] = (*env)->GetStringUTFChars(env, j_arg, NULL); } int res = mono_droid_runtime_init (executable, managed_argc, managed_argv); for (int i = 0; i < args_len; ++i) { jstring j_arg = (*env)->GetObjectArrayElement(env, j_args, i); (*env)->ReleaseStringUTFChars(env, j_arg, managed_argv[i + 1]); } free(managed_argv); return res; } // called from C# void invoke_external_native_api (void (*callback)(void)) { if (callback) callback(); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <mono/utils/mono-publib.h> #include <mono/utils/mono-logger.h> #include <mono/metadata/assembly.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/mono-gc.h> #include <mono/metadata/exception.h> #include <mono/jit/jit.h> #include <mono/jit/mono-private-unstable.h> #include <sys/stat.h> #include <stdlib.h> #include <stdio.h> #include <fcntl.h> #include <errno.h> #include <string.h> #include <jni.h> #include <android/log.h> #include <sys/system_properties.h> #include <sys/mman.h> #include <assert.h> #include <unistd.h> static char *bundle_path; static char *executable; #define LOG_INFO(fmt, ...) __android_log_print(ANDROID_LOG_DEBUG, "DOTNET", fmt, ##__VA_ARGS__) #define LOG_ERROR(fmt, ...) __android_log_print(ANDROID_LOG_ERROR, "DOTNET", fmt, ##__VA_ARGS__) #if defined(__arm__) #define ANDROID_RUNTIME_IDENTIFIER "android-arm" #elif defined(__aarch64__) #define ANDROID_RUNTIME_IDENTIFIER "android-arm64" #elif defined(__i386__) #define ANDROID_RUNTIME_IDENTIFIER "android-x86" #elif defined(__x86_64__) #define ANDROID_RUNTIME_IDENTIFIER "android-x64" #else #error Unknown architecture #endif #define RUNTIMECONFIG_BIN_FILE "runtimeconfig.bin" static MonoAssembly* mono_droid_load_assembly (const char *name, const char *culture) { char filename [1024]; char path [1024]; int res; LOG_INFO ("assembly_preload_hook: %s %s %s\n", name, culture, bundle_path); int len = strlen (name); int has_extension = len > 3 && name [len - 4] == '.' && (!strcmp ("exe", name + (len - 3)) || !strcmp ("dll", name + (len - 3))); // add extensions if required. strlcpy (filename, name, sizeof (filename)); if (!has_extension) { strlcat (filename, ".dll", sizeof (filename)); } if (culture && strcmp (culture, "")) res = snprintf (path, sizeof (path) - 1, "%s/%s/%s", bundle_path, culture, filename); else res = snprintf (path, sizeof (path) - 1, "%s/%s", bundle_path, filename); assert (res > 0); struct stat buffer; if (stat (path, &buffer) == 0) { MonoAssembly *assembly = mono_assembly_open (path, NULL); assert (assembly); return assembly; } return NULL; } static MonoAssembly* mono_droid_assembly_preload_hook (MonoAssemblyName *aname, char **assemblies_path, void* user_data) { const char *name = mono_assembly_name_get_name (aname); const char *culture = mono_assembly_name_get_culture (aname); return mono_droid_load_assembly (name, culture); } static unsigned char * load_aot_data (MonoAssembly *assembly, int size, void *user_data, void **out_handle) { *out_handle = NULL; char path [1024]; int res; MonoAssemblyName *assembly_name = mono_assembly_get_name (assembly); const char *aname = mono_assembly_name_get_name (assembly_name); LOG_INFO ("Looking for aot data for assembly '%s'.", aname); res = snprintf (path, sizeof (path) - 1, "%s/%s.aotdata", bundle_path, aname); assert (res > 0); int fd = open (path, O_RDONLY); if (fd < 0) { LOG_INFO ("Could not load the aot data for %s from %s: %s\n", aname, path, strerror (errno)); return NULL; } void *ptr = mmap (NULL, size, PROT_READ, MAP_FILE | MAP_PRIVATE, fd, 0); if (ptr == MAP_FAILED) { LOG_INFO ("Could not map the aot file for %s: %s\n", aname, strerror (errno)); close (fd); return NULL; } close (fd); LOG_INFO ("Loaded aot data for %s.\n", aname); *out_handle = ptr; return (unsigned char *) ptr; } static void free_aot_data (MonoAssembly *assembly, int size, void *user_data, void *handle) { munmap (handle, size); } char * 
strdup_printf (const char *msg, ...) { va_list args; char *formatted = NULL; va_start (args, msg); vasprintf (&formatted, msg, args); va_end (args); return formatted; } static MonoObject * mono_droid_fetch_exception_property (MonoObject *obj, const char *name, bool is_virtual) { MonoMethod *get = NULL; MonoMethod *get_virt = NULL; MonoObject *exc = NULL; get = mono_class_get_method_from_name (mono_get_exception_class (), name, 0); if (get) { if (is_virtual) { get_virt = mono_object_get_virtual_method (obj, get); if (get_virt) get = get_virt; } return (MonoObject *) mono_runtime_invoke (get, obj, NULL, &exc); } else { printf ("Could not find the property System.Exception.%s", name); } return NULL; } static char * mono_droid_fetch_exception_property_string (MonoObject *obj, const char *name, bool is_virtual) { MonoString *str = (MonoString *) mono_droid_fetch_exception_property (obj, name, is_virtual); return str ? mono_string_to_utf8 (str) : NULL; } void unhandled_exception_handler (MonoObject *exc, void *user_data) { MonoClass *type = mono_object_get_class (exc); char *type_name = strdup_printf ("%s.%s", mono_class_get_namespace (type), mono_class_get_name (type)); char *trace = mono_droid_fetch_exception_property_string (exc, "get_StackTrace", true); char *message = mono_droid_fetch_exception_property_string (exc, "get_Message", true); LOG_ERROR("UnhandledException: %s %s %s", type_name, message, trace); free (trace); free (message); free (type_name); exit (1); } void log_callback (const char *log_domain, const char *log_level, const char *message, mono_bool fatal, void *user_data) { LOG_INFO ("(%s %s) %s", log_domain, log_level, message); if (fatal) { LOG_ERROR ("Exit code: %d.", 1); exit (1); } } #if defined(FORCE_AOT) && defined(STATIC_AOT) void register_aot_modules (void); #endif void cleanup_runtime_config (MonovmRuntimeConfigArguments *args, void *user_data) { free (args); free (user_data); } int mono_droid_runtime_init (const char* executable, int managed_argc, char* managed_argv[]) { // NOTE: these options can be set via command line args for adb or xharness, see AndroidSampleApp.csproj // uncomment for debug output: // //setenv ("XUNIT_VERBOSE", "true", true); //setenv ("MONO_LOG_LEVEL", "debug", true); //setenv ("MONO_LOG_MASK", "all", true); // build using DiagnosticPorts property in AndroidAppBuilder // or set DOTNET_DiagnosticPorts env via adb, xharness when undefined. 
// NOTE, using DOTNET_DiagnosticPorts requires app build using AndroidAppBuilder and RuntimeComponents=diagnostics_tracing #ifdef DIAGNOSTIC_PORTS setenv ("DOTNET_DiagnosticPorts", DIAGNOSTIC_PORTS, true); #endif bool wait_for_debugger = false; chdir (bundle_path); // TODO: set TRUSTED_PLATFORM_ASSEMBLIES, APP_PATHS and NATIVE_DLL_SEARCH_DIRECTORIES const char* appctx_keys[2]; appctx_keys[0] = "RUNTIME_IDENTIFIER"; appctx_keys[1] = "APP_CONTEXT_BASE_DIRECTORY"; const char* appctx_values[2]; appctx_values[0] = ANDROID_RUNTIME_IDENTIFIER; appctx_values[1] = bundle_path; char *file_name = RUNTIMECONFIG_BIN_FILE; int str_len = strlen (bundle_path) + strlen (file_name) + 1; // +1 is for the "/" char *file_path = (char *)malloc (sizeof (char) * (str_len +1)); // +1 is for the terminating null character int num_char = snprintf (file_path, (str_len + 1), "%s/%s", bundle_path, file_name); struct stat buffer; LOG_INFO ("file_path: %s\n", file_path); assert (num_char > 0 && num_char == str_len); if (stat (file_path, &buffer) == 0) { MonovmRuntimeConfigArguments *arg = (MonovmRuntimeConfigArguments *)malloc (sizeof (MonovmRuntimeConfigArguments)); arg->kind = 0; arg->runtimeconfig.name.path = file_path; monovm_runtimeconfig_initialize (arg, cleanup_runtime_config, file_path); } else { free (file_path); } monovm_initialize(2, appctx_keys, appctx_values); mono_debug_init (MONO_DEBUG_FORMAT_MONO); mono_install_assembly_preload_hook (mono_droid_assembly_preload_hook, NULL); mono_install_load_aot_data_hook (load_aot_data, free_aot_data, NULL); mono_install_unhandled_exception_hook (unhandled_exception_handler, NULL); mono_trace_set_log_handler (log_callback, NULL); mono_set_signal_chaining (true); mono_set_crash_chaining (true); if (wait_for_debugger) { char* options[] = { "--debugger-agent=transport=dt_socket,server=y,address=0.0.0.0:55555" }; mono_jit_parse_options (1, options); } #if FORCE_INTERPRETER LOG_INFO("Interp Enabled"); mono_jit_set_aot_mode(MONO_AOT_MODE_INTERP_ONLY); #elif FORCE_AOT LOG_INFO("AOT Enabled"); #if STATIC_AOT register_aot_modules(); #endif mono_jit_set_aot_mode(MONO_AOT_MODE_FULL); #endif MonoDomain *domain = mono_jit_init_version ("dotnet.android", "mobile"); assert (domain); MonoAssembly *assembly = mono_droid_load_assembly (executable, NULL); assert (assembly); LOG_INFO ("Executable: %s", executable); int res = mono_jit_exec (domain, assembly, managed_argc, managed_argv); LOG_INFO ("Exit code: %d.", res); mono_jit_cleanup (domain); return res; } static void strncpy_str (JNIEnv *env, char *buff, jstring str, int nbuff) { jboolean isCopy = 0; const char *copy_buff = (*env)->GetStringUTFChars (env, str, &isCopy); strncpy (buff, copy_buff, nbuff); if (isCopy) (*env)->ReleaseStringUTFChars (env, str, copy_buff); } void Java_net_dot_MonoRunner_setEnv (JNIEnv* env, jobject thiz, jstring j_key, jstring j_value) { const char *key = (*env)->GetStringUTFChars(env, j_key, 0); const char *val = (*env)->GetStringUTFChars(env, j_value, 0); setenv (key, val, true); (*env)->ReleaseStringUTFChars(env, j_key, key); (*env)->ReleaseStringUTFChars(env, j_value, val); } int Java_net_dot_MonoRunner_initRuntime (JNIEnv* env, jobject thiz, jstring j_files_dir, jstring j_cache_dir, jstring j_testresults_dir, jstring j_entryPointLibName, jobjectArray j_args) { char file_dir[2048]; char cache_dir[2048]; char testresults_dir[2048]; char entryPointLibName[2048]; strncpy_str (env, file_dir, j_files_dir, sizeof(file_dir)); strncpy_str (env, cache_dir, j_cache_dir, sizeof(cache_dir)); strncpy_str (env, 
testresults_dir, j_testresults_dir, sizeof(testresults_dir)); strncpy_str (env, entryPointLibName, j_entryPointLibName, sizeof(entryPointLibName)); bundle_path = file_dir; executable = entryPointLibName; setenv ("HOME", bundle_path, true); setenv ("TMPDIR", cache_dir, true); setenv ("TEST_RESULTS_DIR", testresults_dir, true); int args_len = (*env)->GetArrayLength(env, j_args); int managed_argc = args_len + 1; char** managed_argv = (char**)malloc(managed_argc * sizeof(char*)); managed_argv[0] = bundle_path; for (int i = 0; i < args_len; ++i) { jstring j_arg = (*env)->GetObjectArrayElement(env, j_args, i); managed_argv[i + 1] = (*env)->GetStringUTFChars(env, j_arg, NULL); } int res = mono_droid_runtime_init (executable, managed_argc, managed_argv); for (int i = 0; i < args_len; ++i) { jstring j_arg = (*env)->GetObjectArrayElement(env, j_args, i); (*env)->ReleaseStringUTFChars(env, j_arg, managed_argv[i + 1]); } free(managed_argv); return res; } // called from C# void invoke_external_native_api (void (*callback)(void)) { if (callback) callback(); }
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/arm/Gcreate_addr_space.c
/* libunwind - a platform-independent unwind library Copyright (C) 2008 CodeSourcery This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <stdlib.h> #include "unwind_i.h" unw_addr_space_t unw_create_addr_space (unw_accessors_t *a, int byte_order) { #ifdef UNW_LOCAL_ONLY return NULL; #else unw_addr_space_t as; /* * ARM supports little-endian and big-endian. */ if (byte_order != 0 && byte_order_is_valid(byte_order) == 0) return NULL; as = malloc (sizeof (*as)); if (!as) return NULL; memset (as, 0, sizeof (*as)); as->acc = *a; /* Default to little-endian for ARM. */ if (byte_order == 0 || byte_order == UNW_LITTLE_ENDIAN) as->big_endian = 0; else as->big_endian = 1; return as; #endif }
/* libunwind - a platform-independent unwind library Copyright (C) 2008 CodeSourcery This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <stdlib.h> #include "unwind_i.h" unw_addr_space_t unw_create_addr_space (unw_accessors_t *a, int byte_order) { #ifdef UNW_LOCAL_ONLY return NULL; #else unw_addr_space_t as; /* * ARM supports little-endian and big-endian. */ if (byte_order != 0 && byte_order_is_valid(byte_order) == 0) return NULL; as = malloc (sizeof (*as)); if (!as) return NULL; memset (as, 0, sizeof (*as)); as->acc = *a; /* Default to little-endian for ARM. */ if (byte_order == 0 || byte_order == UNW_LITTLE_ENDIAN) as->big_endian = 0; else as->big_endian = 1; return as; #endif }
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/metadata/class.c
/** * \file * Class management for the Mono runtime * * Author: * Miguel de Icaza ([email protected]) * * Copyright 2001-2003 Ximian, Inc (http://www.ximian.com) * Copyright 2004-2009 Novell, Inc (http://www.novell.com) * Copyright 2012 Xamarin Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include <config.h> #ifdef HAVE_ALLOCA_H #include <alloca.h> #endif #include <glib.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #include <mono/metadata/image.h> #include <mono/metadata/image-internals.h> #include <mono/metadata/assembly.h> #include <mono/metadata/assembly-internals.h> #include <mono/metadata/exception-internals.h> #include <mono/metadata/metadata.h> #include <mono/metadata/metadata-internals.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/tokentype.h> #include <mono/metadata/class-init.h> #include <mono/metadata/class-internals.h> #include <mono/metadata/object.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/mono-endian.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/reflection.h> #include <mono/metadata/exception.h> #include <mono/metadata/attrdefs.h> #include <mono/metadata/gc-internals.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/metadata-update.h> #include <mono/utils/mono-counters.h> #include <mono/utils/mono-string.h> #include <mono/utils/mono-error-internals.h> #include <mono/utils/mono-logger-internals.h> #include <mono/utils/mono-memory-model.h> #include <mono/utils/atomic.h> #include <mono/utils/unlocked.h> #include <mono/utils/bsearch.h> #include <mono/utils/checked-build.h> MonoStats mono_stats; /* Statistics */ extern gint32 mono_inflated_methods_size; /* Function supplied by the runtime to find classes by name using information from the AOT file */ static MonoGetClassFromName get_class_from_name = NULL; static gboolean can_access_type (MonoClass *access_klass, MonoClass *member_klass); static char* mono_assembly_name_from_token (MonoImage *image, guint32 type_token); static guint32 mono_field_resolve_flags (MonoClassField *field); static MonoClass * mono_class_from_name_checked_aux (MonoImage *image, const char* name_space, const char *name, GHashTable* visited_images, gboolean case_sensitive, MonoError *error); GENERATE_GET_CLASS_WITH_CACHE (valuetype, "System", "ValueType") GENERATE_TRY_GET_CLASS_WITH_CACHE (handleref, "System.Runtime.InteropServices", "HandleRef") #define CTOR_REQUIRED_FLAGS (METHOD_ATTRIBUTE_SPECIAL_NAME | METHOD_ATTRIBUTE_RT_SPECIAL_NAME) #define CTOR_INVALID_FLAGS (METHOD_ATTRIBUTE_STATIC) // define to print types whenever custom modifiers are appended during inflation #undef DEBUG_INFLATE_CMODS static MonoImage * mono_method_get_image (MonoMethod *method) { return m_class_get_image (method->klass); } /** * mono_class_from_typeref: * \param image a MonoImage * \param type_token a TypeRef token * * Creates the \c MonoClass* structure representing the type defined by * the typeref token valid inside \p image. * \returns The \c MonoClass* representing the typeref token, or NULL if it could * not be loaded. 
*/ MonoClass * mono_class_from_typeref (MonoImage *image, guint32 type_token) { ERROR_DECL (error); MonoClass *klass = mono_class_from_typeref_checked (image, type_token, error); g_assert (is_ok (error)); /*FIXME proper error handling*/ return klass; } /** * mono_class_from_typeref_checked: * \param image a MonoImage * \param type_token a TypeRef token * \param error error return code, if any. * * Creates the \c MonoClass* structure representing the type defined by * the typeref token valid inside \p image. * * \returns The \c MonoClass* representing the typeref token, NULL if it could * not be loaded with the \p error value filled with the information about the * error. */ MonoClass * mono_class_from_typeref_checked (MonoImage *image, guint32 type_token, MonoError *error) { guint32 cols [MONO_TYPEREF_SIZE]; MonoTableInfo *t = &image->tables [MONO_TABLE_TYPEREF]; guint32 idx; const char *name, *nspace; MonoClass *res = NULL; MonoImage *module; error_init (error); mono_metadata_decode_row (t, (type_token&0xffffff)-1, cols, MONO_TYPEREF_SIZE); name = mono_metadata_string_heap (image, cols [MONO_TYPEREF_NAME]); nspace = mono_metadata_string_heap (image, cols [MONO_TYPEREF_NAMESPACE]); idx = cols [MONO_TYPEREF_SCOPE] >> MONO_RESOLUTION_SCOPE_BITS; switch (cols [MONO_TYPEREF_SCOPE] & MONO_RESOLUTION_SCOPE_MASK) { case MONO_RESOLUTION_SCOPE_MODULE: /* LAMESPEC The spec says that a null module resolution scope should go through the exported type table. This is not the observed behavior of existing implementations. The defacto behavior is that it's just a typedef in disguise. */ /* a typedef in disguise */ res = mono_class_from_name_checked (image, nspace, name, error); goto done; case MONO_RESOLUTION_SCOPE_MODULEREF: module = mono_image_load_module_checked (image, idx, error); if (module) res = mono_class_from_name_checked (module, nspace, name, error); goto done; case MONO_RESOLUTION_SCOPE_TYPEREF: { MonoClass *enclosing; GList *tmp; if (idx == mono_metadata_token_index (type_token)) { mono_error_set_bad_image (error, image, "Image with self-referencing typeref token %08x.", type_token); return NULL; } enclosing = mono_class_from_typeref_checked (image, MONO_TOKEN_TYPE_REF | idx, error); return_val_if_nok (error, NULL); GList *nested_classes = mono_class_get_nested_classes_property (enclosing); if (m_class_is_nested_classes_inited (enclosing) && nested_classes) { /* Micro-optimization: don't scan the metadata tables if enclosing is already inited */ for (tmp = nested_classes; tmp; tmp = tmp->next) { res = (MonoClass *)tmp->data; if (strcmp (m_class_get_name (res), name) == 0) return res; } } else { MonoImage *enclosing_image = m_class_get_image (enclosing); guint32 enclosing_type_token = m_class_get_type_token (enclosing); /* Don't call mono_class_init_internal as we might've been called by it recursively */ int i = mono_metadata_nesting_typedef (enclosing_image, enclosing_type_token, 1); while (i) { guint32 class_nested = mono_metadata_decode_row_col (&enclosing_image->tables [MONO_TABLE_NESTEDCLASS], i - 1, MONO_NESTED_CLASS_NESTED); guint32 string_offset = mono_metadata_decode_row_col (&enclosing_image->tables [MONO_TABLE_TYPEDEF], class_nested - 1, MONO_TYPEDEF_NAME); const char *nname = mono_metadata_string_heap (enclosing_image, string_offset); if (strcmp (nname, name) == 0) return mono_class_create_from_typedef (enclosing_image, MONO_TOKEN_TYPE_DEF | class_nested, error); i = mono_metadata_nesting_typedef (enclosing_image, enclosing_type_token, i + 1); } } g_warning ("TypeRef 
ResolutionScope not yet handled (%d) for %s.%s in image %s", idx, nspace, name, image->name); goto done; } case MONO_RESOLUTION_SCOPE_ASSEMBLYREF: break; } if (mono_metadata_table_bounds_check (image, MONO_TABLE_ASSEMBLYREF, idx)) { mono_error_set_bad_image (error, image, "Image with invalid assemblyref token %08x.", idx); return NULL; } if (!image->references || !image->references [idx - 1]) mono_assembly_load_reference (image, idx - 1); g_assert (image->references [idx - 1]); /* If the assembly did not load, register this as a type load exception */ if (image->references [idx - 1] == REFERENCE_MISSING){ MonoAssemblyName aname; memset (&aname, 0, sizeof (MonoAssemblyName)); char *human_name; mono_assembly_get_assemblyref (image, idx - 1, &aname); human_name = mono_stringify_assembly_name (&aname); mono_error_set_simple_file_not_found (error, human_name); g_free (human_name); return NULL; } res = mono_class_from_name_checked (image->references [idx - 1]->image, nspace, name, error); done: /* Generic case, should be avoided for when a better error is possible. */ if (!res && is_ok (error)) { char *name = mono_class_name_from_token (image, type_token); char *assembly = mono_assembly_name_from_token (image, type_token); mono_error_set_type_load_name (error, name, assembly, "Could not resolve type with token %08x from typeref (expected class '%s' in assembly '%s')", type_token, name, assembly); } return res; } static void * mono_image_memdup (MonoImage *image, void *data, guint size) { void *res = mono_image_alloc (image, size); memcpy (res, data, size); return res; } /* Copy everything mono_metadata_free_array free. */ MonoArrayType * mono_dup_array_type (MonoImage *image, MonoArrayType *a) { if (image) { a = (MonoArrayType *)mono_image_memdup (image, a, sizeof (MonoArrayType)); if (a->sizes) a->sizes = (int *)mono_image_memdup (image, a->sizes, a->numsizes * sizeof (int)); if (a->lobounds) a->lobounds = (int *)mono_image_memdup (image, a->lobounds, a->numlobounds * sizeof (int)); } else { a = (MonoArrayType *)g_memdup (a, sizeof (MonoArrayType)); if (a->sizes) a->sizes = (int *)g_memdup (a->sizes, a->numsizes * sizeof (int)); if (a->lobounds) a->lobounds = (int *)g_memdup (a->lobounds, a->numlobounds * sizeof (int)); } return a; } /* Copy everything mono_metadata_free_method_signature free. 
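 *
 * A hedged sketch of the intended alloc/free pairing (assuming a NULL image,
 * i.e. a heap allocation owned by the caller):
 *
 *   MonoMethodSignature *copy = mono_metadata_signature_deep_dup (NULL, sig);
 *   ... use copy ...
 *   mono_metadata_free_method_signature (copy);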
*/ MonoMethodSignature* mono_metadata_signature_deep_dup (MonoImage *image, MonoMethodSignature *sig) { int i; sig = mono_metadata_signature_dup_full (image, sig); sig->ret = mono_metadata_type_dup (image, sig->ret); for (i = 0; i < sig->param_count; ++i) sig->params [i] = mono_metadata_type_dup (image, sig->params [i]); return sig; } static void _mono_type_get_assembly_name (MonoClass *klass, GString *str) { MonoAssembly *ta = m_class_get_image (klass)->assembly; char *name; name = mono_stringify_assembly_name (&ta->aname); g_string_append_printf (str, ", %s", name); g_free (name); } static void mono_type_name_check_byref (MonoType *type, GString *str) { if (m_type_is_byref (type)) g_string_append_c (str, '&'); } static char* escape_special_chars (const char* identifier) { size_t id_len = strlen (identifier); // Assume the worst case, and thus only allocate once char *res = g_malloc (id_len * 2 + 1); char *res_ptr = res; for (const char *s = identifier; *s != 0; s++) { switch (*s) { case ',': case '+': case '&': case '*': case '[': case ']': case '\\': *res_ptr++ = '\\'; break; } *res_ptr++ = *s; } *res_ptr = '\0'; return res; } /** * mono_identifier_escape_type_name_chars: * \param identifier the display name of a mono type * * \returns The name in external form, that is with escaping backslashes. * * The displayed form of an identifier has the characters ,+&*[]\ * that have special meaning in type names escaped with a preceeding * backslash (\) character. */ char* mono_identifier_escape_type_name_chars (const char* identifier) { if (!identifier) return NULL; // If the string has any special characters escape the whole thing, otherwise just return the input for (const char *s = identifier; *s != 0; s++) { switch (*s) { case ',': case '+': case '&': case '*': case '[': case ']': case '\\': return escape_special_chars (identifier); } } return g_strdup (identifier); } static void mono_type_get_name_recurse (MonoType *type, GString *str, gboolean is_recursed, MonoTypeNameFormat format) { MonoClass *klass; switch (type->type) { case MONO_TYPE_ARRAY: { int i, rank = type->data.array->rank; MonoTypeNameFormat nested_format; nested_format = format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED ? MONO_TYPE_NAME_FORMAT_FULL_NAME : format; mono_type_get_name_recurse ( m_class_get_byval_arg (type->data.array->eklass), str, FALSE, nested_format); g_string_append_c (str, '['); if (rank == 1) g_string_append_c (str, '*'); else if (rank > 64) // Only taken in an error path, runtime will not load arrays of more than 32 dimensions g_string_append_printf (str, "%d", rank); else for (i = 1; i < rank; i++) g_string_append_c (str, ','); g_string_append_c (str, ']'); mono_type_name_check_byref (type, str); if (format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED) _mono_type_get_assembly_name (type->data.array->eklass, str); break; } case MONO_TYPE_SZARRAY: { MonoTypeNameFormat nested_format; nested_format = format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED ? MONO_TYPE_NAME_FORMAT_FULL_NAME : format; mono_type_get_name_recurse ( m_class_get_byval_arg (type->data.klass), str, FALSE, nested_format); g_string_append (str, "[]"); mono_type_name_check_byref (type, str); if (format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED) _mono_type_get_assembly_name (type->data.klass, str); break; } case MONO_TYPE_PTR: { MonoTypeNameFormat nested_format; nested_format = format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED ? 
MONO_TYPE_NAME_FORMAT_FULL_NAME : format; mono_type_get_name_recurse ( type->data.type, str, FALSE, nested_format); g_string_append_c (str, '*'); mono_type_name_check_byref (type, str); if (format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED) _mono_type_get_assembly_name (mono_class_from_mono_type_internal (type->data.type), str); break; } case MONO_TYPE_VAR: case MONO_TYPE_MVAR: if (!mono_generic_param_name (type->data.generic_param)) g_string_append_printf (str, "%s%d", type->type == MONO_TYPE_VAR ? "!" : "!!", type->data.generic_param->num); else g_string_append (str, mono_generic_param_name (type->data.generic_param)); mono_type_name_check_byref (type, str); break; default: klass = mono_class_from_mono_type_internal (type); if (m_class_get_nested_in (klass)) { mono_type_get_name_recurse ( m_class_get_byval_arg (m_class_get_nested_in (klass)), str, TRUE, format); if (format == MONO_TYPE_NAME_FORMAT_IL) g_string_append_c (str, '.'); else g_string_append_c (str, '+'); } else if (*m_class_get_name_space (klass)) { const char *klass_name_space = m_class_get_name_space (klass); if (format == MONO_TYPE_NAME_FORMAT_IL) g_string_append (str, klass_name_space); else { char *escaped = mono_identifier_escape_type_name_chars (klass_name_space); g_string_append (str, escaped); g_free (escaped); } g_string_append_c (str, '.'); } const char *klass_name = m_class_get_name (klass); if (format == MONO_TYPE_NAME_FORMAT_IL) { const char *s = strchr (klass_name, '`'); gssize len = s ? (s - klass_name) : (gssize)strlen (klass_name); g_string_append_len (str, klass_name, len); } else { char *escaped = mono_identifier_escape_type_name_chars (klass_name); g_string_append (str, escaped); g_free (escaped); } if (is_recursed) break; if (mono_class_is_ginst (klass)) { MonoGenericClass *gclass = mono_class_get_generic_class (klass); MonoGenericInst *inst = gclass->context.class_inst; MonoTypeNameFormat nested_format; int i; nested_format = format == MONO_TYPE_NAME_FORMAT_FULL_NAME ? 
MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED : format; if (format == MONO_TYPE_NAME_FORMAT_IL) g_string_append_c (str, '<'); else g_string_append_c (str, '['); for (i = 0; i < inst->type_argc; i++) { MonoType *t = inst->type_argv [i]; if (i) g_string_append_c (str, ','); if ((nested_format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED) && (t->type != MONO_TYPE_VAR) && (type->type != MONO_TYPE_MVAR)) g_string_append_c (str, '['); mono_type_get_name_recurse (inst->type_argv [i], str, FALSE, nested_format); if ((nested_format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED) && (t->type != MONO_TYPE_VAR) && (type->type != MONO_TYPE_MVAR)) g_string_append_c (str, ']'); } if (format == MONO_TYPE_NAME_FORMAT_IL) g_string_append_c (str, '>'); else g_string_append_c (str, ']'); } else if (mono_class_is_gtd (klass) && (format != MONO_TYPE_NAME_FORMAT_FULL_NAME) && (format != MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED)) { int i; if (format == MONO_TYPE_NAME_FORMAT_IL) g_string_append_c (str, '<'); else g_string_append_c (str, '['); for (i = 0; i < mono_class_get_generic_container (klass)->type_argc; i++) { if (i) g_string_append_c (str, ','); g_string_append (str, mono_generic_container_get_param_info (mono_class_get_generic_container (klass), i)->name); } if (format == MONO_TYPE_NAME_FORMAT_IL) g_string_append_c (str, '>'); else g_string_append_c (str, ']'); } mono_type_name_check_byref (type, str); if ((format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED) && (type->type != MONO_TYPE_VAR) && (type->type != MONO_TYPE_MVAR)) _mono_type_get_assembly_name (klass, str); break; } } /** * mono_type_get_name_full: * \param type a type * \param format the format for the return string. * * * \returns The string representation in a number of formats: * * if \p format is \c MONO_TYPE_NAME_FORMAT_REFLECTION, the return string is * returned in the format required by \c System.Reflection, this is the * inverse of mono_reflection_parse_type(). * * if \p format is \c MONO_TYPE_NAME_FORMAT_IL, it returns a syntax that can * be used by the IL assembler. * * if \p format is \c MONO_TYPE_NAME_FORMAT_FULL_NAME * * if \p format is \c MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED */ char* mono_type_get_name_full (MonoType *type, MonoTypeNameFormat format) { GString* result; result = g_string_new (""); mono_type_get_name_recurse (type, result, FALSE, format); return g_string_free (result, FALSE); } /** * mono_type_get_full_name: * \param class a class * * \returns The string representation for type as required by System.Reflection. * The inverse of mono_reflection_parse_type(). */ char * mono_type_get_full_name (MonoClass *klass) { return mono_type_get_name_full (m_class_get_byval_arg (klass), MONO_TYPE_NAME_FORMAT_REFLECTION); } /** * mono_type_get_name: * \param type a type * \returns The string representation for type as it would be represented in IL code. */ char* mono_type_get_name (MonoType *type) { return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_IL); } /** * mono_type_get_underlying_type: * \param type a type * \returns The \c MonoType for the underlying integer type if \p type * is an enum and byref is false, otherwise the type itself. 
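 *
 * E.g. (sketch): for a non-byref enum backed by int32 the returned type has
 * ->type == MONO_TYPE_I4 instead of MONO_TYPE_VALUETYPE.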
*/ MonoType* mono_type_get_underlying_type (MonoType *type) { if (type->type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (type->data.klass) && !m_type_is_byref (type)) return mono_class_enum_basetype_internal (type->data.klass); if (type->type == MONO_TYPE_GENERICINST && m_class_is_enumtype (type->data.generic_class->container_class) && !m_type_is_byref (type)) return mono_class_enum_basetype_internal (type->data.generic_class->container_class); return type; } /** * mono_class_is_open_constructed_type: * \param type a type * * \returns TRUE if type represents a generics open constructed type. * IOW, not all type parameters required for the instantiation have * been provided or it's a generic type definition. * * An open constructed type means it's a non realizable type. Not to * be mixed up with an abstract type - we can't cast or dispatch to * an open type, for example. */ gboolean mono_class_is_open_constructed_type (MonoType *t) { switch (t->type) { case MONO_TYPE_VAR: case MONO_TYPE_MVAR: return TRUE; case MONO_TYPE_SZARRAY: return mono_class_is_open_constructed_type (m_class_get_byval_arg (t->data.klass)); case MONO_TYPE_ARRAY: return mono_class_is_open_constructed_type (m_class_get_byval_arg (t->data.array->eklass)); case MONO_TYPE_PTR: return mono_class_is_open_constructed_type (t->data.type); case MONO_TYPE_GENERICINST: return t->data.generic_class->context.class_inst->is_open; case MONO_TYPE_CLASS: case MONO_TYPE_VALUETYPE: return mono_class_is_gtd (t->data.klass); default: return FALSE; } } /* This is a simple function to catch the most common bad instances of generic types. Specially those that might lead to further failures in the runtime. */ gboolean mono_type_is_valid_generic_argument (MonoType *type) { switch (type->type) { case MONO_TYPE_VOID: case MONO_TYPE_TYPEDBYREF: return FALSE; case MONO_TYPE_VALUETYPE: return !m_class_is_byreflike (type->data.klass); default: return TRUE; } } static gboolean can_inflate_gparam_with (MonoGenericParam *gparam, MonoType *type) { if (!mono_type_is_valid_generic_argument (type)) return FALSE; #if 0 /* Avoid inflating gparams with valuetype constraints with ref types during gsharing */ MonoGenericParamInfo *info = mono_generic_param_info (gparam); if (info && (info->flags & GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT)) { if (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) { MonoGenericParam *inst_gparam = type->data.generic_param; if (inst_gparam->gshared_constraint && inst_gparam->gshared_constraint->type == MONO_TYPE_OBJECT) return FALSE; } } #endif return TRUE; } static MonoType* inflate_generic_custom_modifiers (MonoImage *image, const MonoType *type, MonoGenericContext *context, MonoError *error); static MonoType* inflate_generic_type (MonoImage *image, MonoType *type, MonoGenericContext *context, MonoError *error) { gboolean changed = FALSE; error_init (error); /* C++/CLI (and some Roslyn tests) constructs method signatures like: * void .CL1`1.Test(!0 modopt(System.Nullable`1<!0>)) * where !0 has a custom modifier which itself mentions the type variable. * So we need to potentially inflate the modifiers. 
*/ if (type->has_cmods) { MonoType *new_type = inflate_generic_custom_modifiers (image, type, context, error); return_val_if_nok (error, NULL); if (new_type != NULL) { type = new_type; changed = TRUE; } } switch (type->type) { case MONO_TYPE_MVAR: { MonoType *nt; int num = mono_type_get_generic_param_num (type); MonoGenericInst *inst = context->method_inst; if (!inst) { if (!changed) return NULL; else return type; } MonoGenericParam *gparam = type->data.generic_param; if (num >= inst->type_argc) { const char *pname = mono_generic_param_name (gparam); mono_error_set_bad_image (error, image, "MVAR %d (%s) cannot be expanded in this context with %d instantiations", num, pname ? pname : "", inst->type_argc); return NULL; } if (!can_inflate_gparam_with (gparam, inst->type_argv [num])) { const char *pname = mono_generic_param_name (gparam); mono_error_set_bad_image (error, image, "MVAR %d (%s) cannot be expanded with type 0x%x", num, pname ? pname : "", inst->type_argv [num]->type); return NULL; } /* * Note that the VAR/MVAR cases are different from the rest. The other cases duplicate @type, * while the VAR/MVAR duplicates a type from the context. So, we need to ensure that the * ->byref__ and ->attrs from @type are propagated to the returned type. */ nt = mono_metadata_type_dup_with_cmods (image, inst->type_argv [num], type); nt->byref__ = type->byref__; nt->attrs = type->attrs; return nt; } case MONO_TYPE_VAR: { MonoType *nt; int num = mono_type_get_generic_param_num (type); MonoGenericInst *inst = context->class_inst; if (!inst) { if (!changed) return NULL; else return type; } MonoGenericParam *gparam = type->data.generic_param; if (num >= inst->type_argc) { const char *pname = mono_generic_param_name (gparam); mono_error_set_bad_image (error, image, "VAR %d (%s) cannot be expanded in this context with %d instantiations", num, pname ? pname : "", inst->type_argc); return NULL; } if (!can_inflate_gparam_with (gparam, inst->type_argv [num])) { const char *pname = mono_generic_param_name (gparam); mono_error_set_bad_image (error, image, "VAR %d (%s) cannot be expanded with type 0x%x", num, pname ? 
pname : "", inst->type_argv [num]->type); return NULL; } #ifdef DEBUG_INFLATE_CMODS gboolean append_cmods; append_cmods = FALSE; if (type->has_cmods && inst->type_argv[num]->has_cmods) { char *tname = mono_type_full_name (type); char *vname = mono_type_full_name (inst->type_argv[num]); printf ("\n\n\tsubstitution for '%s' with '%s' yields...\n", tname, vname); g_free (tname); g_free (vname); append_cmods = TRUE; } #endif nt = mono_metadata_type_dup_with_cmods (image, inst->type_argv [num], type); nt->byref__ = type->byref__ || inst->type_argv[num]->byref__; nt->attrs = type->attrs; #ifdef DEBUG_INFLATE_CMODS if (append_cmods) { char *ntname = mono_type_full_name (nt); printf ("\tyields '%s'\n\n\n", ntname); g_free (ntname); } #endif return nt; } case MONO_TYPE_SZARRAY: { MonoClass *eclass = type->data.klass; MonoType *nt, *inflated = inflate_generic_type (NULL, m_class_get_byval_arg (eclass), context, error); if ((!inflated && !changed) || !is_ok (error)) return NULL; if (!inflated) return type; nt = mono_metadata_type_dup (image, type); nt->data.klass = mono_class_from_mono_type_internal (inflated); mono_metadata_free_type (inflated); return nt; } case MONO_TYPE_ARRAY: { MonoClass *eclass = type->data.array->eklass; MonoType *nt, *inflated = inflate_generic_type (NULL, m_class_get_byval_arg (eclass), context, error); if ((!inflated && !changed) || !is_ok (error)) return NULL; if (!inflated) return type; nt = mono_metadata_type_dup (image, type); nt->data.array->eklass = mono_class_from_mono_type_internal (inflated); mono_metadata_free_type (inflated); return nt; } case MONO_TYPE_GENERICINST: { MonoGenericClass *gclass = type->data.generic_class; MonoGenericInst *inst; MonoType *nt; if (!gclass->context.class_inst->is_open) { if (!changed) return NULL; else return type; } inst = mono_metadata_inflate_generic_inst (gclass->context.class_inst, context, error); return_val_if_nok (error, NULL); if (inst != gclass->context.class_inst) gclass = mono_metadata_lookup_generic_class (gclass->container_class, inst, gclass->is_dynamic); if (gclass == type->data.generic_class) { if (!changed) return NULL; else return type; } nt = mono_metadata_type_dup (image, type); nt->data.generic_class = gclass; return nt; } case MONO_TYPE_CLASS: case MONO_TYPE_VALUETYPE: { MonoClass *klass = type->data.klass; MonoGenericContainer *container = mono_class_try_get_generic_container (klass); MonoGenericInst *inst; MonoGenericClass *gclass = NULL; MonoType *nt; if (!container) { if (!changed) return NULL; else return type; } /* We can't use context->class_inst directly, since it can have more elements */ inst = mono_metadata_inflate_generic_inst (container->context.class_inst, context, error); return_val_if_nok (error, NULL); if (inst == container->context.class_inst) { if (!changed) return NULL; else return type; } gclass = mono_metadata_lookup_generic_class (klass, inst, image_is_dynamic (m_class_get_image (klass))); nt = mono_metadata_type_dup (image, type); nt->type = MONO_TYPE_GENERICINST; nt->data.generic_class = gclass; return nt; } case MONO_TYPE_PTR: { MonoType *nt, *inflated = inflate_generic_type (image, type->data.type, context, error); if ((!inflated && !changed) || !is_ok (error)) return NULL; if (!inflated && changed) return type; nt = mono_metadata_type_dup (image, type); nt->data.type = inflated; return nt; } default: if (!changed) return NULL; else return type; } return NULL; } static MonoType* inflate_generic_custom_modifiers (MonoImage *image, const MonoType *type, MonoGenericContext *context, 
MonoError *error) { MonoType *result = NULL; g_assert (type->has_cmods); int count = mono_type_custom_modifier_count (type); gboolean changed = FALSE; /* Try not to blow up the stack. See comment on MONO_MAX_EXPECTED_CMODS. */ g_assert (count < MONO_MAX_EXPECTED_CMODS); size_t aggregate_size = mono_sizeof_aggregate_modifiers (count); MonoAggregateModContainer *candidate_mods = g_alloca (aggregate_size); memset (candidate_mods, 0, aggregate_size); candidate_mods->count = count; for (int i = 0; i < count; ++i) { gboolean required; MonoType *cmod_old = mono_type_get_custom_modifier (type, i, &required, error); goto_if_nok (error, leave); MonoType *cmod_new = inflate_generic_type (NULL, cmod_old, context, error); goto_if_nok (error, leave); if (cmod_new) changed = TRUE; candidate_mods->modifiers [i].required = required; candidate_mods->modifiers [i].type = cmod_new; } if (changed) { /* if we're going to make a new type, fill in any modifiers that weren't affected by inflation with copies of the original values. */ for (int i = 0; i < count; ++i) { if (candidate_mods->modifiers [i].type == NULL) { candidate_mods->modifiers [i].type = mono_metadata_type_dup (NULL, mono_type_get_custom_modifier (type, i, NULL, error)); /* it didn't error in the first loop, so should be ok now, too */ mono_error_assert_ok (error); } } } #ifdef DEBUG_INFLATE_CMODS if (changed) { char *full_name = mono_type_full_name ((MonoType*)type); printf ("\n\n\tcustom modifier on '%s' affected by subsititution\n\n\n", full_name); g_free (full_name); } #endif if (changed) { MonoType *new_type = g_alloca (mono_sizeof_type_with_mods (count, TRUE)); /* first init just the non-modifier portion of new_type before populating the * new modifiers */ memcpy (new_type, type, MONO_SIZEOF_TYPE); mono_type_with_mods_init (new_type, count, TRUE); mono_type_set_amods (new_type, mono_metadata_get_canonical_aggregate_modifiers (candidate_mods)); result = mono_metadata_type_dup (image, new_type); } leave: for (int i = 0; i < count; ++i) { if (candidate_mods->modifiers [i].type) mono_metadata_free_type (candidate_mods->modifiers [i].type); } return result; } MonoGenericContext * mono_generic_class_get_context (MonoGenericClass *gclass) { return &gclass->context; } MonoGenericContext * mono_class_get_context (MonoClass *klass) { MonoGenericClass *gklass = mono_class_try_get_generic_class (klass); return gklass ? mono_generic_class_get_context (gklass) : NULL; } /* * mono_class_inflate_generic_type_with_mempool: * @mempool: a mempool * @type: a type * @context: a generics context * @error: error context * * The same as mono_class_inflate_generic_type, but allocates the MonoType * from mempool if it is non-NULL. If it is NULL, the MonoType is * allocated on the heap and is owned by the caller. * The returned type can potentially be the same as TYPE, so it should not be * modified by the caller, and it should be freed using mono_metadata_free_type (). 
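 *
 * Hedged usage sketch (illustrative; assumes a NULL image so the caller owns
 * and frees the result):
 *
 *   ERROR_DECL (error);
 *   MonoType *inflated = mono_class_inflate_generic_type_with_mempool (NULL, type, context, error);
 *   if (is_ok (error)) {
 *       ... use inflated ...
 *       mono_metadata_free_type (inflated);
 *   }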
*/ MonoType* mono_class_inflate_generic_type_with_mempool (MonoImage *image, MonoType *type, MonoGenericContext *context, MonoError *error) { MonoType *inflated = NULL; error_init (error); if (context) inflated = inflate_generic_type (image, type, context, error); return_val_if_nok (error, NULL); if (!inflated) { MonoType *shared = mono_metadata_get_shared_type (type); if (shared && !type->has_cmods) { return shared; } else { return mono_metadata_type_dup (image, type); } } UnlockedIncrement (&mono_stats.inflated_type_count); return inflated; } /** * mono_class_inflate_generic_type: * \param type a type * \param context a generics context * \deprecated Please use \c mono_class_inflate_generic_type_checked instead * * If \p type is a generic type and \p context is not NULL, instantiate it using the * generics context \p context. * * \returns The instantiated type or a copy of \p type. The returned \c MonoType is allocated * on the heap and is owned by the caller. Returns NULL on error. */ MonoType* mono_class_inflate_generic_type (MonoType *type, MonoGenericContext *context) { ERROR_DECL (error); MonoType *result; result = mono_class_inflate_generic_type_checked (type, context, error); mono_error_cleanup (error); return result; } /* * mono_class_inflate_generic_type: * @type: a type * @context: a generics context * @error: error context to use * * If @type is a generic type and @context is not NULL, instantiate it using the * generics context @context. * * Returns: The instantiated type or a copy of @type. The returned MonoType is allocated * on the heap and is owned by the caller. */ MonoType* mono_class_inflate_generic_type_checked (MonoType *type, MonoGenericContext *context, MonoError *error) { return mono_class_inflate_generic_type_with_mempool (NULL, type, context, error); } /* * mono_class_inflate_generic_type_no_copy: * * Same as inflate_generic_type_with_mempool, but return TYPE if no inflation * was done. */ static MonoType* mono_class_inflate_generic_type_no_copy (MonoImage *image, MonoType *type, MonoGenericContext *context, MonoError *error) { MonoType *inflated = NULL; error_init (error); if (context) { inflated = inflate_generic_type (image, type, context, error); return_val_if_nok (error, NULL); } if (!inflated) return type; UnlockedIncrement (&mono_stats.inflated_type_count); return inflated; } /* * mono_class_inflate_generic_class: * * Inflate the class @gklass with @context. Set @error on failure. 
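 *
 * E.g. (hypothetical): inflating the generic type definition List`1 with a
 * context binding !0 to int returns the MonoClass for List<int>.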
*/ MonoClass* mono_class_inflate_generic_class_checked (MonoClass *gklass, MonoGenericContext *context, MonoError *error) { MonoClass *res; MonoType *inflated; inflated = mono_class_inflate_generic_type_checked (m_class_get_byval_arg (gklass), context, error); return_val_if_nok (error, NULL); res = mono_class_from_mono_type_internal (inflated); mono_metadata_free_type (inflated); return res; } static MonoGenericContext inflate_generic_context (MonoGenericContext *context, MonoGenericContext *inflate_with, MonoError *error) { MonoGenericInst *class_inst = NULL; MonoGenericInst *method_inst = NULL; MonoGenericContext res = { NULL, NULL }; error_init (error); if (context->class_inst) { class_inst = mono_metadata_inflate_generic_inst (context->class_inst, inflate_with, error); if (!is_ok (error)) goto fail; } if (context->method_inst) { method_inst = mono_metadata_inflate_generic_inst (context->method_inst, inflate_with, error); if (!is_ok (error)) goto fail; } res.class_inst = class_inst; res.method_inst = method_inst; fail: return res; } /** * mono_class_inflate_generic_method: * \param method a generic method * \param context a generics context * * Instantiate the generic method \p method using the generics context \p context. * * \returns The new instantiated method */ MonoMethod * mono_class_inflate_generic_method (MonoMethod *method, MonoGenericContext *context) { ERROR_DECL (error); MonoMethod *res = mono_class_inflate_generic_method_full_checked (method, NULL, context, error); mono_error_assert_msg_ok (error, "Could not inflate generic method"); return res; } MonoMethod * mono_class_inflate_generic_method_checked (MonoMethod *method, MonoGenericContext *context, MonoError *error) { return mono_class_inflate_generic_method_full_checked (method, NULL, context, error); } static gboolean inflated_method_equal (gconstpointer a, gconstpointer b) { const MonoMethodInflated *ma = (const MonoMethodInflated *)a; const MonoMethodInflated *mb = (const MonoMethodInflated *)b; if (ma->declaring != mb->declaring) return FALSE; return mono_metadata_generic_context_equal (&ma->context, &mb->context); } static guint inflated_method_hash (gconstpointer a) { const MonoMethodInflated *ma = (const MonoMethodInflated *)a; return (mono_metadata_generic_context_hash (&ma->context) ^ mono_aligned_addr_hash (ma->declaring)); } static void free_inflated_method (MonoMethodInflated *imethod) { MonoMethod *method = (MonoMethod*)imethod; if (method->signature) mono_metadata_free_inflated_signature (method->signature); if (method->wrapper_type) g_free (((MonoMethodWrapper*)method)->method_data); g_free (method); } /** * mono_class_inflate_generic_method_full_checked: * Instantiate method \p method with the generic context \p context. * On failure returns NULL and sets \p error. * * BEWARE: All non-trivial fields are invalid, including klass, signature, and header. * Use mono_method_signature_internal () and mono_method_get_header () to get the correct values. 
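 *
 * Sketch of the access pattern the BEWARE note implies (illustrative only):
 *
 *   MonoMethod *inflated = mono_class_inflate_generic_method_full_checked (method, NULL, context, error);
 *   return_val_if_nok (error, NULL);
 *   MonoMethodSignature *sig = mono_method_signature_internal (inflated);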
*/ MonoMethod* mono_class_inflate_generic_method_full_checked (MonoMethod *method, MonoClass *klass_hint, MonoGenericContext *context, MonoError *error) { MonoMethod *result; MonoMethodInflated *iresult, *cached; MonoMethodSignature *sig; MonoGenericContext tmp_context; error_init (error); /* The `method' has already been instantiated before => we need to peel out the instantiation and create a new context */ while (method->is_inflated) { MonoGenericContext *method_context = mono_method_get_context (method); MonoMethodInflated *imethod = (MonoMethodInflated *) method; tmp_context = inflate_generic_context (method_context, context, error); return_val_if_nok (error, NULL); context = &tmp_context; if (mono_metadata_generic_context_equal (method_context, context)) return method; method = imethod->declaring; } /* * A method only needs to be inflated if the context has argument for which it is * parametric. Eg: * * class Foo<T> { void Bar(); } - doesn't need to be inflated if only mvars' are supplied * class Foo { void Bar<T> (); } - doesn't need to be if only vars' are supplied * */ if (!((method->is_generic && context->method_inst) || (mono_class_is_gtd (method->klass) && context->class_inst))) return method; iresult = g_new0 (MonoMethodInflated, 1); iresult->context = *context; iresult->declaring = method; if (!context->method_inst && method->is_generic) iresult->context.method_inst = mono_method_get_generic_container (method)->context.method_inst; if (!context->class_inst) { g_assert (!mono_class_is_ginst (iresult->declaring->klass)); if (mono_class_is_gtd (iresult->declaring->klass)) iresult->context.class_inst = mono_class_get_generic_container (iresult->declaring->klass)->context.class_inst; } /* This can happen with some callers like mono_object_get_virtual_method_internal () */ if (!mono_class_is_gtd (iresult->declaring->klass) && !mono_class_is_ginst (iresult->declaring->klass)) iresult->context.class_inst = NULL; MonoMemoryManager *mm = mono_metadata_get_mem_manager_for_method (iresult); // check cache mono_mem_manager_lock (mm); if (!mm->gmethod_cache) mm->gmethod_cache = g_hash_table_new_full (inflated_method_hash, inflated_method_equal, NULL, (GDestroyNotify)free_inflated_method); cached = (MonoMethodInflated *)g_hash_table_lookup (mm->gmethod_cache, iresult); mono_mem_manager_unlock (mm); if (cached) { g_free (iresult); return (MonoMethod*)cached; } UnlockedIncrement (&mono_stats.inflated_method_count); UnlockedAdd (&mono_inflated_methods_size, sizeof (MonoMethodInflated)); sig = mono_method_signature_internal (method); if (!sig) { char *name = mono_type_get_full_name (method->klass); mono_error_set_bad_image (error, mono_method_get_image (method), "Could not resolve signature of method %s:%s", name, method->name); g_free (name); goto fail; } if (sig->pinvoke) { memcpy (&iresult->method.pinvoke, method, sizeof (MonoMethodPInvoke)); } else { memcpy (&iresult->method.method, method, sizeof (MonoMethod)); } result = (MonoMethod *) iresult; result->is_inflated = TRUE; result->is_generic = FALSE; result->sre_method = FALSE; result->signature = NULL; if (method->wrapper_type) { MonoMethodWrapper *mw = (MonoMethodWrapper*)method; MonoMethodWrapper *resw = (MonoMethodWrapper*)result; int len = GPOINTER_TO_INT (((void**)mw->method_data) [0]); resw->method_data = (void **)g_malloc (sizeof (gpointer) * (len + 1)); memcpy (resw->method_data, mw->method_data, sizeof (gpointer) * (len + 1)); } if (iresult->context.method_inst) { MonoGenericInst *method_inst = iresult->context.method_inst; /* 
Set the generic_container of the result to the generic_container of method */ MonoGenericContainer *generic_container = mono_method_get_generic_container (method); if (generic_container && method_inst == generic_container->context.method_inst) { result->is_generic = 1; mono_method_set_generic_container (result, generic_container); } /* Check that the method is not instantiated with any invalid types */ for (int i = 0; i < method_inst->type_argc; i++) { if (!mono_type_is_valid_generic_argument (method_inst->type_argv [i])) { mono_error_set_bad_image (error, mono_method_get_image (method), "MVAR %d cannot be expanded with type 0x%x", i, method_inst->type_argv [i]->type); goto fail; } } } if (klass_hint) { MonoGenericClass *gklass_hint = mono_class_try_get_generic_class (klass_hint); if (gklass_hint && (gklass_hint->container_class != method->klass || gklass_hint->context.class_inst != context->class_inst)) klass_hint = NULL; } if (mono_class_is_gtd (method->klass)) result->klass = klass_hint; if (!result->klass) { MonoType *inflated = inflate_generic_type (NULL, m_class_get_byval_arg (method->klass), context, error); if (!is_ok (error)) goto fail; result->klass = inflated ? mono_class_from_mono_type_internal (inflated) : method->klass; if (inflated) mono_metadata_free_type (inflated); } /* * FIXME: This should hold, but it doesn't: * * if (result->is_inflated && mono_method_get_context (result)->method_inst && * mono_method_get_context (result)->method_inst == mono_method_get_generic_container (((MonoMethodInflated*)result)->declaring)->context.method_inst) { * g_assert (result->is_generic); * } * * Fixing this here causes other things to break, hence a very * ugly hack in mini-trampolines.c - see * is_generic_method_definition(). */ // check cache mono_mem_manager_lock (mm); cached = (MonoMethodInflated *)g_hash_table_lookup (mm->gmethod_cache, iresult); if (!cached) { g_hash_table_insert (mm->gmethod_cache, iresult, iresult); iresult->owner = mm; cached = iresult; } mono_mem_manager_unlock (mm); return (MonoMethod*)cached; fail: g_free (iresult); return NULL; } /** * mono_get_inflated_method: * * Obsolete. We keep it around since it's mentioned in the public API. */ MonoMethod* mono_get_inflated_method (MonoMethod *method) { return method; } /* * mono_method_get_context_general: * @method: a method * @uninflated: handle uninflated methods? * * Returns the generic context of a method or NULL if it doesn't have * one. For an inflated method that's the context stored in the * method. Otherwise it's in the method's generic container or in the * generic container of the method's class. */ MonoGenericContext* mono_method_get_context_general (MonoMethod *method, gboolean uninflated) { if (method->is_inflated) { MonoMethodInflated *imethod = (MonoMethodInflated *) method; return &imethod->context; } if (!uninflated) return NULL; if (method->is_generic) return &(mono_method_get_generic_container (method)->context); if (mono_class_is_gtd (method->klass)) return &mono_class_get_generic_container (method->klass)->context; return NULL; } /* * mono_method_get_context: * @method: a method * * Returns the generic context for method if it's inflated, otherwise * NULL. */ MonoGenericContext* mono_method_get_context (MonoMethod *method) { return mono_method_get_context_general (method, FALSE); } /* * mono_method_get_generic_container: * * Returns the generic container of METHOD, which should be a generic method definition. * Returns NULL if METHOD is not a generic method definition. 
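 * E.g. (sketch): for a definition like Foo.Bar<T>() the container describes
 * the method type parameter T; for a non-generic method the early-out below
 * returns NULL.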
 * LOCKING: Acquires the loader lock.
 */
MonoGenericContainer*
mono_method_get_generic_container (MonoMethod *method)
{
	MonoGenericContainer *container;

	if (!method->is_generic)
		return NULL;

	container = (MonoGenericContainer *)mono_image_property_lookup (mono_method_get_image (method), method, MONO_METHOD_PROP_GENERIC_CONTAINER);
	g_assert (container);

	return container;
}

/*
 * mono_method_set_generic_container:
 *
 *   Sets the generic container of METHOD to CONTAINER.
 * LOCKING: Acquires the image lock.
 */
void
mono_method_set_generic_container (MonoMethod *method, MonoGenericContainer* container)
{
	g_assert (method->is_generic);

	mono_image_property_insert (mono_method_get_image (method), method, MONO_METHOD_PROP_GENERIC_CONTAINER, container);
}

/**
 * mono_method_set_verification_success:
 *
 * Sets a bit indicating that the method has been verified.
 *
 * LOCKING: acquires the image lock.
 */
void
mono_method_set_verification_success (MonoMethod *method)
{
	g_assert (!method->is_inflated);

	mono_image_property_insert (mono_method_get_image (method), method, MONO_METHOD_PROP_VERIFICATION_SUCCESS, GUINT_TO_POINTER(1));
}

/**
 * mono_method_get_verification_success:
 *
 * Returns \c TRUE if the method has been verified successfully.
 *
 * LOCKING: acquires the image lock.
 */
gboolean
mono_method_get_verification_success (MonoMethod *method)
{
	if (method->is_inflated)
		method = ((MonoMethodInflated *)method)->declaring;

	gpointer value = mono_image_property_lookup (mono_method_get_image (method), method, MONO_METHOD_PROP_VERIFICATION_SUCCESS);

	return value != NULL;
}

/**
 * mono_method_lookup_infrequent_bits:
 *
 * Looks for an existing \c MonoMethodDefInfrequentBits struct associated with
 * this method definition. Unlike \c mono_method_get_infrequent_bits, this
 * does not allocate a new struct if one doesn't exist.
 *
 * LOCKING: Acquires the image lock
 */
const MonoMethodDefInfrequentBits*
mono_method_lookup_infrequent_bits (MonoMethod *method)
{
	g_assert (!method->is_inflated);

	return (const MonoMethodDefInfrequentBits*)mono_image_property_lookup (mono_method_get_image (method), method, MONO_METHOD_PROP_INFREQUENT_BITS);
}

/**
 * mono_method_get_infrequent_bits:
 *
 * Looks for an existing, or allocates a new \c MonoMethodDefInfrequentBits struct for this method definition.
 * Method must not be inflated.
 *
 * Unlike \c mono_method_lookup_infrequent_bits, this will allocate a new
 * struct if the method didn't have one.
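 *
 * Typical internal use is a one-line flag update, mirroring the setters
 * further down (sketch):
 *
 *   mono_method_get_infrequent_bits (method)->is_reabstracted = 1;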
* * LOCKING: Acquires the image lock */ MonoMethodDefInfrequentBits * mono_method_get_infrequent_bits (MonoMethod *method) { g_assert (!method->is_inflated); MonoImage *image = mono_method_get_image (method); MonoMethodDefInfrequentBits *infrequent_bits = NULL; mono_image_lock (image); infrequent_bits = (MonoMethodDefInfrequentBits *)mono_image_property_lookup (image, method, MONO_METHOD_PROP_INFREQUENT_BITS); if (!infrequent_bits) { infrequent_bits = (MonoMethodDefInfrequentBits *)mono_image_alloc0 (image, sizeof (MonoMethodDefInfrequentBits)); mono_image_property_insert (image, method, MONO_METHOD_PROP_INFREQUENT_BITS, infrequent_bits); } mono_image_unlock (image); return infrequent_bits; } gboolean mono_method_get_is_reabstracted (MonoMethod *method) { if (method->is_inflated) method = ((MonoMethodInflated*)method)->declaring; const MonoMethodDefInfrequentBits *infrequent_bits = mono_method_lookup_infrequent_bits (method); return infrequent_bits != NULL && infrequent_bits->is_reabstracted; } gboolean mono_method_get_is_covariant_override_impl (MonoMethod *method) { if (method->is_inflated) method = ((MonoMethodInflated*)method)->declaring; const MonoMethodDefInfrequentBits *infrequent_bits = mono_method_lookup_infrequent_bits (method); return infrequent_bits != NULL && infrequent_bits->is_covariant_override_impl; } /** * mono_method_set_is_reabstracted: * * Sets the \c MonoMethodDefInfrequentBits:is_reabstracted bit for this method * definition. The bit means that the method is a default interface method * that used to have a default implementation in an ancestor interface, but is * now abstract once again. * * LOCKING: Assumes the loader lock is held */ void mono_method_set_is_reabstracted (MonoMethod *method) { mono_method_get_infrequent_bits (method)->is_reabstracted = 1; } /** * mono_method_set_is_covariant_override_impl: * * Sets the \c MonoMethodDefInfrequentBits:is_covariant_override_impl bit for * this method definition. The bit means that the method is an override with a * signature that is not equal to the signature of the method that it is * overriding. * * LOCKING: Assumes the loader lock is held */ void mono_method_set_is_covariant_override_impl (MonoMethod *method) { mono_method_get_infrequent_bits (method)->is_covariant_override_impl = 1; } /** * mono_class_find_enum_basetype: * \param class The enum class * * Determine the basetype of an enum by iterating through its fields. We do this * in a separate function since it is cheaper than calling mono_class_setup_fields. */ MonoType* mono_class_find_enum_basetype (MonoClass *klass, MonoError *error) { MonoGenericContainer *container = NULL; MonoImage *image = m_class_get_image (klass); const int top = mono_class_get_field_count (klass); int i, first_field_idx; g_assert (m_class_is_enumtype (klass)); error_init (error); container = mono_class_try_get_generic_container (klass); if (mono_class_is_ginst (klass)) { MonoClass *gklass = mono_class_get_generic_class (klass)->container_class; container = mono_class_get_generic_container (gklass); g_assert (container); } /* * Fetch all the field information. 
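 * (An enum has a single instance field -- conventionally named "value__" --
 * and per the ECMA rules its type is the underlying type, so the loop below
 * skips static fields and returns the type of the first instance field.)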
 */
	first_field_idx = mono_class_get_first_field_idx (klass);
	for (i = 0; i < top; i++){
		const char *sig;
		guint32 cols [MONO_FIELD_SIZE];
		int idx = first_field_idx + i;
		MonoType *ftype;

		/* first_field_idx and idx point into the fieldptr table */
		mono_metadata_decode_table_row (image, MONO_TABLE_FIELD, idx, cols, MONO_FIELD_SIZE);

		if (cols [MONO_FIELD_FLAGS] & FIELD_ATTRIBUTE_STATIC) //no need to decode static fields
			continue;

		sig = mono_metadata_blob_heap (image, cols [MONO_FIELD_SIGNATURE]);
		mono_metadata_decode_value (sig, &sig);
		/* FIELD signature == 0x06 */
		if (*sig != 0x06) {
			mono_error_set_bad_image (error, image, "Invalid field signature %x, expected 0x6 but got %x", cols [MONO_FIELD_SIGNATURE], *sig);
			goto fail;
		}

		ftype = mono_metadata_parse_type_checked (image, container, cols [MONO_FIELD_FLAGS], FALSE, sig + 1, &sig, error);
		if (!ftype)
			goto fail;

		if (mono_class_is_ginst (klass)) {
			//FIXME do we leak here?
			ftype = mono_class_inflate_generic_type_checked (ftype, mono_class_get_context (klass), error);
			if (!is_ok (error))
				goto fail;
			ftype->attrs = cols [MONO_FIELD_FLAGS];
		}

		return ftype;
	}
	mono_error_set_type_load_class (error, klass, "Could not find base type");

fail:
	return NULL;
}

/*
 * Checks for MonoClass::has_failure without resolving all MonoType's into MonoClass'es
 */
gboolean
mono_type_has_exceptions (MonoType *type)
{
	switch (type->type) {
	case MONO_TYPE_CLASS:
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_SZARRAY:
		return mono_class_has_failure (type->data.klass);
	case MONO_TYPE_ARRAY:
		return mono_class_has_failure (type->data.array->eklass);
	case MONO_TYPE_GENERICINST:
		return mono_class_has_failure (mono_class_create_generic_inst (type->data.generic_class));
	default:
		return FALSE;
	}
}

void
mono_error_set_for_class_failure (MonoError *oerror, const MonoClass *klass)
{
	g_assert (mono_class_has_failure (klass));
	MonoErrorBoxed *box = mono_class_get_exception_data ((MonoClass*)klass);
	mono_error_set_from_boxed (oerror, box);
}

/*
 * mono_class_alloc:
 *
 *   Allocate memory for data belonging to CLASS.
 */
gpointer
mono_class_alloc (MonoClass *klass, int size)
{
	return m_class_alloc (klass, size);
}

gpointer
(mono_class_alloc0) (MonoClass *klass, int size)
{
	return m_class_alloc0 (klass, size);
}

#define mono_class_new0(klass,struct_type, n_structs)		\
	((struct_type *) mono_class_alloc0 ((klass), ((gsize) sizeof (struct_type)) * ((gsize) (n_structs))))

/**
 * mono_class_set_type_load_failure_causedby_class:
 * \param klass the class that is failing
 * \param caused_by the class that caused the failure
 * \param msg Why \p klass is failing.
 *
 * If \p caused_by has a failure, sets a TypeLoadException failure on
 * \p klass with message "\p msg, due to: {\p caused_by message}".
 *
 * \returns TRUE if a failure was set, or FALSE if \p caused_by doesn't have a failure.
 */
gboolean
mono_class_set_type_load_failure_causedby_class (MonoClass *klass, const MonoClass *caused_by, const gchar* msg)
{
	if (mono_class_has_failure (caused_by)) {
		ERROR_DECL (cause_error);
		mono_error_set_for_class_failure (cause_error, caused_by);
		mono_class_set_type_load_failure (klass, "%s, due to: %s", msg, mono_error_get_message (cause_error));
		mono_error_cleanup (cause_error);
		return TRUE;
	} else {
		return FALSE;
	}
}

/*
 * mono_type_get_basic_type_from_generic:
 * @type: a type
 *
 * Returns a closed type corresponding to the possibly open type
 * passed to it.
 */
MonoType*
mono_type_get_basic_type_from_generic (MonoType *type)
{
	/* When we do generic sharing we let type variables stand for reference/primitive types.
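	 * E.g. (sketch): a shared T whose gshared constraint is object is mapped
	 * to MONO_TYPE_OBJECT below, so callers see System.Object rather than an
	 * open type variable.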
	 */
	if (!m_type_is_byref (type) && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) &&
	    (!type->data.generic_param->gshared_constraint || type->data.generic_param->gshared_constraint->type == MONO_TYPE_OBJECT))
		return mono_get_object_type ();
	return type;
}

/*
 * mono_class_get_method_by_index:
 *
 *   Returns klass->methods [index], initializing klass->methods if necessary.
 *
 * LOCKING: Acquires the loader lock.
 */
MonoMethod*
mono_class_get_method_by_index (MonoClass *klass, int index)
{
	ERROR_DECL (error);

	MonoGenericClass *gklass = mono_class_try_get_generic_class (klass);
	/* Avoid calling setup_methods () if possible */
	if (gklass && !m_class_get_methods (klass)) {
		MonoMethod *m;

		m = mono_class_inflate_generic_method_full_checked (
				m_class_get_methods (gklass->container_class) [index], klass, mono_class_get_context (klass), error);
		g_assert (is_ok (error)); /* FIXME don't swallow the error */
		/*
		 * If setup_methods () is called later for this class, no duplicates are created,
		 * since inflate_generic_method guarantees that only one instance of a method
		 * is created for each context.
		 */
		/*
		mono_class_setup_methods (klass);
		g_assert (m == klass->methods [index]);
		*/
		return m;
	} else {
		mono_class_setup_methods (klass);
		if (mono_class_has_failure (klass)) /*FIXME do proper error handling*/
			return NULL;
		g_assert (index >= 0 && index < mono_class_get_method_count (klass));
		return m_class_get_methods (klass) [index];
	}
}

/**
 * mono_class_get_inflated_method:
 * \param klass an inflated class
 * \param method a method of \p klass's generic definition
 * \param error set on error
 *
 * Given an inflated class \p klass and a method \p method which should be a
 * method of \p klass's generic definition, return the inflated method
 * corresponding to \p method.
 *
 * On failure sets \p error and returns NULL.
 */
MonoMethod*
mono_class_get_inflated_method (MonoClass *klass, MonoMethod *method, MonoError *error)
{
	MonoClass *gklass = mono_class_get_generic_class (klass)->container_class;
	int i, mcount;

	g_assert (method->klass == gklass);

	mono_class_setup_methods (gklass);
	if (mono_class_has_failure (gklass)) {
		mono_error_set_for_class_failure (error, gklass);
		return NULL;
	}

	MonoMethod **gklass_methods = m_class_get_methods (gklass);
	mcount = mono_class_get_method_count (gklass);
	for (i = 0; i < mcount; ++i) {
		if (gklass_methods [i] == method) {
			MonoMethod *inflated_method = NULL;
			MonoMethod **klass_methods = m_class_get_methods (klass);
			if (klass_methods) {
				inflated_method = klass_methods [i];
			} else {
				inflated_method = mono_class_inflate_generic_method_full_checked (gklass_methods [i], klass, mono_class_get_context (klass), error);
				return_val_if_nok (error, NULL);
			}
			g_assert (inflated_method);
			return inflated_method;
		}
	}

	g_assert_not_reached ();
}

/*
 * mono_class_get_vtable_entry:
 *
 *   Returns klass->vtable [offset], computing it if necessary. Returns NULL on failure.
 *
 * LOCKING: Acquires the loader lock.
 */
MonoMethod*
mono_class_get_vtable_entry (MonoClass *klass, int offset)
{
	MonoMethod *m;

	if (m_class_get_rank (klass) == 1) {
		MonoClass *klass_parent = m_class_get_parent (klass);
		/*
		 * szarrays do not overwrite any methods of Array, so we can avoid
		 * initializing their vtables in some cases.
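		 * (E.g. a virtual method int[] inherits from System.Array can be
		 * resolved through the parent vtable slot without initializing the
		 * array class's own vtable.)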
*/ mono_class_setup_vtable (klass_parent); if (offset < m_class_get_vtable_size (klass_parent)) return m_class_get_vtable (klass_parent) [offset]; } if (mono_class_is_ginst (klass)) { ERROR_DECL (error); MonoClass *gklass = mono_class_get_generic_class (klass)->container_class; mono_class_setup_vtable (gklass); m = m_class_get_vtable (gklass) [offset]; m = mono_class_inflate_generic_method_full_checked (m, klass, mono_class_get_context (klass), error); g_assert (is_ok (error)); /* FIXME don't swallow this error */ } else { mono_class_setup_vtable (klass); if (mono_class_has_failure (klass)) return NULL; m = m_class_get_vtable (klass) [offset]; } return m; } /* * mono_class_get_vtable_size: * * Return the vtable size for KLASS. */ int mono_class_get_vtable_size (MonoClass *klass) { mono_class_setup_vtable (klass); return m_class_get_vtable_size (klass); } static void collect_implemented_interfaces_aux (MonoClass *klass, GPtrArray **res, GHashTable **ifaces, MonoError *error) { int i; MonoClass *ic; mono_class_setup_interfaces (klass, error); return_if_nok (error); MonoClass **klass_interfaces = m_class_get_interfaces (klass); for (i = 0; i < m_class_get_interface_count (klass); i++) { ic = klass_interfaces [i]; if (*res == NULL) *res = g_ptr_array_new (); if (*ifaces == NULL) *ifaces = g_hash_table_new (NULL, NULL); if (g_hash_table_lookup (*ifaces, ic)) continue; /* A gparam is not an implemented interface for the purposes of * mono_class_get_implemented_interfaces */ if (mono_class_is_gparam (ic)) continue; g_ptr_array_add (*res, ic); g_hash_table_insert (*ifaces, ic, ic); mono_class_init_internal (ic); if (mono_class_has_failure (ic)) { mono_error_set_type_load_class (error, ic, "Error Loading class"); return; } collect_implemented_interfaces_aux (ic, res, ifaces, error); return_if_nok (error); } } GPtrArray* mono_class_get_implemented_interfaces (MonoClass *klass, MonoError *error) { GPtrArray *res = NULL; GHashTable *ifaces = NULL; collect_implemented_interfaces_aux (klass, &res, &ifaces, error); if (ifaces) g_hash_table_destroy (ifaces); if (!is_ok (error)) { if (res) g_ptr_array_free (res, TRUE); return NULL; } return res; } /*FIXME verify all callers if they should switch to mono_class_interface_offset_with_variance*/ int mono_class_interface_offset (MonoClass *klass, MonoClass *itf) { int i; MonoClass **klass_interfaces_packed = m_class_get_interfaces_packed (klass); for (i = m_class_get_interface_offsets_count (klass) -1 ; i >= 0 ; i-- ){ MonoClass *result = klass_interfaces_packed[i]; if (m_class_get_interface_id(result) == m_class_get_interface_id(itf)) { return m_class_get_interface_offsets_packed (klass) [i]; } } return -1; } /** * mono_class_interface_offset_with_variance: * * Return the interface offset of \p itf in \p klass. Sets \p non_exact_match to TRUE if the match required variance check * If \p itf is an interface with generic variant arguments, try to find the compatible one. * * Note that this function is responsible for resolving ambiguities. Right now we use whatever ordering interfaces_packed gives us. * * FIXME figure out MS disambiguation rules and fix this function. 
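 *
 * E.g. (sketch): looking up IEnumerable<object> on a class that only lists
 * IEnumerable<string> succeeds through the variance scan below and sets
 * *non_exact_match to TRUE.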
 */
int
mono_class_interface_offset_with_variance (MonoClass *klass, MonoClass *itf, gboolean *non_exact_match)
{
	int i = mono_class_interface_offset (klass, itf);
	*non_exact_match = FALSE;
	if (i >= 0)
		return i;

	int klass_interface_offsets_count = m_class_get_interface_offsets_count (klass);

	if (m_class_is_array_special_interface (itf) && m_class_get_rank (klass) < 2) {
		MonoClass *gtd = mono_class_get_generic_type_definition (itf);
		int found = -1;

		for (i = 0; i < klass_interface_offsets_count; i++) {
			if (mono_class_is_variant_compatible (itf, m_class_get_interfaces_packed (klass) [i], FALSE)) {
				found = i;
				*non_exact_match = TRUE;
				break;
			}
		}
		if (found != -1)
			return m_class_get_interface_offsets_packed (klass) [found];

		for (i = 0; i < klass_interface_offsets_count; i++) {
			if (mono_class_get_generic_type_definition (m_class_get_interfaces_packed (klass) [i]) == gtd) {
				found = i;
				*non_exact_match = TRUE;
				break;
			}
		}
		if (found == -1)
			return -1;

		return m_class_get_interface_offsets_packed (klass) [found];
	}

	if (!mono_class_has_variant_generic_params (itf))
		return -1;

	for (i = 0; i < klass_interface_offsets_count; i++) {
		if (mono_class_is_variant_compatible (itf, m_class_get_interfaces_packed (klass) [i], FALSE)) {
			*non_exact_match = TRUE;
			return m_class_get_interface_offsets_packed (klass) [i];
		}
	}

	return -1;
}

/*
 * mono_method_get_vtable_slot:
 *
 *   Returns method->slot, computing it if necessary. Returns -1 on failure.
 * LOCKING: Acquires the loader lock.
 *
 * FIXME Use proper MonoError machinery here.
 */
int
mono_method_get_vtable_slot (MonoMethod *method)
{
	if (method->slot == -1) {
		mono_class_setup_vtable (method->klass);
		if (mono_class_has_failure (method->klass))
			return -1;
		if (method->slot == -1) {
			MonoClass *gklass;
			int i, mcount;

			if (!mono_class_is_ginst (method->klass)) {
				g_assert (method->is_inflated);
				return mono_method_get_vtable_slot (((MonoMethodInflated*)method)->declaring);
			}

			/* This can happen for abstract methods of generic instances due to the shortcut code in mono_class_setup_vtable_general (). */
			g_assert (mono_class_is_ginst (method->klass));
			gklass = mono_class_get_generic_class (method->klass)->container_class;
			mono_class_setup_methods (method->klass);
			MonoMethod **klass_methods = m_class_get_methods (method->klass);
			g_assert (klass_methods);
			mcount = mono_class_get_method_count (method->klass);
			for (i = 0; i < mcount; ++i) {
				if (klass_methods [i] == method)
					break;
			}
			g_assert (i < mcount);
			g_assert (m_class_get_methods (gklass));
			method->slot = m_class_get_methods (gklass) [i]->slot;
		}
		g_assert (method->slot != -1);
	}
	return method->slot;
}

/**
 * mono_method_get_vtable_index:
 * \param method a method
 *
 * Returns the index into the runtime vtable to access the method or,
 * in the case of a virtual generic method, the virtual generic method
 * thunk. Returns -1 on failure.
 *
 * FIXME Use proper MonoError machinery here.
 */
int
mono_method_get_vtable_index (MonoMethod *method)
{
	if (method->is_inflated && (method->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
		MonoMethodInflated *imethod = (MonoMethodInflated*)method;
		if (imethod->declaring->is_generic)
			return mono_method_get_vtable_slot (imethod->declaring);
	}
	return mono_method_get_vtable_slot (method);
}

/*
 * mono_class_has_finalizer:
 *
 *   Returns whether KLASS has a finalizer, initializing klass->has_finalizer in the
 * process.
* * LOCKING: Acquires the loader lock; */ gboolean mono_class_has_finalizer (MonoClass *klass) { if (!m_class_is_has_finalize_inited (klass)) mono_class_setup_has_finalizer (klass); return m_class_has_finalize (klass); } gboolean mono_is_corlib_image (MonoImage *image) { return image == mono_defaults.corlib; } /** Is klass a Nullable<T> ginst? */ gboolean mono_class_is_nullable (MonoClass *klass) { MonoGenericClass *gklass = mono_class_try_get_generic_class (klass); return gklass && gklass->container_class == mono_defaults.generic_nullable_class; } /** if klass is T? return T */ MonoClass* mono_class_get_nullable_param_internal (MonoClass *klass) { g_assert (mono_class_is_nullable (klass)); return mono_class_from_mono_type_internal (mono_class_get_generic_class (klass)->context.class_inst->type_argv [0]); } MonoClass* mono_class_get_nullable_param (MonoClass *klass) { MonoClass *result = NULL; MONO_ENTER_GC_UNSAFE; result = mono_class_get_nullable_param_internal (klass); MONO_EXIT_GC_UNSAFE; return result; } gboolean mono_type_is_primitive (MonoType *type) { return (type->type >= MONO_TYPE_BOOLEAN && type->type <= MONO_TYPE_R8) || type-> type == MONO_TYPE_I || type->type == MONO_TYPE_U; } static MonoImage * get_image_for_container (MonoGenericContainer *container) { MonoImage *result; if (container->is_anonymous) { result = container->owner.image; } else { MonoClass *klass; if (container->is_method) { MonoMethod *method = container->owner.method; g_assert_checked (method); klass = method->klass; } else { klass = container->owner.klass; } g_assert_checked (klass); result = m_class_get_image (klass); } g_assert (result); return result; } MonoImage * mono_get_image_for_generic_param (MonoGenericParam *param) { MonoGenericContainer *container = mono_generic_param_owner (param); g_assert_checked (container); return get_image_for_container (container); } // Make a string in the designated image consisting of a single integer. #define INT_STRING_SIZE 16 char * mono_make_generic_name_string (MonoImage *image, int num) { char *name = (char *)mono_image_alloc0 (image, INT_STRING_SIZE); g_snprintf (name, INT_STRING_SIZE, "%d", num); return name; } /** * mono_class_from_generic_parameter: * \param param Parameter to find/construct a class for. * \param arg2 Is ignored. * \param arg3 Is ignored. */ MonoClass * mono_class_from_generic_parameter (MonoGenericParam *param, MonoImage *arg2 G_GNUC_UNUSED, gboolean arg3 G_GNUC_UNUSED) { return mono_class_create_generic_parameter (param); } /** * mono_ptr_class_get: */ MonoClass * mono_ptr_class_get (MonoType *type) { return mono_class_create_ptr (type); } /** * mono_class_from_mono_type: * \param type describes the type to return * \returns a \c MonoClass for the specified \c MonoType, the value is never NULL. */ MonoClass * mono_class_from_mono_type (MonoType *type) { MonoClass *result; MONO_ENTER_GC_UNSAFE; result = mono_class_from_mono_type_internal (type); MONO_EXIT_GC_UNSAFE; return result; } MonoClass * mono_class_from_mono_type_internal (MonoType *type) { g_assert (type); switch (type->type) { case MONO_TYPE_OBJECT: return type->data.klass? type->data.klass: mono_defaults.object_class; case MONO_TYPE_VOID: return type->data.klass? type->data.klass: mono_defaults.void_class; case MONO_TYPE_BOOLEAN: return type->data.klass? type->data.klass: mono_defaults.boolean_class; case MONO_TYPE_CHAR: return type->data.klass? type->data.klass: mono_defaults.char_class; case MONO_TYPE_I1: return type->data.klass? 
type->data.klass: mono_defaults.sbyte_class; case MONO_TYPE_U1: return type->data.klass? type->data.klass: mono_defaults.byte_class; case MONO_TYPE_I2: return type->data.klass? type->data.klass: mono_defaults.int16_class; case MONO_TYPE_U2: return type->data.klass? type->data.klass: mono_defaults.uint16_class; case MONO_TYPE_I4: return type->data.klass? type->data.klass: mono_defaults.int32_class; case MONO_TYPE_U4: return type->data.klass? type->data.klass: mono_defaults.uint32_class; case MONO_TYPE_I: return type->data.klass? type->data.klass: mono_defaults.int_class; case MONO_TYPE_U: return type->data.klass? type->data.klass: mono_defaults.uint_class; case MONO_TYPE_I8: return type->data.klass? type->data.klass: mono_defaults.int64_class; case MONO_TYPE_U8: return type->data.klass? type->data.klass: mono_defaults.uint64_class; case MONO_TYPE_R4: return type->data.klass? type->data.klass: mono_defaults.single_class; case MONO_TYPE_R8: return type->data.klass? type->data.klass: mono_defaults.double_class; case MONO_TYPE_STRING: return type->data.klass? type->data.klass: mono_defaults.string_class; case MONO_TYPE_TYPEDBYREF: return type->data.klass? type->data.klass: mono_defaults.typed_reference_class; case MONO_TYPE_ARRAY: return mono_class_create_bounded_array (type->data.array->eklass, type->data.array->rank, TRUE); case MONO_TYPE_PTR: return mono_class_create_ptr (type->data.type); case MONO_TYPE_FNPTR: return mono_class_create_fnptr (type->data.method); case MONO_TYPE_SZARRAY: return mono_class_create_array (type->data.klass, 1); case MONO_TYPE_CLASS: case MONO_TYPE_VALUETYPE: return type->data.klass; case MONO_TYPE_GENERICINST: return mono_class_create_generic_inst (type->data.generic_class); case MONO_TYPE_MVAR: case MONO_TYPE_VAR: return mono_class_create_generic_parameter (type->data.generic_param); default: g_warning ("mono_class_from_mono_type_internal: implement me 0x%02x\n", type->type); g_assert_not_reached (); } // Yes, this returns NULL, even if it is documented as not doing so, but there // is no way for the code to make it this far, due to the assert above. 
	return NULL;
}

/**
 * mono_type_retrieve_from_typespec
 * \param image context where the image is created
 * \param type_spec typespec token
 * \param context the generic context used to evaluate generic instantiations in
 */
static MonoType *
mono_type_retrieve_from_typespec (MonoImage *image, guint32 type_spec, MonoGenericContext *context, gboolean *did_inflate, MonoError *error)
{
	MonoType *t = mono_type_create_from_typespec_checked (image, type_spec, error);
	*did_inflate = FALSE;
	if (!t)
		return NULL;
	if (context && (context->class_inst || context->method_inst)) {
		MonoType *inflated = inflate_generic_type (NULL, t, context, error);
		if (!is_ok (error)) {
			return NULL;
		}

		if (inflated) {
			t = inflated;
			*did_inflate = TRUE;
		}
	}
	return t;
}

/**
 * mono_class_create_from_typespec
 * \param image context where the image is created
 * \param type_spec typespec token
 * \param context the generic context used to evaluate generic instantiations in
 */
static MonoClass *
mono_class_create_from_typespec (MonoImage *image, guint32 type_spec, MonoGenericContext *context, MonoError *error)
{
	MonoClass *ret;
	gboolean inflated = FALSE;
	MonoType *t = mono_type_retrieve_from_typespec (image, type_spec, context, &inflated, error);
	return_val_if_nok (error, NULL);
	ret = mono_class_from_mono_type_internal (t);
	if (inflated)
		mono_metadata_free_type (t);
	return ret;
}

/**
 * mono_bounded_array_class_get:
 * \param element_class element class
 * \param rank the dimension of the array class
 * \param bounded whether the array has non-zero bounds
 * \returns A class object describing the array with element type \p element_type and
 * dimension \p rank.
 */
MonoClass *
mono_bounded_array_class_get (MonoClass *eclass, guint32 rank, gboolean bounded)
{
	return mono_class_create_bounded_array (eclass, rank, bounded);
}

/**
 * mono_array_class_get:
 * \param element_class element class
 * \param rank the dimension of the array class
 * \returns A class object describing the array with element type \p element_type and
 * dimension \p rank.
 */
MonoClass *
mono_array_class_get (MonoClass *eclass, guint32 rank)
{
	return mono_class_create_array (eclass, rank);
}

/**
 * mono_class_instance_size:
 * \param klass a class
 *
 * Used to get the size of a class in bytes.
 *
 * \returns The size of an object instance
 */
gint32
mono_class_instance_size (MonoClass *klass)
{
	if (!m_class_is_size_inited (klass))
		mono_class_init_internal (klass);

	return m_class_get_instance_size (klass);
}

/**
 * mono_class_min_align:
 * \param klass a class
 *
 * Used to get the computed minimum alignment requirements for the specified class.
 *
 * Returns: minimum alignment requirements
 */
gint32
mono_class_min_align (MonoClass *klass)
{
	if (!m_class_is_size_inited (klass))
		mono_class_init_internal (klass);

	return m_class_get_min_align (klass);
}

/**
 * mono_class_data_size:
 * \param klass a class
 *
 * \returns The size of the static class data
 */
gint32
mono_class_data_size (MonoClass *klass)
{
	if (!m_class_is_inited (klass))
		mono_class_init_internal (klass);
	/* This can happen with dynamically created types */
	if (!m_class_is_fields_inited (klass))
		mono_class_setup_fields (klass);

	/* in arrays, sizes.class_size is unioned with element_size
	 * and arrays have no static fields */
	if (m_class_get_rank (klass))
		return 0;
	return m_class_get_sizes (klass).class_size;
}

/*
 * Auxiliary routine to mono_class_get_field
 *
 * Takes a field index instead of a field token.
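 * E.g. the field token 0x04000001 (MONO_TOKEN_FIELD_DEF | 1) corresponds to
 * index 0 here, since metadata token indexes are 1-based.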
 */
static MonoClassField *
mono_class_get_field_idx (MonoClass *klass, int idx)
{
	mono_class_setup_fields (klass);
	if (mono_class_has_failure (klass))
		return NULL;

	while (klass) {
		int first_field_idx = mono_class_get_first_field_idx (klass);
		int fcount = mono_class_get_field_count (klass);
		MonoImage *klass_image = m_class_get_image (klass);
		MonoClassField *klass_fields = m_class_get_fields (klass);
		if (klass_image->uncompressed_metadata) {
			/*
			 * first_field_idx points to the FieldPtr table, while idx points into the
			 * Field table, so we have to do a search.
			 */
			/*FIXME this is broken for types with multiple fields with the same name.*/
			const char *name = mono_metadata_string_heap (klass_image, mono_metadata_decode_row_col (&klass_image->tables [MONO_TABLE_FIELD], idx, MONO_FIELD_NAME));
			int i;

			for (i = 0; i < fcount; ++i)
				if (mono_field_get_name (&klass_fields [i]) == name)
					return &klass_fields [i];
			g_assert_not_reached ();
		} else {
			if (fcount) {
				if ((idx >= first_field_idx) && (idx < first_field_idx + fcount)){
					return &klass_fields [idx - first_field_idx];
				}
			}
			if (G_UNLIKELY (m_class_get_image (klass)->has_updates && mono_class_has_metadata_update_info (klass))) {
				uint32_t token = mono_metadata_make_token (MONO_TABLE_FIELD, idx + 1);
				return mono_metadata_update_get_field (klass, token);
			}
		}
		klass = m_class_get_parent (klass);
	}
	return NULL;
}

/**
 * mono_class_get_field:
 * \param class the class to lookup the field.
 * \param field_token the field token
 *
 * \returns A \c MonoClassField representing the type and offset of
 * the field, or a NULL value if the field does not belong to this
 * class.
 */
MonoClassField *
mono_class_get_field (MonoClass *klass, guint32 field_token)
{
	int idx = mono_metadata_token_index (field_token);

	g_assert (mono_metadata_token_code (field_token) == MONO_TOKEN_FIELD_DEF);

	return mono_class_get_field_idx (klass, idx - 1);
}

/**
 * mono_class_get_field_from_name:
 * \param klass the class to lookup the field.
 * \param name the field name
 *
 * Search the class \p klass and its parents for a field with the name \p name.
 *
 * \returns The \c MonoClassField pointer of the named field or NULL
 */
MonoClassField *
mono_class_get_field_from_name (MonoClass *klass, const char *name)
{
	MonoClassField *result;
	MONO_ENTER_GC_UNSAFE;
	result = mono_class_get_field_from_name_full (klass, name, NULL);
	MONO_EXIT_GC_UNSAFE;
	return result;
}

/**
 * mono_class_get_field_from_name_full:
 * \param klass the class to lookup the field.
 * \param name the field name
 * \param type the type of the fields. This is optional.
 *
 * Search the class \p klass and its parents for a field with the name \p name and type \p type.
 *
 * If \p klass is an inflated generic type, the type comparison is done with the equivalent field
 * of its generic type definition.
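 *
 * For example (illustrative only), to find a field named "value" regardless
 * of its type:
 *
 *   MonoClassField *f = mono_class_get_field_from_name_full (klass, "value", NULL);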
 *
 * \returns The MonoClassField pointer of the named field or NULL
 */
MonoClassField *
mono_class_get_field_from_name_full (MonoClass *klass, const char *name, MonoType *type)
{
	MONO_REQ_GC_UNSAFE_MODE;

	int i;

	mono_class_setup_fields (klass);
	if (mono_class_has_failure (klass))
		return NULL;

	while (klass) {
		int fcount = mono_class_get_field_count (klass);
		for (i = 0; i < fcount; ++i) {
			MonoClassField *field = &m_class_get_fields (klass) [i];

			if (strcmp (name, mono_field_get_name (field)) != 0)
				continue;

			if (type) {
				MonoType *field_type = mono_metadata_get_corresponding_field_from_generic_type_definition (field)->type;
				if (!mono_metadata_type_equal_full (type, field_type, TRUE))
					continue;
			}
			return field;
		}
		klass = m_class_get_parent (klass);
	}
	return NULL;
}

/**
 * mono_class_get_field_token:
 * \param field the field we need the token of
 *
 * Get the token of a field. Note that the token is only valid for the image
 * the field was loaded from. Don't use this function for fields in dynamic types.
 *
 * \returns The token representing the field in the image it was loaded from.
 */
guint32
mono_class_get_field_token (MonoClassField *field)
{
	MonoClass *klass = m_field_get_parent (field);
	int i;

	mono_class_setup_fields (klass);

	while (klass) {
		MonoClassField *klass_fields = m_class_get_fields (klass);
		if (!klass_fields)
			return 0;
		int first_field_idx = mono_class_get_first_field_idx (klass);
		int fcount = mono_class_get_field_count (klass);
		for (i = 0; i < fcount; ++i) {
			if (&klass_fields [i] == field) {
				int idx = first_field_idx + i + 1;

				if (m_class_get_image (klass)->uncompressed_metadata)
					idx = mono_metadata_translate_token_index (m_class_get_image (klass), MONO_TABLE_FIELD, idx);
				return mono_metadata_make_token (MONO_TABLE_FIELD, idx);
			}
		}
		klass = m_class_get_parent (klass);
	}

	g_assert_not_reached ();
	return 0;
}

static int
mono_field_get_index (MonoClassField *field)
{
	int index = field - m_class_get_fields (m_field_get_parent (field));
	g_assert (index >= 0 && index < mono_class_get_field_count (m_field_get_parent (field)));

	return index;
}

/*
 * mono_class_get_field_default_value:
 *
 * Return the default value of the field as a pointer into the metadata blob.
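 *
 * Illustrative decoding sketch: the blob must be interpreted according to the
 * MonoTypeEnum returned through def_type, e.g.:
 *
 *   MonoTypeEnum def_type;
 *   const char *blob = mono_class_get_field_default_value (field, &def_type);
 *   ... decode blob based on def_type ...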
*/ const char* mono_class_get_field_default_value (MonoClassField *field, MonoTypeEnum *def_type) { guint32 cindex; guint32 constant_cols [MONO_CONSTANT_SIZE]; int field_index; MonoClass *klass = m_field_get_parent (field); MonoFieldDefaultValue *def_values; g_assert (field->type->attrs & FIELD_ATTRIBUTE_HAS_DEFAULT); def_values = mono_class_get_field_def_values (klass); if (!def_values) { def_values = (MonoFieldDefaultValue *)mono_class_alloc0 (klass, sizeof (MonoFieldDefaultValue) * mono_class_get_field_count (klass)); mono_class_set_field_def_values (klass, def_values); } field_index = mono_field_get_index (field); if (!def_values [field_index].data) { MonoImage *field_parent_image = m_class_get_image (m_field_get_parent (field)); cindex = mono_metadata_get_constant_index (field_parent_image, mono_class_get_field_token (field), 0); if (!cindex) return NULL; g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA)); mono_metadata_decode_row (&field_parent_image->tables [MONO_TABLE_CONSTANT], cindex - 1, constant_cols, MONO_CONSTANT_SIZE); def_values [field_index].def_type = (MonoTypeEnum)constant_cols [MONO_CONSTANT_TYPE]; mono_memory_barrier (); def_values [field_index].data = (const char *)mono_metadata_blob_heap (field_parent_image, constant_cols [MONO_CONSTANT_VALUE]); } *def_type = def_values [field_index].def_type; return def_values [field_index].data; } static int mono_property_get_index (MonoProperty *prop) { MonoClassPropertyInfo *info = mono_class_get_property_info (prop->parent); int index = prop - info->properties; g_assert (index >= 0 && index < info->count); return index; } /* * mono_class_get_property_default_value: * * Return the default value of the field as a pointer into the metadata blob. */ const char* mono_class_get_property_default_value (MonoProperty *property, MonoTypeEnum *def_type) { guint32 cindex; guint32 constant_cols [MONO_CONSTANT_SIZE]; MonoClass *klass = property->parent; MonoImage *klass_image = m_class_get_image (klass); g_assert (property->attrs & PROPERTY_ATTRIBUTE_HAS_DEFAULT); /* * We don't cache here because it is not used by C# so it's quite rare, but * we still do the lookup in klass->ext because that is where the data * is stored for dynamic assemblies. 
*/ if (image_is_dynamic (klass_image)) { MonoClassPropertyInfo *info = mono_class_get_property_info (klass); int prop_index = mono_property_get_index (property); if (info->def_values && info->def_values [prop_index].data) { *def_type = info->def_values [prop_index].def_type; return info->def_values [prop_index].data; } return NULL; } cindex = mono_metadata_get_constant_index (klass_image, mono_class_get_property_token (property), 0); if (!cindex) return NULL; mono_metadata_decode_row (&klass_image->tables [MONO_TABLE_CONSTANT], cindex - 1, constant_cols, MONO_CONSTANT_SIZE); *def_type = (MonoTypeEnum)constant_cols [MONO_CONSTANT_TYPE]; return (const char *)mono_metadata_blob_heap (klass_image, constant_cols [MONO_CONSTANT_VALUE]); } /** * mono_class_get_event_token: */ guint32 mono_class_get_event_token (MonoEvent *event) { MonoClass *klass = event->parent; int i; while (klass) { MonoClassEventInfo *info = mono_class_get_event_info (klass); if (info) { for (i = 0; i < info->count; ++i) { if (&info->events [i] == event) return mono_metadata_make_token (MONO_TABLE_EVENT, info->first + i + 1); } } klass = m_class_get_parent (klass); } g_assert_not_reached (); return 0; } MonoProperty* mono_class_get_property_from_name_internal (MonoClass *klass, const char *name) { MONO_REQ_GC_UNSAFE_MODE; while (klass) { MonoProperty* p; gpointer iter = NULL; while ((p = mono_class_get_properties (klass, &iter))) { if (! strcmp (name, p->name)) return p; } klass = m_class_get_parent (klass); } return NULL; } /** * mono_class_get_property_token: * \param prop MonoProperty to query * * \returns The ECMA token for the specified property. */ guint32 mono_class_get_property_token (MonoProperty *prop) { MonoClass *klass = prop->parent; while (klass) { MonoProperty* p; int i = 0; gpointer iter = NULL; MonoClassPropertyInfo *info = mono_class_get_property_info (klass); while ((p = mono_class_get_properties (klass, &iter))) { if (&info->properties [i] == prop) return mono_metadata_make_token (MONO_TABLE_PROPERTY, info->first + i + 1); i ++; } klass = m_class_get_parent (klass); } g_assert_not_reached (); return 0; } /** * mono_class_name_from_token: */ char * mono_class_name_from_token (MonoImage *image, guint32 type_token) { const char *name, *nspace; if (image_is_dynamic (image)) return g_strdup_printf ("DynamicType 0x%08x", type_token); switch (type_token & 0xff000000){ case MONO_TOKEN_TYPE_DEF: { guint tidx = mono_metadata_token_index (type_token); if (mono_metadata_table_bounds_check (image, MONO_TABLE_TYPEDEF, tidx)) return g_strdup_printf ("Invalid type token 0x%08x", type_token); guint32 cols [MONO_TYPEDEF_SIZE]; MonoTableInfo *tt = &image->tables [MONO_TABLE_TYPEDEF]; mono_metadata_decode_row (tt, tidx - 1, cols, MONO_TYPEDEF_SIZE); name = mono_metadata_string_heap (image, cols [MONO_TYPEDEF_NAME]); nspace = mono_metadata_string_heap (image, cols [MONO_TYPEDEF_NAMESPACE]); if (strlen (nspace) == 0) return g_strdup_printf ("%s", name); else return g_strdup_printf ("%s.%s", nspace, name); } case MONO_TOKEN_TYPE_REF: { guint tidx = mono_metadata_token_index (type_token); if (mono_metadata_table_bounds_check (image, MONO_TABLE_TYPEREF, tidx)) return g_strdup_printf ("Invalid type token 0x%08x", type_token); guint32 cols [MONO_TYPEREF_SIZE]; MonoTableInfo *t = &image->tables [MONO_TABLE_TYPEREF]; mono_metadata_decode_row (t, tidx-1, cols, MONO_TYPEREF_SIZE); name = mono_metadata_string_heap (image, cols [MONO_TYPEREF_NAME]); nspace = mono_metadata_string_heap (image, cols [MONO_TYPEREF_NAMESPACE]); if (strlen 
(nspace) == 0) return g_strdup_printf ("%s", name); else return g_strdup_printf ("%s.%s", nspace, name); } case MONO_TOKEN_TYPE_SPEC: return g_strdup_printf ("Typespec 0x%08x", type_token); default: return g_strdup_printf ("Invalid type token 0x%08x", type_token); } } static char * mono_assembly_name_from_token (MonoImage *image, guint32 type_token) { if (image_is_dynamic (image)) return g_strdup_printf ("DynamicAssembly %s", image->name); switch (type_token & 0xff000000){ case MONO_TOKEN_TYPE_DEF: if (image->assembly) return mono_stringify_assembly_name (&image->assembly->aname); else if (image->assembly_name) return g_strdup (image->assembly_name); return g_strdup_printf ("%s", image->name ? image->name : "[Could not resolve assembly name"); case MONO_TOKEN_TYPE_REF: { MonoAssemblyName aname; memset (&aname, 0, sizeof (MonoAssemblyName)); guint32 cols [MONO_TYPEREF_SIZE]; MonoTableInfo *t = &image->tables [MONO_TABLE_TYPEREF]; guint32 idx = mono_metadata_token_index (type_token); if (mono_metadata_table_bounds_check (image, MONO_TABLE_TYPEREF, idx)) return g_strdup_printf ("Invalid type token 0x%08x", type_token); mono_metadata_decode_row (t, idx-1, cols, MONO_TYPEREF_SIZE); idx = cols [MONO_TYPEREF_SCOPE] >> MONO_RESOLUTION_SCOPE_BITS; switch (cols [MONO_TYPEREF_SCOPE] & MONO_RESOLUTION_SCOPE_MASK) { case MONO_RESOLUTION_SCOPE_MODULE: /* FIXME: */ return g_strdup (""); case MONO_RESOLUTION_SCOPE_MODULEREF: /* FIXME: */ return g_strdup (""); case MONO_RESOLUTION_SCOPE_TYPEREF: /* FIXME: */ return g_strdup (""); case MONO_RESOLUTION_SCOPE_ASSEMBLYREF: mono_assembly_get_assemblyref (image, idx - 1, &aname); return mono_stringify_assembly_name (&aname); default: g_assert_not_reached (); } break; } case MONO_TOKEN_TYPE_SPEC: /* FIXME: */ return g_strdup (""); default: g_assert_not_reached (); } return NULL; } /** * mono_class_get_full: * \param image the image where the class resides * \param type_token the token for the class * \param context the generic context used to evaluate generic instantiations in * \deprecated Functions that expose \c MonoGenericContext are going away in mono 4.0 * \returns The \c MonoClass that represents \p type_token in \p image */ MonoClass * mono_class_get_full (MonoImage *image, guint32 type_token, MonoGenericContext *context) { ERROR_DECL (error); MonoClass *klass; klass = mono_class_get_checked (image, type_token, error); if (klass && context && mono_metadata_token_table (type_token) == MONO_TABLE_TYPESPEC) klass = mono_class_inflate_generic_class_checked (klass, context, error); mono_error_assert_ok (error); return klass; } MonoClass * mono_class_get_and_inflate_typespec_checked (MonoImage *image, guint32 type_token, MonoGenericContext *context, MonoError *error) { MonoClass *klass; error_init (error); klass = mono_class_get_checked (image, type_token, error); if (klass && context && mono_metadata_token_table (type_token) == MONO_TABLE_TYPESPEC) klass = mono_class_inflate_generic_class_checked (klass, context, error); return klass; } /** * mono_class_get_checked: * \param image the image where the class resides * \param type_token the token for the class * \param error error object to return any error * * \returns The MonoClass that represents \p type_token in \p image, or NULL on error. 
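 *
 * Typical checked-lookup pattern (an illustrative sketch; token stands for any
 * typedef/typeref/typespec token):
 *
 *   ERROR_DECL (error);
 *   MonoClass *klass = mono_class_get_checked (image, token, error);
 *   if (!klass)
 *           ... report or clean up using the error object ...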
 */
MonoClass *
mono_class_get_checked (MonoImage *image, guint32 type_token, MonoError *error)
{
	MonoClass *klass = NULL;

	error_init (error);

	if (image_is_dynamic (image)) {
		int table = mono_metadata_token_table (type_token);

		if (table != MONO_TABLE_TYPEDEF && table != MONO_TABLE_TYPEREF && table != MONO_TABLE_TYPESPEC) {
			mono_error_set_bad_image (error, image, "Bad token table for dynamic image: %x", table);
			return NULL;
		}
		klass = (MonoClass *)mono_lookup_dynamic_token (image, type_token, NULL, error);
		goto done;
	}

	switch (type_token & 0xff000000){
	case MONO_TOKEN_TYPE_DEF:
		klass = mono_class_create_from_typedef (image, type_token, error);
		break;
	case MONO_TOKEN_TYPE_REF:
		klass = mono_class_from_typeref_checked (image, type_token, error);
		break;
	case MONO_TOKEN_TYPE_SPEC:
		klass = mono_class_create_from_typespec (image, type_token, NULL, error);
		break;
	default:
		mono_error_set_bad_image (error, image, "Unknown type token %x", type_token & 0xff000000);
	}

done:
	/* Generic case; should be avoided when a better error is possible. */
	if (!klass && is_ok (error)) {
		char *name = mono_class_name_from_token (image, type_token);
		char *assembly = mono_assembly_name_from_token (image, type_token);
		mono_error_set_type_load_name (error, name, assembly, "Could not resolve type with token %08x (expected class '%s' in assembly '%s')", type_token, name, assembly);
	}

	return klass;
}

/**
 * mono_type_get_checked:
 * \param image the image where the type resides
 * \param type_token the token for the type
 * \param context the generic context used to evaluate generic instantiations in
 * \param error Error handling context
 *
 * This function exists because it is sometimes desirable to have access to the
 * \c MonoType for a token rather than only the \c MonoClass.
 *
 * \returns The MonoType that represents \p type_token in \p image
 */
MonoType *
mono_type_get_checked (MonoImage *image, guint32 type_token, MonoGenericContext *context, MonoError *error)
{
	MonoType *type = NULL;
	gboolean inflated = FALSE;

	error_init (error);

	//FIXME: this will not fix the very issue for which mono_type_get_full exists -but how to do it then?
	if (image_is_dynamic (image)) {
		MonoClass *klass = (MonoClass *)mono_lookup_dynamic_token (image, type_token, context, error);
		return_val_if_nok (error, NULL);
		return m_class_get_byval_arg (klass);
	}

	if ((type_token & 0xff000000) != MONO_TOKEN_TYPE_SPEC) {
		MonoClass *klass = mono_class_get_checked (image, type_token, error);

		if (!klass)
			return NULL;
		if (m_class_has_failure (klass)) {
			mono_error_set_for_class_failure (error, klass);
			return NULL;
		}
		return m_class_get_byval_arg (klass);
	}

	type = mono_type_retrieve_from_typespec (image, type_token, context, &inflated, error);

	if (!type) {
		return NULL;
	}

	if (inflated) {
		MonoType *tmp = type;
		type = m_class_get_byval_arg (mono_class_from_mono_type_internal (type));
		/* FIXME: This is a workaround for the fact that a typespec token sometimes references the generic type definition.
		 * A MonoClass::_byval_arg of a generic type definition has type CLASS.
		 * Some parts of mono create a GENERICINST to reference a generic type definition and this conflicts with _byval_arg.
		 *
		 * The long term solution is to chase these places and make them set MonoType::type correctly.
		 * */
		if (type->type != tmp->type)
			type = tmp;
		else
			mono_metadata_free_type (tmp);
	}
	return type;
}

/**
 * mono_class_get:
 * \param image image where the class token will be looked up.
* \param type_token a type token from the image * \returns the \c MonoClass with the given \p type_token on the \p image */ MonoClass * mono_class_get (MonoImage *image, guint32 type_token) { MonoClass *result; MONO_ENTER_GC_UNSAFE; ERROR_DECL (error); result = mono_class_get_checked (image, type_token, error); mono_error_assert_ok (error); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_image_init_name_cache: * * Initializes the class name cache stored in image->name_cache. * * LOCKING: Acquires the corresponding image lock. */ void mono_image_init_name_cache (MonoImage *image) { MonoTableInfo *t = &image->tables [MONO_TABLE_TYPEDEF]; guint32 cols [MONO_TYPEDEF_SIZE]; const char *name; const char *nspace; guint32 i, visib, nspace_index; GHashTable *name_cache2, *nspace_table, *the_name_cache; if (image->name_cache) return; the_name_cache = g_hash_table_new (g_str_hash, g_str_equal); if (image_is_dynamic (image)) { mono_image_lock (image); if (image->name_cache) { /* Somebody initialized it before us */ g_hash_table_destroy (the_name_cache); } else { mono_atomic_store_release (&image->name_cache, the_name_cache); } mono_image_unlock (image); return; } /* Temporary hash table to avoid lookups in the nspace_table */ name_cache2 = g_hash_table_new (NULL, NULL); /* FIXME: metadata-update */ int rows = table_info_get_rows (t); for (i = 1; i <= rows; ++i) { mono_metadata_decode_row (t, i - 1, cols, MONO_TYPEDEF_SIZE); visib = cols [MONO_TYPEDEF_FLAGS] & TYPE_ATTRIBUTE_VISIBILITY_MASK; /* * Nested types are accessed from the nesting name. We use the fact that nested types use different visibility flags * than toplevel types, thus avoiding the need to grovel through the NESTED_TYPE table */ if (visib >= TYPE_ATTRIBUTE_NESTED_PUBLIC && visib <= TYPE_ATTRIBUTE_NESTED_FAM_OR_ASSEM) continue; name = mono_metadata_string_heap (image, cols [MONO_TYPEDEF_NAME]); nspace = mono_metadata_string_heap (image, cols [MONO_TYPEDEF_NAMESPACE]); nspace_index = cols [MONO_TYPEDEF_NAMESPACE]; nspace_table = (GHashTable *)g_hash_table_lookup (name_cache2, GUINT_TO_POINTER (nspace_index)); if (!nspace_table) { nspace_table = g_hash_table_new (g_str_hash, g_str_equal); g_hash_table_insert (the_name_cache, (char*)nspace, nspace_table); g_hash_table_insert (name_cache2, GUINT_TO_POINTER (nspace_index), nspace_table); } g_hash_table_insert (nspace_table, (char *) name, GUINT_TO_POINTER (i)); } /* Load type names from EXPORTEDTYPES table */ { MonoTableInfo *t = &image->tables [MONO_TABLE_EXPORTEDTYPE]; guint32 cols [MONO_EXP_TYPE_SIZE]; int i; rows = table_info_get_rows (t); for (i = 0; i < rows; ++i) { mono_metadata_decode_row (t, i, cols, MONO_EXP_TYPE_SIZE); guint32 impl = cols [MONO_EXP_TYPE_IMPLEMENTATION]; if ((impl & MONO_IMPLEMENTATION_MASK) == MONO_IMPLEMENTATION_EXP_TYPE) /* Nested type */ continue; name = mono_metadata_string_heap (image, cols [MONO_EXP_TYPE_NAME]); nspace = mono_metadata_string_heap (image, cols [MONO_EXP_TYPE_NAMESPACE]); nspace_index = cols [MONO_EXP_TYPE_NAMESPACE]; nspace_table = (GHashTable *)g_hash_table_lookup (name_cache2, GUINT_TO_POINTER (nspace_index)); if (!nspace_table) { nspace_table = g_hash_table_new (g_str_hash, g_str_equal); g_hash_table_insert (the_name_cache, (char*)nspace, nspace_table); g_hash_table_insert (name_cache2, GUINT_TO_POINTER (nspace_index), nspace_table); } g_hash_table_insert (nspace_table, (char *) name, GUINT_TO_POINTER (mono_metadata_make_token (MONO_TABLE_EXPORTEDTYPE, i + 1))); } } g_hash_table_destroy (name_cache2); mono_image_lock (image); if 
(image->name_cache) {
		/* Somebody initialized it before us */
		g_hash_table_destroy (the_name_cache);
	} else {
		mono_atomic_store_release (&image->name_cache, the_name_cache);
	}
	mono_image_unlock (image);
}

/*FIXME Only dynamic assemblies should allow this operation.*/
/**
 * mono_image_add_to_name_cache:
 */
void
mono_image_add_to_name_cache (MonoImage *image, const char *nspace, const char *name, guint32 index)
{
	GHashTable *nspace_table;
	GHashTable *name_cache;
	guint32 old_index;

	mono_image_init_name_cache (image);
	mono_image_lock (image);

	name_cache = image->name_cache;
	if (!(nspace_table = (GHashTable *)g_hash_table_lookup (name_cache, nspace))) {
		nspace_table = g_hash_table_new (g_str_hash, g_str_equal);
		g_hash_table_insert (name_cache, (char *)nspace, (char *)nspace_table);
	}

	if ((old_index = GPOINTER_TO_UINT (g_hash_table_lookup (nspace_table, (char*) name))))
		g_error ("overwriting old token %x on image %s for type %s::%s", old_index, image->name, nspace, name);

	g_hash_table_insert (nspace_table, (char *) name, GUINT_TO_POINTER (index));

	mono_image_unlock (image);
}

typedef struct {
	gconstpointer key;
	GSList *values;
} FindAllUserData;

static void
find_all_nocase (gpointer key, gpointer value, gpointer user_data)
{
	char *name = (char*)key;
	FindAllUserData *data = (FindAllUserData*)user_data;
	if (mono_utf8_strcasecmp (name, (char*)data->key) == 0)
		data->values = g_slist_prepend (data->values, value);
}

typedef struct {
	gconstpointer key;
	gpointer value;
} FindUserData;

static void
find_nocase (gpointer key, gpointer value, gpointer user_data)
{
	char *name = (char*)key;
	FindUserData *data = (FindUserData*)user_data;

	if (!data->value && (mono_utf8_strcasecmp (name, (char*)data->key) == 0))
		data->value = value;
}

/**
 * mono_class_from_name_case:
 * \param image The MonoImage where the type is looked up in
 * \param name_space the type namespace
 * \param name the type short name.
 * \deprecated use the mono_class_from_name_case_checked variant instead.
 *
 * Obtains a \c MonoClass with a given namespace and a given name which
 * is located in the given \c MonoImage. The namespace and name
 * lookups are case insensitive.
 */
MonoClass *
mono_class_from_name_case (MonoImage *image, const char* name_space, const char *name)
{
	ERROR_DECL (error);
	MonoClass *res = mono_class_from_name_case_checked (image, name_space, name, error);
	mono_error_cleanup (error);

	return res;
}

/**
 * mono_class_from_name_case_checked:
 * \param image The MonoImage where the type is looked up in
 * \param name_space the type namespace
 * \param name the type short name.
 * \param error error object to return any error
 *
 * Obtains a MonoClass with a given namespace and a given name which
 * is located in the given MonoImage. The namespace and name
 * lookups are case insensitive.
 *
 * \returns The MonoClass if the given namespace and name were found, or NULL if it
 * was not found. The \p error object will contain information about the problem
 * in that case.
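 *
 * Illustrative sketch (assuming \p image is the corlib image): this resolves
 * System.String even though the casing does not match the metadata name:
 *
 *   ERROR_DECL (error);
 *   MonoClass *k = mono_class_from_name_case_checked (image, "system", "STRING", error);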
*/ MonoClass * mono_class_from_name_case_checked (MonoImage *image, const char *name_space, const char *name, MonoError *error) { MonoClass *klass; GHashTable *visited_images; visited_images = g_hash_table_new (g_direct_hash, g_direct_equal); klass = mono_class_from_name_checked_aux (image, name_space, name, visited_images, FALSE, error); g_hash_table_destroy (visited_images); return klass; } static MonoClass* return_nested_in (MonoClass *klass, char *nested, gboolean case_sensitive) { MonoClass *found; char *s = strchr (nested, '/'); gpointer iter = NULL; if (s) { *s = 0; s++; } while ((found = mono_class_get_nested_types (klass, &iter))) { const char *name = m_class_get_name (found); gint strcmp_result; if (case_sensitive) strcmp_result = strcmp (name, nested); else strcmp_result = mono_utf8_strcasecmp (name, nested); if (strcmp_result == 0) { if (s) return return_nested_in (found, s, case_sensitive); return found; } } return NULL; } static MonoClass* search_modules (MonoImage *image, const char *name_space, const char *name, gboolean case_sensitive, MonoError *error) { MonoTableInfo *file_table = &image->tables [MONO_TABLE_FILE]; MonoImage *file_image; MonoClass *klass; int i; error_init (error); /* * The EXPORTEDTYPES table only contains public types, so have to search the * modules as well. * Note: image->modules contains the contents of the MODULEREF table, while * the real module list is in the FILE table. */ int rows = table_info_get_rows (file_table); for (i = 0; i < rows; i++) { guint32 cols [MONO_FILE_SIZE]; mono_metadata_decode_row (file_table, i, cols, MONO_FILE_SIZE); if (cols [MONO_FILE_FLAGS] == FILE_CONTAINS_NO_METADATA) continue; file_image = mono_image_load_file_for_image_checked (image, i + 1, error); if (file_image) { if (case_sensitive) klass = mono_class_from_name_checked (file_image, name_space, name, error); else klass = mono_class_from_name_case_checked (file_image, name_space, name, error); if (klass || !is_ok (error)) return klass; } } return NULL; } static MonoClass * mono_class_from_name_checked_aux (MonoImage *image, const char* name_space, const char *name, GHashTable* visited_images, gboolean case_sensitive, MonoError *error) { GHashTable *nspace_table = NULL; MonoImage *loaded_image = NULL; guint32 token = 0; int i; MonoClass *klass; char *nested; char buf [1024]; error_init (error); // Checking visited images avoids stack overflows when cyclic references exist. if (g_hash_table_lookup (visited_images, image)) return NULL; g_hash_table_insert (visited_images, image, GUINT_TO_POINTER(1)); if ((nested = (char*)strchr (name, '/'))) { int pos = nested - name; int len = strlen (name); if (len > 1023) return NULL; memcpy (buf, name, len + 1); buf [pos] = 0; nested = buf + pos + 1; name = buf; } /* FIXME: get_class_from_name () can't handle types in the EXPORTEDTYPE table */ // The AOT cache in get_class_from_name is case-sensitive, so don't bother with it for case-insensitive lookups if (get_class_from_name && table_info_get_rows (&image->tables [MONO_TABLE_EXPORTEDTYPE]) == 0 && case_sensitive) { gboolean res = get_class_from_name (image, name_space, name, &klass); if (res) { if (!klass) { klass = search_modules (image, name_space, name, case_sensitive, error); if (!is_ok (error)) return NULL; } if (nested) return klass ? 
return_nested_in (klass, nested, case_sensitive) : NULL; else return klass; } } mono_image_init_name_cache (image); mono_image_lock (image); if (case_sensitive) { nspace_table = (GHashTable *)g_hash_table_lookup (image->name_cache, name_space); if (nspace_table) token = GPOINTER_TO_UINT (g_hash_table_lookup (nspace_table, name)); } else { FindAllUserData all_user_data = { name_space, NULL }; FindUserData user_data = { name, NULL }; GSList *values; // We're forced to check all matching namespaces, not just the first one found, // because our desired type could be in any of the ones that match case-insensitively. g_hash_table_foreach (image->name_cache, find_all_nocase, &all_user_data); values = all_user_data.values; while (values && !user_data.value) { nspace_table = (GHashTable*)values->data; g_hash_table_foreach (nspace_table, find_nocase, &user_data); values = values->next; } g_slist_free (all_user_data.values); if (user_data.value) token = GPOINTER_TO_UINT (user_data.value); } mono_image_unlock (image); if (!token && image_is_dynamic (image) && image->modules) { /* Search modules as well */ for (i = 0; i < image->module_count; ++i) { MonoImage *module = image->modules [i]; if (case_sensitive) klass = mono_class_from_name_checked (module, name_space, name, error); else klass = mono_class_from_name_case_checked (module, name_space, name, error); if (klass || !is_ok (error)) return klass; } } if (!token) { klass = search_modules (image, name_space, name, case_sensitive, error); if (klass || !is_ok (error)) return klass; return NULL; } if (mono_metadata_token_table (token) == MONO_TABLE_EXPORTEDTYPE) { MonoTableInfo *t = &image->tables [MONO_TABLE_EXPORTEDTYPE]; guint32 cols [MONO_EXP_TYPE_SIZE]; guint32 idx, impl; idx = mono_metadata_token_index (token); mono_metadata_decode_row (t, idx - 1, cols, MONO_EXP_TYPE_SIZE); impl = cols [MONO_EXP_TYPE_IMPLEMENTATION]; if ((impl & MONO_IMPLEMENTATION_MASK) == MONO_IMPLEMENTATION_FILE) { loaded_image = mono_assembly_load_module_checked (image->assembly, impl >> MONO_IMPLEMENTATION_BITS, error); if (!loaded_image) return NULL; klass = mono_class_from_name_checked_aux (loaded_image, name_space, name, visited_images, case_sensitive, error); if (nested) return klass ? return_nested_in (klass, nested, case_sensitive) : NULL; return klass; } else if ((impl & MONO_IMPLEMENTATION_MASK) == MONO_IMPLEMENTATION_ASSEMBLYREF) { guint32 assembly_idx; assembly_idx = impl >> MONO_IMPLEMENTATION_BITS; mono_assembly_load_reference (image, assembly_idx - 1); g_assert (image->references [assembly_idx - 1]); if (image->references [assembly_idx - 1] == (gpointer)-1) return NULL; klass = mono_class_from_name_checked_aux (image->references [assembly_idx - 1]->image, name_space, name, visited_images, case_sensitive, error); if (nested) return klass ? return_nested_in (klass, nested, case_sensitive) : NULL; return klass; } else { g_assert_not_reached (); } } token = MONO_TOKEN_TYPE_DEF | token; klass = mono_class_get_checked (image, token, error); if (nested) return return_nested_in (klass, nested, case_sensitive); return klass; } /** * mono_class_from_name_checked: * \param image The MonoImage where the type is looked up in * \param name_space the type namespace * \param name the type short name. * * Obtains a MonoClass with a given namespace and a given name which * is located in the given MonoImage. * * Works like mono_class_from_name, but error handling is tricky. 
It can return NULL and have no error
 * set if the class was not found, or it will return NULL and set the error if there was a loading error.
 */
MonoClass *
mono_class_from_name_checked (MonoImage *image, const char* name_space, const char *name, MonoError *error)
{
	MonoClass *klass;
	GHashTable *visited_images;

	visited_images = g_hash_table_new (g_direct_hash, g_direct_equal);

	klass = mono_class_from_name_checked_aux (image, name_space, name, visited_images, TRUE, error);

	g_hash_table_destroy (visited_images);

	return klass;
}

/**
 * mono_class_from_name:
 * \param image The \c MonoImage where the type is looked up in
 * \param name_space the type namespace
 * \param name the type short name.
 *
 * Obtains a \c MonoClass with a given namespace and a given name which
 * is located in the given \c MonoImage.
 *
 * To reference nested classes, use the "/" character as a separator.
 * For example use \c "Foo/Bar" to reference the class \c Bar that is nested
 * inside \c Foo, like this: "class Foo { class Bar {} }".
 */
MonoClass *
mono_class_from_name (MonoImage *image, const char* name_space, const char *name)
{
	MonoClass *klass;

	MONO_ENTER_GC_UNSAFE;
	ERROR_DECL (error);

	klass = mono_class_from_name_checked (image, name_space, name, error);
	mono_error_cleanup (error); /* FIXME Don't swallow the error */

	MONO_EXIT_GC_UNSAFE;
	return klass;
}

/**
 * mono_class_load_from_name:
 * \param image The MonoImage where the type is looked up in
 * \param name_space the type namespace
 * \param name the type short name.
 *
 * This function works exactly like mono_class_from_name but it will abort if the class is not found.
 * This function should be used by the runtime for critical types for which there is no way to recover other than crashing
 * if they are missing. For example, System.Object or System.String.
 */
MonoClass *
mono_class_load_from_name (MonoImage *image, const char* name_space, const char *name)
{
	ERROR_DECL (error);
	MonoClass *klass;

	klass = mono_class_from_name_checked (image, name_space, name, error);
	if (!klass)
		g_error ("Runtime critical type %s.%s not found", name_space, name);
	mono_error_assertf_ok (error, "Could not load runtime critical type %s.%s", name_space, name);
	return klass;
}

/**
 * mono_class_try_load_from_name:
 * \param image The MonoImage where the type is looked up in
 * \param name_space the type namespace
 * \param name the type short name.
 *
 * This function tries to load a type, returning the class if it was found or NULL otherwise.
 * This function should be used by the runtime when probing for optional types, those that could have been linked out.
 *
 * Big design consideration. This function aborts if there was an error loading the type. This prevents us from missing
 * a type that we would otherwise assume to be available but was not, due to some error.
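 *
 * Illustrative probe (the type name below is hypothetical):
 *
 *   MonoClass *k = mono_class_try_load_from_name (image, "System", "SomeOptionalType");
 *   if (k)
 *           ... the optional type is available ...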
* */ MonoClass* mono_class_try_load_from_name (MonoImage *image, const char* name_space, const char *name) { ERROR_DECL (error); MonoClass *klass; klass = mono_class_from_name_checked (image, name_space, name, error); mono_error_assertf_ok (error, "Could not load runtime critical type %s.%s", name_space, name); return klass; } static gboolean mono_interface_implements_interface (MonoClass *interface_implementer, MonoClass *interface_implemented) { int i; ERROR_DECL (error); mono_class_setup_interfaces (interface_implementer, error); if (!is_ok (error)) { mono_error_cleanup (error); return FALSE; } MonoClass **klass_interfaces = m_class_get_interfaces (interface_implementer); for (i = 0; i < m_class_get_interface_count (interface_implementer); i++) { MonoClass *ic = klass_interfaces [i]; if (mono_class_is_ginst (ic)) ic = mono_class_get_generic_type_definition (ic); if (ic == interface_implemented) return TRUE; } return FALSE; } gboolean mono_class_is_subclass_of_internal (MonoClass *klass, MonoClass *klassc, gboolean check_interfaces) { MONO_REQ_GC_UNSAFE_MODE; /* FIXME test for interfaces with variant generic arguments */ if (check_interfaces) { mono_class_init_internal (klass); mono_class_init_internal (klassc); } if (check_interfaces && MONO_CLASS_IS_INTERFACE_INTERNAL (klassc) && !MONO_CLASS_IS_INTERFACE_INTERNAL (klass)) { if (MONO_CLASS_IMPLEMENTS_INTERFACE (klass, m_class_get_interface_id (klassc))) return TRUE; } else if (check_interfaces && MONO_CLASS_IS_INTERFACE_INTERNAL (klassc) && MONO_CLASS_IS_INTERFACE_INTERNAL (klass)) { int i; MonoClass **klass_interfaces = m_class_get_interfaces (klass); for (i = 0; i < m_class_get_interface_count (klass); i ++) { MonoClass *ic = klass_interfaces [i]; if (ic == klassc) return TRUE; } } else { if (!MONO_CLASS_IS_INTERFACE_INTERNAL (klass) && mono_class_has_parent (klass, klassc)) return TRUE; } /* * MS.NET thinks interfaces are a subclass of Object, so we think it as * well. */ if (klassc == mono_defaults.object_class) return TRUE; return FALSE; } static gboolean mono_type_is_generic_argument (MonoType *type) { return type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR; } gboolean mono_class_has_variant_generic_params (MonoClass *klass) { int i; MonoGenericContainer *container; if (!mono_class_is_ginst (klass)) return FALSE; container = mono_class_get_generic_container (mono_class_get_generic_class (klass)->container_class); for (i = 0; i < container->type_argc; ++i) if (mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)) return TRUE; return FALSE; } static gboolean mono_gparam_is_reference_conversible (MonoClass *target, MonoClass *candidate, gboolean check_for_reference_conv) { if (target == candidate) return TRUE; if (check_for_reference_conv && mono_type_is_generic_argument (m_class_get_byval_arg (target)) && mono_type_is_generic_argument (m_class_get_byval_arg (candidate))) { MonoGenericParam *gparam = m_class_get_byval_arg (candidate)->data.generic_param; MonoGenericParamInfo *pinfo = mono_generic_param_info (gparam); if (!pinfo || (pinfo->flags & GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT) == 0) return FALSE; } if (!mono_class_is_assignable_from_internal (target, candidate)) return FALSE; return TRUE; } /** * @container the generic container from the GTD * @klass: the class to be assigned to * @oklass: the source class * * Both @klass and @oklass must be instances of the same generic interface. 
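 * For example, an IEnumerable<string> instantiation is variant-compatible with
 * an IEnumerable<object> target, since IEnumerable`1 is covariant in its type
 * parameter.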
 *
 * Returns: TRUE if @oklass can be assigned to a @klass variable
 */
gboolean
mono_class_is_variant_compatible (MonoClass *klass, MonoClass *oklass, gboolean check_for_reference_conv)
{
	int j;
	MonoType **klass_argv, **oklass_argv;
	MonoClass *klass_gtd = mono_class_get_generic_type_definition (klass);
	MonoGenericContainer *container = mono_class_get_generic_container (klass_gtd);

	if (klass == oklass)
		return TRUE;

	/*Viable candidates are instances of the same generic interface*/
	if (mono_class_get_generic_type_definition (oklass) != klass_gtd || oklass == klass_gtd)
		return FALSE;

	klass_argv = &mono_class_get_generic_class (klass)->context.class_inst->type_argv [0];
	oklass_argv = &mono_class_get_generic_class (oklass)->context.class_inst->type_argv [0];

	for (j = 0; j < container->type_argc; ++j) {
		MonoClass *param1_class = mono_class_from_mono_type_internal (klass_argv [j]);
		MonoClass *param2_class = mono_class_from_mono_type_internal (oklass_argv [j]);

		if (m_class_is_valuetype (param1_class) != m_class_is_valuetype (param2_class) || (m_class_is_valuetype (param1_class) && param1_class != param2_class))
			return FALSE;

		/*
		 * The _VARIANT and _COVARIANT constants should read _COVARIANT and
		 * _CONTRAVARIANT, but they are in a public header so we can't fix it.
		 */
		if (param1_class != param2_class) {
			if (mono_generic_container_get_param_info (container, j)->flags & MONO_GEN_PARAM_VARIANT) {
				if (!mono_gparam_is_reference_conversible (param1_class, param2_class, check_for_reference_conv))
					return FALSE;
			} else if (mono_generic_container_get_param_info (container, j)->flags & MONO_GEN_PARAM_COVARIANT) {
				if (!mono_gparam_is_reference_conversible (param2_class, param1_class, check_for_reference_conv))
					return FALSE;
			} else
				return FALSE;
		}
	}

	return TRUE;
}

static gboolean
mono_gparam_is_assignable_from (MonoClass *target, MonoClass *candidate)
{
	MonoGenericParam *gparam, *ogparam;
	MonoGenericParamInfo *tinfo, *cinfo;
	MonoClass **candidate_class;
	gboolean class_constraint_satisfied, valuetype_constraint_satisfied;
	int tmask, cmask;

	if (target == candidate)
		return TRUE;
	MonoType *target_byval_arg = m_class_get_byval_arg (target);
	MonoType *candidate_byval_arg = m_class_get_byval_arg (candidate);
	if (target_byval_arg->type != candidate_byval_arg->type)
		return FALSE;

	gparam = target_byval_arg->data.generic_param;
	ogparam = candidate_byval_arg->data.generic_param;
	tinfo = mono_generic_param_info (gparam);
	cinfo = mono_generic_param_info (ogparam);

	class_constraint_satisfied = FALSE;
	valuetype_constraint_satisfied = FALSE;

	/*candidate must have a superset of target's special constraints*/
	tmask = tinfo->flags & GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK;
	cmask = cinfo->flags & GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK;

	if (cinfo->constraints) {
		for (candidate_class = cinfo->constraints; *candidate_class; ++candidate_class) {
			MonoClass *cc = *candidate_class;
			MonoType *cc_byval_arg = m_class_get_byval_arg (cc);

			if (mono_type_is_reference (cc_byval_arg) && !MONO_CLASS_IS_INTERFACE_INTERNAL (cc))
				class_constraint_satisfied = TRUE;
			else if (!mono_type_is_reference (cc_byval_arg) && !MONO_CLASS_IS_INTERFACE_INTERNAL (cc))
				valuetype_constraint_satisfied = TRUE;
		}
	}
	class_constraint_satisfied |= (cmask & GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT) != 0;
	valuetype_constraint_satisfied |= (cmask & GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT) != 0;

	if ((tmask & GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT) && !class_constraint_satisfied)
		return FALSE;
	if ((tmask &
GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT) && !valuetype_constraint_satisfied)
		return FALSE;
	if ((tmask & GENERIC_PARAMETER_ATTRIBUTE_CONSTRUCTOR_CONSTRAINT) && !((cmask & GENERIC_PARAMETER_ATTRIBUTE_CONSTRUCTOR_CONSTRAINT) || valuetype_constraint_satisfied)) {
		return FALSE;
	}

	/*candidate type constraints must be a superset of target's*/
	if (tinfo->constraints) {
		MonoClass **target_class;
		for (target_class = tinfo->constraints; *target_class; ++target_class) {
			MonoClass *tc = *target_class;
			MonoType *tc_byval_arg = m_class_get_byval_arg (tc);

			/*
			 * A constraint from @target might inflate into @candidate itself and in that case we don't need
			 * to check its constraints since it satisfies the constraint by itself.
			 */
			if (mono_metadata_type_equal (tc_byval_arg, candidate_byval_arg))
				continue;

			if (!cinfo->constraints)
				return FALSE;

			for (candidate_class = cinfo->constraints; *candidate_class; ++candidate_class) {
				MonoClass *cc = *candidate_class;

				if (mono_class_is_assignable_from_internal (tc, cc))
					break;

				/*
				 * This happens when we have the following:
				 *
				 * Bar<K> where K : IFace
				 * Foo<T, U> where T : U where U : IFace
				 * 	...
				 * 	Bar<T> <- T here satisfies K's constraint transitively through to U's constraint
				 *
				 */
				if (mono_type_is_generic_argument (m_class_get_byval_arg (cc))) {
					if (mono_gparam_is_assignable_from (target, cc))
						break;
				}
			}
			if (!*candidate_class)
				return FALSE;
		}
	}

	/*candidate itself must have a constraint that satisfies target*/
	if (cinfo->constraints) {
		for (candidate_class = cinfo->constraints; *candidate_class; ++candidate_class) {
			MonoClass *cc = *candidate_class;
			if (mono_class_is_assignable_from_internal (target, cc))
				return TRUE;
		}
	}
	return FALSE;
}

static MonoType*
mono_type_get_underlying_type_ignore_byref (MonoType *type)
{
	if (type->type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (type->data.klass))
		return mono_class_enum_basetype_internal (type->data.klass);
	if (type->type == MONO_TYPE_GENERICINST && m_class_is_enumtype (type->data.generic_class->container_class))
		return mono_class_enum_basetype_internal (type->data.generic_class->container_class);
	return type;
}

/**
 * mono_byref_type_is_assignable_from:
 * \param type The type assignee
 * \param ctype The type being assigned
 * \param signature_assignment whether this is a signature assignment check according to ECMA rules, or reflection
 *
 * Given two byref types, returns \c TRUE if values of the second type are assignable to locations of the first type.
 *
 * The \p signature_assignment parameter affects comparing T& and U& where T and U are both reference types. Reflection
 * does an IsAssignableFrom check for T and U here, but ECMA I.8.7.2 says that the verification types of T and U must be
 * identical. If \p signature_assignment is \c TRUE we do an ECMA check, otherwise, reflection.
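 *
 * For example, with a string& value and an object& location: the reflection
 * check treats the assignment as allowed (object is assignable from string),
 * while the ECMA signature check does not, because the verification types
 * object and string are not identical.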
 */
gboolean
mono_byref_type_is_assignable_from (MonoType *type, MonoType *ctype, gboolean signature_assignment)
{
	g_assert (m_type_is_byref (type));
	g_assert (m_type_is_byref (ctype));
	MonoType *t = mono_type_get_underlying_type_ignore_byref (type);
	MonoType *ot = mono_type_get_underlying_type_ignore_byref (ctype);

	MonoClass *klass = mono_class_from_mono_type_internal (t);
	MonoClass *klassc = mono_class_from_mono_type_internal (ot);

	if (mono_type_is_primitive (t)) {
		return mono_type_is_primitive (ot) && m_class_get_instance_size (klass) == m_class_get_instance_size (klassc);
	} else if (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) {
		return t->type == ot->type && t->data.generic_param->num == ot->data.generic_param->num;
	} else if (t->type == MONO_TYPE_PTR || t->type == MONO_TYPE_FNPTR) {
		return t->type == ot->type;
	} else {
		if (ot->type == MONO_TYPE_VAR || ot->type == MONO_TYPE_MVAR)
			return FALSE;

		if (m_class_is_valuetype (klass))
			return klass == klassc;
		if (m_class_is_valuetype (klassc))
			return FALSE;
		/*
		 * assignment compatibility for location types, ECMA I.8.7.2 - two managed pointer types T& and U& are
		 * assignment compatible if the verification types of T and U are identical.
		 */
		if (signature_assignment)
			return klass == klassc;
		/* the reflection IsAssignableFrom does a subtype comparison here for reference types only */
		return mono_class_is_assignable_from_internal (klass, klassc);
	}
}

/**
 * mono_class_is_assignable_from_internal:
 * \param klass the class to be assigned to
 * \param oklass the source class
 *
 * \returns TRUE if an instance of class \p oklass can be assigned to an
 * instance of class \p klass
 */
gboolean
mono_class_is_assignable_from_internal (MonoClass *klass, MonoClass *oklass)
{
	gboolean result = FALSE;
	ERROR_DECL (error);
	mono_class_is_assignable_from_checked (klass, oklass, &result, error);
	mono_error_cleanup (error);
	return result;
}

/**
 * mono_class_is_assignable_from:
 * \param klass the class to be assigned to
 * \param oklass the source class
 *
 * \returns TRUE if an instance of class \p oklass can be assigned to an
 * instance of class \p klass
 */
mono_bool
mono_class_is_assignable_from (MonoClass *klass, MonoClass *oklass)
{
	gboolean result;
	MONO_ENTER_GC_UNSAFE;
	result = mono_class_is_assignable_from_internal (klass, oklass);
	MONO_EXIT_GC_UNSAFE;
	return result;
}

/*
 * ECMA I.8.7.3 general assignment compatibility is defined in terms of an "intermediate type"
 * whereas ECMA I.8.7.1 assignment compatibility for signature types is defined in terms of a "reduced type".
 *
 * This matters when we're comparing arrays of IntPtr. IntPtr[] is generally
 * assignable to int[] or long[], depending on architecture. But for signature
 * compatibility, IntPtr[] is distinct from both of them.
 *
 * Similarly for ulong* and IntPtr*, etc.
 */
static MonoClass*
composite_type_to_reduced_element_type (MonoClass *array_klass)
{
	switch (m_class_get_byval_arg (m_class_get_element_class (array_klass))->type) {
	case MONO_TYPE_I:
	case MONO_TYPE_U:
		return mono_defaults.int_class;
	default:
		return m_class_get_cast_class (array_klass);
	}
}

static void
mono_class_is_assignable_from_general (MonoClass *klass, MonoClass *oklass, gboolean signature_assignment, gboolean *result, MonoError *error);

/**
 * mono_class_is_assignable_from_checked:
 * \param klass the class to be assigned to
 * \param oklass the source class
 * \param result set if there was no error
 * \param error set if there was an error
 *
 * Sets \p result to TRUE if an instance of class \p oklass can be assigned to
 * an instance of class \p klass or FALSE if it cannot. On error, \p error
 * is set and \p result is not valid.
 */
void
mono_class_is_assignable_from_checked (MonoClass *klass, MonoClass *oklass, gboolean *result, MonoError *error)
{
	const gboolean for_sig = FALSE;
	mono_class_is_assignable_from_general (klass, oklass, for_sig, result, error);
}

void
mono_class_signature_is_assignable_from (MonoClass *klass, MonoClass *oklass, gboolean *result, MonoError *error)
{
	const gboolean for_sig = TRUE;
	mono_class_is_assignable_from_general (klass, oklass, for_sig, result, error);
}

void
mono_class_is_assignable_from_general (MonoClass *klass, MonoClass *oklass, gboolean signature_assignment, gboolean *result, MonoError *error)
{
	g_assert (result);

	if (klass == oklass) {
		*result = TRUE;
		return;
	}

	MONO_REQ_GC_UNSAFE_MODE;
	/*FIXME this will cause a lot of irrelevant stuff to be loaded.*/
	if (!m_class_is_inited (klass))
		mono_class_init_internal (klass);

	if (!m_class_is_inited (oklass))
		mono_class_init_internal (oklass);

	if (mono_class_has_failure (klass)) {
		mono_error_set_for_class_failure (error, klass);
		*result = FALSE;
		return;
	}

	if (mono_class_has_failure (oklass)) {
		mono_error_set_for_class_failure (error, oklass);
		*result = FALSE;
		return;
	}

	MonoType *klass_byval_arg = m_class_get_byval_arg (klass);
	MonoType *oklass_byval_arg = m_class_get_byval_arg (oklass);

	if (mono_type_is_generic_argument (klass_byval_arg)) {
		if (!mono_type_is_generic_argument (oklass_byval_arg)) {
			*result = FALSE;
			return;
		}
		*result = mono_gparam_is_assignable_from (klass, oklass);
		return;
	}

	/* This can happen if oklass is a tyvar that has a constraint which is another tyvar which in turn
	 * has a constraint which is a class type:
	 *
	 *  class Foo { }
	 *  class G<T1, T2> where T1 : T2 where T2 : Foo { }
	 *
	 * In this case, Foo is assignable from T1.
	 */
	if (mono_type_is_generic_argument (oklass_byval_arg)) {
		MonoGenericParam *gparam = oklass_byval_arg->data.generic_param;
		MonoClass **constraints = mono_generic_container_get_param_info (gparam->owner, gparam->num)->constraints;
		int i;

		if (constraints) {
			for (i = 0; constraints [i]; ++i) {
				if (mono_class_is_assignable_from_internal (klass, constraints [i])) {
					*result = TRUE;
					return;
				}
			}
		}

		*result = mono_class_has_parent (oklass, klass);
		return;
	}

	if (MONO_CLASS_IS_INTERFACE_INTERNAL (klass)) {
		/* interface_offsets might not be set for dynamic classes */
		if (mono_class_get_ref_info_handle (oklass) && !m_class_get_interface_bitmap (oklass)) {
			/*
			 * oklass might be a generic type parameter but they have
			 * interface_offsets set.
*/ gboolean assign_result = mono_reflection_call_is_assignable_to (oklass, klass, error); return_if_nok (error); *result = assign_result; return; } if (!m_class_get_interface_bitmap (oklass)) { /* Happens with generic instances of not-yet created dynamic types */ *result = FALSE; return; } if (MONO_CLASS_IMPLEMENTS_INTERFACE (oklass, m_class_get_interface_id (klass))) { *result = TRUE; return; } if (m_class_is_array_special_interface (klass) && m_class_get_rank (oklass) == 1) { if (mono_class_is_gtd (klass)) { /* klass is an array special gtd like * IList`1<>, and oklass is X[] for some X. * Moreover we know that X isn't !0 (the gparam * of IList`1) because in that case we would * have returned TRUE for * MONO_CLASS_IMPLEMENTS_INTERFACE, above. */ *result = FALSE; return; } // FIXME: IEnumerator`1 should not be an array special interface. // The correct fix is to make // ((IEnumerable<U>) (new T[] {...})).GetEnumerator() // return an IEnumerator<U> (like .NET does) instead of IEnumerator<T> // and to stop marking IEnumerable`1 as an array_special_interface. if (mono_class_get_generic_type_definition (klass) == mono_defaults.generic_ienumerator_class) { *result = FALSE; return; } //XXX we could offset this by having the cast target computed at JIT time //XXX we could go even further and emit a wrapper that would do the extra type check MonoClass *iface_klass = mono_class_from_mono_type_internal (mono_class_get_generic_class (klass)->context.class_inst->type_argv [0]); MonoClass *obj_klass = m_class_get_cast_class (oklass); //This gets us the cast class of element type of the array // If the target we're trying to cast to is a valuetype, we must account of weird valuetype equivalences such as IntEnum <> int or uint <> int // We can't apply it for ref types as this would go wrong with arrays - IList<byte[]> would have byte tested if (!mono_class_is_nullable (iface_klass)) { if (m_class_is_valuetype (iface_klass)) iface_klass = m_class_get_cast_class (iface_klass); //array covariant casts only operates on scalar to scalar //This is so int[] can't be casted to IComparable<int>[] if (!(m_class_is_valuetype (obj_klass) && !m_class_is_valuetype (iface_klass)) && mono_class_is_assignable_from_internal (iface_klass, obj_klass)) { *result = TRUE; return; } } } if (mono_class_has_variant_generic_params (klass)) { int i; mono_class_setup_interfaces (oklass, error); return_if_nok (error); /*klass is a generic variant interface, We need to extract from oklass a list of ifaces which are viable candidates.*/ for (i = 0; i < m_class_get_interface_offsets_count (oklass); ++i) { MonoClass *iface = m_class_get_interfaces_packed (oklass) [i]; if (mono_class_is_variant_compatible (klass, iface, FALSE)) { *result = TRUE; return; } } } *result = FALSE; return; } else if (m_class_is_delegate (klass)) { if (mono_class_has_variant_generic_params (klass) && mono_class_is_variant_compatible (klass, oklass, FALSE)) { *result = TRUE; return; } } else if (m_class_get_rank (klass)) { MonoClass *eclass, *eoclass; if (m_class_get_rank (oklass) != m_class_get_rank (klass)) { *result = FALSE; return; } /* vectors vs. 
one dimensional arrays */ if (oklass_byval_arg->type != klass_byval_arg->type) { *result = FALSE; return; } if (signature_assignment) { eclass = composite_type_to_reduced_element_type (klass); eoclass = composite_type_to_reduced_element_type (oklass); } else { eclass = m_class_get_cast_class (klass); eoclass = m_class_get_cast_class (oklass); } /* * a is b does not imply a[] is b[] when a is a valuetype, and * b is a reference type. */ if (m_class_is_valuetype (eoclass)) { if ((eclass == mono_defaults.enum_class) || (eclass == m_class_get_parent (mono_defaults.enum_class)) || (!m_class_is_valuetype (eclass))) { *result = FALSE; return; } } /* * a is b does not imply a[] is b[] in the case where b is an interface and * a is a generic parameter, unless a has an additional class constraint. * For example (C#): * ``` * interface I {} * class G<T> where T : I {} * class H<U> where U : class, I {} * public class P { * public static void Main() { * var t = typeof(G<>).GetTypeInfo().GenericTypeParameters[0].MakeArrayType(); * var i = typeof(I).MakeArrayType(); * var u = typeof(H<>).GetTypeInfo().GenericTypeParameters[0].MakeArrayType(); * Console.WriteLine("I[] assignable from T[] ? {0}", i.IsAssignableFrom(t)); * Console.WriteLine("I[] assignable from U[] ? {0}", i.IsAssignableFrom(u)); * } * } * ``` * This should print: * I[] assignable from T[] ? False * I[] assignable from U[] ? True */ if (MONO_CLASS_IS_INTERFACE_INTERNAL (eclass)) { MonoType *eoclass_byval_arg = m_class_get_byval_arg (eoclass); if (mono_type_is_generic_argument (eoclass_byval_arg)) { MonoGenericParam *eoparam = eoclass_byval_arg->data.generic_param; MonoGenericParamInfo *eoinfo = mono_generic_param_info (eoparam); int eomask = eoinfo->flags & GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK; // check for class constraint if ((eomask & GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT) == 0) { *result = FALSE; return; } } } if (mono_class_is_nullable (eclass) ^ mono_class_is_nullable (eoclass)) { *result = FALSE; return; } mono_class_is_assignable_from_checked (eclass, eoclass, result, error); return; } else if (mono_class_is_nullable (klass)) { if (mono_class_is_nullable (oklass)) mono_class_is_assignable_from_checked (m_class_get_cast_class (klass), m_class_get_cast_class (oklass), result, error); else mono_class_is_assignable_from_checked (m_class_get_cast_class (klass), oklass, result, error); return; } else if (m_class_get_class_kind (klass) == MONO_CLASS_POINTER) { if (m_class_get_class_kind (oklass) != MONO_CLASS_POINTER) { *result = FALSE; return; } if (m_class_get_byval_arg (klass)->type == MONO_TYPE_FNPTR) { /* * if both klass and oklass are fnptr, and they're equal, we would have returned at the * beginning. */ /* Is this right? or do we need to look at signature compatability? 
*/ *result = FALSE; return; } if (m_class_get_byval_arg (oklass)->type != MONO_TYPE_PTR) { *result = FALSE; return; } g_assert (m_class_get_byval_arg (klass)->type == MONO_TYPE_PTR); MonoClass *eclass; MonoClass *eoclass; if (signature_assignment) { eclass = composite_type_to_reduced_element_type (klass); eoclass = composite_type_to_reduced_element_type (oklass); } else { eclass = m_class_get_cast_class (klass); eoclass = m_class_get_cast_class (oklass); } *result = (eclass == eoclass); return; } else if (klass == mono_defaults.object_class) { if (m_class_get_class_kind (oklass) == MONO_CLASS_POINTER) *result = FALSE; else *result = TRUE; return; } *result = mono_class_has_parent (oklass, klass); } /*Check if @oklass is variant compatible with @klass.*/ static gboolean mono_class_is_variant_compatible_slow (MonoClass *klass, MonoClass *oklass) { int j; MonoType **klass_argv, **oklass_argv; MonoClass *klass_gtd = mono_class_get_generic_type_definition (klass); MonoGenericContainer *container = mono_class_get_generic_container (klass_gtd); /*Viable candidates are instances of the same generic interface*/ if (mono_class_get_generic_type_definition (oklass) != klass_gtd || oklass == klass_gtd) return FALSE; klass_argv = &mono_class_get_generic_class (klass)->context.class_inst->type_argv [0]; oklass_argv = &mono_class_get_generic_class (oklass)->context.class_inst->type_argv [0]; for (j = 0; j < container->type_argc; ++j) { MonoClass *param1_class = mono_class_from_mono_type_internal (klass_argv [j]); MonoClass *param2_class = mono_class_from_mono_type_internal (oklass_argv [j]); if (m_class_is_valuetype (param1_class) != m_class_is_valuetype (param2_class)) return FALSE; /* * The _VARIANT and _COVARIANT constants should read _COVARIANT and * _CONTRAVARIANT, but they are in a public header so we can't fix it. */ if (param1_class != param2_class) { if (mono_generic_container_get_param_info (container, j)->flags & MONO_GEN_PARAM_VARIANT) { if (!mono_class_is_assignable_from_slow (param1_class, param2_class)) return FALSE; } else if (mono_generic_container_get_param_info (container, j)->flags & MONO_GEN_PARAM_COVARIANT) { if (!mono_class_is_assignable_from_slow (param2_class, param1_class)) return FALSE; } else return FALSE; } } return TRUE; } /*Check if @candidate implements the interface @target*/ static gboolean mono_class_implement_interface_slow (MonoClass *target, MonoClass *candidate) { ERROR_DECL (error); int i; gboolean is_variant = mono_class_has_variant_generic_params (target); if (is_variant && MONO_CLASS_IS_INTERFACE_INTERNAL (candidate)) { if (mono_class_is_variant_compatible_slow (target, candidate)) return TRUE; } do { if (candidate == target) return TRUE; /*A TypeBuilder can have more interfaces on tb->interfaces than on candidate->interfaces*/ if (image_is_dynamic (m_class_get_image (candidate)) && !m_class_was_typebuilder (candidate)) { MonoReflectionTypeBuilder *tb = mono_class_get_ref_info_raw (candidate); /* FIXME use handles */ int j; if (tb && tb->interfaces) { for (j = mono_array_length_internal (tb->interfaces) - 1; j >= 0; --j) { MonoReflectionType *iface = mono_array_get_internal (tb->interfaces, MonoReflectionType*, j); MonoClass *iface_class; /* we can't realize the type here since it can do pretty much anything.
*/ if (!iface->type) continue; iface_class = mono_class_from_mono_type_internal (iface->type); if (iface_class == target) return TRUE; if (is_variant && mono_class_is_variant_compatible_slow (target, iface_class)) return TRUE; if (mono_class_implement_interface_slow (target, iface_class)) return TRUE; } } } else { /*setup_interfaces don't mono_class_init_internal anything*/ /*FIXME this doesn't handle primitive type arrays. ICollection<sbyte> x byte [] won't work because candidate->interfaces, for byte[], won't have IList<sbyte>. A possible way to fix this would be to move that to setup_interfaces from setup_interface_offsets. */ mono_class_setup_interfaces (candidate, error); if (!is_ok (error)) { mono_error_cleanup (error); return FALSE; } int candidate_interface_count = m_class_get_interface_count (candidate); MonoClass **candidate_interfaces = m_class_get_interfaces (candidate); for (i = 0; i < candidate_interface_count; ++i) { if (candidate_interfaces [i] == target) return TRUE; if (is_variant && mono_class_is_variant_compatible_slow (target, candidate_interfaces [i])) return TRUE; if (mono_class_implement_interface_slow (target, candidate_interfaces [i])) return TRUE; } } candidate = m_class_get_parent (candidate); } while (candidate); return FALSE; } /* * Check if @oklass can be assigned to @klass. * This function does the same as mono_class_is_assignable_from_internal but is safe to be used from mono_class_init_internal context. */ gboolean mono_class_is_assignable_from_slow (MonoClass *target, MonoClass *candidate) { if (candidate == target) return TRUE; if (target == mono_defaults.object_class) return TRUE; if (mono_class_has_parent (candidate, target)) return TRUE; /*If target is not an interface there is no need to check them.*/ if (MONO_CLASS_IS_INTERFACE_INTERNAL (target)) return mono_class_implement_interface_slow (target, candidate); if (m_class_is_delegate (target) && mono_class_has_variant_generic_params (target)) return mono_class_is_variant_compatible (target, candidate, FALSE); if (m_class_get_rank (target)) { MonoClass *eclass, *eoclass; if (m_class_get_rank (target) != m_class_get_rank (candidate)) return FALSE; /* vectors vs. one dimensional arrays */ if (m_class_get_byval_arg (target)->type != m_class_get_byval_arg (candidate)->type) return FALSE; eclass = m_class_get_cast_class (target); eoclass = m_class_get_cast_class (candidate); /* * a is b does not imply a[] is b[] when a is a valuetype, and * b is a reference type. */ if (m_class_is_valuetype (eoclass)) { if ((eclass == mono_defaults.enum_class) || (eclass == m_class_get_parent (mono_defaults.enum_class)) || (eclass == mono_defaults.object_class)) return FALSE; } return mono_class_is_assignable_from_slow (eclass, eoclass); } /*FIXME properly handle nullables */ /*FIXME properly handle (M)VAR */ return FALSE; } /** * mono_generic_param_get_base_type: * * Return the base type of the given generic parameter from its constraints. * * Could be another generic parameter, or it could be Object or ValueType. 
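*
* For example (C#, an illustrative sketch of the rule implemented below):
* ```
* class G<T> where T : System.IO.Stream { }   // base type of T is Stream
* class H<U> where U : struct { }             // base type of U is ValueType
* class K<V> { }                              // base type of V is Object
* ```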
*/ MonoClass* mono_generic_param_get_base_type (MonoClass *klass) { MonoType *type = m_class_get_byval_arg (klass); g_assert (mono_type_is_generic_argument (type)); MonoGenericParam *gparam = type->data.generic_param; g_assert (gparam->owner && !gparam->owner->is_anonymous); MonoClass **constraints = mono_generic_container_get_param_info (gparam->owner, gparam->num)->constraints; MonoClass *base_class = mono_defaults.object_class; if (constraints) { int i; for (i = 0; constraints [i]; ++i) { MonoClass *constraint = constraints[i]; if (MONO_CLASS_IS_INTERFACE_INTERNAL (constraint)) continue; MonoType *constraint_type = m_class_get_byval_arg (constraint); if (mono_type_is_generic_argument (constraint_type)) { MonoGenericParam *constraint_param = constraint_type->data.generic_param; MonoGenericParamInfo *constraint_info = mono_generic_param_info (constraint_param); if ((constraint_info->flags & GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT) == 0 && (constraint_info->flags & GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT) == 0) continue; } base_class = constraint; } } if (base_class == mono_defaults.object_class) { MonoGenericParamInfo *gparam_info = mono_generic_param_info (gparam); if ((gparam_info->flags & GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT) != 0) { base_class = mono_class_get_valuetype_class (); } } return base_class; } /** * mono_class_get_cctor: * \param klass A MonoClass pointer * * \returns The static constructor of \p klass if it exists, NULL otherwise. */ MonoMethod* mono_class_get_cctor (MonoClass *klass) { MonoMethod *result = NULL; ERROR_DECL (error); MonoCachedClassInfo cached_info; if (image_is_dynamic (m_class_get_image (klass))) { /* * has_cctor is not set for these classes because mono_class_init_internal () is * not run for them. */ result = mono_class_get_method_from_name_checked (klass, ".cctor", -1, METHOD_ATTRIBUTE_SPECIAL_NAME, error); mono_error_assert_msg_ok (error, "Could not lookup class cctor in dynamic image"); return result; } mono_class_init_internal (klass); if (!m_class_has_cctor (klass)) return result; if (mono_class_is_ginst (klass) && !m_class_get_methods (klass)) { result = mono_class_get_inflated_method (klass, mono_class_get_cctor (mono_class_get_generic_class (klass)->container_class), error); mono_error_assert_msg_ok (error, "Could not lookup inflated class cctor"); /* FIXME do proper error handling */ return result; } if (mono_class_get_cached_class_info (klass, &cached_info)) { result = mono_get_method_checked (m_class_get_image (klass), cached_info.cctor_token, klass, NULL, error); mono_error_assert_msg_ok (error, "Could not lookup class cctor from cached metadata"); return result; } result = mono_class_get_method_from_name_checked (klass, ".cctor", -1, METHOD_ATTRIBUTE_SPECIAL_NAME, error); mono_error_assert_msg_ok (error, "Could not lookup class cctor"); return result; } /** * mono_class_get_finalizer: * \param klass: The MonoClass pointer * * \returns The finalizer method of \p klass if it exists, NULL otherwise. 
*/ MonoMethod* mono_class_get_finalizer (MonoClass *klass) { MonoCachedClassInfo cached_info; if (!m_class_is_inited (klass)) mono_class_init_internal (klass); if (!mono_class_has_finalizer (klass)) return NULL; if (mono_class_get_cached_class_info (klass, &cached_info)) { ERROR_DECL (error); MonoMethod *result = mono_get_method_checked (cached_info.finalize_image, cached_info.finalize_token, NULL, NULL, error); mono_error_assert_msg_ok (error, "Could not lookup finalizer from cached metadata"); return result; } else { mono_class_setup_vtable (klass); return m_class_get_vtable (klass) [mono_class_get_object_finalize_slot ()]; } } /** * mono_class_needs_cctor_run: * \param klass the MonoClass pointer * \param caller a MonoMethod describing the caller * * Determines whether the class has a static constructor and whether it * needs to be called when executing CALLER. */ gboolean mono_class_needs_cctor_run (MonoClass *klass, MonoMethod *caller) { MonoMethod *method; method = mono_class_get_cctor (klass); if (method) return (method == caller) ? FALSE : TRUE; else return FALSE; } /** * mono_class_array_element_size: * \param klass * * \returns The number of bytes an element of type \p klass uses when stored into an array. */ gint32 mono_class_array_element_size (MonoClass *klass) { MonoType *type = m_class_get_byval_arg (klass); handle_enum: switch (type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_BOOLEAN: return 1; case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: return 2; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_R4: return 4; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: return TARGET_SIZEOF_VOID_P; case MONO_TYPE_I8: case MONO_TYPE_U8: case MONO_TYPE_R8: return 8; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (type->data.klass)) { type = mono_class_enum_basetype_internal (type->data.klass); klass = m_class_get_element_class (klass); goto handle_enum; } return mono_class_value_size (klass, NULL); case MONO_TYPE_GENERICINST: type = m_class_get_byval_arg (type->data.generic_class->container_class); goto handle_enum; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: { int align; return mono_type_size (type, &align); } case MONO_TYPE_VOID: return 0; default: g_error ("unknown type 0x%02x in mono_class_array_element_size", type->type); } return -1; } /** * mono_array_element_size: * \param ac pointer to a \c MonoArrayClass * * \returns The size of a single array element. * * LOCKING: Acquires the loader lock.
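*
* Example (a minimal sketch; \c mono_array_class_get is the assumed way to
* obtain the array class here):
*
*   MonoClass *ac = mono_array_class_get (mono_defaults.int32_class, 1);
*   g_assert (mono_array_element_size (ac) == 4);   // int32 elements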
*/ gint32 mono_array_element_size (MonoClass *ac) { g_assert (m_class_get_rank (ac)); if (G_UNLIKELY (!m_class_is_size_inited (ac))) { mono_class_setup_fields (ac); } return m_class_get_sizes (ac).element_size; } /** * mono_ldtoken: */ gpointer mono_ldtoken (MonoImage *image, guint32 token, MonoClass **handle_class, MonoGenericContext *context) { gpointer res; MONO_ENTER_GC_UNSAFE; ERROR_DECL (error); res = mono_ldtoken_checked (image, token, handle_class, context, error); mono_error_assert_ok (error); MONO_EXIT_GC_UNSAFE; return res; } gpointer mono_ldtoken_checked (MonoImage *image, guint32 token, MonoClass **handle_class, MonoGenericContext *context, MonoError *error) { error_init (error); if (image_is_dynamic (image)) { MonoClass *tmp_handle_class; gpointer obj = mono_lookup_dynamic_token_class (image, token, TRUE, &tmp_handle_class, context, error); mono_error_assert_ok (error); g_assert (tmp_handle_class); if (handle_class) *handle_class = tmp_handle_class; if (tmp_handle_class == mono_defaults.typehandle_class) return m_class_get_byval_arg ((MonoClass*)obj); else return obj; } switch (token & 0xff000000) { case MONO_TOKEN_TYPE_DEF: case MONO_TOKEN_TYPE_REF: case MONO_TOKEN_TYPE_SPEC: { MonoType *type; MonoClass *klass; if (handle_class) *handle_class = mono_defaults.typehandle_class; type = mono_type_get_checked (image, token, context, error); if (!type) return NULL; klass = mono_class_from_mono_type_internal (type); mono_class_init_internal (klass); if (mono_class_has_failure (klass)) { mono_error_set_for_class_failure (error, klass); return NULL; } /* We return a MonoType* as handle */ return type; } case MONO_TOKEN_FIELD_DEF: { MonoClass *klass; guint32 type = mono_metadata_typedef_from_field (image, mono_metadata_token_index (token)); if (!type) { mono_error_set_bad_image (error, image, "Bad ldtoken %x", token); return NULL; } if (handle_class) *handle_class = mono_defaults.fieldhandle_class; klass = mono_class_get_and_inflate_typespec_checked (image, MONO_TOKEN_TYPE_DEF | type, context, error); if (!klass) return NULL; mono_class_init_internal (klass); return mono_class_get_field (klass, token); } case MONO_TOKEN_METHOD_DEF: case MONO_TOKEN_METHOD_SPEC: { MonoMethod *meth; meth = mono_get_method_checked (image, token, NULL, context, error); if (handle_class) *handle_class = mono_defaults.methodhandle_class; if (!meth) return NULL; return meth; } case MONO_TOKEN_MEMBER_REF: { guint32 cols [MONO_MEMBERREF_SIZE]; const char *sig; mono_metadata_decode_row (&image->tables [MONO_TABLE_MEMBERREF], mono_metadata_token_index (token) - 1, cols, MONO_MEMBERREF_SIZE); sig = mono_metadata_blob_heap (image, cols [MONO_MEMBERREF_SIGNATURE]); mono_metadata_decode_blob_size (sig, &sig); if (*sig == 0x6) { /* it's a field */ MonoClass *klass; MonoClassField *field; field = mono_field_from_token_checked (image, token, &klass, context, error); if (handle_class) *handle_class = mono_defaults.fieldhandle_class; return field; } else { MonoMethod *meth; meth = mono_get_method_checked (image, token, NULL, context, error); if (handle_class) *handle_class = mono_defaults.methodhandle_class; return meth; } } default: mono_error_set_bad_image (error, image, "Bad ldtoken %x", token); } return NULL; } gpointer mono_lookup_dynamic_token (MonoImage *image, guint32 token, MonoGenericContext *context, MonoError *error) { MonoClass *handle_class; error_init (error); return mono_reflection_lookup_dynamic_token (image, token, TRUE, &handle_class, context, error); } gpointer mono_lookup_dynamic_token_class 
(MonoImage *image, guint32 token, gboolean valid_token, MonoClass **handle_class, MonoGenericContext *context, MonoError *error) { return mono_reflection_lookup_dynamic_token (image, token, valid_token, handle_class, context, error); } static MonoGetCachedClassInfo get_cached_class_info = NULL; void mono_install_get_cached_class_info (MonoGetCachedClassInfo func) { get_cached_class_info = func; } gboolean mono_class_get_cached_class_info (MonoClass *klass, MonoCachedClassInfo *res) { if (!get_cached_class_info) return FALSE; else return get_cached_class_info (klass, res); } void mono_install_get_class_from_name (MonoGetClassFromName func) { get_class_from_name = func; } /** * mono_class_get_image: * * Use this method to get the \c MonoImage* where this class came from. * * \returns The image where this class is defined. */ MonoImage* mono_class_get_image (MonoClass *klass) { return m_class_get_image (klass); } /** * mono_class_get_element_class: * \param klass the \c MonoClass to act on * * Use this function to get the element class of an array. * * \returns The element class of an array. */ MonoClass* mono_class_get_element_class (MonoClass *klass) { MonoClass *result; MONO_ENTER_GC_UNSAFE; result = m_class_get_element_class (klass); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_class_is_valuetype: * \param klass the \c MonoClass to act on * * Use this method to determine if the provided \c MonoClass* represents a value type, * or a reference type. * * \returns TRUE if the \c MonoClass represents a \c ValueType, FALSE if it represents a reference type. */ gboolean mono_class_is_valuetype (MonoClass *klass) { gboolean result; MONO_ENTER_GC_UNSAFE; result = m_class_is_valuetype (klass); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_class_is_enum: * \param klass the \c MonoClass to act on * * Use this function to determine if the provided \c MonoClass* represents an enumeration. * * \returns TRUE if the \c MonoClass represents an enumeration. */ gboolean mono_class_is_enum (MonoClass *klass) { gboolean result; MONO_ENTER_GC_UNSAFE; result = m_class_is_enumtype (klass); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_class_enum_basetype_internal: * \param klass the \c MonoClass to act on * * Use this function to get the underlying type for an enumeration value. * * \returns The underlying type representation for an enumeration. */ MonoType* mono_class_enum_basetype_internal (MonoClass *klass) { if (m_class_get_element_class (klass) == klass) /* SRE or broken types */ return NULL; return m_class_get_byval_arg (m_class_get_element_class (klass)); } /** * mono_class_enum_basetype: * \param klass the \c MonoClass to act on * * Use this function to get the underlying type for an enumeration value. * * \returns The underlying type representation for an enumeration. */ MonoType* mono_class_enum_basetype (MonoClass *klass) { MonoType *res; MONO_ENTER_GC_UNSAFE; res = mono_class_enum_basetype_internal (klass); MONO_EXIT_GC_UNSAFE; return res; } /** * mono_class_get_parent * \param klass the \c MonoClass to act on * * \returns The parent class for this class. */ MonoClass* mono_class_get_parent (MonoClass *klass) { MonoClass *result; MONO_ENTER_GC_UNSAFE; result = m_class_get_parent (klass); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_class_get_nesting_type: * \param klass the \c MonoClass to act on * * Use this function to obtain the class that the provided \c MonoClass* is nested on. * * If the return is NULL, this indicates that this class is not nested. 
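*
* For example (C#, illustrative): given "class Outer { class Inner { } }",
* the nesting type of \c Inner is \c Outer, while \c Outer itself yields NULL.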
* * \returns The container type where this type is nested or NULL if this type is not a nested type. */ MonoClass* mono_class_get_nesting_type (MonoClass *klass) { return m_class_get_nested_in (klass); } /** * mono_class_get_rank: * \param klass the MonoClass to act on * * \returns The rank for the array (the number of dimensions). */ int mono_class_get_rank (MonoClass *klass) { return m_class_get_rank (klass); } /** * mono_class_get_name * \param klass the \c MonoClass to act on * * \returns The name of the class. */ const char* mono_class_get_name (MonoClass *klass) { const char *result; MONO_ENTER_GC_UNSAFE; result = m_class_get_name (klass); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_class_get_namespace: * \param klass the \c MonoClass to act on * * \returns The namespace of the class. */ const char* mono_class_get_namespace (MonoClass *klass) { const char *result; MONO_ENTER_GC_UNSAFE; result = m_class_get_name_space (klass); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_class_get_type: * \param klass the \c MonoClass to act on * * This method returns the internal \c MonoType representation for the class. * * \returns The \c MonoType from the class. */ MonoType* mono_class_get_type (MonoClass *klass) { return m_class_get_byval_arg (klass); } /** * mono_class_get_type_token: * \param klass the \c MonoClass to act on * * This method returns type token for the class. * * \returns The type token for the class. */ guint32 mono_class_get_type_token (MonoClass *klass) { return m_class_get_type_token (klass); } /** * mono_class_get_byref_type: * \param klass the \c MonoClass to act on * * */ MonoType* mono_class_get_byref_type (MonoClass *klass) { return m_class_get_this_arg (klass); } /** * mono_class_num_fields: * \param klass the \c MonoClass to act on * * \returns The number of static and instance fields in the class. */ int mono_class_num_fields (MonoClass *klass) { return mono_class_get_field_count (klass); } /** * mono_class_num_methods: * \param klass the \c MonoClass to act on * * \returns The number of methods in the class. */ int mono_class_num_methods (MonoClass *klass) { return mono_class_get_method_count (klass); } /** * mono_class_num_properties * \param klass the \c MonoClass to act on * * \returns The number of properties in the class. */ int mono_class_num_properties (MonoClass *klass) { mono_class_setup_properties (klass); return mono_class_get_property_info (klass)->count; } /** * mono_class_num_events: * \param klass the \c MonoClass to act on * * \returns The number of events in the class. */ int mono_class_num_events (MonoClass *klass) { mono_class_setup_events (klass); return mono_class_get_event_info (klass)->count; } /** * mono_class_get_fields: * \param klass the \c MonoClass to act on * * This routine is an iterator routine for retrieving the fields in a class. * * You must pass a \c gpointer that points to zero and is treated as an opaque handle to * iterate over all of the elements. When no more values are * available, the return value is NULL. * * \returns a \c MonoClassField* on each iteration, or NULL when no more fields are available. 
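*
* A minimal usage sketch (\c klass stands for any class; error handling elided):
*
*   gpointer iter = NULL;
*   MonoClassField *field;
*   while ((field = mono_class_get_fields (klass, &iter)))
*           g_print ("%s\n", mono_field_get_name (field));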
*/ MonoClassField* mono_class_get_fields (MonoClass* klass, gpointer *iter) { MonoClassField *result; MONO_ENTER_GC_UNSAFE; result = mono_class_get_fields_internal (klass, iter); MONO_EXIT_GC_UNSAFE; return result; } MonoClassField* mono_class_get_fields_internal (MonoClass *klass, gpointer *iter) { MonoClassField* field; if (!iter) return NULL; if (!*iter) { mono_class_setup_fields (klass); if (mono_class_has_failure (klass)) return NULL; /* start from the first */ if (mono_class_get_field_count (klass)) { MonoClassField *klass_fields = m_class_get_fields (klass); *iter = &klass_fields [0]; return &klass_fields [0]; } else { /* no fields */ return NULL; } } field = (MonoClassField *)*iter; field++; if (field < &m_class_get_fields (klass) [mono_class_get_field_count (klass)]) { *iter = field; return field; } return NULL; } /** * mono_class_get_methods: * \param klass the \c MonoClass to act on * * This routine is an iterator routine for retrieving the methods in a class. * * You must pass a \c gpointer that points to zero and is treated as an opaque handle to * iterate over all of the elements. When no more values are * available, the return value is NULL. * * \returns a \c MonoMethod on each iteration or NULL when no more methods are available. */ MonoMethod* mono_class_get_methods (MonoClass* klass, gpointer *iter) { MonoMethod** method; if (!iter) return NULL; if (!*iter) { mono_class_setup_methods (klass); MonoMethod **klass_methods = m_class_get_methods (klass); /* * We can't fail lookup of methods otherwise the runtime will burst in flames on all sort of places. * FIXME we should better report this error to the caller */ if (!klass_methods) return NULL; /* start from the first */ if (mono_class_get_method_count (klass)) { *iter = &klass_methods [0]; return klass_methods [0]; } else { /* no method */ return NULL; } } method = (MonoMethod **)*iter; method++; if (method < &m_class_get_methods (klass) [mono_class_get_method_count (klass)]) { *iter = method; return *method; } return NULL; } /** * mono_class_get_properties: * \param klass the \c MonoClass to act on * * This routine is an iterator routine for retrieving the properties in a class. * * You must pass a \c gpointer that points to zero and is treated as an opaque handle to * iterate over all of the elements. When no more values are * available, the return value is NULL. * * \returns a \c MonoProperty* on each invocation, or NULL when no more are available. */ MonoProperty* mono_class_get_properties (MonoClass* klass, gpointer *iter) { MonoProperty* property; if (!iter) return NULL; if (!*iter) { mono_class_setup_properties (klass); MonoClassPropertyInfo *info = mono_class_get_property_info (klass); /* start from the first */ if (info->count) { *iter = &info->properties [0]; return (MonoProperty *)*iter; } else { /* no properties */ return NULL; } } property = (MonoProperty *)*iter; property++; MonoClassPropertyInfo *info = mono_class_get_property_info (klass); if (property < &info->properties [info->count]) { *iter = property; return (MonoProperty *)*iter; } return NULL; } /** * mono_class_get_events: * \param klass the \c MonoClass to act on * * This routine is an iterator routine for retrieving the events in a class. * * You must pass a \c gpointer that points to zero and is treated as an opaque handle to * iterate over all of the elements. When no more values are * available, the return value is NULL. * * \returns a \c MonoEvent* on each invocation, or NULL when no more are available.
*/ MonoEvent* mono_class_get_events (MonoClass* klass, gpointer *iter) { MonoEvent* event; if (!iter) return NULL; if (!*iter) { mono_class_setup_events (klass); MonoClassEventInfo *info = mono_class_get_event_info (klass); /* start from the first */ if (info->count) { *iter = &info->events [0]; return (MonoEvent *)*iter; } else { /* no events */ return NULL; } } event = (MonoEvent *)*iter; event++; MonoClassEventInfo *info = mono_class_get_event_info (klass); if (event < &info->events [info->count]) { *iter = event; return (MonoEvent *)*iter; } return NULL; } /** * mono_class_get_interfaces: * \param klass the \c MonoClass to act on * * This routine is an iterator routine for retrieving the interfaces implemented by this class. * * You must pass a \c gpointer that points to zero and is treated as an opaque handle to * iterate over all of the elements. When no more values are * available, the return value is NULL. * * \returns a \c MonoClass* on each invocation, or NULL when no more are available. */ MonoClass* mono_class_get_interfaces (MonoClass* klass, gpointer *iter) { ERROR_DECL (error); MonoClass** iface; if (!iter) return NULL; if (!*iter) { if (!m_class_is_inited (klass)) mono_class_init_internal (klass); if (!m_class_is_interfaces_inited (klass)) { mono_class_setup_interfaces (klass, error); if (!is_ok (error)) { mono_error_cleanup (error); return NULL; } } /* start from the first */ if (m_class_get_interface_count (klass)) { *iter = &m_class_get_interfaces (klass) [0]; return m_class_get_interfaces (klass) [0]; } else { /* no interface */ return NULL; } } iface = (MonoClass **)*iter; iface++; if (iface < &m_class_get_interfaces (klass) [m_class_get_interface_count (klass)]) { *iter = iface; return *iface; } return NULL; } /** * mono_class_get_nested_types: * \param klass the \c MonoClass to act on * * This routine is an iterator routine for retrieving the nested types of a class. * This works only if \p klass is non-generic, or a generic type definition. * * You must pass a \c gpointer that points to zero and is treated as an opaque handle to * iterate over all of the elements. When no more values are * available, the return value is NULL. * * \returns a \c MonoClass* on each invocation, or NULL when no more are available. */ MonoClass* mono_class_get_nested_types (MonoClass* klass, gpointer *iter) { GList *item; if (!iter) return NULL; if (!m_class_is_nested_classes_inited (klass)) mono_class_setup_nested_types (klass); if (!*iter) { GList *nested_classes = mono_class_get_nested_classes_property (klass); /* start from the first */ if (nested_classes) { *iter = nested_classes; return (MonoClass *)nested_classes->data; } else { /* no nested types */ return NULL; } } item = (GList *)*iter; item = item->next; if (item) { *iter = item; return (MonoClass *)item->data; } return NULL; } /** * mono_class_is_delegate: * \param klass the \c MonoClass to act on * * \returns TRUE if the \c MonoClass represents a \c System.Delegate. */ mono_bool mono_class_is_delegate (MonoClass *klass) { mono_bool result; MONO_ENTER_GC_UNSAFE; result = m_class_is_delegate (klass); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_class_implements_interface: * \param klass The MonoClass to act on * \param iface The interface to check if \p klass implements. * * \returns TRUE if \p klass implements \p iface.
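*
* Example (a sketch; \c iface_klass is assumed to have been resolved elsewhere,
* e.g. with \c mono_class_from_name):
*
*   if (mono_class_implements_interface (klass, iface_klass))
*           g_print ("%s implements it\n", mono_class_get_name (klass));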
*/ mono_bool mono_class_implements_interface (MonoClass* klass, MonoClass* iface) { mono_bool result; MONO_ENTER_GC_UNSAFE; result = mono_class_is_assignable_from_internal (iface, klass); MONO_EXIT_GC_UNSAFE; return result; } static mono_bool class_implements_interface_ignore_generics (MonoClass* klass, MonoClass* iface) { int i; ERROR_DECL (error); if (mono_class_is_ginst (iface)) iface = mono_class_get_generic_type_definition (iface); while (klass != NULL) { if (mono_class_is_assignable_from_internal (iface, klass)) return TRUE; mono_class_setup_interfaces (klass, error); if (!is_ok (error)) { mono_error_cleanup (error); return FALSE; } MonoClass **klass_interfaces = m_class_get_interfaces (klass); for (i = 0; i < m_class_get_interface_count (klass); i++) { MonoClass *ic = klass_interfaces [i]; if (mono_class_is_ginst (ic)) ic = mono_class_get_generic_type_definition (ic); if (ic == iface) { return TRUE; } } klass = m_class_get_parent (klass); } return FALSE; } /** * mono_field_get_name: * \param field the \c MonoClassField to act on * * \returns The name of the field. */ const char* mono_field_get_name (MonoClassField *field) { return field->name; } /** * mono_field_get_type_internal: * \param field the \c MonoClassField to act on * \returns \c MonoType of the field. */ MonoType* mono_field_get_type_internal (MonoClassField *field) { MonoType *type = field->type; if (type) return type; ERROR_DECL (error); type = mono_field_get_type_checked (field, error); if (!is_ok (error)) { mono_trace_warning (MONO_TRACE_TYPE, "Could not load field's type due to %s", mono_error_get_message (error)); mono_error_cleanup (error); } return type; } /** * mono_field_get_type: * \param field the \c MonoClassField to act on * \returns \c MonoType of the field. */ MonoType* mono_field_get_type (MonoClassField *field) { MonoType *type = field->type; if (type) return type; MONO_ENTER_GC_UNSAFE; type = mono_field_get_type_internal (field); MONO_EXIT_GC_UNSAFE; return type; } /** * mono_field_get_type_checked: * \param field the \c MonoClassField to act on * \param error used to return any error found while retrieving \p field type * * \returns \c MonoType of the field. */ MonoType* mono_field_get_type_checked (MonoClassField *field, MonoError *error) { error_init (error); MonoType *type = field->type; if (type) return type; mono_field_resolve_type (field, error); return field->type; } /** * mono_field_get_parent: * \param field the \c MonoClassField to act on * * \returns \c MonoClass where the field was defined. */ MonoClass* mono_field_get_parent (MonoClassField *field) { return m_field_get_parent (field); } /** * mono_field_get_flags; * \param field the \c MonoClassField to act on * * The metadata flags for a field are encoded using the * \c FIELD_ATTRIBUTE_* constants. See the \c tabledefs.h file for details. * * \returns The flags for the field. */ guint32 mono_field_get_flags (MonoClassField *field) { if (!field->type) return mono_field_resolve_flags (field); return field->type->attrs; } /** * mono_field_get_offset: * \param field the \c MonoClassField to act on * * \returns The field offset. 
*/ guint32 mono_field_get_offset (MonoClassField *field) { mono_class_setup_fields(m_field_get_parent (field)); return field->offset; } const char * mono_field_get_rva (MonoClassField *field, int swizzle) { guint32 rva; int field_index; MonoClass *klass = m_field_get_parent (field); MonoFieldDefaultValue *def_values; g_assert (field->type->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA); def_values = mono_class_get_field_def_values_with_swizzle (klass, swizzle); if (!def_values) { def_values = (MonoFieldDefaultValue *)mono_class_alloc0 (klass, sizeof (MonoFieldDefaultValue) * mono_class_get_field_count (klass)); mono_class_set_field_def_values_with_swizzle (klass, def_values, swizzle); } field_index = mono_field_get_index (field); if (!def_values [field_index].data) { const char *rvaData; if (!image_is_dynamic (m_class_get_image (klass))) { int first_field_idx = mono_class_get_first_field_idx (klass); mono_metadata_field_info (m_class_get_image (m_field_get_parent (field)), first_field_idx + field_index, NULL, &rva, NULL); if (!rva) g_warning ("field %s in %s should have RVA data, but hasn't", mono_field_get_name (field), m_class_get_name (m_field_get_parent (field))); rvaData = mono_image_rva_map (m_class_get_image (m_field_get_parent (field)), rva); } else { rvaData = mono_field_get_data (field); } if (rvaData == NULL) return NULL; if (swizzle != 1) { int dummy; int dataSizeInBytes = mono_type_size (field->type, &dummy); char *swizzledRvaData = mono_class_alloc0 (klass, dataSizeInBytes); #define SWAP(n) { \ guint ## n *data = (guint ## n *) swizzledRvaData; \ guint ## n *src = (guint ## n *) rvaData; \ int i, \ nEnt = (dataSizeInBytes / sizeof(guint ## n)); \ \ for (i = 0; i < nEnt; i++) { \ data[i] = read ## n (&src[i]); \ } \ } if (swizzle == 2) { SWAP (16); } else if (swizzle == 4) { SWAP (32); } else { SWAP (64); } #undef SWAP def_values [field_index].data = swizzledRvaData; } else { def_values [field_index].data = rvaData; } } return def_values [field_index].data; } /** * mono_field_get_data: * \param field the \c MonoClassField to act on * * \returns A pointer to the metadata constant value or to the field * data if it has an RVA flag. */ const char * mono_field_get_data (MonoClassField *field) { if (field->type->attrs & FIELD_ATTRIBUTE_HAS_DEFAULT) { MonoTypeEnum def_type; return mono_class_get_field_default_value (field, &def_type); } else if (field->type->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA) { return mono_field_get_rva (field, 1); } else { return NULL; } } /** * mono_property_get_name: * \param prop the \c MonoProperty to act on * \returns The name of the property */ const char* mono_property_get_name (MonoProperty *prop) { return prop->name; } /** * mono_property_get_set_method * \param prop the \c MonoProperty to act on. * \returns The setter method of the property, a \c MonoMethod. */ MonoMethod* mono_property_get_set_method (MonoProperty *prop) { return prop->set; } /** * mono_property_get_get_method * \param prop the MonoProperty to act on. * \returns The getter method of the property (A \c MonoMethod) */ MonoMethod* mono_property_get_get_method (MonoProperty *prop) { return prop->get; } /** * mono_property_get_parent: * \param prop the \c MonoProperty to act on. * \returns The \c MonoClass where the property was defined. */ MonoClass* mono_property_get_parent (MonoProperty *prop) { return prop->parent; } /** * mono_property_get_flags: * \param prop the \c MonoProperty to act on. * * The metadata flags for a property are encoded using the * \c PROPERTY_ATTRIBUTE_* constants. 
See the \c tabledefs.h file for details. * * \returns The flags for the property. */ guint32 mono_property_get_flags (MonoProperty *prop) { return prop->attrs; } /** * mono_event_get_name: * \param event the MonoEvent to act on * \returns The name of the event. */ const char* mono_event_get_name (MonoEvent *event) { return event->name; } /** * mono_event_get_add_method: * \param event The \c MonoEvent to act on. * \returns The \c add method for the event, a \c MonoMethod. */ MonoMethod* mono_event_get_add_method (MonoEvent *event) { return event->add; } /** * mono_event_get_remove_method: * \param event The \c MonoEvent to act on. * \returns The \c remove method for the event, a \c MonoMethod. */ MonoMethod* mono_event_get_remove_method (MonoEvent *event) { return event->remove; } /** * mono_event_get_raise_method: * \param event The \c MonoEvent to act on. * \returns The \c raise method for the event, a \c MonoMethod. */ MonoMethod* mono_event_get_raise_method (MonoEvent *event) { return event->raise; } /** * mono_event_get_parent: * \param event the MonoEvent to act on. * \returns The \c MonoClass where the event is defined. */ MonoClass* mono_event_get_parent (MonoEvent *event) { return event->parent; } /** * mono_event_get_flags * \param event the \c MonoEvent to act on. * * The metadata flags for an event are encoded using the * \c EVENT_* constants. See the \c tabledefs.h file for details. * * \returns The flags for the event. */ guint32 mono_event_get_flags (MonoEvent *event) { return event->attrs; } /** * mono_class_get_method_from_name: * \param klass where to look for the method * \param name name of the method * \param param_count number of parameters. -1 for any number. * * Obtains a \c MonoMethod with a given name and number of parameters. * It only works if there are no multiple signatures for any given method name. 
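*
* Example (illustrative only; looks up the parameterless \c ToString method):
*
*   MonoMethod *to_string = mono_class_get_method_from_name (klass, "ToString", 0);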
*/ MonoMethod * mono_class_get_method_from_name (MonoClass *klass, const char *name, int param_count) { MonoMethod *result; MONO_ENTER_GC_UNSAFE; ERROR_DECL (error); result = mono_class_get_method_from_name_checked (klass, name, param_count, 0, error); mono_error_cleanup (error); MONO_EXIT_GC_UNSAFE; return result; } MonoMethod* mono_find_method_in_metadata (MonoClass *klass, const char *name, int param_count, int flags) { MonoImage *klass_image = m_class_get_image (klass); MonoMethod *res = NULL; int i; /* Search directly in the metadata to avoid calling setup_methods () */ int first_idx = mono_class_get_first_method_idx (klass); int mcount = mono_class_get_method_count (klass); for (i = 0; i < mcount; ++i) { ERROR_DECL (error); guint32 cols [MONO_METHOD_SIZE]; MonoMethod *method; MonoMethodSignature *sig; /* first_idx points into the methodptr table */ mono_metadata_decode_table_row (klass_image, MONO_TABLE_METHOD, first_idx + i, cols, MONO_METHOD_SIZE); if (!strcmp (mono_metadata_string_heap (klass_image, cols [MONO_METHOD_NAME]), name)) { method = mono_get_method_checked (klass_image, MONO_TOKEN_METHOD_DEF | (first_idx + i + 1), klass, NULL, error); if (!method) { mono_error_cleanup (error); /* FIXME don't swallow the error */ continue; } if (param_count == -1) { res = method; break; } sig = mono_method_signature_checked (method, error); if (!sig) { mono_error_cleanup (error); /* FIXME don't swallow the error */ continue; } if (sig->param_count == param_count) { res = method; break; } } } if (G_UNLIKELY (!res && klass_image->has_updates)) { if (mono_class_has_metadata_update_info (klass)) { ERROR_DECL (error); res = mono_metadata_update_find_method_by_name (klass, name, param_count, flags, error); mono_error_cleanup (error); } } return res; } /** * mono_class_get_method_from_name_flags: * \param klass where to look for the method * \param name name of the method * \param param_count number of parameters. -1 for any number. * \param flags flags which must be set in the method * * Obtains a \c MonoMethod with a given name and number of parameters. * It only works if there are no multiple signatures for any given method name. */ MonoMethod * mono_class_get_method_from_name_flags (MonoClass *klass, const char *name, int param_count, int flags) { MonoMethod *method; MONO_ENTER_GC_UNSAFE; ERROR_DECL (error); method = mono_class_get_method_from_name_checked (klass, name, param_count, flags, error); mono_error_cleanup (error); MONO_EXIT_GC_UNSAFE; return method; } /** * mono_class_get_method_from_name_checked: * \param klass where to look for the method * \param name name of the method * \param param_count number of parameters. -1 for any number. * \param flags flags which must be set in the method * \param error * * Obtains a \c MonoMethod with a given name and number of parameters. * It only works if there are no multiple signatures for any given method name.
*/ MonoMethod * mono_class_get_method_from_name_checked (MonoClass *klass, const char *name, int param_count, int flags, MonoError *error) { MonoMethod *res = NULL; int i; mono_class_init_internal (klass); if (mono_class_is_ginst (klass) && !m_class_get_methods (klass)) { res = mono_class_get_method_from_name_checked (mono_class_get_generic_class (klass)->container_class, name, param_count, flags, error); if (res) res = mono_class_inflate_generic_method_full_checked (res, klass, mono_class_get_context (klass), error); return res; } if (m_class_get_methods (klass) || !MONO_CLASS_HAS_STATIC_METADATA (klass)) { mono_class_setup_methods (klass); /* We can't fail lookup of methods otherwise the runtime will burst in flames on all sort of places. See mono/tests/array_load_exception.il FIXME we should better report this error to the caller */ MonoMethod **klass_methods = m_class_get_methods (klass); gboolean has_updates = m_class_get_image (klass)->has_updates; if (!klass_methods && !has_updates) return NULL; int mcount = mono_class_get_method_count (klass); for (i = 0; i < mcount; ++i) { MonoMethod *method = klass_methods [i]; if (method->name[0] == name [0] && !strcmp (name, method->name) && (param_count == -1 || mono_method_signature_internal (method)->param_count == param_count) && ((method->flags & flags) == flags)) { res = method; break; } } if (G_UNLIKELY (!res && has_updates && mono_class_has_metadata_update_info (klass))) { res = mono_metadata_update_find_method_by_name (klass, name, param_count, flags, error); } } else { res = mono_find_method_in_metadata (klass, name, param_count, flags); } return res; } gboolean mono_class_has_failure (const MonoClass *klass) { g_assert (klass != NULL); return m_class_has_failure ((MonoClass*)klass) != 0; } /** * mono_class_set_type_load_failure: * \param klass class in which the failure was detected * \param fmt \c printf -style error message string. * * Collects detected failure information in the class for later processing. * The error is stored as a MonoErrorBoxed as with mono_error_set_type_load_class() * Note that only the first failure is kept. * * LOCKING: Acquires the loader lock. * * \returns FALSE if a failure was already set on the class, or TRUE otherwise. */ gboolean mono_class_set_type_load_failure (MonoClass *klass, const char * fmt, ...) { ERROR_DECL (prepare_error); va_list args; if (mono_class_has_failure (klass)) return FALSE; va_start (args, fmt); mono_error_vset_type_load_class (prepare_error, klass, fmt, args); va_end (args); MonoErrorBoxed *box = mono_error_box (prepare_error, m_class_get_image (klass)); mono_error_cleanup (prepare_error); return mono_class_set_failure (klass, box); } /** * mono_class_get_exception_for_failure: * \param klass class in which the failure was detected * * \returns a constructed MonoException that the caller can then throw * using mono_raise_exception - or NULL if no failure is present (or * doesn't result in an exception).
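*
* Typical use (a sketch; pairs with \c mono_class_has_failure above):
*
*   if (mono_class_has_failure (klass))
*           mono_raise_exception (mono_class_get_exception_for_failure (klass));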
*/ MonoException* mono_class_get_exception_for_failure (MonoClass *klass) { if (!mono_class_has_failure (klass)) return NULL; ERROR_DECL (unboxed_error); mono_error_set_for_class_failure (unboxed_error, klass); return mono_error_convert_to_exception (unboxed_error); } static gboolean is_nesting_type (MonoClass *outer_klass, MonoClass *inner_klass) { outer_klass = mono_class_get_generic_type_definition (outer_klass); inner_klass = mono_class_get_generic_type_definition (inner_klass); do { if (outer_klass == inner_klass) return TRUE; inner_klass = m_class_get_nested_in (inner_klass); } while (inner_klass); return FALSE; } MonoClass * mono_class_get_generic_type_definition (MonoClass *klass) { MonoGenericClass *gklass = mono_class_try_get_generic_class (klass); return gklass ? gklass->container_class : klass; } /* * Check if @klass is a subtype of @parent ignoring generic instantiations. * * Generic instantiations are ignored for all super types of @klass. * * Visibility checks ignoring generic instantiations. * * Class implementing interface visibility checks ignore generic instantiations */ gboolean mono_class_has_parent_and_ignore_generics (MonoClass *klass, MonoClass *parent) { int i; klass = mono_class_get_generic_type_definition (klass); parent = mono_class_get_generic_type_definition (parent); mono_class_setup_supertypes (klass); for (i = 0; i < m_class_get_idepth (klass); ++i) { if (parent == mono_class_get_generic_type_definition (m_class_get_supertypes (klass) [i])) return TRUE; } if (MONO_CLASS_IS_INTERFACE_INTERNAL (parent) && class_implements_interface_ignore_generics (klass, parent)) return TRUE; return FALSE; } /* * Subtype can only access parent members with family protection if the site object * is subclass of Subtype. For example: * class A { protected int x; } * class B : A { * void valid_access () { * B b; * b.x = 0; * } * void invalid_access () { * A a; * a.x = 0; * } * } * */ static gboolean is_valid_family_access (MonoClass *access_klass, MonoClass *member_klass, MonoClass *context_klass) { if (MONO_CLASS_IS_INTERFACE_INTERNAL (member_klass) && !MONO_CLASS_IS_INTERFACE_INTERNAL (access_klass)) { /* Can happen with default interface methods */ if (!class_implements_interface_ignore_generics (access_klass, member_klass)) return FALSE; } else if (member_klass != access_klass && MONO_CLASS_IS_INTERFACE_INTERNAL (member_klass) && MONO_CLASS_IS_INTERFACE_INTERNAL (access_klass)) { /* Can happen with default interface methods */ if (!mono_interface_implements_interface (access_klass, member_klass)) return FALSE; } else { if (!mono_class_has_parent_and_ignore_generics (access_klass, member_klass)) return FALSE; } if (context_klass == NULL) return TRUE; /*if access_klass is not member_klass context_klass must be type compat*/ if (access_klass != member_klass && !mono_class_has_parent_and_ignore_generics (context_klass, access_klass)) return FALSE; return TRUE; } static gboolean ignores_access_checks_to (MonoAssembly *accessing, MonoAssembly *accessed) { if (!accessing || !accessed) return FALSE; mono_assembly_load_friends (accessing); for (GSList *tmp = accessing->ignores_checks_assembly_names; tmp; tmp = tmp->next) { MonoAssemblyName *victim = (MonoAssemblyName *)tmp->data; if (!victim->name) continue; if (!g_ascii_strcasecmp (accessed->aname.name, victim->name)) return TRUE; } return FALSE; } static gboolean can_access_internals (MonoAssembly *accessing, MonoAssembly* accessed) { GSList *tmp; if (accessing == accessed) return TRUE; if (!accessed || !accessing) return FALSE; 
mono_assembly_load_friends (accessed); for (tmp = accessed->friend_assembly_names; tmp; tmp = tmp->next) { MonoAssemblyName *friend_ = (MonoAssemblyName *)tmp->data; /* Be conservative with checks */ if (!friend_->name) continue; if (g_ascii_strcasecmp (accessing->aname.name, friend_->name)) continue; if (friend_->public_key_token [0]) { if (!accessing->aname.public_key_token [0]) continue; if (!mono_public_tokens_are_equal (friend_->public_key_token, accessing->aname.public_key_token)) continue; } return TRUE; } return ignores_access_checks_to (accessing, accessed); } /* * If klass is a generic type or if it is derived from a generic type, return the * MonoClass of the generic definition * Returns NULL if not found */ static MonoClass* get_generic_definition_class (MonoClass *klass) { while (klass) { MonoGenericClass *gklass = mono_class_try_get_generic_class (klass); if (gklass && gklass->container_class) return gklass->container_class; klass = m_class_get_parent (klass); } return NULL; } static gboolean can_access_instantiation (MonoClass *access_klass, MonoGenericInst *ginst) { int i; for (i = 0; i < ginst->type_argc; ++i) { MonoType *type = ginst->type_argv[i]; switch (type->type) { case MONO_TYPE_SZARRAY: if (!can_access_type (access_klass, type->data.klass)) return FALSE; break; case MONO_TYPE_ARRAY: if (!can_access_type (access_klass, type->data.array->eklass)) return FALSE; break; case MONO_TYPE_PTR: if (!can_access_type (access_klass, mono_class_from_mono_type_internal (type->data.type))) return FALSE; break; case MONO_TYPE_CLASS: case MONO_TYPE_VALUETYPE: case MONO_TYPE_GENERICINST: if (!can_access_type (access_klass, mono_class_from_mono_type_internal (type))) return FALSE; default: break; } } return TRUE; } static gboolean can_access_type (MonoClass *access_klass, MonoClass *member_klass) { int access_level; if (access_klass == member_klass) return TRUE; MonoAssembly *access_klass_assembly = m_class_get_image (access_klass)->assembly; MonoAssembly *member_klass_assembly = m_class_get_image (member_klass)->assembly; if (m_class_get_element_class (access_klass) && !m_class_is_enumtype (access_klass)) { access_klass = m_class_get_element_class (access_klass); access_klass_assembly = m_class_get_image (access_klass)->assembly; } if (m_class_get_element_class (member_klass) && !m_class_is_enumtype (member_klass)) { member_klass = m_class_get_element_class (member_klass); member_klass_assembly = m_class_get_image (member_klass)->assembly; } access_level = mono_class_get_flags (member_klass) & TYPE_ATTRIBUTE_VISIBILITY_MASK; if (mono_type_is_generic_argument (m_class_get_byval_arg (member_klass))) return TRUE; if (mono_class_is_ginst (member_klass) && !can_access_instantiation (access_klass, mono_class_get_generic_class (member_klass)->context.class_inst)) return FALSE; if (is_nesting_type (access_klass, member_klass) || (m_class_get_nested_in (access_klass) && is_nesting_type (m_class_get_nested_in (access_klass), member_klass))) return TRUE; /*Non nested type with nested visibility. 
We just fail it.*/ if (access_level >= TYPE_ATTRIBUTE_NESTED_PRIVATE && access_level <= TYPE_ATTRIBUTE_NESTED_FAM_OR_ASSEM && m_class_get_nested_in (member_klass) == NULL) return FALSE; MonoClass *member_klass_nested_in = m_class_get_nested_in (member_klass); switch (access_level) { case TYPE_ATTRIBUTE_NOT_PUBLIC: return can_access_internals (access_klass_assembly, member_klass_assembly); case TYPE_ATTRIBUTE_PUBLIC: return TRUE; case TYPE_ATTRIBUTE_NESTED_PUBLIC: return member_klass_nested_in && can_access_type (access_klass, member_klass_nested_in); case TYPE_ATTRIBUTE_NESTED_PRIVATE: if (is_nesting_type (member_klass, access_klass) && member_klass_nested_in && can_access_type (access_klass, member_klass_nested_in)) return TRUE; return ignores_access_checks_to (access_klass_assembly, member_klass_assembly); case TYPE_ATTRIBUTE_NESTED_FAMILY: return mono_class_has_parent_and_ignore_generics (access_klass, m_class_get_nested_in (member_klass)); case TYPE_ATTRIBUTE_NESTED_ASSEMBLY: return can_access_internals (access_klass_assembly, member_klass_assembly) && member_klass_nested_in && can_access_type (access_klass, member_klass_nested_in); case TYPE_ATTRIBUTE_NESTED_FAM_AND_ASSEM: return can_access_internals (access_klass_assembly, m_class_get_image (member_klass_nested_in)->assembly) && mono_class_has_parent_and_ignore_generics (access_klass, member_klass_nested_in); case TYPE_ATTRIBUTE_NESTED_FAM_OR_ASSEM: return can_access_internals (access_klass_assembly, m_class_get_image (member_klass_nested_in)->assembly) || mono_class_has_parent_and_ignore_generics (access_klass, member_klass_nested_in); } return FALSE; } /* FIXME: check visibility of type, too */ static gboolean can_access_member (MonoClass *access_klass, MonoClass *member_klass, MonoClass* context_klass, int access_level) { MonoClass *member_generic_def; MonoAssembly *access_klass_assembly = m_class_get_image (access_klass)->assembly; MonoGenericClass *access_gklass = mono_class_try_get_generic_class (access_klass); if (((access_gklass && access_gklass->container_class) || mono_class_is_gtd (access_klass)) && (member_generic_def = get_generic_definition_class (member_klass))) { MonoClass *access_container; if (mono_class_is_gtd (access_klass)) access_container = access_klass; else access_container = access_gklass->container_class; if (can_access_member (access_container, member_generic_def, context_klass, access_level)) return TRUE; } MonoImage *member_klass_image = m_class_get_image (member_klass); /* Partition I 8.5.3.2 */ /* the access level values are the same for fields and methods */ switch (access_level) { case FIELD_ATTRIBUTE_COMPILER_CONTROLLED: /* same compilation unit */ return m_class_get_image (access_klass) == member_klass_image; case FIELD_ATTRIBUTE_PRIVATE: return (access_klass == member_klass) || ignores_access_checks_to (access_klass_assembly, member_klass_image->assembly); case FIELD_ATTRIBUTE_FAM_AND_ASSEM: if (is_valid_family_access (access_klass, member_klass, context_klass) && can_access_internals (access_klass_assembly, member_klass_image->assembly)) return TRUE; return FALSE; case FIELD_ATTRIBUTE_ASSEMBLY: return can_access_internals (access_klass_assembly, member_klass_image->assembly); case FIELD_ATTRIBUTE_FAMILY: if (is_valid_family_access (access_klass, member_klass, context_klass)) return TRUE; return FALSE; case FIELD_ATTRIBUTE_FAM_OR_ASSEM: if (is_valid_family_access (access_klass, member_klass, context_klass)) return TRUE; return can_access_internals (access_klass_assembly, 
member_klass_image->assembly); case FIELD_ATTRIBUTE_PUBLIC: return TRUE; } return FALSE; } /** * mono_method_can_access_field: * \param method Method that will attempt to access the field * \param field the field to access * * Used to determine if a method is allowed to access the specified field. * * \returns TRUE if the given \p method is allowed to access the \p field while following * the accessibility rules of the CLI. */ gboolean mono_method_can_access_field (MonoMethod *method, MonoClassField *field) { /* FIXME: check all overlapping fields */ int can = can_access_member (method->klass, m_field_get_parent (field), NULL, mono_field_get_type_internal (field)->attrs & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK); if (!can) { MonoClass *nested = m_class_get_nested_in (method->klass); while (nested) { can = can_access_member (nested, m_field_get_parent (field), NULL, mono_field_get_type_internal (field)->attrs & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK); if (can) return TRUE; nested = m_class_get_nested_in (nested); } } return can; } static MonoMethod* mono_method_get_method_definition (MonoMethod *method) { while (method->is_inflated) method = ((MonoMethodInflated*)method)->declaring; return method; } /** * mono_method_can_access_method: * \param method Method that will attempt to access the other method * \param called the method that we want to probe for accessibility. * * Used to determine if the \p method is allowed to access the specified \p called method. * * \returns TRUE if the given \p method is allowed to invoke the \p called while following * the accessibility rules of the CLI. */ gboolean mono_method_can_access_method (MonoMethod *method, MonoMethod *called) { method = mono_method_get_method_definition (method); called = mono_method_get_method_definition (called); return mono_method_can_access_method_full (method, called, NULL); } /* * mono_method_can_access_method_full: * @method: The caller method * @called: The called method * @context_klass: The static type on stack of the owner @called object used * * This function must be used with instance calls, as they have more strict family accessibility. * It can be used with static methods, but context_klass should be NULL. 
* * Returns: TRUE if the caller has proper visibility and accessibility to @called */ gboolean mono_method_can_access_method_full (MonoMethod *method, MonoMethod *called, MonoClass *context_klass) { /* Wrappers are exempt from access checks */ if (method->wrapper_type != MONO_WRAPPER_NONE || called->wrapper_type != MONO_WRAPPER_NONE) return TRUE; MonoClass *access_class = method->klass; MonoClass *member_class = called->klass; int can = can_access_member (access_class, member_class, context_klass, called->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK); if (!can) { MonoClass *nested = m_class_get_nested_in (access_class); while (nested) { can = can_access_member (nested, member_class, context_klass, called->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK); if (can) break; nested = m_class_get_nested_in (nested); } } if (!can) return FALSE; can = can_access_type (access_class, member_class); if (!can) { MonoClass *nested = m_class_get_nested_in (access_class); while (nested) { can = can_access_type (nested, member_class); if (can) break; nested = m_class_get_nested_in (nested); } } if (!can) return FALSE; if (called->is_inflated) { MonoMethodInflated * infl = (MonoMethodInflated*)called; if (infl->context.method_inst && !can_access_instantiation (access_class, infl->context.method_inst)) return FALSE; } return TRUE; } /* * mono_method_can_access_field_full: * @method: The caller method * @field: The accessed field * @context_klass: The static type on stack of the owner @field object used * * This function must be used with instance fields, as they have more strict family accessibility. * It can be used with static fields, but context_klass should be NULL. * * Returns: TRUE if the caller has proper visibility and accessibility to @field */ gboolean mono_method_can_access_field_full (MonoMethod *method, MonoClassField *field, MonoClass *context_klass) { MonoClass *access_class = method->klass; MonoClass *member_class = m_field_get_parent (field); /* FIXME: check all overlapping fields */ int can = can_access_member (access_class, member_class, context_klass, field->type->attrs & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK); if (!can) { MonoClass *nested = m_class_get_nested_in (access_class); while (nested) { can = can_access_member (nested, member_class, context_klass, field->type->attrs & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK); if (can) break; nested = m_class_get_nested_in (nested); } } if (!can) return FALSE; can = can_access_type (access_class, member_class); if (!can) { MonoClass *nested = m_class_get_nested_in (access_class); while (nested) { can = can_access_type (nested, member_class); if (can) break; nested = m_class_get_nested_in (nested); } } if (!can) return FALSE; return TRUE; } /* * mono_class_can_access_class: * @source_class: The source class * @target_class: The accessed class * * This function returns TRUE if @target_class is visible to @source_class * * Returns: TRUE if the source has proper visibility and accessibility to the target */ gboolean mono_class_can_access_class (MonoClass *source_class, MonoClass *target_class) { return can_access_type (source_class, target_class); } /** * mono_type_is_valid_enum_basetype: * \param type The MonoType to check * \returns TRUE if the type can be used as the basetype of an enum */ gboolean mono_type_is_valid_enum_basetype (MonoType * type) { switch (type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_BOOLEAN: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I8: case MONO_TYPE_U8: case
MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_R8: case MONO_TYPE_R4: return TRUE; default: return FALSE; } } /** * mono_class_is_valid_enum: * \param klass An enum class to be validated * * This method verifies the required properties an enum should have. * * FIXME: TypeBuilder enums are allowed to implement interfaces, but since they cannot have methods, only empty interfaces are possible * FIXME: enum types are not allowed to have a cctor, but mono_reflection_create_runtime_class sets has_cctor to 1 for all types * FIXME: TypeBuilder enums can have any kind of static fields, but the spec is very explicit about that (P II 14.3) * * \returns TRUE if the given enum class is valid */ gboolean mono_class_is_valid_enum (MonoClass *klass) { MonoClassField * field; gpointer iter = NULL; gboolean found_base_field = FALSE; g_assert (m_class_is_enumtype (klass)); MonoClass *klass_parent = m_class_get_parent (klass); /* we cannot test against mono_defaults.enum_class, or mcs won't be able to compile the System namespace*/ if (!klass_parent || strcmp (m_class_get_name (klass_parent), "Enum") || strcmp (m_class_get_name_space (klass_parent), "System") ) { return FALSE; } if (!mono_class_is_auto_layout (klass)) return FALSE; while ((field = mono_class_get_fields_internal (klass, &iter))) { if (!(field->type->attrs & FIELD_ATTRIBUTE_STATIC)) { if (found_base_field) return FALSE; found_base_field = TRUE; if (!mono_type_is_valid_enum_basetype (field->type)) return FALSE; } } if (!found_base_field) return FALSE; if (mono_class_get_method_count (klass) > 0) return FALSE; return TRUE; } gboolean mono_generic_class_is_generic_type_definition (MonoGenericClass *gklass) { return gklass->context.class_inst == mono_class_get_generic_container (gklass->container_class)->context.class_inst; } void mono_field_resolve_type (MonoClassField *field, MonoError *error) { MonoClass *klass = m_field_get_parent (field); MonoImage *image = m_class_get_image (klass); MonoClass *gtd = mono_class_is_ginst (klass) ?
mono_class_get_generic_type_definition (klass) : NULL; MonoType *ftype; int field_idx; if (G_UNLIKELY (m_field_is_from_update (field))) { field_idx = -1; } else { field_idx = field - m_class_get_fields (klass); } error_init (error); if (gtd) { g_assert (field_idx != -1); MonoClassField *gfield = &m_class_get_fields (gtd) [field_idx]; MonoType *gtype = mono_field_get_type_checked (gfield, error); if (!is_ok (error)) { char *full_name = mono_type_get_full_name (gtd); mono_class_set_type_load_failure (klass, "Could not load generic type of field '%s:%s' (%d) due to: %s", full_name, gfield->name, field_idx, mono_error_get_message (error)); g_free (full_name); } ftype = mono_class_inflate_generic_type_no_copy (image, gtype, mono_class_get_context (klass), error); if (!is_ok (error)) { char *full_name = mono_type_get_full_name (klass); mono_class_set_type_load_failure (klass, "Could not load instantiated type of field '%s:%s' (%d) due to: %s", full_name, field->name, field_idx, mono_error_get_message (error)); g_free (full_name); } } else { const char *sig; guint32 cols [MONO_FIELD_SIZE]; MonoGenericContainer *container = NULL; int idx; if (G_UNLIKELY (m_field_is_from_update (field))) { idx = mono_metadata_update_get_field_idx (field) - 1; } else { idx = mono_class_get_first_field_idx (klass) + field_idx; } /*FIXME, in theory we do not lazy load SRE fields*/ g_assert (!image_is_dynamic (image)); if (mono_class_is_gtd (klass)) { container = mono_class_get_generic_container (klass); } else if (gtd) { container = mono_class_get_generic_container (gtd); g_assert (container); } /* first_field_idx and idx point into the fieldptr table */ mono_metadata_decode_table_row (image, MONO_TABLE_FIELD, idx, cols, MONO_FIELD_SIZE); sig = mono_metadata_blob_heap (image, cols [MONO_FIELD_SIGNATURE]); mono_metadata_decode_value (sig, &sig); /* FIELD signature == 0x06 */ g_assert (*sig == 0x06); ftype = mono_metadata_parse_type_checked (image, container, cols [MONO_FIELD_FLAGS], FALSE, sig + 1, &sig, error); if (!ftype) { char *full_name = mono_type_get_full_name (klass); mono_class_set_type_load_failure (klass, "Could not load type of field '%s:%s' (%d) due to: %s", full_name, field->name, field_idx, mono_error_get_message (error)); g_free (full_name); } } mono_memory_barrier (); field->type = ftype; } static guint32 mono_field_resolve_flags (MonoClassField *field) { MonoClass *klass = m_field_get_parent (field); MonoImage *image = m_class_get_image (klass); MonoClass *gtd = mono_class_is_ginst (klass) ? mono_class_get_generic_type_definition (klass) : NULL; int field_idx = field - m_class_get_fields (klass); if (gtd) { MonoClassField *gfield = &m_class_get_fields (gtd) [field_idx]; return mono_field_get_flags (gfield); } else { int idx = mono_class_get_first_field_idx (klass) + field_idx; /*FIXME, in theory we do not lazy load SRE fields*/ g_assert (!image_is_dynamic (image)); return mono_metadata_decode_table_row_col (image, MONO_TABLE_FIELD, idx, MONO_FIELD_FLAGS); } } /** * mono_class_get_fields_lazy: * \param klass the MonoClass to act on * * This routine is an iterator routine for retrieving the fields in a class. * Only minimal information about fields is loaded. Accessors must be used * for each MonoClassField returned. * * You must pass a gpointer that points to zero and is treated as an opaque handle to * iterate over all of the elements. When no more values are * available, the return value is NULL. * * \returns a \c MonoClassField* on each iteration, or NULL when no more fields are available.
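 *
 * Typical iteration pattern (editor's sketch; "klass" is an assumed,
 * already-loaded MonoClass pointer):
 *
 *   gpointer iter = NULL;
 *   MonoClassField *field;
 *   while ((field = mono_class_get_fields_lazy (klass, &iter))) {
 *       // only use accessors here; the field may be only minimally loaded
 *   }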
*/ MonoClassField* mono_class_get_fields_lazy (MonoClass* klass, gpointer *iter) { MonoClassField* field; if (!iter) return NULL; if (!*iter) { mono_class_setup_basic_field_info (klass); MonoClassField *klass_fields = m_class_get_fields (klass); if (!klass_fields) return NULL; /* start from the first */ if (mono_class_get_field_count (klass)) { *iter = &klass_fields [0]; return (MonoClassField *)*iter; } else { /* no fields */ return NULL; } } field = (MonoClassField *)*iter; field++; if (field < &m_class_get_fields (klass) [mono_class_get_field_count (klass)]) { *iter = field; return (MonoClassField *)*iter; } return NULL; } char* mono_class_full_name (MonoClass *klass) { return mono_type_full_name (m_class_get_byval_arg (klass)); } /* Declare all shared lazy type lookup functions */ GENERATE_TRY_GET_CLASS_WITH_CACHE (safehandle, "System.Runtime.InteropServices", "SafeHandle") /** * mono_method_get_base_method: * \param method a method * \param definition if true, get the definition * \param error set on failure * * Given a virtual method associated with a subclass, return the corresponding * method from an ancestor. If \p definition is FALSE, returns the method in the * superclass of the given method. If \p definition is TRUE, return the method * in the ancestor class where it was first declared. The type arguments will * be inflated in the ancestor classes. If the method is not associated with a * class, or isn't virtual, returns the method itself. On failure returns NULL * and sets \p error. */ MonoMethod* mono_method_get_base_method (MonoMethod *method, gboolean definition, MonoError *error) { MonoClass *klass, *parent; MonoGenericContext *generic_inst = NULL; MonoMethod *result = NULL; int slot; if (method->klass == NULL) return method; if (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_CLASS_IS_INTERFACE_INTERNAL (method->klass) || method->flags & METHOD_ATTRIBUTE_NEW_SLOT) return method; slot = mono_method_get_vtable_slot (method); if (slot == -1) return method; klass = method->klass; if (mono_class_is_gtd (klass)) { /* If we get a GTD like Foo`2, look instead at its instantiation with its own generic params: Foo`2<!0, !1>. */ /* In particular we want generic_inst to be initialized to <!0, * !1> so that we can inflate parent classes correctly as we go * up the class hierarchy. */ MonoType *ty = mono_class_gtd_get_canonical_inst (klass); g_assert (ty->type == MONO_TYPE_GENERICINST); MonoGenericClass *gklass = ty->data.generic_class; generic_inst = mono_generic_class_get_context (gklass); klass = gklass->container_class; } else if (mono_class_is_ginst (klass)) { generic_inst = mono_class_get_context (klass); klass = mono_class_get_generic_class (klass)->container_class; } retry: if (definition) { /* At the end of the loop, klass points to the eldest class that has this virtual function slot. */ for (parent = m_class_get_parent (klass); parent != NULL; parent = m_class_get_parent (parent)) { /* on entry, klass is either a plain old non-generic class and generic_inst == NULL or klass is the generic container class and generic_inst is the instantiation. when we go to the parent, if the parent is an open constructed type, we need to replace the type parameters by the definitions from the generic_inst, and then take it apart again into the klass and the generic_inst. For cases like this: class C<T> : B<T, int> { public override void Foo () { ... } } class B<U,V> : A<HashMap<U,V>> { public override void Foo () { ... } } class A<X> { public virtual void Foo () { ...
} } if at each iteration the parent isn't open, we can skip inflating it. if at some iteration the parent isn't generic (after possible inflation), we set generic_inst to NULL; */ MonoGenericContext *parent_inst = NULL; if (mono_class_is_open_constructed_type (m_class_get_byval_arg (parent))) { parent = mono_class_inflate_generic_class_checked (parent, generic_inst, error); return_val_if_nok (error, NULL); } if (mono_class_is_ginst (parent)) { parent_inst = mono_class_get_context (parent); parent = mono_class_get_generic_class (parent)->container_class; } mono_class_setup_vtable (parent); if (m_class_get_vtable_size (parent) <= slot) break; klass = parent; generic_inst = parent_inst; } } else { /* When we get here, possibly after a retry, if generic_inst is * set, then the class must be a gtd */ g_assert (generic_inst == NULL || mono_class_is_gtd (klass)); klass = m_class_get_parent (klass); if (!klass) return method; if (mono_class_is_open_constructed_type (m_class_get_byval_arg (klass))) { klass = mono_class_inflate_generic_class_checked (klass, generic_inst, error); return_val_if_nok (error, NULL); generic_inst = NULL; } if (mono_class_is_ginst (klass)) { generic_inst = mono_class_get_context (klass); klass = mono_class_get_generic_class (klass)->container_class; } } if (generic_inst) { klass = mono_class_inflate_generic_class_checked (klass, generic_inst, error); return_val_if_nok (error, NULL); generic_inst = NULL; } if (klass == method->klass) return method; /* This is possible if definition == FALSE. * Do it here to be really sure we don't read invalid memory. */ if (slot >= m_class_get_vtable_size (klass)) return method; mono_class_setup_vtable (klass); result = m_class_get_vtable (klass) [slot]; if (result == NULL) { /* It is an abstract method */ gboolean found = FALSE; gpointer iter = NULL; while ((result = mono_class_get_methods (klass, &iter))) { if (result->slot == slot) { found = TRUE; break; } } /* found might be FALSE if we looked in an abstract class * that doesn't override an abstract method of its * parent: * abstract class Base { * public abstract void Foo (); * } * abstract class Derived : Base { } * class Child : Derived { * public override void Foo () { } * } * * if m was Child.Foo and we ask for the base method, * then we get here with klass == Derived and found == FALSE */ /* but it shouldn't be the case that we're looking * for the definition and didn't find a result; the * loop above should've taken us as far as we could * go! */ g_assert (!(definition && !found)); if (!found) goto retry; } g_assert (result != NULL); return result; } gboolean mono_method_is_constructor (MonoMethod *method) { return ((method->flags & CTOR_REQUIRED_FLAGS) == CTOR_REQUIRED_FLAGS && !(method->flags & CTOR_INVALID_FLAGS) && !strcmp (".ctor", method->name)); } gboolean mono_class_has_default_constructor (MonoClass *klass, gboolean public_only) { MonoMethod *method; int i; mono_class_setup_methods (klass); if (mono_class_has_failure (klass)) return FALSE; int mcount = mono_class_get_method_count (klass); MonoMethod **klass_methods = m_class_get_methods (klass); for (i = 0; i < mcount; ++i) { method = klass_methods [i]; if (mono_method_is_constructor (method) && mono_method_signature_internal (method) && mono_method_signature_internal (method)->param_count == 0 && (!public_only || (method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC)) return TRUE; } return FALSE; }
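/* Editor's illustrative sketch (not part of the original source): how the two
 * helpers above combine when deciding whether a type can be activated through
 * its parameterless constructor. "klass" is an assumed, already-loaded
 * MonoClass pointer; real callers also need error handling around setup.
 *
 *   if (mono_class_has_default_constructor (klass, TRUE))
 *       ;  // a public .ctor taking no arguments exists and may be invoked
 */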
/** * \file * Class management for the Mono runtime * * Author: * Miguel de Icaza ([email protected]) * * Copyright 2001-2003 Ximian, Inc (http://www.ximian.com) * Copyright 2004-2009 Novell, Inc (http://www.novell.com) * Copyright 2012 Xamarin Inc (http://www.xamarin.com) * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #include <config.h> #ifdef HAVE_ALLOCA_H #include <alloca.h> #endif #include <glib.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #include <mono/metadata/image.h> #include <mono/metadata/image-internals.h> #include <mono/metadata/assembly.h> #include <mono/metadata/assembly-internals.h> #include <mono/metadata/exception-internals.h> #include <mono/metadata/metadata.h> #include <mono/metadata/metadata-internals.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/tokentype.h> #include <mono/metadata/class-init.h> #include <mono/metadata/class-internals.h> #include <mono/metadata/object.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/mono-endian.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/reflection.h> #include <mono/metadata/exception.h> #include <mono/metadata/attrdefs.h> #include <mono/metadata/gc-internals.h> #include <mono/metadata/mono-debug.h> #include <mono/metadata/metadata-update.h> #include <mono/utils/mono-counters.h> #include <mono/utils/mono-string.h> #include <mono/utils/mono-error-internals.h> #include <mono/utils/mono-logger-internals.h> #include <mono/utils/mono-memory-model.h> #include <mono/utils/atomic.h> #include <mono/utils/unlocked.h> #include <mono/utils/bsearch.h> #include <mono/utils/checked-build.h> MonoStats mono_stats; /* Statistics */ extern gint32 mono_inflated_methods_size; /* Function supplied by the runtime to find classes by name using information from the AOT file */ static MonoGetClassFromName get_class_from_name = NULL; static gboolean can_access_type (MonoClass *access_klass, MonoClass *member_klass); static char* mono_assembly_name_from_token (MonoImage *image, guint32 type_token); static guint32 mono_field_resolve_flags (MonoClassField *field); static MonoClass * mono_class_from_name_checked_aux (MonoImage *image, const char* name_space, const char *name, GHashTable* visited_images, gboolean case_sensitive, MonoError *error); GENERATE_GET_CLASS_WITH_CACHE (valuetype, "System", "ValueType") GENERATE_TRY_GET_CLASS_WITH_CACHE (handleref, "System.Runtime.InteropServices", "HandleRef") #define CTOR_REQUIRED_FLAGS (METHOD_ATTRIBUTE_SPECIAL_NAME | METHOD_ATTRIBUTE_RT_SPECIAL_NAME) #define CTOR_INVALID_FLAGS (METHOD_ATTRIBUTE_STATIC) // define to print types whenever custom modifiers are appended during inflation #undef DEBUG_INFLATE_CMODS static MonoImage * mono_method_get_image (MonoMethod *method) { return m_class_get_image (method->klass); } /** * mono_class_from_typeref: * \param image a MonoImage * \param type_token a TypeRef token * * Creates the \c MonoClass* structure representing the type defined by * the typeref token valid inside \p image. * \returns The \c MonoClass* representing the typeref token, or NULL if it could * not be loaded. 
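 *
 * Editor's sketch of a typical call site ("image" and "token" are assumed
 * inputs; "token" must be a MONO_TOKEN_TYPE_REF token):
 *
 *   MonoClass *klass = mono_class_from_typeref (image, token);
 *   if (!klass)
 *       ;  // the typeref could not be resolved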
*/ MonoClass * mono_class_from_typeref (MonoImage *image, guint32 type_token) { ERROR_DECL (error); MonoClass *klass = mono_class_from_typeref_checked (image, type_token, error); g_assert (is_ok (error)); /*FIXME proper error handling*/ return klass; } /** * mono_class_from_typeref_checked: * \param image a MonoImage * \param type_token a TypeRef token * \param error error return code, if any. * * Creates the \c MonoClass* structure representing the type defined by * the typeref token valid inside \p image. * * \returns The \c MonoClass* representing the typeref token, NULL if it could * not be loaded with the \p error value filled with the information about the * error. */ MonoClass * mono_class_from_typeref_checked (MonoImage *image, guint32 type_token, MonoError *error) { guint32 cols [MONO_TYPEREF_SIZE]; MonoTableInfo *t = &image->tables [MONO_TABLE_TYPEREF]; guint32 idx; const char *name, *nspace; MonoClass *res = NULL; MonoImage *module; error_init (error); mono_metadata_decode_row (t, (type_token&0xffffff)-1, cols, MONO_TYPEREF_SIZE); name = mono_metadata_string_heap (image, cols [MONO_TYPEREF_NAME]); nspace = mono_metadata_string_heap (image, cols [MONO_TYPEREF_NAMESPACE]); idx = cols [MONO_TYPEREF_SCOPE] >> MONO_RESOLUTION_SCOPE_BITS; switch (cols [MONO_TYPEREF_SCOPE] & MONO_RESOLUTION_SCOPE_MASK) { case MONO_RESOLUTION_SCOPE_MODULE: /* LAMESPEC The spec says that a null module resolution scope should go through the exported type table. This is not the observed behavior of existing implementations. The defacto behavior is that it's just a typedef in disguise. */ /* a typedef in disguise */ res = mono_class_from_name_checked (image, nspace, name, error); goto done; case MONO_RESOLUTION_SCOPE_MODULEREF: module = mono_image_load_module_checked (image, idx, error); if (module) res = mono_class_from_name_checked (module, nspace, name, error); goto done; case MONO_RESOLUTION_SCOPE_TYPEREF: { MonoClass *enclosing; GList *tmp; if (idx == mono_metadata_token_index (type_token)) { mono_error_set_bad_image (error, image, "Image with self-referencing typeref token %08x.", type_token); return NULL; } enclosing = mono_class_from_typeref_checked (image, MONO_TOKEN_TYPE_REF | idx, error); return_val_if_nok (error, NULL); GList *nested_classes = mono_class_get_nested_classes_property (enclosing); if (m_class_is_nested_classes_inited (enclosing) && nested_classes) { /* Micro-optimization: don't scan the metadata tables if enclosing is already inited */ for (tmp = nested_classes; tmp; tmp = tmp->next) { res = (MonoClass *)tmp->data; if (strcmp (m_class_get_name (res), name) == 0) return res; } } else { MonoImage *enclosing_image = m_class_get_image (enclosing); guint32 enclosing_type_token = m_class_get_type_token (enclosing); /* Don't call mono_class_init_internal as we might've been called by it recursively */ int i = mono_metadata_nesting_typedef (enclosing_image, enclosing_type_token, 1); while (i) { guint32 class_nested = mono_metadata_decode_row_col (&enclosing_image->tables [MONO_TABLE_NESTEDCLASS], i - 1, MONO_NESTED_CLASS_NESTED); guint32 string_offset = mono_metadata_decode_row_col (&enclosing_image->tables [MONO_TABLE_TYPEDEF], class_nested - 1, MONO_TYPEDEF_NAME); const char *nname = mono_metadata_string_heap (enclosing_image, string_offset); if (strcmp (nname, name) == 0) return mono_class_create_from_typedef (enclosing_image, MONO_TOKEN_TYPE_DEF | class_nested, error); i = mono_metadata_nesting_typedef (enclosing_image, enclosing_type_token, i + 1); } } g_warning ("TypeRef 
ResolutionScope not yet handled (%d) for %s.%s in image %s", idx, nspace, name, image->name); goto done; } case MONO_RESOLUTION_SCOPE_ASSEMBLYREF: break; } if (mono_metadata_table_bounds_check (image, MONO_TABLE_ASSEMBLYREF, idx)) { mono_error_set_bad_image (error, image, "Image with invalid assemblyref token %08x.", idx); return NULL; } if (!image->references || !image->references [idx - 1]) mono_assembly_load_reference (image, idx - 1); g_assert (image->references [idx - 1]); /* If the assembly did not load, register this as a type load exception */ if (image->references [idx - 1] == REFERENCE_MISSING){ MonoAssemblyName aname; memset (&aname, 0, sizeof (MonoAssemblyName)); char *human_name; mono_assembly_get_assemblyref (image, idx - 1, &aname); human_name = mono_stringify_assembly_name (&aname); mono_error_set_simple_file_not_found (error, human_name); g_free (human_name); return NULL; } res = mono_class_from_name_checked (image->references [idx - 1]->image, nspace, name, error); done: /* Generic case, should be avoided for when a better error is possible. */ if (!res && is_ok (error)) { char *name = mono_class_name_from_token (image, type_token); char *assembly = mono_assembly_name_from_token (image, type_token); mono_error_set_type_load_name (error, name, assembly, "Could not resolve type with token %08x from typeref (expected class '%s' in assembly '%s')", type_token, name, assembly); } return res; } static void * mono_image_memdup (MonoImage *image, void *data, guint size) { void *res = mono_image_alloc (image, size); memcpy (res, data, size); return res; } /* Copy everything mono_metadata_free_array free. */ MonoArrayType * mono_dup_array_type (MonoImage *image, MonoArrayType *a) { if (image) { a = (MonoArrayType *)mono_image_memdup (image, a, sizeof (MonoArrayType)); if (a->sizes) a->sizes = (int *)mono_image_memdup (image, a->sizes, a->numsizes * sizeof (int)); if (a->lobounds) a->lobounds = (int *)mono_image_memdup (image, a->lobounds, a->numlobounds * sizeof (int)); } else { a = (MonoArrayType *)g_memdup (a, sizeof (MonoArrayType)); if (a->sizes) a->sizes = (int *)g_memdup (a->sizes, a->numsizes * sizeof (int)); if (a->lobounds) a->lobounds = (int *)g_memdup (a->lobounds, a->numlobounds * sizeof (int)); } return a; } /* Copy everything mono_metadata_free_method_signature free. 
*/ MonoMethodSignature* mono_metadata_signature_deep_dup (MonoImage *image, MonoMethodSignature *sig) { int i; sig = mono_metadata_signature_dup_full (image, sig); sig->ret = mono_metadata_type_dup (image, sig->ret); for (i = 0; i < sig->param_count; ++i) sig->params [i] = mono_metadata_type_dup (image, sig->params [i]); return sig; } static void _mono_type_get_assembly_name (MonoClass *klass, GString *str) { MonoAssembly *ta = m_class_get_image (klass)->assembly; char *name; name = mono_stringify_assembly_name (&ta->aname); g_string_append_printf (str, ", %s", name); g_free (name); } static void mono_type_name_check_byref (MonoType *type, GString *str) { if (m_type_is_byref (type)) g_string_append_c (str, '&'); } static char* escape_special_chars (const char* identifier) { size_t id_len = strlen (identifier); // Assume the worst case, and thus only allocate once char *res = g_malloc (id_len * 2 + 1); char *res_ptr = res; for (const char *s = identifier; *s != 0; s++) { switch (*s) { case ',': case '+': case '&': case '*': case '[': case ']': case '\\': *res_ptr++ = '\\'; break; } *res_ptr++ = *s; } *res_ptr = '\0'; return res; } /** * mono_identifier_escape_type_name_chars: * \param identifier the display name of a mono type * * \returns The name in external form, that is, with escaping backslashes. * * The displayed form of an identifier has the characters ,+&*[]\ * that have special meaning in type names escaped with a preceding * backslash (\) character. */ char* mono_identifier_escape_type_name_chars (const char* identifier) { if (!identifier) return NULL; // If the string has any special characters escape the whole thing, otherwise just return the input for (const char *s = identifier; *s != 0; s++) { switch (*s) { case ',': case '+': case '&': case '*': case '[': case ']': case '\\': return escape_special_chars (identifier); } } return g_strdup (identifier); } static void mono_type_get_name_recurse (MonoType *type, GString *str, gboolean is_recursed, MonoTypeNameFormat format) { MonoClass *klass; switch (type->type) { case MONO_TYPE_ARRAY: { int i, rank = type->data.array->rank; MonoTypeNameFormat nested_format; nested_format = format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED ? MONO_TYPE_NAME_FORMAT_FULL_NAME : format; mono_type_get_name_recurse ( m_class_get_byval_arg (type->data.array->eklass), str, FALSE, nested_format); g_string_append_c (str, '['); if (rank == 1) g_string_append_c (str, '*'); else if (rank > 64) // Only taken in an error path, runtime will not load arrays of more than 32 dimensions g_string_append_printf (str, "%d", rank); else for (i = 1; i < rank; i++) g_string_append_c (str, ','); g_string_append_c (str, ']'); mono_type_name_check_byref (type, str); if (format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED) _mono_type_get_assembly_name (type->data.array->eklass, str); break; } case MONO_TYPE_SZARRAY: { MonoTypeNameFormat nested_format; nested_format = format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED ? MONO_TYPE_NAME_FORMAT_FULL_NAME : format; mono_type_get_name_recurse ( m_class_get_byval_arg (type->data.klass), str, FALSE, nested_format); g_string_append (str, "[]"); mono_type_name_check_byref (type, str); if (format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED) _mono_type_get_assembly_name (type->data.klass, str); break; } case MONO_TYPE_PTR: { MonoTypeNameFormat nested_format; nested_format = format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED ?
MONO_TYPE_NAME_FORMAT_FULL_NAME : format; mono_type_get_name_recurse ( type->data.type, str, FALSE, nested_format); g_string_append_c (str, '*'); mono_type_name_check_byref (type, str); if (format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED) _mono_type_get_assembly_name (mono_class_from_mono_type_internal (type->data.type), str); break; } case MONO_TYPE_VAR: case MONO_TYPE_MVAR: if (!mono_generic_param_name (type->data.generic_param)) g_string_append_printf (str, "%s%d", type->type == MONO_TYPE_VAR ? "!" : "!!", type->data.generic_param->num); else g_string_append (str, mono_generic_param_name (type->data.generic_param)); mono_type_name_check_byref (type, str); break; default: klass = mono_class_from_mono_type_internal (type); if (m_class_get_nested_in (klass)) { mono_type_get_name_recurse ( m_class_get_byval_arg (m_class_get_nested_in (klass)), str, TRUE, format); if (format == MONO_TYPE_NAME_FORMAT_IL) g_string_append_c (str, '.'); else g_string_append_c (str, '+'); } else if (*m_class_get_name_space (klass)) { const char *klass_name_space = m_class_get_name_space (klass); if (format == MONO_TYPE_NAME_FORMAT_IL) g_string_append (str, klass_name_space); else { char *escaped = mono_identifier_escape_type_name_chars (klass_name_space); g_string_append (str, escaped); g_free (escaped); } g_string_append_c (str, '.'); } const char *klass_name = m_class_get_name (klass); if (format == MONO_TYPE_NAME_FORMAT_IL) { const char *s = strchr (klass_name, '`'); gssize len = s ? (s - klass_name) : (gssize)strlen (klass_name); g_string_append_len (str, klass_name, len); } else { char *escaped = mono_identifier_escape_type_name_chars (klass_name); g_string_append (str, escaped); g_free (escaped); } if (is_recursed) break; if (mono_class_is_ginst (klass)) { MonoGenericClass *gclass = mono_class_get_generic_class (klass); MonoGenericInst *inst = gclass->context.class_inst; MonoTypeNameFormat nested_format; int i; nested_format = format == MONO_TYPE_NAME_FORMAT_FULL_NAME ? 
MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED : format; if (format == MONO_TYPE_NAME_FORMAT_IL) g_string_append_c (str, '<'); else g_string_append_c (str, '['); for (i = 0; i < inst->type_argc; i++) { MonoType *t = inst->type_argv [i]; if (i) g_string_append_c (str, ','); if ((nested_format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED) && (t->type != MONO_TYPE_VAR) && (type->type != MONO_TYPE_MVAR)) g_string_append_c (str, '['); mono_type_get_name_recurse (inst->type_argv [i], str, FALSE, nested_format); if ((nested_format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED) && (t->type != MONO_TYPE_VAR) && (type->type != MONO_TYPE_MVAR)) g_string_append_c (str, ']'); } if (format == MONO_TYPE_NAME_FORMAT_IL) g_string_append_c (str, '>'); else g_string_append_c (str, ']'); } else if (mono_class_is_gtd (klass) && (format != MONO_TYPE_NAME_FORMAT_FULL_NAME) && (format != MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED)) { int i; if (format == MONO_TYPE_NAME_FORMAT_IL) g_string_append_c (str, '<'); else g_string_append_c (str, '['); for (i = 0; i < mono_class_get_generic_container (klass)->type_argc; i++) { if (i) g_string_append_c (str, ','); g_string_append (str, mono_generic_container_get_param_info (mono_class_get_generic_container (klass), i)->name); } if (format == MONO_TYPE_NAME_FORMAT_IL) g_string_append_c (str, '>'); else g_string_append_c (str, ']'); } mono_type_name_check_byref (type, str); if ((format == MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED) && (type->type != MONO_TYPE_VAR) && (type->type != MONO_TYPE_MVAR)) _mono_type_get_assembly_name (klass, str); break; } } /** * mono_type_get_name_full: * \param type a type * \param format the format for the return string. * * * \returns The string representation in a number of formats: * * if \p format is \c MONO_TYPE_NAME_FORMAT_REFLECTION, the return string is * returned in the format required by \c System.Reflection, this is the * inverse of mono_reflection_parse_type(). * * if \p format is \c MONO_TYPE_NAME_FORMAT_IL, it returns a syntax that can * be used by the IL assembler. * * if \p format is \c MONO_TYPE_NAME_FORMAT_FULL_NAME * * if \p format is \c MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED */ char* mono_type_get_name_full (MonoType *type, MonoTypeNameFormat format) { GString* result; result = g_string_new (""); mono_type_get_name_recurse (type, result, FALSE, format); return g_string_free (result, FALSE); } /** * mono_type_get_full_name: * \param class a class * * \returns The string representation for type as required by System.Reflection. * The inverse of mono_reflection_parse_type(). */ char * mono_type_get_full_name (MonoClass *klass) { return mono_type_get_name_full (m_class_get_byval_arg (klass), MONO_TYPE_NAME_FORMAT_REFLECTION); } /** * mono_type_get_name: * \param type a type * \returns The string representation for type as it would be represented in IL code. */ char* mono_type_get_name (MonoType *type) { return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_IL); } /** * mono_type_get_underlying_type: * \param type a type * \returns The \c MonoType for the underlying integer type if \p type * is an enum and byref is false, otherwise the type itself. 
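 *
 * Editor's sketch: for an enum declared as "enum Color : byte", the call
 * below yields the MonoType for System.Byte ("enum_type" is an assumed,
 * non-byref enum MonoType pointer):
 *
 *   MonoType *underlying = mono_type_get_underlying_type (enum_type);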
*/ MonoType* mono_type_get_underlying_type (MonoType *type) { if (type->type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (type->data.klass) && !m_type_is_byref (type)) return mono_class_enum_basetype_internal (type->data.klass); if (type->type == MONO_TYPE_GENERICINST && m_class_is_enumtype (type->data.generic_class->container_class) && !m_type_is_byref (type)) return mono_class_enum_basetype_internal (type->data.generic_class->container_class); return type; } /** * mono_class_is_open_constructed_type: * \param type a type * * \returns TRUE if type represents a generics open constructed type. * IOW, not all type parameters required for the instantiation have * been provided or it's a generic type definition. * * An open constructed type means it's a non realizable type. Not to * be mixed up with an abstract type - we can't cast or dispatch to * an open type, for example. */ gboolean mono_class_is_open_constructed_type (MonoType *t) { switch (t->type) { case MONO_TYPE_VAR: case MONO_TYPE_MVAR: return TRUE; case MONO_TYPE_SZARRAY: return mono_class_is_open_constructed_type (m_class_get_byval_arg (t->data.klass)); case MONO_TYPE_ARRAY: return mono_class_is_open_constructed_type (m_class_get_byval_arg (t->data.array->eklass)); case MONO_TYPE_PTR: return mono_class_is_open_constructed_type (t->data.type); case MONO_TYPE_GENERICINST: return t->data.generic_class->context.class_inst->is_open; case MONO_TYPE_CLASS: case MONO_TYPE_VALUETYPE: return mono_class_is_gtd (t->data.klass); default: return FALSE; } } /* This is a simple function to catch the most common bad instances of generic types. Specially those that might lead to further failures in the runtime. */ gboolean mono_type_is_valid_generic_argument (MonoType *type) { switch (type->type) { case MONO_TYPE_VOID: case MONO_TYPE_TYPEDBYREF: return FALSE; case MONO_TYPE_VALUETYPE: return !m_class_is_byreflike (type->data.klass); default: return TRUE; } } static gboolean can_inflate_gparam_with (MonoGenericParam *gparam, MonoType *type) { if (!mono_type_is_valid_generic_argument (type)) return FALSE; #if 0 /* Avoid inflating gparams with valuetype constraints with ref types during gsharing */ MonoGenericParamInfo *info = mono_generic_param_info (gparam); if (info && (info->flags & GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT)) { if (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) { MonoGenericParam *inst_gparam = type->data.generic_param; if (inst_gparam->gshared_constraint && inst_gparam->gshared_constraint->type == MONO_TYPE_OBJECT) return FALSE; } } #endif return TRUE; } static MonoType* inflate_generic_custom_modifiers (MonoImage *image, const MonoType *type, MonoGenericContext *context, MonoError *error); static MonoType* inflate_generic_type (MonoImage *image, MonoType *type, MonoGenericContext *context, MonoError *error) { gboolean changed = FALSE; error_init (error); /* C++/CLI (and some Roslyn tests) constructs method signatures like: * void .CL1`1.Test(!0 modopt(System.Nullable`1<!0>)) * where !0 has a custom modifier which itself mentions the type variable. * So we need to potentially inflate the modifiers. 
*/ if (type->has_cmods) { MonoType *new_type = inflate_generic_custom_modifiers (image, type, context, error); return_val_if_nok (error, NULL); if (new_type != NULL) { type = new_type; changed = TRUE; } } switch (type->type) { case MONO_TYPE_MVAR: { MonoType *nt; int num = mono_type_get_generic_param_num (type); MonoGenericInst *inst = context->method_inst; if (!inst) { if (!changed) return NULL; else return type; } MonoGenericParam *gparam = type->data.generic_param; if (num >= inst->type_argc) { const char *pname = mono_generic_param_name (gparam); mono_error_set_bad_image (error, image, "MVAR %d (%s) cannot be expanded in this context with %d instantiations", num, pname ? pname : "", inst->type_argc); return NULL; } if (!can_inflate_gparam_with (gparam, inst->type_argv [num])) { const char *pname = mono_generic_param_name (gparam); mono_error_set_bad_image (error, image, "MVAR %d (%s) cannot be expanded with type 0x%x", num, pname ? pname : "", inst->type_argv [num]->type); return NULL; } /* * Note that the VAR/MVAR cases are different from the rest. The other cases duplicate @type, * while the VAR/MVAR duplicates a type from the context. So, we need to ensure that the * ->byref__ and ->attrs from @type are propagated to the returned type. */ nt = mono_metadata_type_dup_with_cmods (image, inst->type_argv [num], type); nt->byref__ = type->byref__; nt->attrs = type->attrs; return nt; } case MONO_TYPE_VAR: { MonoType *nt; int num = mono_type_get_generic_param_num (type); MonoGenericInst *inst = context->class_inst; if (!inst) { if (!changed) return NULL; else return type; } MonoGenericParam *gparam = type->data.generic_param; if (num >= inst->type_argc) { const char *pname = mono_generic_param_name (gparam); mono_error_set_bad_image (error, image, "VAR %d (%s) cannot be expanded in this context with %d instantiations", num, pname ? pname : "", inst->type_argc); return NULL; } if (!can_inflate_gparam_with (gparam, inst->type_argv [num])) { const char *pname = mono_generic_param_name (gparam); mono_error_set_bad_image (error, image, "VAR %d (%s) cannot be expanded with type 0x%x", num, pname ? 
pname : "", inst->type_argv [num]->type); return NULL; } #ifdef DEBUG_INFLATE_CMODS gboolean append_cmods; append_cmods = FALSE; if (type->has_cmods && inst->type_argv[num]->has_cmods) { char *tname = mono_type_full_name (type); char *vname = mono_type_full_name (inst->type_argv[num]); printf ("\n\n\tsubstitution for '%s' with '%s' yields...\n", tname, vname); g_free (tname); g_free (vname); append_cmods = TRUE; } #endif nt = mono_metadata_type_dup_with_cmods (image, inst->type_argv [num], type); nt->byref__ = type->byref__ || inst->type_argv[num]->byref__; nt->attrs = type->attrs; #ifdef DEBUG_INFLATE_CMODS if (append_cmods) { char *ntname = mono_type_full_name (nt); printf ("\tyields '%s'\n\n\n", ntname); g_free (ntname); } #endif return nt; } case MONO_TYPE_SZARRAY: { MonoClass *eclass = type->data.klass; MonoType *nt, *inflated = inflate_generic_type (NULL, m_class_get_byval_arg (eclass), context, error); if ((!inflated && !changed) || !is_ok (error)) return NULL; if (!inflated) return type; nt = mono_metadata_type_dup (image, type); nt->data.klass = mono_class_from_mono_type_internal (inflated); mono_metadata_free_type (inflated); return nt; } case MONO_TYPE_ARRAY: { MonoClass *eclass = type->data.array->eklass; MonoType *nt, *inflated = inflate_generic_type (NULL, m_class_get_byval_arg (eclass), context, error); if ((!inflated && !changed) || !is_ok (error)) return NULL; if (!inflated) return type; nt = mono_metadata_type_dup (image, type); nt->data.array->eklass = mono_class_from_mono_type_internal (inflated); mono_metadata_free_type (inflated); return nt; } case MONO_TYPE_GENERICINST: { MonoGenericClass *gclass = type->data.generic_class; MonoGenericInst *inst; MonoType *nt; if (!gclass->context.class_inst->is_open) { if (!changed) return NULL; else return type; } inst = mono_metadata_inflate_generic_inst (gclass->context.class_inst, context, error); return_val_if_nok (error, NULL); if (inst != gclass->context.class_inst) gclass = mono_metadata_lookup_generic_class (gclass->container_class, inst, gclass->is_dynamic); if (gclass == type->data.generic_class) { if (!changed) return NULL; else return type; } nt = mono_metadata_type_dup (image, type); nt->data.generic_class = gclass; return nt; } case MONO_TYPE_CLASS: case MONO_TYPE_VALUETYPE: { MonoClass *klass = type->data.klass; MonoGenericContainer *container = mono_class_try_get_generic_container (klass); MonoGenericInst *inst; MonoGenericClass *gclass = NULL; MonoType *nt; if (!container) { if (!changed) return NULL; else return type; } /* We can't use context->class_inst directly, since it can have more elements */ inst = mono_metadata_inflate_generic_inst (container->context.class_inst, context, error); return_val_if_nok (error, NULL); if (inst == container->context.class_inst) { if (!changed) return NULL; else return type; } gclass = mono_metadata_lookup_generic_class (klass, inst, image_is_dynamic (m_class_get_image (klass))); nt = mono_metadata_type_dup (image, type); nt->type = MONO_TYPE_GENERICINST; nt->data.generic_class = gclass; return nt; } case MONO_TYPE_PTR: { MonoType *nt, *inflated = inflate_generic_type (image, type->data.type, context, error); if ((!inflated && !changed) || !is_ok (error)) return NULL; if (!inflated && changed) return type; nt = mono_metadata_type_dup (image, type); nt->data.type = inflated; return nt; } default: if (!changed) return NULL; else return type; } return NULL; } static MonoType* inflate_generic_custom_modifiers (MonoImage *image, const MonoType *type, MonoGenericContext *context, 
MonoError *error) { MonoType *result = NULL; g_assert (type->has_cmods); int count = mono_type_custom_modifier_count (type); gboolean changed = FALSE; /* Try not to blow up the stack. See comment on MONO_MAX_EXPECTED_CMODS. */ g_assert (count < MONO_MAX_EXPECTED_CMODS); size_t aggregate_size = mono_sizeof_aggregate_modifiers (count); MonoAggregateModContainer *candidate_mods = g_alloca (aggregate_size); memset (candidate_mods, 0, aggregate_size); candidate_mods->count = count; for (int i = 0; i < count; ++i) { gboolean required; MonoType *cmod_old = mono_type_get_custom_modifier (type, i, &required, error); goto_if_nok (error, leave); MonoType *cmod_new = inflate_generic_type (NULL, cmod_old, context, error); goto_if_nok (error, leave); if (cmod_new) changed = TRUE; candidate_mods->modifiers [i].required = required; candidate_mods->modifiers [i].type = cmod_new; } if (changed) { /* if we're going to make a new type, fill in any modifiers that weren't affected by inflation with copies of the original values. */ for (int i = 0; i < count; ++i) { if (candidate_mods->modifiers [i].type == NULL) { candidate_mods->modifiers [i].type = mono_metadata_type_dup (NULL, mono_type_get_custom_modifier (type, i, NULL, error)); /* it didn't error in the first loop, so should be ok now, too */ mono_error_assert_ok (error); } } } #ifdef DEBUG_INFLATE_CMODS if (changed) { char *full_name = mono_type_full_name ((MonoType*)type); printf ("\n\n\tcustom modifier on '%s' affected by substitution\n\n\n", full_name); g_free (full_name); } #endif if (changed) { MonoType *new_type = g_alloca (mono_sizeof_type_with_mods (count, TRUE)); /* first init just the non-modifier portion of new_type before populating the * new modifiers */ memcpy (new_type, type, MONO_SIZEOF_TYPE); mono_type_with_mods_init (new_type, count, TRUE); mono_type_set_amods (new_type, mono_metadata_get_canonical_aggregate_modifiers (candidate_mods)); result = mono_metadata_type_dup (image, new_type); } leave: for (int i = 0; i < count; ++i) { if (candidate_mods->modifiers [i].type) mono_metadata_free_type (candidate_mods->modifiers [i].type); } return result; } MonoGenericContext * mono_generic_class_get_context (MonoGenericClass *gclass) { return &gclass->context; } MonoGenericContext * mono_class_get_context (MonoClass *klass) { MonoGenericClass *gklass = mono_class_try_get_generic_class (klass); return gklass ? mono_generic_class_get_context (gklass) : NULL; } /* * mono_class_inflate_generic_type_with_mempool: * @mempool: a mempool * @type: a type * @context: a generics context * @error: error context * * The same as mono_class_inflate_generic_type, but allocates the MonoType * from mempool if it is non-NULL. If it is NULL, the MonoType is * allocated on the heap and is owned by the caller. * The returned type can potentially be the same as TYPE, so it should not be * modified by the caller, and it should be freed using mono_metadata_free_type ().
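 *
 * Editor's sketch ("image", "type" and "context" are assumed inputs; per the
 * note above, the result must be released with mono_metadata_free_type):
 *
 *   ERROR_DECL (error);
 *   MonoType *inflated = mono_class_inflate_generic_type_with_mempool (image, type, context, error);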
*/ MonoType* mono_class_inflate_generic_type_with_mempool (MonoImage *image, MonoType *type, MonoGenericContext *context, MonoError *error) { MonoType *inflated = NULL; error_init (error); if (context) inflated = inflate_generic_type (image, type, context, error); return_val_if_nok (error, NULL); if (!inflated) { MonoType *shared = mono_metadata_get_shared_type (type); if (shared && !type->has_cmods) { return shared; } else { return mono_metadata_type_dup (image, type); } } UnlockedIncrement (&mono_stats.inflated_type_count); return inflated; } /** * mono_class_inflate_generic_type: * \param type a type * \param context a generics context * \deprecated Please use \c mono_class_inflate_generic_type_checked instead * * If \p type is a generic type and \p context is not NULL, instantiate it using the * generics context \p context. * * \returns The instantiated type or a copy of \p type. The returned \c MonoType is allocated * on the heap and is owned by the caller. Returns NULL on error. */ MonoType* mono_class_inflate_generic_type (MonoType *type, MonoGenericContext *context) { ERROR_DECL (error); MonoType *result; result = mono_class_inflate_generic_type_checked (type, context, error); mono_error_cleanup (error); return result; } /* * mono_class_inflate_generic_type: * @type: a type * @context: a generics context * @error: error context to use * * If @type is a generic type and @context is not NULL, instantiate it using the * generics context @context. * * Returns: The instantiated type or a copy of @type. The returned MonoType is allocated * on the heap and is owned by the caller. */ MonoType* mono_class_inflate_generic_type_checked (MonoType *type, MonoGenericContext *context, MonoError *error) { return mono_class_inflate_generic_type_with_mempool (NULL, type, context, error); } /* * mono_class_inflate_generic_type_no_copy: * * Same as inflate_generic_type_with_mempool, but return TYPE if no inflation * was done. */ static MonoType* mono_class_inflate_generic_type_no_copy (MonoImage *image, MonoType *type, MonoGenericContext *context, MonoError *error) { MonoType *inflated = NULL; error_init (error); if (context) { inflated = inflate_generic_type (image, type, context, error); return_val_if_nok (error, NULL); } if (!inflated) return type; UnlockedIncrement (&mono_stats.inflated_type_count); return inflated; } /* * mono_class_inflate_generic_class: * * Inflate the class @gklass with @context. Set @error on failure. 
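 *
 * Editor's sketch: inflating a generic type definition such as List`1 with a
 * concrete context produces the corresponding instantiation ("gtd", "ctx" and
 * "error" are assumed inputs):
 *
 *   MonoClass *inst = mono_class_inflate_generic_class_checked (gtd, ctx, error);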
*/ MonoClass* mono_class_inflate_generic_class_checked (MonoClass *gklass, MonoGenericContext *context, MonoError *error) { MonoClass *res; MonoType *inflated; inflated = mono_class_inflate_generic_type_checked (m_class_get_byval_arg (gklass), context, error); return_val_if_nok (error, NULL); res = mono_class_from_mono_type_internal (inflated); mono_metadata_free_type (inflated); return res; } static MonoGenericContext inflate_generic_context (MonoGenericContext *context, MonoGenericContext *inflate_with, MonoError *error) { MonoGenericInst *class_inst = NULL; MonoGenericInst *method_inst = NULL; MonoGenericContext res = { NULL, NULL }; error_init (error); if (context->class_inst) { class_inst = mono_metadata_inflate_generic_inst (context->class_inst, inflate_with, error); if (!is_ok (error)) goto fail; } if (context->method_inst) { method_inst = mono_metadata_inflate_generic_inst (context->method_inst, inflate_with, error); if (!is_ok (error)) goto fail; } res.class_inst = class_inst; res.method_inst = method_inst; fail: return res; } /** * mono_class_inflate_generic_method: * \param method a generic method * \param context a generics context * * Instantiate the generic method \p method using the generics context \p context. * * \returns The new instantiated method */ MonoMethod * mono_class_inflate_generic_method (MonoMethod *method, MonoGenericContext *context) { ERROR_DECL (error); MonoMethod *res = mono_class_inflate_generic_method_full_checked (method, NULL, context, error); mono_error_assert_msg_ok (error, "Could not inflate generic method"); return res; } MonoMethod * mono_class_inflate_generic_method_checked (MonoMethod *method, MonoGenericContext *context, MonoError *error) { return mono_class_inflate_generic_method_full_checked (method, NULL, context, error); } static gboolean inflated_method_equal (gconstpointer a, gconstpointer b) { const MonoMethodInflated *ma = (const MonoMethodInflated *)a; const MonoMethodInflated *mb = (const MonoMethodInflated *)b; if (ma->declaring != mb->declaring) return FALSE; return mono_metadata_generic_context_equal (&ma->context, &mb->context); } static guint inflated_method_hash (gconstpointer a) { const MonoMethodInflated *ma = (const MonoMethodInflated *)a; return (mono_metadata_generic_context_hash (&ma->context) ^ mono_aligned_addr_hash (ma->declaring)); } static void free_inflated_method (MonoMethodInflated *imethod) { MonoMethod *method = (MonoMethod*)imethod; if (method->signature) mono_metadata_free_inflated_signature (method->signature); if (method->wrapper_type) g_free (((MonoMethodWrapper*)method)->method_data); g_free (method); } /** * mono_class_inflate_generic_method_full_checked: * Instantiate method \p method with the generic context \p context. * On failure returns NULL and sets \p error. * * BEWARE: All non-trivial fields are invalid, including klass, signature, and header. * Use mono_method_signature_internal () and mono_method_get_header () to get the correct values. 
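 *
 * Editor's sketch of the usual follow-up after inflation ("gmethod", "ctx"
 * and "error" are assumed inputs):
 *
 *   MonoMethod *inflated = mono_class_inflate_generic_method_full_checked (gmethod, NULL, ctx, error);
 *   MonoMethodSignature *sig = inflated ? mono_method_signature_internal (inflated) : NULL;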
*/ MonoMethod* mono_class_inflate_generic_method_full_checked (MonoMethod *method, MonoClass *klass_hint, MonoGenericContext *context, MonoError *error) { MonoMethod *result; MonoMethodInflated *iresult, *cached; MonoMethodSignature *sig; MonoGenericContext tmp_context; error_init (error); /* The `method' has already been instantiated before => we need to peel out the instantiation and create a new context */ while (method->is_inflated) { MonoGenericContext *method_context = mono_method_get_context (method); MonoMethodInflated *imethod = (MonoMethodInflated *) method; tmp_context = inflate_generic_context (method_context, context, error); return_val_if_nok (error, NULL); context = &tmp_context; if (mono_metadata_generic_context_equal (method_context, context)) return method; method = imethod->declaring; } /* * A method only needs to be inflated if the context has arguments for which it is * parametric. Eg: * * class Foo<T> { void Bar(); } - doesn't need to be inflated if only mvars are supplied * class Foo { void Bar<T> (); } - doesn't need to be inflated if only vars are supplied * */ if (!((method->is_generic && context->method_inst) || (mono_class_is_gtd (method->klass) && context->class_inst))) return method; iresult = g_new0 (MonoMethodInflated, 1); iresult->context = *context; iresult->declaring = method; if (!context->method_inst && method->is_generic) iresult->context.method_inst = mono_method_get_generic_container (method)->context.method_inst; if (!context->class_inst) { g_assert (!mono_class_is_ginst (iresult->declaring->klass)); if (mono_class_is_gtd (iresult->declaring->klass)) iresult->context.class_inst = mono_class_get_generic_container (iresult->declaring->klass)->context.class_inst; } /* This can happen with some callers like mono_object_get_virtual_method_internal () */ if (!mono_class_is_gtd (iresult->declaring->klass) && !mono_class_is_ginst (iresult->declaring->klass)) iresult->context.class_inst = NULL; MonoMemoryManager *mm = mono_metadata_get_mem_manager_for_method (iresult); // check cache mono_mem_manager_lock (mm); if (!mm->gmethod_cache) mm->gmethod_cache = g_hash_table_new_full (inflated_method_hash, inflated_method_equal, NULL, (GDestroyNotify)free_inflated_method); cached = (MonoMethodInflated *)g_hash_table_lookup (mm->gmethod_cache, iresult); mono_mem_manager_unlock (mm); if (cached) { g_free (iresult); return (MonoMethod*)cached; } UnlockedIncrement (&mono_stats.inflated_method_count); UnlockedAdd (&mono_inflated_methods_size, sizeof (MonoMethodInflated)); sig = mono_method_signature_internal (method); if (!sig) { char *name = mono_type_get_full_name (method->klass); mono_error_set_bad_image (error, mono_method_get_image (method), "Could not resolve signature of method %s:%s", name, method->name); g_free (name); goto fail; } if (sig->pinvoke) { memcpy (&iresult->method.pinvoke, method, sizeof (MonoMethodPInvoke)); } else { memcpy (&iresult->method.method, method, sizeof (MonoMethod)); } result = (MonoMethod *) iresult; result->is_inflated = TRUE; result->is_generic = FALSE; result->sre_method = FALSE; result->signature = NULL; if (method->wrapper_type) { MonoMethodWrapper *mw = (MonoMethodWrapper*)method; MonoMethodWrapper *resw = (MonoMethodWrapper*)result; int len = GPOINTER_TO_INT (((void**)mw->method_data) [0]); resw->method_data = (void **)g_malloc (sizeof (gpointer) * (len + 1)); memcpy (resw->method_data, mw->method_data, sizeof (gpointer) * (len + 1)); } if (iresult->context.method_inst) { MonoGenericInst *method_inst = iresult->context.method_inst; /*
Set the generic_container of the result to the generic_container of method */ MonoGenericContainer *generic_container = mono_method_get_generic_container (method); if (generic_container && method_inst == generic_container->context.method_inst) { result->is_generic = 1; mono_method_set_generic_container (result, generic_container); } /* Check that the method is not instantiated with any invalid types */ for (int i = 0; i < method_inst->type_argc; i++) { if (!mono_type_is_valid_generic_argument (method_inst->type_argv [i])) { mono_error_set_bad_image (error, mono_method_get_image (method), "MVAR %d cannot be expanded with type 0x%x", i, method_inst->type_argv [i]->type); goto fail; } } } if (klass_hint) { MonoGenericClass *gklass_hint = mono_class_try_get_generic_class (klass_hint); if (gklass_hint && (gklass_hint->container_class != method->klass || gklass_hint->context.class_inst != context->class_inst)) klass_hint = NULL; } if (mono_class_is_gtd (method->klass)) result->klass = klass_hint; if (!result->klass) { MonoType *inflated = inflate_generic_type (NULL, m_class_get_byval_arg (method->klass), context, error); if (!is_ok (error)) goto fail; result->klass = inflated ? mono_class_from_mono_type_internal (inflated) : method->klass; if (inflated) mono_metadata_free_type (inflated); } /* * FIXME: This should hold, but it doesn't: * * if (result->is_inflated && mono_method_get_context (result)->method_inst && * mono_method_get_context (result)->method_inst == mono_method_get_generic_container (((MonoMethodInflated*)result)->declaring)->context.method_inst) { * g_assert (result->is_generic); * } * * Fixing this here causes other things to break, hence a very * ugly hack in mini-trampolines.c - see * is_generic_method_definition(). */ // check cache mono_mem_manager_lock (mm); cached = (MonoMethodInflated *)g_hash_table_lookup (mm->gmethod_cache, iresult); if (!cached) { g_hash_table_insert (mm->gmethod_cache, iresult, iresult); iresult->owner = mm; cached = iresult; } mono_mem_manager_unlock (mm); return (MonoMethod*)cached; fail: g_free (iresult); return NULL; } /** * mono_get_inflated_method: * * Obsolete. We keep it around since it's mentioned in the public API. */ MonoMethod* mono_get_inflated_method (MonoMethod *method) { return method; } /* * mono_method_get_context_general: * @method: a method * @uninflated: handle uninflated methods? * * Returns the generic context of a method or NULL if it doesn't have * one. For an inflated method that's the context stored in the * method. Otherwise it's in the method's generic container or in the * generic container of the method's class. */ MonoGenericContext* mono_method_get_context_general (MonoMethod *method, gboolean uninflated) { if (method->is_inflated) { MonoMethodInflated *imethod = (MonoMethodInflated *) method; return &imethod->context; } if (!uninflated) return NULL; if (method->is_generic) return &(mono_method_get_generic_container (method)->context); if (mono_class_is_gtd (method->klass)) return &mono_class_get_generic_container (method->klass)->context; return NULL; } /* * mono_method_get_context: * @method: a method * * Returns the generic context for method if it's inflated, otherwise * NULL. */ MonoGenericContext* mono_method_get_context (MonoMethod *method) { return mono_method_get_context_general (method, FALSE); } /* * mono_method_get_generic_container: * * Returns the generic container of METHOD, which should be a generic method definition. * Returns NULL if METHOD is not a generic method definition. 
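 *
 * Editor's sketch ("method" is an assumed generic method definition):
 *
 *   MonoGenericContainer *container = mono_method_get_generic_container (method);
 *   int arity = container ? container->type_argc : 0;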
/*
 * LOCKING: Acquires the loader lock.
 */
MonoGenericContainer*
mono_method_get_generic_container (MonoMethod *method)
{
	MonoGenericContainer *container;

	if (!method->is_generic)
		return NULL;

	container = (MonoGenericContainer *)mono_image_property_lookup (mono_method_get_image (method), method, MONO_METHOD_PROP_GENERIC_CONTAINER);
	g_assert (container);

	return container;
}

/*
 * mono_method_set_generic_container:
 *
 *   Sets the generic container of METHOD to CONTAINER.
 * LOCKING: Acquires the image lock.
 */
void
mono_method_set_generic_container (MonoMethod *method, MonoGenericContainer* container)
{
	g_assert (method->is_generic);

	mono_image_property_insert (mono_method_get_image (method), method, MONO_METHOD_PROP_GENERIC_CONTAINER, container);
}

/**
 * mono_method_set_verification_success:
 *
 * Sets a bit indicating that the method has been verified.
 *
 * LOCKING: acquires the image lock.
 */
void
mono_method_set_verification_success (MonoMethod *method)
{
	g_assert (!method->is_inflated);

	mono_image_property_insert (mono_method_get_image (method), method, MONO_METHOD_PROP_VERIFICATION_SUCCESS, GUINT_TO_POINTER(1));
}

/**
 * mono_method_get_verification_success:
 *
 * Returns \c TRUE if the method has been verified successfully.
 *
 * LOCKING: acquires the image lock.
 */
gboolean
mono_method_get_verification_success (MonoMethod *method)
{
	if (method->is_inflated)
		method = ((MonoMethodInflated *)method)->declaring;

	gpointer value = mono_image_property_lookup (mono_method_get_image (method), method, MONO_METHOD_PROP_VERIFICATION_SUCCESS);

	return value != NULL;
}

/**
 * mono_method_lookup_infrequent_bits:
 *
 * Looks for an existing \c MonoMethodDefInfrequentBits struct associated with
 * this method definition. Unlike \c mono_method_get_infrequent_bits, this
 * does not allocate a new struct if one doesn't exist.
 *
 * LOCKING: Acquires the image lock
 */
const MonoMethodDefInfrequentBits*
mono_method_lookup_infrequent_bits (MonoMethod *method)
{
	g_assert (!method->is_inflated);

	return (const MonoMethodDefInfrequentBits*)mono_image_property_lookup (mono_method_get_image (method), method, MONO_METHOD_PROP_INFREQUENT_BITS);
}

/**
 * mono_method_get_infrequent_bits:
 *
 * Looks for an existing, or allocates a new \c MonoMethodDefInfrequentBits struct for this method definition.
 * Method must not be inflated.
 *
 * Unlike \c mono_method_lookup_infrequent_bits, this will allocate a new
 * struct if the method didn't have one.
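 *
 * Editorial note: an illustrative sketch follows (not part of the original
 * file); example_mark_verified is an assumed name, compiled out with #if 0.
 */
#if 0
static void
example_mark_verified (MonoMethod *method)
{
	/* The verification bit lives in the image property hash, keyed by the
	 * method definition, so inflated methods must not be passed to the setter. */
	if (!method->is_inflated && !mono_method_get_verification_success (method))
		mono_method_set_verification_success (method);
}
#endif
/*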
* * LOCKING: Acquires the image lock */ MonoMethodDefInfrequentBits * mono_method_get_infrequent_bits (MonoMethod *method) { g_assert (!method->is_inflated); MonoImage *image = mono_method_get_image (method); MonoMethodDefInfrequentBits *infrequent_bits = NULL; mono_image_lock (image); infrequent_bits = (MonoMethodDefInfrequentBits *)mono_image_property_lookup (image, method, MONO_METHOD_PROP_INFREQUENT_BITS); if (!infrequent_bits) { infrequent_bits = (MonoMethodDefInfrequentBits *)mono_image_alloc0 (image, sizeof (MonoMethodDefInfrequentBits)); mono_image_property_insert (image, method, MONO_METHOD_PROP_INFREQUENT_BITS, infrequent_bits); } mono_image_unlock (image); return infrequent_bits; } gboolean mono_method_get_is_reabstracted (MonoMethod *method) { if (method->is_inflated) method = ((MonoMethodInflated*)method)->declaring; const MonoMethodDefInfrequentBits *infrequent_bits = mono_method_lookup_infrequent_bits (method); return infrequent_bits != NULL && infrequent_bits->is_reabstracted; } gboolean mono_method_get_is_covariant_override_impl (MonoMethod *method) { if (method->is_inflated) method = ((MonoMethodInflated*)method)->declaring; const MonoMethodDefInfrequentBits *infrequent_bits = mono_method_lookup_infrequent_bits (method); return infrequent_bits != NULL && infrequent_bits->is_covariant_override_impl; } /** * mono_method_set_is_reabstracted: * * Sets the \c MonoMethodDefInfrequentBits:is_reabstracted bit for this method * definition. The bit means that the method is a default interface method * that used to have a default implementation in an ancestor interface, but is * now abstract once again. * * LOCKING: Assumes the loader lock is held */ void mono_method_set_is_reabstracted (MonoMethod *method) { mono_method_get_infrequent_bits (method)->is_reabstracted = 1; } /** * mono_method_set_is_covariant_override_impl: * * Sets the \c MonoMethodDefInfrequentBits:is_covariant_override_impl bit for * this method definition. The bit means that the method is an override with a * signature that is not equal to the signature of the method that it is * overriding. * * LOCKING: Assumes the loader lock is held */ void mono_method_set_is_covariant_override_impl (MonoMethod *method) { mono_method_get_infrequent_bits (method)->is_covariant_override_impl = 1; } /** * mono_class_find_enum_basetype: * \param class The enum class * * Determine the basetype of an enum by iterating through its fields. We do this * in a separate function since it is cheaper than calling mono_class_setup_fields. */ MonoType* mono_class_find_enum_basetype (MonoClass *klass, MonoError *error) { MonoGenericContainer *container = NULL; MonoImage *image = m_class_get_image (klass); const int top = mono_class_get_field_count (klass); int i, first_field_idx; g_assert (m_class_is_enumtype (klass)); error_init (error); container = mono_class_try_get_generic_container (klass); if (mono_class_is_ginst (klass)) { MonoClass *gklass = mono_class_get_generic_class (klass)->container_class; container = mono_class_get_generic_container (gklass); g_assert (container); } /* * Fetch all the field information. 
 */
	first_field_idx = mono_class_get_first_field_idx (klass);
	for (i = 0; i < top; i++){
		const char *sig;
		guint32 cols [MONO_FIELD_SIZE];
		int idx = first_field_idx + i;
		MonoType *ftype;

		/* first_field_idx and idx point into the fieldptr table */
		mono_metadata_decode_table_row (image, MONO_TABLE_FIELD, idx, cols, MONO_FIELD_SIZE);

		if (cols [MONO_FIELD_FLAGS] & FIELD_ATTRIBUTE_STATIC) //no need to decode static fields
			continue;

		sig = mono_metadata_blob_heap (image, cols [MONO_FIELD_SIGNATURE]);
		mono_metadata_decode_value (sig, &sig);
		/* FIELD signature == 0x06 */
		if (*sig != 0x06) {
			mono_error_set_bad_image (error, image, "Invalid field signature %x, expected 0x6 but got %x", cols [MONO_FIELD_SIGNATURE], *sig);
			goto fail;
		}

		ftype = mono_metadata_parse_type_checked (image, container, cols [MONO_FIELD_FLAGS], FALSE, sig + 1, &sig, error);
		if (!ftype)
			goto fail;

		if (mono_class_is_ginst (klass)) {
			//FIXME do we leak here?
			ftype = mono_class_inflate_generic_type_checked (ftype, mono_class_get_context (klass), error);
			if (!is_ok (error))
				goto fail;
			ftype->attrs = cols [MONO_FIELD_FLAGS];
		}

		return ftype;
	}
	mono_error_set_type_load_class (error, klass, "Could not find base type");

fail:
	return NULL;
}

/*
 * Checks for MonoClass::has_failure without resolving all MonoType's into MonoClass'es
 */
gboolean
mono_type_has_exceptions (MonoType *type)
{
	switch (type->type) {
	case MONO_TYPE_CLASS:
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_SZARRAY:
		return mono_class_has_failure (type->data.klass);
	case MONO_TYPE_ARRAY:
		return mono_class_has_failure (type->data.array->eklass);
	case MONO_TYPE_GENERICINST:
		return mono_class_has_failure (mono_class_create_generic_inst (type->data.generic_class));
	default:
		return FALSE;
	}
}

void
mono_error_set_for_class_failure (MonoError *oerror, const MonoClass *klass)
{
	g_assert (mono_class_has_failure (klass));
	MonoErrorBoxed *box = mono_class_get_exception_data ((MonoClass*)klass);
	mono_error_set_from_boxed (oerror, box);
}

/*
 * mono_class_alloc:
 *
 *   Allocate memory for data belonging to CLASS.
 */
gpointer
mono_class_alloc (MonoClass *klass, int size)
{
	return m_class_alloc (klass, size);
}

gpointer
(mono_class_alloc0) (MonoClass *klass, int size)
{
	return m_class_alloc0 (klass, size);
}

#define mono_class_new0(klass,struct_type, n_structs) \
 ((struct_type *) mono_class_alloc0 ((klass), ((gsize) sizeof (struct_type)) * ((gsize) (n_structs))))

/**
 * mono_class_set_type_load_failure_causedby_class:
 * \param klass the class that is failing
 * \param caused_by the class that caused the failure
 * \param msg Why \p klass is failing.
 *
 * If \p caused_by has a failure, sets a TypeLoadException failure on
 * \p klass with message "\p msg, due to: {\p caused_by message}".
 *
 * \returns TRUE if a failure was set, or FALSE if \p caused_by doesn't have a failure.
 */
gboolean
mono_class_set_type_load_failure_causedby_class (MonoClass *klass, const MonoClass *caused_by, const gchar* msg)
{
	if (mono_class_has_failure (caused_by)) {
		ERROR_DECL (cause_error);
		mono_error_set_for_class_failure (cause_error, caused_by);
		mono_class_set_type_load_failure (klass, "%s, due to: %s", msg, mono_error_get_message (cause_error));
		mono_error_cleanup (cause_error);
		return TRUE;
	} else {
		return FALSE;
	}
}

/*
 * mono_type_get_basic_type_from_generic:
 * @type: a type
 *
 * Returns a closed type corresponding to the possibly open type
 * passed to it.
 */
MonoType*
mono_type_get_basic_type_from_generic (MonoType *type)
{
	/* When we do generic sharing we let type variables stand for reference/primitive types. */
	if (!m_type_is_byref (type) && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR) &&
		(!type->data.generic_param->gshared_constraint || type->data.generic_param->gshared_constraint->type == MONO_TYPE_OBJECT))
		return mono_get_object_type ();
	return type;
}

/*
 * mono_class_get_method_by_index:
 *
 *   Returns klass->methods [index], initializing klass->methods if necessary.
 *
 * LOCKING: Acquires the loader lock.
 */
MonoMethod*
mono_class_get_method_by_index (MonoClass *klass, int index)
{
	ERROR_DECL (error);

	MonoGenericClass *gklass = mono_class_try_get_generic_class (klass);
	/* Avoid calling setup_methods () if possible */
	if (gklass && !m_class_get_methods (klass)) {
		MonoMethod *m;

		m = mono_class_inflate_generic_method_full_checked (
				m_class_get_methods (gklass->container_class) [index], klass, mono_class_get_context (klass), error);
		g_assert (is_ok (error)); /* FIXME don't swallow the error */
		/*
		 * If setup_methods () is called later for this class, no duplicates are created,
		 * since inflate_generic_method guarantees that only one instance of a method
		 * is created for each context.
		 */
		/*
		mono_class_setup_methods (klass);
		g_assert (m == klass->methods [index]);
		*/
		return m;
	} else {
		mono_class_setup_methods (klass);
		if (mono_class_has_failure (klass)) /*FIXME do proper error handling*/
			return NULL;
		g_assert (index >= 0 && index < mono_class_get_method_count (klass));
		return m_class_get_methods (klass) [index];
	}
}

/**
 * mono_class_get_inflated_method:
 * \param klass an inflated class
 * \param method a method of \p klass's generic definition
 * \param error set on error
 *
 * Given an inflated class \p klass and a method \p method which should be a
 * method of \p klass's generic definition, return the inflated method
 * corresponding to \p method.
 *
 * On failure sets \p error and returns NULL.
 */
MonoMethod*
mono_class_get_inflated_method (MonoClass *klass, MonoMethod *method, MonoError *error)
{
	MonoClass *gklass = mono_class_get_generic_class (klass)->container_class;
	int i, mcount;

	g_assert (method->klass == gklass);

	mono_class_setup_methods (gklass);
	if (mono_class_has_failure (gklass)) {
		mono_error_set_for_class_failure (error, gklass);
		return NULL;
	}

	MonoMethod **gklass_methods = m_class_get_methods (gklass);
	mcount = mono_class_get_method_count (gklass);
	for (i = 0; i < mcount; ++i) {
		if (gklass_methods [i] == method) {
			MonoMethod *inflated_method = NULL;
			MonoMethod **klass_methods = m_class_get_methods (klass);
			if (klass_methods) {
				inflated_method = klass_methods [i];
			} else {
				inflated_method = mono_class_inflate_generic_method_full_checked (gklass_methods [i], klass, mono_class_get_context (klass), error);
				return_val_if_nok (error, NULL);
			}
			g_assert (inflated_method);
			return inflated_method;
		}
	}

	g_assert_not_reached ();
}

/*
 * mono_class_get_vtable_entry:
 *
 *   Returns klass->vtable [offset], computing it if necessary. Returns NULL on failure.
 * LOCKING: Acquires the loader lock.
 */
MonoMethod*
mono_class_get_vtable_entry (MonoClass *klass, int offset)
{
	MonoMethod *m;

	if (m_class_get_rank (klass) == 1) {
		MonoClass *klass_parent = m_class_get_parent (klass);
		/*
		 * szarrays do not overwrite any methods of Array, so we can avoid
		 * initializing their vtables in some cases.
*/ mono_class_setup_vtable (klass_parent); if (offset < m_class_get_vtable_size (klass_parent)) return m_class_get_vtable (klass_parent) [offset]; } if (mono_class_is_ginst (klass)) { ERROR_DECL (error); MonoClass *gklass = mono_class_get_generic_class (klass)->container_class; mono_class_setup_vtable (gklass); m = m_class_get_vtable (gklass) [offset]; m = mono_class_inflate_generic_method_full_checked (m, klass, mono_class_get_context (klass), error); g_assert (is_ok (error)); /* FIXME don't swallow this error */ } else { mono_class_setup_vtable (klass); if (mono_class_has_failure (klass)) return NULL; m = m_class_get_vtable (klass) [offset]; } return m; } /* * mono_class_get_vtable_size: * * Return the vtable size for KLASS. */ int mono_class_get_vtable_size (MonoClass *klass) { mono_class_setup_vtable (klass); return m_class_get_vtable_size (klass); } static void collect_implemented_interfaces_aux (MonoClass *klass, GPtrArray **res, GHashTable **ifaces, MonoError *error) { int i; MonoClass *ic; mono_class_setup_interfaces (klass, error); return_if_nok (error); MonoClass **klass_interfaces = m_class_get_interfaces (klass); for (i = 0; i < m_class_get_interface_count (klass); i++) { ic = klass_interfaces [i]; if (*res == NULL) *res = g_ptr_array_new (); if (*ifaces == NULL) *ifaces = g_hash_table_new (NULL, NULL); if (g_hash_table_lookup (*ifaces, ic)) continue; /* A gparam is not an implemented interface for the purposes of * mono_class_get_implemented_interfaces */ if (mono_class_is_gparam (ic)) continue; g_ptr_array_add (*res, ic); g_hash_table_insert (*ifaces, ic, ic); mono_class_init_internal (ic); if (mono_class_has_failure (ic)) { mono_error_set_type_load_class (error, ic, "Error Loading class"); return; } collect_implemented_interfaces_aux (ic, res, ifaces, error); return_if_nok (error); } } GPtrArray* mono_class_get_implemented_interfaces (MonoClass *klass, MonoError *error) { GPtrArray *res = NULL; GHashTable *ifaces = NULL; collect_implemented_interfaces_aux (klass, &res, &ifaces, error); if (ifaces) g_hash_table_destroy (ifaces); if (!is_ok (error)) { if (res) g_ptr_array_free (res, TRUE); return NULL; } return res; } /*FIXME verify all callers if they should switch to mono_class_interface_offset_with_variance*/ int mono_class_interface_offset (MonoClass *klass, MonoClass *itf) { int i; MonoClass **klass_interfaces_packed = m_class_get_interfaces_packed (klass); for (i = m_class_get_interface_offsets_count (klass) -1 ; i >= 0 ; i-- ){ MonoClass *result = klass_interfaces_packed[i]; if (m_class_get_interface_id(result) == m_class_get_interface_id(itf)) { return m_class_get_interface_offsets_packed (klass) [i]; } } return -1; } /** * mono_class_interface_offset_with_variance: * * Return the interface offset of \p itf in \p klass. Sets \p non_exact_match to TRUE if the match required variance check * If \p itf is an interface with generic variant arguments, try to find the compatible one. * * Note that this function is responsible for resolving ambiguities. Right now we use whatever ordering interfaces_packed gives us. * * FIXME figure out MS disambiguation rules and fix this function. 
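 *
 * Editorial note: the sketch below is an illustrative addition, not part of
 * the original file. It shows the intended calling pattern — the exact
 * interface id is tried first and the variance-aware search is the fallback.
 * example_find_itf_slot and the direct vt->vtable indexing are assumptions,
 * compiled out with #if 0.
 */
#if 0
static gpointer
example_find_itf_slot (MonoVTable *vt, MonoClass *itf, int method_slot)
{
	gboolean variance_used = FALSE;
	int ioffset = mono_class_interface_offset_with_variance (vt->klass, itf, &variance_used);
	if (ioffset == -1)
		return NULL; /* vt->klass does not implement itf, not even variantly */
	return vt->vtable [ioffset + method_slot];
}
#endif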
int
mono_class_interface_offset_with_variance (MonoClass *klass, MonoClass *itf, gboolean *non_exact_match)
{
	int i = mono_class_interface_offset (klass, itf);
	*non_exact_match = FALSE;
	if (i >= 0)
		return i;

	int klass_interface_offsets_count = m_class_get_interface_offsets_count (klass);

	if (m_class_is_array_special_interface (itf) && m_class_get_rank (klass) < 2) {
		MonoClass *gtd = mono_class_get_generic_type_definition (itf);
		int found = -1;

		for (i = 0; i < klass_interface_offsets_count; i++) {
			if (mono_class_is_variant_compatible (itf, m_class_get_interfaces_packed (klass) [i], FALSE)) {
				found = i;
				*non_exact_match = TRUE;
				break;
			}
		}
		if (found != -1)
			return m_class_get_interface_offsets_packed (klass) [found];

		for (i = 0; i < klass_interface_offsets_count; i++) {
			if (mono_class_get_generic_type_definition (m_class_get_interfaces_packed (klass) [i]) == gtd) {
				found = i;
				*non_exact_match = TRUE;
				break;
			}
		}

		if (found == -1)
			return -1;

		return m_class_get_interface_offsets_packed (klass) [found];
	}

	if (!mono_class_has_variant_generic_params (itf))
		return -1;

	for (i = 0; i < klass_interface_offsets_count; i++) {
		if (mono_class_is_variant_compatible (itf, m_class_get_interfaces_packed (klass) [i], FALSE)) {
			*non_exact_match = TRUE;
			return m_class_get_interface_offsets_packed (klass) [i];
		}
	}

	return -1;
}

/*
 * mono_method_get_vtable_slot:
 *
 *   Returns method->slot, computing it if necessary. Return -1 on failure.
 * LOCKING: Acquires the loader lock.
 *
 * FIXME Use proper MonoError machinery here.
 */
int
mono_method_get_vtable_slot (MonoMethod *method)
{
	if (method->slot == -1) {
		mono_class_setup_vtable (method->klass);
		if (mono_class_has_failure (method->klass))
			return -1;
		if (method->slot == -1) {
			MonoClass *gklass;
			int i, mcount;

			if (!mono_class_is_ginst (method->klass)) {
				g_assert (method->is_inflated);
				return mono_method_get_vtable_slot (((MonoMethodInflated*)method)->declaring);
			}

			/* This can happen for abstract methods of generic instances due to the shortcut code in mono_class_setup_vtable_general (). */
			g_assert (mono_class_is_ginst (method->klass));
			gklass = mono_class_get_generic_class (method->klass)->container_class;
			mono_class_setup_methods (method->klass);
			MonoMethod **klass_methods = m_class_get_methods (method->klass);
			g_assert (klass_methods);
			mcount = mono_class_get_method_count (method->klass);
			for (i = 0; i < mcount; ++i) {
				if (klass_methods [i] == method)
					break;
			}
			g_assert (i < mcount);
			g_assert (m_class_get_methods (gklass));
			method->slot = m_class_get_methods (gklass) [i]->slot;
		}
		g_assert (method->slot != -1);
	}
	return method->slot;
}

/**
 * mono_method_get_vtable_index:
 * \param method a method
 *
 * Returns the index into the runtime vtable to access the method or,
 * in the case of a virtual generic method, the virtual generic method
 * thunk. Returns -1 on failure.
 *
 * FIXME Use proper MonoError machinery here.
 */
int
mono_method_get_vtable_index (MonoMethod *method)
{
	if (method->is_inflated && (method->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
		MonoMethodInflated *imethod = (MonoMethodInflated*)method;
		if (imethod->declaring->is_generic)
			return mono_method_get_vtable_slot (imethod->declaring);
	}
	return mono_method_get_vtable_slot (method);
}

/*
 * mono_class_has_finalizer:
 *
 *   Return whether KLASS has a finalizer, initializing klass->has_finalizer in the
 * process.
* * LOCKING: Acquires the loader lock; */ gboolean mono_class_has_finalizer (MonoClass *klass) { if (!m_class_is_has_finalize_inited (klass)) mono_class_setup_has_finalizer (klass); return m_class_has_finalize (klass); } gboolean mono_is_corlib_image (MonoImage *image) { return image == mono_defaults.corlib; } /** Is klass a Nullable<T> ginst? */ gboolean mono_class_is_nullable (MonoClass *klass) { MonoGenericClass *gklass = mono_class_try_get_generic_class (klass); return gklass && gklass->container_class == mono_defaults.generic_nullable_class; } /** if klass is T? return T */ MonoClass* mono_class_get_nullable_param_internal (MonoClass *klass) { g_assert (mono_class_is_nullable (klass)); return mono_class_from_mono_type_internal (mono_class_get_generic_class (klass)->context.class_inst->type_argv [0]); } MonoClass* mono_class_get_nullable_param (MonoClass *klass) { MonoClass *result = NULL; MONO_ENTER_GC_UNSAFE; result = mono_class_get_nullable_param_internal (klass); MONO_EXIT_GC_UNSAFE; return result; } gboolean mono_type_is_primitive (MonoType *type) { return (type->type >= MONO_TYPE_BOOLEAN && type->type <= MONO_TYPE_R8) || type-> type == MONO_TYPE_I || type->type == MONO_TYPE_U; } static MonoImage * get_image_for_container (MonoGenericContainer *container) { MonoImage *result; if (container->is_anonymous) { result = container->owner.image; } else { MonoClass *klass; if (container->is_method) { MonoMethod *method = container->owner.method; g_assert_checked (method); klass = method->klass; } else { klass = container->owner.klass; } g_assert_checked (klass); result = m_class_get_image (klass); } g_assert (result); return result; } MonoImage * mono_get_image_for_generic_param (MonoGenericParam *param) { MonoGenericContainer *container = mono_generic_param_owner (param); g_assert_checked (container); return get_image_for_container (container); } // Make a string in the designated image consisting of a single integer. #define INT_STRING_SIZE 16 char * mono_make_generic_name_string (MonoImage *image, int num) { char *name = (char *)mono_image_alloc0 (image, INT_STRING_SIZE); g_snprintf (name, INT_STRING_SIZE, "%d", num); return name; } /** * mono_class_from_generic_parameter: * \param param Parameter to find/construct a class for. * \param arg2 Is ignored. * \param arg3 Is ignored. */ MonoClass * mono_class_from_generic_parameter (MonoGenericParam *param, MonoImage *arg2 G_GNUC_UNUSED, gboolean arg3 G_GNUC_UNUSED) { return mono_class_create_generic_parameter (param); } /** * mono_ptr_class_get: */ MonoClass * mono_ptr_class_get (MonoType *type) { return mono_class_create_ptr (type); } /** * mono_class_from_mono_type: * \param type describes the type to return * \returns a \c MonoClass for the specified \c MonoType, the value is never NULL. */ MonoClass * mono_class_from_mono_type (MonoType *type) { MonoClass *result; MONO_ENTER_GC_UNSAFE; result = mono_class_from_mono_type_internal (type); MONO_EXIT_GC_UNSAFE; return result; } MonoClass * mono_class_from_mono_type_internal (MonoType *type) { g_assert (type); switch (type->type) { case MONO_TYPE_OBJECT: return type->data.klass? type->data.klass: mono_defaults.object_class; case MONO_TYPE_VOID: return type->data.klass? type->data.klass: mono_defaults.void_class; case MONO_TYPE_BOOLEAN: return type->data.klass? type->data.klass: mono_defaults.boolean_class; case MONO_TYPE_CHAR: return type->data.klass? type->data.klass: mono_defaults.char_class; case MONO_TYPE_I1: return type->data.klass? 
type->data.klass: mono_defaults.sbyte_class; case MONO_TYPE_U1: return type->data.klass? type->data.klass: mono_defaults.byte_class; case MONO_TYPE_I2: return type->data.klass? type->data.klass: mono_defaults.int16_class; case MONO_TYPE_U2: return type->data.klass? type->data.klass: mono_defaults.uint16_class; case MONO_TYPE_I4: return type->data.klass? type->data.klass: mono_defaults.int32_class; case MONO_TYPE_U4: return type->data.klass? type->data.klass: mono_defaults.uint32_class; case MONO_TYPE_I: return type->data.klass? type->data.klass: mono_defaults.int_class; case MONO_TYPE_U: return type->data.klass? type->data.klass: mono_defaults.uint_class; case MONO_TYPE_I8: return type->data.klass? type->data.klass: mono_defaults.int64_class; case MONO_TYPE_U8: return type->data.klass? type->data.klass: mono_defaults.uint64_class; case MONO_TYPE_R4: return type->data.klass? type->data.klass: mono_defaults.single_class; case MONO_TYPE_R8: return type->data.klass? type->data.klass: mono_defaults.double_class; case MONO_TYPE_STRING: return type->data.klass? type->data.klass: mono_defaults.string_class; case MONO_TYPE_TYPEDBYREF: return type->data.klass? type->data.klass: mono_defaults.typed_reference_class; case MONO_TYPE_ARRAY: return mono_class_create_bounded_array (type->data.array->eklass, type->data.array->rank, TRUE); case MONO_TYPE_PTR: return mono_class_create_ptr (type->data.type); case MONO_TYPE_FNPTR: return mono_class_create_fnptr (type->data.method); case MONO_TYPE_SZARRAY: return mono_class_create_array (type->data.klass, 1); case MONO_TYPE_CLASS: case MONO_TYPE_VALUETYPE: return type->data.klass; case MONO_TYPE_GENERICINST: return mono_class_create_generic_inst (type->data.generic_class); case MONO_TYPE_MVAR: case MONO_TYPE_VAR: return mono_class_create_generic_parameter (type->data.generic_param); default: g_warning ("mono_class_from_mono_type_internal: implement me 0x%02x\n", type->type); g_assert_not_reached (); } // Yes, this returns NULL, even if it is documented as not doing so, but there // is no way for the code to make it this far, due to the assert above. 
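/*
 * Editorial note: illustrative, compiled-out assertions (not part of the
 * original file) showing what the mapping above guarantees for builtin types.
 */
#if 0
	g_assert (mono_class_from_mono_type_internal (mono_get_int32_type ()) == mono_defaults.int32_class);
	g_assert (mono_class_from_mono_type_internal (mono_get_object_type ()) == mono_defaults.object_class);
#endif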
	return NULL;
}

/**
 * mono_type_retrieve_from_typespec
 * \param image context where the image is created
 * \param type_spec typespec token
 * \param context the generic context used to evaluate generic instantiations in
 */
static MonoType *
mono_type_retrieve_from_typespec (MonoImage *image, guint32 type_spec, MonoGenericContext *context, gboolean *did_inflate, MonoError *error)
{
	MonoType *t = mono_type_create_from_typespec_checked (image, type_spec, error);

	*did_inflate = FALSE;

	if (!t)
		return NULL;

	if (context && (context->class_inst || context->method_inst)) {
		MonoType *inflated = inflate_generic_type (NULL, t, context, error);

		if (!is_ok (error)) {
			return NULL;
		}

		if (inflated) {
			t = inflated;
			*did_inflate = TRUE;
		}
	}
	return t;
}

/**
 * mono_class_create_from_typespec
 * \param image context where the image is created
 * \param type_spec typespec token
 * \param context the generic context used to evaluate generic instantiations in
 */
static MonoClass *
mono_class_create_from_typespec (MonoImage *image, guint32 type_spec, MonoGenericContext *context, MonoError *error)
{
	MonoClass *ret;
	gboolean inflated = FALSE;
	MonoType *t = mono_type_retrieve_from_typespec (image, type_spec, context, &inflated, error);
	return_val_if_nok (error, NULL);
	ret = mono_class_from_mono_type_internal (t);
	if (inflated)
		mono_metadata_free_type (t);
	return ret;
}

/**
 * mono_bounded_array_class_get:
 * \param element_class element class
 * \param rank the dimension of the array class
 * \param bounded whether the array has non-zero bounds
 * \returns A class object describing the array with element type \p element_type and
 * dimension \p rank.
 */
MonoClass *
mono_bounded_array_class_get (MonoClass *eclass, guint32 rank, gboolean bounded)
{
	return mono_class_create_bounded_array (eclass, rank, bounded);
}

/**
 * mono_array_class_get:
 * \param element_class element class
 * \param rank the dimension of the array class
 * \returns A class object describing the array with element type \p element_type and
 * dimension \p rank.
 */
MonoClass *
mono_array_class_get (MonoClass *eclass, guint32 rank)
{
	return mono_class_create_array (eclass, rank);
}

/**
 * mono_class_instance_size:
 * \param klass a class
 *
 * Used to get the size of a class in bytes.
 *
 * \returns The size of an object instance
 */
gint32
mono_class_instance_size (MonoClass *klass)
{
	if (!m_class_is_size_inited (klass))
		mono_class_init_internal (klass);

	return m_class_get_instance_size (klass);
}

/**
 * mono_class_min_align:
 * \param klass a class
 *
 * Used to get the computed minimum alignment requirements for the specified class.
 *
 * Returns: minimum alignment requirements
 */
gint32
mono_class_min_align (MonoClass *klass)
{
	if (!m_class_is_size_inited (klass))
		mono_class_init_internal (klass);

	return m_class_get_min_align (klass);
}

/**
 * mono_class_data_size:
 * \param klass a class
 *
 * \returns The size of the static class data
 */
gint32
mono_class_data_size (MonoClass *klass)
{
	if (!m_class_is_inited (klass))
		mono_class_init_internal (klass);
	/* This can happen with dynamically created types */
	if (!m_class_is_fields_inited (klass))
		mono_class_setup_fields (klass);

	/* in arrays, sizes.class_size is unioned with element_size
	 * and arrays have no static fields
	 */
	if (m_class_get_rank (klass))
		return 0;
	return m_class_get_sizes (klass).class_size;
}

/*
 * Auxiliary routine to mono_class_get_field
 *
 * Takes a field index instead of a field token.
 */
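/*
 * Editorial note: an illustrative sketch follows (not part of the original
 * file); example_resolve_field is an assumed name, compiled out with #if 0.
 * It shows the token-based entry point that wraps the index-based routine below.
 */
#if 0
static MonoClassField*
example_resolve_field (MonoClass *klass, guint32 field_token)
{
	/* field_token must be a FIELD_DEF token, e.g. 0x04000001 is row 1. */
	MonoClassField *field = mono_class_get_field (klass, field_token);
	if (field)
		g_print ("resolved field %s\n", mono_field_get_name (field));
	return field;
}
#endif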
static MonoClassField *
mono_class_get_field_idx (MonoClass *klass, int idx)
{
	mono_class_setup_fields (klass);
	if (mono_class_has_failure (klass))
		return NULL;

	while (klass) {
		int first_field_idx = mono_class_get_first_field_idx (klass);
		int fcount = mono_class_get_field_count (klass);
		MonoImage *klass_image = m_class_get_image (klass);
		MonoClassField *klass_fields = m_class_get_fields (klass);
		if (klass_image->uncompressed_metadata) {
			/*
			 * first_field_idx points to the FieldPtr table, while idx points into the
			 * Field table, so we have to do a search.
			 */
			/*FIXME this is broken for types with multiple fields with the same name.*/
			const char *name = mono_metadata_string_heap (klass_image, mono_metadata_decode_row_col (&klass_image->tables [MONO_TABLE_FIELD], idx, MONO_FIELD_NAME));
			int i;

			for (i = 0; i < fcount; ++i)
				if (mono_field_get_name (&klass_fields [i]) == name)
					return &klass_fields [i];
			g_assert_not_reached ();
		} else {
			if (fcount) {
				if ((idx >= first_field_idx) && (idx < first_field_idx + fcount)){
					return &klass_fields [idx - first_field_idx];
				}
			}
			if (G_UNLIKELY (m_class_get_image (klass)->has_updates && mono_class_has_metadata_update_info (klass))) {
				uint32_t token = mono_metadata_make_token (MONO_TABLE_FIELD, idx + 1);
				return mono_metadata_update_get_field (klass, token);
			}
		}
		klass = m_class_get_parent (klass);
	}
	return NULL;
}

/**
 * mono_class_get_field:
 * \param class the class to lookup the field.
 * \param field_token the field token
 *
 * \returns A \c MonoClassField representing the type and offset of
 * the field, or a NULL value if the field does not belong to this
 * class.
 */
MonoClassField *
mono_class_get_field (MonoClass *klass, guint32 field_token)
{
	int idx = mono_metadata_token_index (field_token);

	g_assert (mono_metadata_token_code (field_token) == MONO_TOKEN_FIELD_DEF);

	return mono_class_get_field_idx (klass, idx - 1);
}

/**
 * mono_class_get_field_from_name:
 * \param klass the class to lookup the field.
 * \param name the field name
 *
 * Search the class \p klass and its parents for a field with the name \p name.
 *
 * \returns The \c MonoClassField pointer of the named field or NULL
 */
MonoClassField *
mono_class_get_field_from_name (MonoClass *klass, const char *name)
{
	MonoClassField *result;
	MONO_ENTER_GC_UNSAFE;
	result = mono_class_get_field_from_name_full (klass, name, NULL);
	MONO_EXIT_GC_UNSAFE;
	return result;
}

/**
 * mono_class_get_field_from_name_full:
 * \param klass the class to lookup the field.
 * \param name the field name
 * \param type the type of the field. This is optional.
 *
 * Search the class \p klass and its parents for a field with the name \p name and type \p type.
 *
 * If \p klass is an inflated generic type, the type comparison is done with the equivalent field
 * of its generic type definition.
 *
 * \returns The MonoClassField pointer of the named field or NULL
 */
MonoClassField *
mono_class_get_field_from_name_full (MonoClass *klass, const char *name, MonoType *type)
{
	MONO_REQ_GC_UNSAFE_MODE;

	int i;

	mono_class_setup_fields (klass);
	if (mono_class_has_failure (klass))
		return NULL;

	while (klass) {
		int fcount = mono_class_get_field_count (klass);
		for (i = 0; i < fcount; ++i) {
			MonoClassField *field = &m_class_get_fields (klass) [i];

			if (strcmp (name, mono_field_get_name (field)) != 0)
				continue;

			if (type) {
				MonoType *field_type = mono_metadata_get_corresponding_field_from_generic_type_definition (field)->type;
				if (!mono_metadata_type_equal_full (type, field_type, TRUE))
					continue;
			}
			return field;
		}
		klass = m_class_get_parent (klass);
	}
	return NULL;
}

/**
 * mono_class_get_field_token:
 * \param field the field we need the token of
 *
 * Get the token of a field. Note that the token is only valid for the image
 * the field was loaded from. Don't use this function for fields in dynamic types.
 *
 * \returns The token representing the field in the image it was loaded from.
 */
guint32
mono_class_get_field_token (MonoClassField *field)
{
	MonoClass *klass = m_field_get_parent (field);
	int i;

	mono_class_setup_fields (klass);

	while (klass) {
		MonoClassField *klass_fields = m_class_get_fields (klass);
		if (!klass_fields)
			return 0;
		int first_field_idx = mono_class_get_first_field_idx (klass);
		int fcount = mono_class_get_field_count (klass);
		for (i = 0; i < fcount; ++i) {
			if (&klass_fields [i] == field) {
				int idx = first_field_idx + i + 1;

				if (m_class_get_image (klass)->uncompressed_metadata)
					idx = mono_metadata_translate_token_index (m_class_get_image (klass), MONO_TABLE_FIELD, idx);
				return mono_metadata_make_token (MONO_TABLE_FIELD, idx);
			}
		}
		klass = m_class_get_parent (klass);
	}

	g_assert_not_reached ();
	return 0;
}

static int
mono_field_get_index (MonoClassField *field)
{
	int index = field - m_class_get_fields (m_field_get_parent (field));
	g_assert (index >= 0 && index < mono_class_get_field_count (m_field_get_parent (field)));

	return index;
}

/*
 * mono_class_get_field_default_value:
 *
 * Return the default value of the field as a pointer into the metadata blob.
 */
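/*
 * Editorial note: an illustrative sketch follows (not part of the original
 * file); example_read_int32_default is an assumed name, compiled out with
 * #if 0. def_type tells the caller how to decode the returned blob pointer;
 * the sketch assumes a little-endian host.
 */
#if 0
static gint32
example_read_int32_default (MonoClassField *field)
{
	MonoTypeEnum def_type;
	const char *blob = mono_class_get_field_default_value (field, &def_type);
	gint32 val = 0;
	g_assert (blob && def_type == MONO_TYPE_I4);
	memcpy (&val, blob, sizeof (val)); /* I4 constants are stored little-endian in the blob */
	return val;
}
#endif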
const char*
mono_class_get_field_default_value (MonoClassField *field, MonoTypeEnum *def_type)
{
	guint32 cindex;
	guint32 constant_cols [MONO_CONSTANT_SIZE];
	int field_index;
	MonoClass *klass = m_field_get_parent (field);
	MonoFieldDefaultValue *def_values;

	g_assert (field->type->attrs & FIELD_ATTRIBUTE_HAS_DEFAULT);

	def_values = mono_class_get_field_def_values (klass);
	if (!def_values) {
		def_values = (MonoFieldDefaultValue *)mono_class_alloc0 (klass, sizeof (MonoFieldDefaultValue) * mono_class_get_field_count (klass));

		mono_class_set_field_def_values (klass, def_values);
	}

	field_index = mono_field_get_index (field);

	if (!def_values [field_index].data) {
		MonoImage *field_parent_image = m_class_get_image (m_field_get_parent (field));
		cindex = mono_metadata_get_constant_index (field_parent_image, mono_class_get_field_token (field), 0);
		if (!cindex)
			return NULL;

		g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA));

		mono_metadata_decode_row (&field_parent_image->tables [MONO_TABLE_CONSTANT], cindex - 1, constant_cols, MONO_CONSTANT_SIZE);
		def_values [field_index].def_type = (MonoTypeEnum)constant_cols [MONO_CONSTANT_TYPE];
		mono_memory_barrier ();
		def_values [field_index].data = (const char *)mono_metadata_blob_heap (field_parent_image, constant_cols [MONO_CONSTANT_VALUE]);
	}

	*def_type = def_values [field_index].def_type;
	return def_values [field_index].data;
}

static int
mono_property_get_index (MonoProperty *prop)
{
	MonoClassPropertyInfo *info = mono_class_get_property_info (prop->parent);
	int index = prop - info->properties;

	g_assert (index >= 0 && index < info->count);

	return index;
}

/*
 * mono_class_get_property_default_value:
 *
 * Return the default value of the property as a pointer into the metadata blob.
 */
const char*
mono_class_get_property_default_value (MonoProperty *property, MonoTypeEnum *def_type)
{
	guint32 cindex;
	guint32 constant_cols [MONO_CONSTANT_SIZE];
	MonoClass *klass = property->parent;
	MonoImage *klass_image = m_class_get_image (klass);

	g_assert (property->attrs & PROPERTY_ATTRIBUTE_HAS_DEFAULT);
	/*
	 * We don't cache here because it is not used by C# so it's quite rare, but
	 * we still do the lookup in klass->ext because that is where the data
	 * is stored for dynamic assemblies.
*/ if (image_is_dynamic (klass_image)) { MonoClassPropertyInfo *info = mono_class_get_property_info (klass); int prop_index = mono_property_get_index (property); if (info->def_values && info->def_values [prop_index].data) { *def_type = info->def_values [prop_index].def_type; return info->def_values [prop_index].data; } return NULL; } cindex = mono_metadata_get_constant_index (klass_image, mono_class_get_property_token (property), 0); if (!cindex) return NULL; mono_metadata_decode_row (&klass_image->tables [MONO_TABLE_CONSTANT], cindex - 1, constant_cols, MONO_CONSTANT_SIZE); *def_type = (MonoTypeEnum)constant_cols [MONO_CONSTANT_TYPE]; return (const char *)mono_metadata_blob_heap (klass_image, constant_cols [MONO_CONSTANT_VALUE]); } /** * mono_class_get_event_token: */ guint32 mono_class_get_event_token (MonoEvent *event) { MonoClass *klass = event->parent; int i; while (klass) { MonoClassEventInfo *info = mono_class_get_event_info (klass); if (info) { for (i = 0; i < info->count; ++i) { if (&info->events [i] == event) return mono_metadata_make_token (MONO_TABLE_EVENT, info->first + i + 1); } } klass = m_class_get_parent (klass); } g_assert_not_reached (); return 0; } MonoProperty* mono_class_get_property_from_name_internal (MonoClass *klass, const char *name) { MONO_REQ_GC_UNSAFE_MODE; while (klass) { MonoProperty* p; gpointer iter = NULL; while ((p = mono_class_get_properties (klass, &iter))) { if (! strcmp (name, p->name)) return p; } klass = m_class_get_parent (klass); } return NULL; } /** * mono_class_get_property_token: * \param prop MonoProperty to query * * \returns The ECMA token for the specified property. */ guint32 mono_class_get_property_token (MonoProperty *prop) { MonoClass *klass = prop->parent; while (klass) { MonoProperty* p; int i = 0; gpointer iter = NULL; MonoClassPropertyInfo *info = mono_class_get_property_info (klass); while ((p = mono_class_get_properties (klass, &iter))) { if (&info->properties [i] == prop) return mono_metadata_make_token (MONO_TABLE_PROPERTY, info->first + i + 1); i ++; } klass = m_class_get_parent (klass); } g_assert_not_reached (); return 0; } /** * mono_class_name_from_token: */ char * mono_class_name_from_token (MonoImage *image, guint32 type_token) { const char *name, *nspace; if (image_is_dynamic (image)) return g_strdup_printf ("DynamicType 0x%08x", type_token); switch (type_token & 0xff000000){ case MONO_TOKEN_TYPE_DEF: { guint tidx = mono_metadata_token_index (type_token); if (mono_metadata_table_bounds_check (image, MONO_TABLE_TYPEDEF, tidx)) return g_strdup_printf ("Invalid type token 0x%08x", type_token); guint32 cols [MONO_TYPEDEF_SIZE]; MonoTableInfo *tt = &image->tables [MONO_TABLE_TYPEDEF]; mono_metadata_decode_row (tt, tidx - 1, cols, MONO_TYPEDEF_SIZE); name = mono_metadata_string_heap (image, cols [MONO_TYPEDEF_NAME]); nspace = mono_metadata_string_heap (image, cols [MONO_TYPEDEF_NAMESPACE]); if (strlen (nspace) == 0) return g_strdup_printf ("%s", name); else return g_strdup_printf ("%s.%s", nspace, name); } case MONO_TOKEN_TYPE_REF: { guint tidx = mono_metadata_token_index (type_token); if (mono_metadata_table_bounds_check (image, MONO_TABLE_TYPEREF, tidx)) return g_strdup_printf ("Invalid type token 0x%08x", type_token); guint32 cols [MONO_TYPEREF_SIZE]; MonoTableInfo *t = &image->tables [MONO_TABLE_TYPEREF]; mono_metadata_decode_row (t, tidx-1, cols, MONO_TYPEREF_SIZE); name = mono_metadata_string_heap (image, cols [MONO_TYPEREF_NAME]); nspace = mono_metadata_string_heap (image, cols [MONO_TYPEREF_NAMESPACE]); if (strlen 
(nspace) == 0) return g_strdup_printf ("%s", name); else return g_strdup_printf ("%s.%s", nspace, name); } case MONO_TOKEN_TYPE_SPEC: return g_strdup_printf ("Typespec 0x%08x", type_token); default: return g_strdup_printf ("Invalid type token 0x%08x", type_token); } } static char * mono_assembly_name_from_token (MonoImage *image, guint32 type_token) { if (image_is_dynamic (image)) return g_strdup_printf ("DynamicAssembly %s", image->name); switch (type_token & 0xff000000){ case MONO_TOKEN_TYPE_DEF: if (image->assembly) return mono_stringify_assembly_name (&image->assembly->aname); else if (image->assembly_name) return g_strdup (image->assembly_name); return g_strdup_printf ("%s", image->name ? image->name : "[Could not resolve assembly name"); case MONO_TOKEN_TYPE_REF: { MonoAssemblyName aname; memset (&aname, 0, sizeof (MonoAssemblyName)); guint32 cols [MONO_TYPEREF_SIZE]; MonoTableInfo *t = &image->tables [MONO_TABLE_TYPEREF]; guint32 idx = mono_metadata_token_index (type_token); if (mono_metadata_table_bounds_check (image, MONO_TABLE_TYPEREF, idx)) return g_strdup_printf ("Invalid type token 0x%08x", type_token); mono_metadata_decode_row (t, idx-1, cols, MONO_TYPEREF_SIZE); idx = cols [MONO_TYPEREF_SCOPE] >> MONO_RESOLUTION_SCOPE_BITS; switch (cols [MONO_TYPEREF_SCOPE] & MONO_RESOLUTION_SCOPE_MASK) { case MONO_RESOLUTION_SCOPE_MODULE: /* FIXME: */ return g_strdup (""); case MONO_RESOLUTION_SCOPE_MODULEREF: /* FIXME: */ return g_strdup (""); case MONO_RESOLUTION_SCOPE_TYPEREF: /* FIXME: */ return g_strdup (""); case MONO_RESOLUTION_SCOPE_ASSEMBLYREF: mono_assembly_get_assemblyref (image, idx - 1, &aname); return mono_stringify_assembly_name (&aname); default: g_assert_not_reached (); } break; } case MONO_TOKEN_TYPE_SPEC: /* FIXME: */ return g_strdup (""); default: g_assert_not_reached (); } return NULL; } /** * mono_class_get_full: * \param image the image where the class resides * \param type_token the token for the class * \param context the generic context used to evaluate generic instantiations in * \deprecated Functions that expose \c MonoGenericContext are going away in mono 4.0 * \returns The \c MonoClass that represents \p type_token in \p image */ MonoClass * mono_class_get_full (MonoImage *image, guint32 type_token, MonoGenericContext *context) { ERROR_DECL (error); MonoClass *klass; klass = mono_class_get_checked (image, type_token, error); if (klass && context && mono_metadata_token_table (type_token) == MONO_TABLE_TYPESPEC) klass = mono_class_inflate_generic_class_checked (klass, context, error); mono_error_assert_ok (error); return klass; } MonoClass * mono_class_get_and_inflate_typespec_checked (MonoImage *image, guint32 type_token, MonoGenericContext *context, MonoError *error) { MonoClass *klass; error_init (error); klass = mono_class_get_checked (image, type_token, error); if (klass && context && mono_metadata_token_table (type_token) == MONO_TABLE_TYPESPEC) klass = mono_class_inflate_generic_class_checked (klass, context, error); return klass; } /** * mono_class_get_checked: * \param image the image where the class resides * \param type_token the token for the class * \param error error object to return any error * * \returns The MonoClass that represents \p type_token in \p image, or NULL on error. 
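 *
 * Editorial note: the sketch below is an illustrative addition, not part of
 * the original file. It shows the checked-lookup calling convention — the
 * caller owns the MonoError and must test and clean it up. example_lookup_class
 * is an assumed name, compiled out with #if 0.
 */
#if 0
static MonoClass*
example_lookup_class (MonoImage *image, guint32 token)
{
	ERROR_DECL (error);
	MonoClass *klass = mono_class_get_checked (image, token, error);
	if (!is_ok (error)) {
		g_warning ("type load failed: %s", mono_error_get_message (error));
		mono_error_cleanup (error);
		return NULL;
	}
	return klass;
}
#endif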
MonoClass *
mono_class_get_checked (MonoImage *image, guint32 type_token, MonoError *error)
{
	MonoClass *klass = NULL;

	error_init (error);

	if (image_is_dynamic (image)) {
		int table = mono_metadata_token_table (type_token);

		if (table != MONO_TABLE_TYPEDEF && table != MONO_TABLE_TYPEREF && table != MONO_TABLE_TYPESPEC) {
			mono_error_set_bad_image (error, image,"Bad token table for dynamic image: %x", table);
			return NULL;
		}
		klass = (MonoClass *)mono_lookup_dynamic_token (image, type_token, NULL, error);
		goto done;
	}

	switch (type_token & 0xff000000){
	case MONO_TOKEN_TYPE_DEF:
		klass = mono_class_create_from_typedef (image, type_token, error);
		break;
	case MONO_TOKEN_TYPE_REF:
		klass = mono_class_from_typeref_checked (image, type_token, error);
		break;
	case MONO_TOKEN_TYPE_SPEC:
		klass = mono_class_create_from_typespec (image, type_token, NULL, error);
		break;
	default:
		mono_error_set_bad_image (error, image, "Unknown type token %x", type_token & 0xff000000);
	}

done:
	/* Generic case, should be avoided for when a better error is possible. */
	if (!klass && is_ok (error)) {
		char *name = mono_class_name_from_token (image, type_token);
		char *assembly = mono_assembly_name_from_token (image, type_token);
		mono_error_set_type_load_name (error, name, assembly, "Could not resolve type with token %08x (expected class '%s' in assembly '%s')", type_token, name, assembly);
	}

	return klass;
}

/**
 * mono_type_get_checked:
 * \param image the image where the type resides
 * \param type_token the token for the type
 * \param context the generic context used to evaluate generic instantiations in
 * \param error Error handling context
 *
 * This function exists because it is sometimes desirable to have access to the
 * \c MonoType itself rather than the \c MonoClass it resolves to.
 *
 * \returns The MonoType that represents \p type_token in \p image
 */
MonoType *
mono_type_get_checked (MonoImage *image, guint32 type_token, MonoGenericContext *context, MonoError *error)
{
	MonoType *type = NULL;
	gboolean inflated = FALSE;

	error_init (error);

	//FIXME: this will not fix the very issue for which mono_type_get_full exists -but how to do it then?
	if (image_is_dynamic (image)) {
		MonoClass *klass = (MonoClass *)mono_lookup_dynamic_token (image, type_token, context, error);
		return_val_if_nok (error, NULL);
		return m_class_get_byval_arg (klass);
	}

	if ((type_token & 0xff000000) != MONO_TOKEN_TYPE_SPEC) {
		MonoClass *klass = mono_class_get_checked (image, type_token, error);

		if (!klass)
			return NULL;
		if (m_class_has_failure (klass)) {
			mono_error_set_for_class_failure (error, klass);
			return NULL;
		}
		return m_class_get_byval_arg (klass);
	}

	type = mono_type_retrieve_from_typespec (image, type_token, context, &inflated, error);

	if (!type) {
		return NULL;
	}

	if (inflated) {
		MonoType *tmp = type;
		type = m_class_get_byval_arg (mono_class_from_mono_type_internal (type));
		/* FIXME: This is a workaround for the fact that a typespec token sometimes references the generic type definition.
		 * A MonoClass::_byval_arg of a generic type definition has type CLASS.
		 * Some parts of mono create a GENERICINST to reference a generic type definition and this generates a conflict with _byval_arg.
		 *
		 * The long term solution is to chase these places and make them set MonoType::type correctly.
		 * */
		if (type->type != tmp->type)
			type = tmp;
		else
			mono_metadata_free_type (tmp);
	}
	return type;
}

/**
 * mono_class_get:
 * \param image image where the class token will be looked up.
* \param type_token a type token from the image * \returns the \c MonoClass with the given \p type_token on the \p image */ MonoClass * mono_class_get (MonoImage *image, guint32 type_token) { MonoClass *result; MONO_ENTER_GC_UNSAFE; ERROR_DECL (error); result = mono_class_get_checked (image, type_token, error); mono_error_assert_ok (error); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_image_init_name_cache: * * Initializes the class name cache stored in image->name_cache. * * LOCKING: Acquires the corresponding image lock. */ void mono_image_init_name_cache (MonoImage *image) { MonoTableInfo *t = &image->tables [MONO_TABLE_TYPEDEF]; guint32 cols [MONO_TYPEDEF_SIZE]; const char *name; const char *nspace; guint32 i, visib, nspace_index; GHashTable *name_cache2, *nspace_table, *the_name_cache; if (image->name_cache) return; the_name_cache = g_hash_table_new (g_str_hash, g_str_equal); if (image_is_dynamic (image)) { mono_image_lock (image); if (image->name_cache) { /* Somebody initialized it before us */ g_hash_table_destroy (the_name_cache); } else { mono_atomic_store_release (&image->name_cache, the_name_cache); } mono_image_unlock (image); return; } /* Temporary hash table to avoid lookups in the nspace_table */ name_cache2 = g_hash_table_new (NULL, NULL); /* FIXME: metadata-update */ int rows = table_info_get_rows (t); for (i = 1; i <= rows; ++i) { mono_metadata_decode_row (t, i - 1, cols, MONO_TYPEDEF_SIZE); visib = cols [MONO_TYPEDEF_FLAGS] & TYPE_ATTRIBUTE_VISIBILITY_MASK; /* * Nested types are accessed from the nesting name. We use the fact that nested types use different visibility flags * than toplevel types, thus avoiding the need to grovel through the NESTED_TYPE table */ if (visib >= TYPE_ATTRIBUTE_NESTED_PUBLIC && visib <= TYPE_ATTRIBUTE_NESTED_FAM_OR_ASSEM) continue; name = mono_metadata_string_heap (image, cols [MONO_TYPEDEF_NAME]); nspace = mono_metadata_string_heap (image, cols [MONO_TYPEDEF_NAMESPACE]); nspace_index = cols [MONO_TYPEDEF_NAMESPACE]; nspace_table = (GHashTable *)g_hash_table_lookup (name_cache2, GUINT_TO_POINTER (nspace_index)); if (!nspace_table) { nspace_table = g_hash_table_new (g_str_hash, g_str_equal); g_hash_table_insert (the_name_cache, (char*)nspace, nspace_table); g_hash_table_insert (name_cache2, GUINT_TO_POINTER (nspace_index), nspace_table); } g_hash_table_insert (nspace_table, (char *) name, GUINT_TO_POINTER (i)); } /* Load type names from EXPORTEDTYPES table */ { MonoTableInfo *t = &image->tables [MONO_TABLE_EXPORTEDTYPE]; guint32 cols [MONO_EXP_TYPE_SIZE]; int i; rows = table_info_get_rows (t); for (i = 0; i < rows; ++i) { mono_metadata_decode_row (t, i, cols, MONO_EXP_TYPE_SIZE); guint32 impl = cols [MONO_EXP_TYPE_IMPLEMENTATION]; if ((impl & MONO_IMPLEMENTATION_MASK) == MONO_IMPLEMENTATION_EXP_TYPE) /* Nested type */ continue; name = mono_metadata_string_heap (image, cols [MONO_EXP_TYPE_NAME]); nspace = mono_metadata_string_heap (image, cols [MONO_EXP_TYPE_NAMESPACE]); nspace_index = cols [MONO_EXP_TYPE_NAMESPACE]; nspace_table = (GHashTable *)g_hash_table_lookup (name_cache2, GUINT_TO_POINTER (nspace_index)); if (!nspace_table) { nspace_table = g_hash_table_new (g_str_hash, g_str_equal); g_hash_table_insert (the_name_cache, (char*)nspace, nspace_table); g_hash_table_insert (name_cache2, GUINT_TO_POINTER (nspace_index), nspace_table); } g_hash_table_insert (nspace_table, (char *) name, GUINT_TO_POINTER (mono_metadata_make_token (MONO_TABLE_EXPORTEDTYPE, i + 1))); } } g_hash_table_destroy (name_cache2); mono_image_lock (image); if 
(image->name_cache) {
		/* Somebody initialized it before us */
		g_hash_table_destroy (the_name_cache);
	} else {
		mono_atomic_store_release (&image->name_cache, the_name_cache);
	}
	mono_image_unlock (image);
}

/*FIXME Only dynamic assemblies should allow this operation.*/
/**
 * mono_image_add_to_name_cache:
 */
void
mono_image_add_to_name_cache (MonoImage *image, const char *nspace, const char *name, guint32 index)
{
	GHashTable *nspace_table;
	GHashTable *name_cache;
	guint32 old_index;

	mono_image_init_name_cache (image);
	mono_image_lock (image);

	name_cache = image->name_cache;
	if (!(nspace_table = (GHashTable *)g_hash_table_lookup (name_cache, nspace))) {
		nspace_table = g_hash_table_new (g_str_hash, g_str_equal);
		g_hash_table_insert (name_cache, (char *)nspace, (char *)nspace_table);
	}

	if ((old_index = GPOINTER_TO_UINT (g_hash_table_lookup (nspace_table, (char*) name))))
		g_error ("overwriting old token %x on image %s for type %s::%s", old_index, image->name, nspace, name);

	g_hash_table_insert (nspace_table, (char *) name, GUINT_TO_POINTER (index));

	mono_image_unlock (image);
}

typedef struct {
	gconstpointer key;
	GSList *values;
} FindAllUserData;

static void
find_all_nocase (gpointer key, gpointer value, gpointer user_data)
{
	char *name = (char*)key;
	FindAllUserData *data = (FindAllUserData*)user_data;
	if (mono_utf8_strcasecmp (name, (char*)data->key) == 0)
		data->values = g_slist_prepend (data->values, value);
}

typedef struct {
	gconstpointer key;
	gpointer value;
} FindUserData;

static void
find_nocase (gpointer key, gpointer value, gpointer user_data)
{
	char *name = (char*)key;
	FindUserData *data = (FindUserData*)user_data;

	if (!data->value && (mono_utf8_strcasecmp (name, (char*)data->key) == 0))
		data->value = value;
}

/**
 * mono_class_from_name_case:
 * \param image The MonoImage where the type is looked up in
 * \param name_space the type namespace
 * \param name the type short name.
 * \deprecated use the mono_class_from_name_case_checked variant instead.
 *
 * Obtains a \c MonoClass with a given namespace and a given name which
 * is located in the given \c MonoImage. The namespace and name
 * lookups are case insensitive.
 */
MonoClass *
mono_class_from_name_case (MonoImage *image, const char* name_space, const char *name)
{
	ERROR_DECL (error);
	MonoClass *res = mono_class_from_name_case_checked (image, name_space, name, error);
	mono_error_cleanup (error);

	return res;
}

/**
 * mono_class_from_name_case_checked:
 * \param image The MonoImage where the type is looked up in
 * \param name_space the type namespace
 * \param name the type short name.
 * \param error set on error
 *
 * Obtains a MonoClass with a given namespace and a given name which
 * is located in the given MonoImage. The namespace and name
 * lookups are case insensitive.
 *
 * \returns The MonoClass if the given namespace and name were found, or NULL if it
 * was not found. The \p error object will contain information about the problem
 * in that case.
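 *
 * Editorial note: an illustrative sketch follows (not part of the original
 * file); example_find_class_nocase is an assumed name, compiled out with
 * #if 0. Both namespace and name may differ in case from the metadata.
 */
#if 0
static MonoClass*
example_find_class_nocase (MonoImage *corlib)
{
	ERROR_DECL (error);
	MonoClass *klass = mono_class_from_name_case_checked (corlib, "system", "STRING", error);
	mono_error_cleanup (error); /* sketch: ignore load errors */
	return klass; /* mono_defaults.string_class when corlib is the core image */
}
#endif
/*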
*/ MonoClass * mono_class_from_name_case_checked (MonoImage *image, const char *name_space, const char *name, MonoError *error) { MonoClass *klass; GHashTable *visited_images; visited_images = g_hash_table_new (g_direct_hash, g_direct_equal); klass = mono_class_from_name_checked_aux (image, name_space, name, visited_images, FALSE, error); g_hash_table_destroy (visited_images); return klass; } static MonoClass* return_nested_in (MonoClass *klass, char *nested, gboolean case_sensitive) { MonoClass *found; char *s = strchr (nested, '/'); gpointer iter = NULL; if (s) { *s = 0; s++; } while ((found = mono_class_get_nested_types (klass, &iter))) { const char *name = m_class_get_name (found); gint strcmp_result; if (case_sensitive) strcmp_result = strcmp (name, nested); else strcmp_result = mono_utf8_strcasecmp (name, nested); if (strcmp_result == 0) { if (s) return return_nested_in (found, s, case_sensitive); return found; } } return NULL; } static MonoClass* search_modules (MonoImage *image, const char *name_space, const char *name, gboolean case_sensitive, MonoError *error) { MonoTableInfo *file_table = &image->tables [MONO_TABLE_FILE]; MonoImage *file_image; MonoClass *klass; int i; error_init (error); /* * The EXPORTEDTYPES table only contains public types, so have to search the * modules as well. * Note: image->modules contains the contents of the MODULEREF table, while * the real module list is in the FILE table. */ int rows = table_info_get_rows (file_table); for (i = 0; i < rows; i++) { guint32 cols [MONO_FILE_SIZE]; mono_metadata_decode_row (file_table, i, cols, MONO_FILE_SIZE); if (cols [MONO_FILE_FLAGS] == FILE_CONTAINS_NO_METADATA) continue; file_image = mono_image_load_file_for_image_checked (image, i + 1, error); if (file_image) { if (case_sensitive) klass = mono_class_from_name_checked (file_image, name_space, name, error); else klass = mono_class_from_name_case_checked (file_image, name_space, name, error); if (klass || !is_ok (error)) return klass; } } return NULL; } static MonoClass * mono_class_from_name_checked_aux (MonoImage *image, const char* name_space, const char *name, GHashTable* visited_images, gboolean case_sensitive, MonoError *error) { GHashTable *nspace_table = NULL; MonoImage *loaded_image = NULL; guint32 token = 0; int i; MonoClass *klass; char *nested; char buf [1024]; error_init (error); // Checking visited images avoids stack overflows when cyclic references exist. if (g_hash_table_lookup (visited_images, image)) return NULL; g_hash_table_insert (visited_images, image, GUINT_TO_POINTER(1)); if ((nested = (char*)strchr (name, '/'))) { int pos = nested - name; int len = strlen (name); if (len > 1023) return NULL; memcpy (buf, name, len + 1); buf [pos] = 0; nested = buf + pos + 1; name = buf; } /* FIXME: get_class_from_name () can't handle types in the EXPORTEDTYPE table */ // The AOT cache in get_class_from_name is case-sensitive, so don't bother with it for case-insensitive lookups if (get_class_from_name && table_info_get_rows (&image->tables [MONO_TABLE_EXPORTEDTYPE]) == 0 && case_sensitive) { gboolean res = get_class_from_name (image, name_space, name, &klass); if (res) { if (!klass) { klass = search_modules (image, name_space, name, case_sensitive, error); if (!is_ok (error)) return NULL; } if (nested) return klass ? 
return_nested_in (klass, nested, case_sensitive) : NULL; else return klass; } } mono_image_init_name_cache (image); mono_image_lock (image); if (case_sensitive) { nspace_table = (GHashTable *)g_hash_table_lookup (image->name_cache, name_space); if (nspace_table) token = GPOINTER_TO_UINT (g_hash_table_lookup (nspace_table, name)); } else { FindAllUserData all_user_data = { name_space, NULL }; FindUserData user_data = { name, NULL }; GSList *values; // We're forced to check all matching namespaces, not just the first one found, // because our desired type could be in any of the ones that match case-insensitively. g_hash_table_foreach (image->name_cache, find_all_nocase, &all_user_data); values = all_user_data.values; while (values && !user_data.value) { nspace_table = (GHashTable*)values->data; g_hash_table_foreach (nspace_table, find_nocase, &user_data); values = values->next; } g_slist_free (all_user_data.values); if (user_data.value) token = GPOINTER_TO_UINT (user_data.value); } mono_image_unlock (image); if (!token && image_is_dynamic (image) && image->modules) { /* Search modules as well */ for (i = 0; i < image->module_count; ++i) { MonoImage *module = image->modules [i]; if (case_sensitive) klass = mono_class_from_name_checked (module, name_space, name, error); else klass = mono_class_from_name_case_checked (module, name_space, name, error); if (klass || !is_ok (error)) return klass; } } if (!token) { klass = search_modules (image, name_space, name, case_sensitive, error); if (klass || !is_ok (error)) return klass; return NULL; } if (mono_metadata_token_table (token) == MONO_TABLE_EXPORTEDTYPE) { MonoTableInfo *t = &image->tables [MONO_TABLE_EXPORTEDTYPE]; guint32 cols [MONO_EXP_TYPE_SIZE]; guint32 idx, impl; idx = mono_metadata_token_index (token); mono_metadata_decode_row (t, idx - 1, cols, MONO_EXP_TYPE_SIZE); impl = cols [MONO_EXP_TYPE_IMPLEMENTATION]; if ((impl & MONO_IMPLEMENTATION_MASK) == MONO_IMPLEMENTATION_FILE) { loaded_image = mono_assembly_load_module_checked (image->assembly, impl >> MONO_IMPLEMENTATION_BITS, error); if (!loaded_image) return NULL; klass = mono_class_from_name_checked_aux (loaded_image, name_space, name, visited_images, case_sensitive, error); if (nested) return klass ? return_nested_in (klass, nested, case_sensitive) : NULL; return klass; } else if ((impl & MONO_IMPLEMENTATION_MASK) == MONO_IMPLEMENTATION_ASSEMBLYREF) { guint32 assembly_idx; assembly_idx = impl >> MONO_IMPLEMENTATION_BITS; mono_assembly_load_reference (image, assembly_idx - 1); g_assert (image->references [assembly_idx - 1]); if (image->references [assembly_idx - 1] == (gpointer)-1) return NULL; klass = mono_class_from_name_checked_aux (image->references [assembly_idx - 1]->image, name_space, name, visited_images, case_sensitive, error); if (nested) return klass ? return_nested_in (klass, nested, case_sensitive) : NULL; return klass; } else { g_assert_not_reached (); } } token = MONO_TOKEN_TYPE_DEF | token; klass = mono_class_get_checked (image, token, error); if (nested) return return_nested_in (klass, nested, case_sensitive); return klass; } /** * mono_class_from_name_checked: * \param image The MonoImage where the type is looked up in * \param name_space the type namespace * \param name the type short name. * * Obtains a MonoClass with a given namespace and a given name which * is located in the given MonoImage. * * Works like mono_class_from_name, but error handling is tricky. 
It can return NULL and have no error
 * set if the class was not found, or it will return NULL and set the error if there was a loading error.
 */
MonoClass *
mono_class_from_name_checked (MonoImage *image, const char* name_space, const char *name, MonoError *error)
{
    MonoClass *klass;
    GHashTable *visited_images;

    visited_images = g_hash_table_new (g_direct_hash, g_direct_equal);

    klass = mono_class_from_name_checked_aux (image, name_space, name, visited_images, TRUE, error);

    g_hash_table_destroy (visited_images);

    return klass;
}

/**
 * mono_class_from_name:
 * \param image The \c MonoImage where the type is looked up in
 * \param name_space the type namespace
 * \param name the type short name.
 *
 * Obtains a \c MonoClass with a given namespace and a given name which
 * is located in the given \c MonoImage.
 *
 * To reference nested classes, use the "/" character as a separator.
 * For example use \c "Foo/Bar" to reference the class \c Bar that is nested
 * inside \c Foo, like this: "class Foo { class Bar {} }".
 */
MonoClass *
mono_class_from_name (MonoImage *image, const char* name_space, const char *name)
{
    MonoClass *klass;
    MONO_ENTER_GC_UNSAFE;
    ERROR_DECL (error);
    klass = mono_class_from_name_checked (image, name_space, name, error);
    mono_error_cleanup (error); /* FIXME Don't swallow the error */
    MONO_EXIT_GC_UNSAFE;
    return klass;
}

/**
 * mono_class_load_from_name:
 * \param image The MonoImage where the type is looked up in
 * \param name_space the type namespace
 * \param name the type short name.
 *
 * This function works exactly like mono_class_from_name, but it will abort if the class is not found.
 * This function should be used by the runtime for critical types for which there is no way to recover but to crash
 * if they are missing. For example, System.Object or System.String.
 */
MonoClass *
mono_class_load_from_name (MonoImage *image, const char* name_space, const char *name)
{
    ERROR_DECL (error);
    MonoClass *klass;

    klass = mono_class_from_name_checked (image, name_space, name, error);
    if (!klass)
        g_error ("Runtime critical type %s.%s not found", name_space, name);
    mono_error_assertf_ok (error, "Could not load runtime critical type %s.%s", name_space, name);
    return klass;
}

/**
 * mono_class_try_load_from_name:
 * \param image The MonoImage where the type is looked up in
 * \param name_space the type namespace
 * \param name the type short name.
 *
 * This function tries to load a type, returning the class if it was found, or NULL otherwise.
 * This function should be used by the runtime when probing for optional types, those that could have been linked out.
 *
 * Big design consideration. This function aborts if there was an error loading the type. This prevents us from missing
 * a type that we would otherwise assume to be available but was not due to some error.
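 *
 * A minimal usage sketch (the namespace/name pair below is purely illustrative,
 * not a type the runtime actually probes for):
 *
 *     MonoClass *opt = mono_class_try_load_from_name (image, "System", "SomeOptionalType");
 *     if (opt) {
 *         // the type survived linking; safe to use
 *     }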
* */ MonoClass* mono_class_try_load_from_name (MonoImage *image, const char* name_space, const char *name) { ERROR_DECL (error); MonoClass *klass; klass = mono_class_from_name_checked (image, name_space, name, error); mono_error_assertf_ok (error, "Could not load runtime critical type %s.%s", name_space, name); return klass; } static gboolean mono_interface_implements_interface (MonoClass *interface_implementer, MonoClass *interface_implemented) { int i; ERROR_DECL (error); mono_class_setup_interfaces (interface_implementer, error); if (!is_ok (error)) { mono_error_cleanup (error); return FALSE; } MonoClass **klass_interfaces = m_class_get_interfaces (interface_implementer); for (i = 0; i < m_class_get_interface_count (interface_implementer); i++) { MonoClass *ic = klass_interfaces [i]; if (mono_class_is_ginst (ic)) ic = mono_class_get_generic_type_definition (ic); if (ic == interface_implemented) return TRUE; } return FALSE; } gboolean mono_class_is_subclass_of_internal (MonoClass *klass, MonoClass *klassc, gboolean check_interfaces) { MONO_REQ_GC_UNSAFE_MODE; /* FIXME test for interfaces with variant generic arguments */ if (check_interfaces) { mono_class_init_internal (klass); mono_class_init_internal (klassc); } if (check_interfaces && MONO_CLASS_IS_INTERFACE_INTERNAL (klassc) && !MONO_CLASS_IS_INTERFACE_INTERNAL (klass)) { if (MONO_CLASS_IMPLEMENTS_INTERFACE (klass, m_class_get_interface_id (klassc))) return TRUE; } else if (check_interfaces && MONO_CLASS_IS_INTERFACE_INTERNAL (klassc) && MONO_CLASS_IS_INTERFACE_INTERNAL (klass)) { int i; MonoClass **klass_interfaces = m_class_get_interfaces (klass); for (i = 0; i < m_class_get_interface_count (klass); i ++) { MonoClass *ic = klass_interfaces [i]; if (ic == klassc) return TRUE; } } else { if (!MONO_CLASS_IS_INTERFACE_INTERNAL (klass) && mono_class_has_parent (klass, klassc)) return TRUE; } /* * MS.NET thinks interfaces are a subclass of Object, so we think it as * well. */ if (klassc == mono_defaults.object_class) return TRUE; return FALSE; } static gboolean mono_type_is_generic_argument (MonoType *type) { return type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR; } gboolean mono_class_has_variant_generic_params (MonoClass *klass) { int i; MonoGenericContainer *container; if (!mono_class_is_ginst (klass)) return FALSE; container = mono_class_get_generic_container (mono_class_get_generic_class (klass)->container_class); for (i = 0; i < container->type_argc; ++i) if (mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)) return TRUE; return FALSE; } static gboolean mono_gparam_is_reference_conversible (MonoClass *target, MonoClass *candidate, gboolean check_for_reference_conv) { if (target == candidate) return TRUE; if (check_for_reference_conv && mono_type_is_generic_argument (m_class_get_byval_arg (target)) && mono_type_is_generic_argument (m_class_get_byval_arg (candidate))) { MonoGenericParam *gparam = m_class_get_byval_arg (candidate)->data.generic_param; MonoGenericParamInfo *pinfo = mono_generic_param_info (gparam); if (!pinfo || (pinfo->flags & GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT) == 0) return FALSE; } if (!mono_class_is_assignable_from_internal (target, candidate)) return FALSE; return TRUE; } /** * @container the generic container from the GTD * @klass: the class to be assigned to * @oklass: the source class * * Both @klass and @oklass must be instances of the same generic interface. 
* * Returns: TRUE if @klass can be assigned to a @klass variable */ gboolean mono_class_is_variant_compatible (MonoClass *klass, MonoClass *oklass, gboolean check_for_reference_conv) { int j; MonoType **klass_argv, **oklass_argv; MonoClass *klass_gtd = mono_class_get_generic_type_definition (klass); MonoGenericContainer *container = mono_class_get_generic_container (klass_gtd); if (klass == oklass) return TRUE; /*Viable candidates are instances of the same generic interface*/ if (mono_class_get_generic_type_definition (oklass) != klass_gtd || oklass == klass_gtd) return FALSE; klass_argv = &mono_class_get_generic_class (klass)->context.class_inst->type_argv [0]; oklass_argv = &mono_class_get_generic_class (oklass)->context.class_inst->type_argv [0]; for (j = 0; j < container->type_argc; ++j) { MonoClass *param1_class = mono_class_from_mono_type_internal (klass_argv [j]); MonoClass *param2_class = mono_class_from_mono_type_internal (oklass_argv [j]); if (m_class_is_valuetype (param1_class) != m_class_is_valuetype (param2_class) || (m_class_is_valuetype (param1_class) && param1_class != param2_class)) return FALSE; /* * The _VARIANT and _COVARIANT constants should read _COVARIANT and * _CONTRAVARIANT, but they are in a public header so we can't fix it. */ if (param1_class != param2_class) { if (mono_generic_container_get_param_info (container, j)->flags & MONO_GEN_PARAM_VARIANT) { if (!mono_gparam_is_reference_conversible (param1_class, param2_class, check_for_reference_conv)) return FALSE; } else if (mono_generic_container_get_param_info (container, j)->flags & MONO_GEN_PARAM_COVARIANT) { if (!mono_gparam_is_reference_conversible (param2_class, param1_class, check_for_reference_conv)) return FALSE; } else return FALSE; } } return TRUE; } static gboolean mono_gparam_is_assignable_from (MonoClass *target, MonoClass *candidate) { MonoGenericParam *gparam, *ogparam; MonoGenericParamInfo *tinfo, *cinfo; MonoClass **candidate_class; gboolean class_constraint_satisfied, valuetype_constraint_satisfied; int tmask, cmask; if (target == candidate) return TRUE; MonoType *target_byval_arg = m_class_get_byval_arg (target); MonoType *candidate_byval_arg = m_class_get_byval_arg (candidate); if (target_byval_arg->type != candidate_byval_arg->type) return FALSE; gparam = target_byval_arg->data.generic_param; ogparam = candidate_byval_arg->data.generic_param; tinfo = mono_generic_param_info (gparam); cinfo = mono_generic_param_info (ogparam); class_constraint_satisfied = FALSE; valuetype_constraint_satisfied = FALSE; /*candidate must have a super set of target's special constraints*/ tmask = tinfo->flags & GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK; cmask = cinfo->flags & GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK; if (cinfo->constraints) { for (candidate_class = cinfo->constraints; *candidate_class; ++candidate_class) { MonoClass *cc = *candidate_class; MonoType *cc_byval_arg = m_class_get_byval_arg (cc); if (mono_type_is_reference (cc_byval_arg) && !MONO_CLASS_IS_INTERFACE_INTERNAL (cc)) class_constraint_satisfied = TRUE; else if (!mono_type_is_reference (cc_byval_arg) && !MONO_CLASS_IS_INTERFACE_INTERNAL (cc)) valuetype_constraint_satisfied = TRUE; } } class_constraint_satisfied |= (cmask & GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT) != 0; valuetype_constraint_satisfied |= (cmask & GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT) != 0; if ((tmask & GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT) && !class_constraint_satisfied) return FALSE; if ((tmask & 
GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT) && !valuetype_constraint_satisfied) return FALSE; if ((tmask & GENERIC_PARAMETER_ATTRIBUTE_CONSTRUCTOR_CONSTRAINT) && !((cmask & GENERIC_PARAMETER_ATTRIBUTE_CONSTRUCTOR_CONSTRAINT) || valuetype_constraint_satisfied)) { return FALSE; } /*candidate type constraints must be a superset of target's*/ if (tinfo->constraints) { MonoClass **target_class; for (target_class = tinfo->constraints; *target_class; ++target_class) { MonoClass *tc = *target_class; MonoType *tc_byval_arg = m_class_get_byval_arg (tc); /* * A constraint from @target might inflate into @candidate itself and in that case we don't need * check it's constraints since it satisfy the constraint by itself. */ if (mono_metadata_type_equal (tc_byval_arg, candidate_byval_arg)) continue; if (!cinfo->constraints) return FALSE; for (candidate_class = cinfo->constraints; *candidate_class; ++candidate_class) { MonoClass *cc = *candidate_class; if (mono_class_is_assignable_from_internal (tc, cc)) break; /* * This happens when we have the following: * * Bar<K> where K : IFace * Foo<T, U> where T : U where U : IFace * ... * Bar<T> <- T here satisfy K constraint transitively through to U's constraint * */ if (mono_type_is_generic_argument (m_class_get_byval_arg (cc))) { if (mono_gparam_is_assignable_from (target, cc)) break; } } if (!*candidate_class) return FALSE; } } /*candidate itself must have a constraint that satisfy target*/ if (cinfo->constraints) { for (candidate_class = cinfo->constraints; *candidate_class; ++candidate_class) { MonoClass *cc = *candidate_class; if (mono_class_is_assignable_from_internal (target, cc)) return TRUE; } } return FALSE; } static MonoType* mono_type_get_underlying_type_ignore_byref (MonoType *type) { if (type->type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (type->data.klass)) return mono_class_enum_basetype_internal (type->data.klass); if (type->type == MONO_TYPE_GENERICINST && m_class_is_enumtype (type->data.generic_class->container_class)) return mono_class_enum_basetype_internal (type->data.generic_class->container_class); return type; } /** * mono_byref_type_is_assignable_from: * \param type The type assignee * \param ctype The type being assigned * \param signature_assignment whether this is a signature assginment check according to ECMA rules, or reflection * * Given two byref types, returns \c TRUE if values of the second type are assignable to locations of the first type. * * The \p signature_assignment parameter affects comparing T& and U& where T and U are both reference types. Reflection * does an IsAssignableFrom check for T and U here, but ECMA I.8.7.2 says that the verification types of T and U must be * identical. If \p signature_assignment is \c TRUE we do an ECMA check, otherwise, reflection. 
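 *
 * An illustrative sketch of the difference; object& and string& are both
 * reference types, so only the reflection-style check succeeds (this uses
 * m_class_get_this_arg to obtain the byref types, as the accessors below do):
 *
 *     MonoType *t = m_class_get_this_arg (mono_defaults.object_class); // object&
 *     MonoType *u = m_class_get_this_arg (mono_defaults.string_class); // string&
 *     mono_byref_type_is_assignable_from (t, u, FALSE); // TRUE: reflection subtype check
 *     mono_byref_type_is_assignable_from (t, u, TRUE);  // FALSE: verification types differ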
*/ gboolean mono_byref_type_is_assignable_from (MonoType *type, MonoType *ctype, gboolean signature_assignment) { g_assert (m_type_is_byref (type)); g_assert (m_type_is_byref (ctype)); MonoType *t = mono_type_get_underlying_type_ignore_byref (type); MonoType *ot = mono_type_get_underlying_type_ignore_byref (ctype); MonoClass *klass = mono_class_from_mono_type_internal (t); MonoClass *klassc = mono_class_from_mono_type_internal (ot); if (mono_type_is_primitive (t)) { return mono_type_is_primitive (ot) && m_class_get_instance_size (klass) == m_class_get_instance_size (klassc); } else if (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) { return t->type == ot->type && t->data.generic_param->num == ot->data.generic_param->num; } else if (t->type == MONO_TYPE_PTR || t->type == MONO_TYPE_FNPTR) { return t->type == ot->type; } else { if (ot->type == MONO_TYPE_VAR || ot->type == MONO_TYPE_MVAR) return FALSE; if (m_class_is_valuetype (klass)) return klass == klassc; if (m_class_is_valuetype (klassc)) return FALSE; /* * assignment compatability for location types, ECMA I.8.7.2 - two managed pointer types T& and U& are * assignment compatible if the verification types of T and U are identical. */ if (signature_assignment) return klass == klassc; /* the reflection IsAssignableFrom does a subtype comparison here for reference types only */ return mono_class_is_assignable_from_internal (klass, klassc); } } /** * mono_class_is_assignable_from_internal: * \param klass the class to be assigned to * \param oklass the source class * * \returns TRUE if an instance of class \p oklass can be assigned to an * instance of class \p klass */ gboolean mono_class_is_assignable_from_internal (MonoClass *klass, MonoClass *oklass) { gboolean result = FALSE; ERROR_DECL (error); mono_class_is_assignable_from_checked (klass, oklass, &result, error); mono_error_cleanup (error); return result; } /** * mono_class_is_assignable_from: * \param klass the class to be assigned to * \param oklass the source class * * \returns TRUE if an instance of class \p oklass can be assigned to an * instance of class \p klass */ mono_bool mono_class_is_assignable_from (MonoClass *klass, MonoClass *oklass) { gboolean result; MONO_ENTER_GC_UNSAFE; result = mono_class_is_assignable_from_internal (klass, oklass); MONO_EXIT_GC_UNSAFE; return result; } /* * ECMA I.8.7.3 general assignment compatability is defined in terms of an "intermediate type" * whereas ECMA I.8.7.1 assignment compatability for signature types is defined in terms of a "reduced type". * * This matters when we're comparing arrays of IntPtr. IntPtr[] is generally * assignable to int[] or long[], depending on architecture. But for signature * compatability, IntPtr[] is distinct from both of them. * * Similarly for ulong* and IntPtr*, etc. 
*/ static MonoClass* composite_type_to_reduced_element_type (MonoClass *array_klass) { switch (m_class_get_byval_arg (m_class_get_element_class (array_klass))->type) { case MONO_TYPE_I: case MONO_TYPE_U: return mono_defaults.int_class; default: return m_class_get_cast_class (array_klass); } } static void mono_class_is_assignable_from_general (MonoClass *klass, MonoClass *oklass, gboolean signature_assignment, gboolean *result, MonoError *error); /** * mono_class_is_assignable_from_checked: * \param klass the class to be assigned to * \param oklass the source class * \param result set if there was no error * \param error set if there was an error * * Sets \p result to TRUE if an instance of class \p oklass can be assigned to * an instance of class \p klass or FALSE if it cannot. On error, no \p error * is set and \p result is not valid. */ void mono_class_is_assignable_from_checked (MonoClass *klass, MonoClass *oklass, gboolean *result, MonoError *error) { const gboolean for_sig = FALSE; mono_class_is_assignable_from_general (klass, oklass, for_sig, result, error); } void mono_class_signature_is_assignable_from (MonoClass *klass, MonoClass *oklass, gboolean *result, MonoError *error) { const gboolean for_sig = TRUE; mono_class_is_assignable_from_general (klass, oklass, for_sig, result, error); } void mono_class_is_assignable_from_general (MonoClass *klass, MonoClass *oklass, gboolean signature_assignment, gboolean *result, MonoError *error) { g_assert (result); if (klass == oklass) { *result = TRUE; return; } MONO_REQ_GC_UNSAFE_MODE; /*FIXME this will cause a lot of irrelevant stuff to be loaded.*/ if (!m_class_is_inited (klass)) mono_class_init_internal (klass); if (!m_class_is_inited (oklass)) mono_class_init_internal (oklass); if (mono_class_has_failure (klass)) { mono_error_set_for_class_failure (error, klass); *result = FALSE; return; } if (mono_class_has_failure (oklass)) { mono_error_set_for_class_failure (error, oklass); *result = FALSE; return; } MonoType *klass_byval_arg = m_class_get_byval_arg (klass); MonoType *oklass_byval_arg = m_class_get_byval_arg (oklass); if (mono_type_is_generic_argument (klass_byval_arg)) { if (!mono_type_is_generic_argument (oklass_byval_arg)) { *result = FALSE; return; } *result = mono_gparam_is_assignable_from (klass, oklass); return; } /* This can happen if oklass is a tyvar that has a constraint which is another tyvar which in turn * has a constraint which is a class type: * * class Foo { } * class G<T1, T2> where T1 : T2 where T2 : Foo { } * * In this case, Foo is assignable from T1. */ if (mono_type_is_generic_argument (oklass_byval_arg)) { MonoGenericParam *gparam = oklass_byval_arg->data.generic_param; MonoClass **constraints = mono_generic_container_get_param_info (gparam->owner, gparam->num)->constraints; int i; if (constraints) { for (i = 0; constraints [i]; ++i) { if (mono_class_is_assignable_from_internal (klass, constraints [i])) { *result = TRUE; return; } } } *result = mono_class_has_parent (oklass, klass); return; } if (MONO_CLASS_IS_INTERFACE_INTERNAL (klass)) { /* interface_offsets might not be set for dynamic classes */ if (mono_class_get_ref_info_handle (oklass) && !m_class_get_interface_bitmap (oklass)) { /* * oklass might be a generic type parameter but they have * interface_offsets set. 
*/ gboolean assign_result = mono_reflection_call_is_assignable_to (oklass, klass, error); return_if_nok (error); *result = assign_result; return; } if (!m_class_get_interface_bitmap (oklass)) { /* Happens with generic instances of not-yet created dynamic types */ *result = FALSE; return; } if (MONO_CLASS_IMPLEMENTS_INTERFACE (oklass, m_class_get_interface_id (klass))) { *result = TRUE; return; } if (m_class_is_array_special_interface (klass) && m_class_get_rank (oklass) == 1) { if (mono_class_is_gtd (klass)) { /* klass is an array special gtd like * IList`1<>, and oklass is X[] for some X. * Moreover we know that X isn't !0 (the gparam * of IList`1) because in that case we would * have returned TRUE for * MONO_CLASS_IMPLEMENTS_INTERFACE, above. */ *result = FALSE; return; } // FIXME: IEnumerator`1 should not be an array special interface. // The correct fix is to make // ((IEnumerable<U>) (new T[] {...})).GetEnumerator() // return an IEnumerator<U> (like .NET does) instead of IEnumerator<T> // and to stop marking IEnumerable`1 as an array_special_interface. if (mono_class_get_generic_type_definition (klass) == mono_defaults.generic_ienumerator_class) { *result = FALSE; return; } //XXX we could offset this by having the cast target computed at JIT time //XXX we could go even further and emit a wrapper that would do the extra type check MonoClass *iface_klass = mono_class_from_mono_type_internal (mono_class_get_generic_class (klass)->context.class_inst->type_argv [0]); MonoClass *obj_klass = m_class_get_cast_class (oklass); //This gets us the cast class of element type of the array // If the target we're trying to cast to is a valuetype, we must account of weird valuetype equivalences such as IntEnum <> int or uint <> int // We can't apply it for ref types as this would go wrong with arrays - IList<byte[]> would have byte tested if (!mono_class_is_nullable (iface_klass)) { if (m_class_is_valuetype (iface_klass)) iface_klass = m_class_get_cast_class (iface_klass); //array covariant casts only operates on scalar to scalar //This is so int[] can't be casted to IComparable<int>[] if (!(m_class_is_valuetype (obj_klass) && !m_class_is_valuetype (iface_klass)) && mono_class_is_assignable_from_internal (iface_klass, obj_klass)) { *result = TRUE; return; } } } if (mono_class_has_variant_generic_params (klass)) { int i; mono_class_setup_interfaces (oklass, error); return_if_nok (error); /*klass is a generic variant interface, We need to extract from oklass a list of ifaces which are viable candidates.*/ for (i = 0; i < m_class_get_interface_offsets_count (oklass); ++i) { MonoClass *iface = m_class_get_interfaces_packed (oklass) [i]; if (mono_class_is_variant_compatible (klass, iface, FALSE)) { *result = TRUE; return; } } } *result = FALSE; return; } else if (m_class_is_delegate (klass)) { if (mono_class_has_variant_generic_params (klass) && mono_class_is_variant_compatible (klass, oklass, FALSE)) { *result = TRUE; return; } } else if (m_class_get_rank (klass)) { MonoClass *eclass, *eoclass; if (m_class_get_rank (oklass) != m_class_get_rank (klass)) { *result = FALSE; return; } /* vectors vs. 
one dimensional arrays */ if (oklass_byval_arg->type != klass_byval_arg->type) { *result = FALSE; return; } if (signature_assignment) { eclass = composite_type_to_reduced_element_type (klass); eoclass = composite_type_to_reduced_element_type (oklass); } else { eclass = m_class_get_cast_class (klass); eoclass = m_class_get_cast_class (oklass); } /* * a is b does not imply a[] is b[] when a is a valuetype, and * b is a reference type. */ if (m_class_is_valuetype (eoclass)) { if ((eclass == mono_defaults.enum_class) || (eclass == m_class_get_parent (mono_defaults.enum_class)) || (!m_class_is_valuetype (eclass))) { *result = FALSE; return; } } /* * a is b does not imply a[] is b[] in the case where b is an interface and * a is a generic parameter, unless a has an additional class constraint. * For example (C#): * ``` * interface I {} * class G<T> where T : I {} * class H<U> where U : class, I {} * public class P { * public static void Main() { * var t = typeof(G<>).GetTypeInfo().GenericTypeParameters[0].MakeArrayType(); * var i = typeof(I).MakeArrayType(); * var u = typeof(H<>).GetTypeInfo().GenericTypeParameters[0].MakeArrayType(); * Console.WriteLine("I[] assignable from T[] ? {0}", i.IsAssignableFrom(t)); * Console.WriteLine("I[] assignable from U[] ? {0}", i.IsAssignableFrom(u)); * } * } * ``` * This should print: * I[] assignable from T[] ? False * I[] assignable from U[] ? True */ if (MONO_CLASS_IS_INTERFACE_INTERNAL (eclass)) { MonoType *eoclass_byval_arg = m_class_get_byval_arg (eoclass); if (mono_type_is_generic_argument (eoclass_byval_arg)) { MonoGenericParam *eoparam = eoclass_byval_arg->data.generic_param; MonoGenericParamInfo *eoinfo = mono_generic_param_info (eoparam); int eomask = eoinfo->flags & GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK; // check for class constraint if ((eomask & GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT) == 0) { *result = FALSE; return; } } } if (mono_class_is_nullable (eclass) ^ mono_class_is_nullable (eoclass)) { *result = FALSE; return; } mono_class_is_assignable_from_checked (eclass, eoclass, result, error); return; } else if (mono_class_is_nullable (klass)) { if (mono_class_is_nullable (oklass)) mono_class_is_assignable_from_checked (m_class_get_cast_class (klass), m_class_get_cast_class (oklass), result, error); else mono_class_is_assignable_from_checked (m_class_get_cast_class (klass), oklass, result, error); return; } else if (m_class_get_class_kind (klass) == MONO_CLASS_POINTER) { if (m_class_get_class_kind (oklass) != MONO_CLASS_POINTER) { *result = FALSE; return; } if (m_class_get_byval_arg (klass)->type == MONO_TYPE_FNPTR) { /* * if both klass and oklass are fnptr, and they're equal, we would have returned at the * beginning. */ /* Is this right? or do we need to look at signature compatability? 
*/ *result = FALSE; return; } if (m_class_get_byval_arg (oklass)->type != MONO_TYPE_PTR) { *result = FALSE; } g_assert (m_class_get_byval_arg (klass)->type == MONO_TYPE_PTR); MonoClass *eclass; MonoClass *eoclass; if (signature_assignment) { eclass = composite_type_to_reduced_element_type (klass); eoclass = composite_type_to_reduced_element_type (oklass); } else { eclass = m_class_get_cast_class (klass); eoclass = m_class_get_cast_class (oklass); } *result = (eclass == eoclass); return; } else if (klass == mono_defaults.object_class) { if (m_class_get_class_kind (oklass) == MONO_CLASS_POINTER) *result = FALSE; else *result = TRUE; return; } *result = mono_class_has_parent (oklass, klass); } /*Check if @oklass is variant compatible with @klass.*/ static gboolean mono_class_is_variant_compatible_slow (MonoClass *klass, MonoClass *oklass) { int j; MonoType **klass_argv, **oklass_argv; MonoClass *klass_gtd = mono_class_get_generic_type_definition (klass); MonoGenericContainer *container = mono_class_get_generic_container (klass_gtd); /*Viable candidates are instances of the same generic interface*/ if (mono_class_get_generic_type_definition (oklass) != klass_gtd || oklass == klass_gtd) return FALSE; klass_argv = &mono_class_get_generic_class (klass)->context.class_inst->type_argv [0]; oklass_argv = &mono_class_get_generic_class (oklass)->context.class_inst->type_argv [0]; for (j = 0; j < container->type_argc; ++j) { MonoClass *param1_class = mono_class_from_mono_type_internal (klass_argv [j]); MonoClass *param2_class = mono_class_from_mono_type_internal (oklass_argv [j]); if (m_class_is_valuetype (param1_class) != m_class_is_valuetype (param2_class)) return FALSE; /* * The _VARIANT and _COVARIANT constants should read _COVARIANT and * _CONTRAVARIANT, but they are in a public header so we can't fix it. */ if (param1_class != param2_class) { if (mono_generic_container_get_param_info (container, j)->flags & MONO_GEN_PARAM_VARIANT) { if (!mono_class_is_assignable_from_slow (param1_class, param2_class)) return FALSE; } else if (mono_generic_container_get_param_info (container, j)->flags & MONO_GEN_PARAM_COVARIANT) { if (!mono_class_is_assignable_from_slow (param2_class, param1_class)) return FALSE; } else return FALSE; } } return TRUE; } /*Check if @candidate implements the interface @target*/ static gboolean mono_class_implement_interface_slow (MonoClass *target, MonoClass *candidate) { ERROR_DECL (error); int i; gboolean is_variant = mono_class_has_variant_generic_params (target); if (is_variant && MONO_CLASS_IS_INTERFACE_INTERNAL (candidate)) { if (mono_class_is_variant_compatible_slow (target, candidate)) return TRUE; } do { if (candidate == target) return TRUE; /*A TypeBuilder can have more interfaces on tb->interfaces than on candidate->interfaces*/ if (image_is_dynamic (m_class_get_image (candidate)) && !m_class_was_typebuilder (candidate)) { MonoReflectionTypeBuilder *tb = mono_class_get_ref_info_raw (candidate); /* FIXME use handles */ int j; if (tb && tb->interfaces) { for (j = mono_array_length_internal (tb->interfaces) - 1; j >= 0; --j) { MonoReflectionType *iface = mono_array_get_internal (tb->interfaces, MonoReflectionType*, j); MonoClass *iface_class; /* we can't realize the type here since it can do pretty much anything. 
*/ if (!iface->type) continue; iface_class = mono_class_from_mono_type_internal (iface->type); if (iface_class == target) return TRUE; if (is_variant && mono_class_is_variant_compatible_slow (target, iface_class)) return TRUE; if (mono_class_implement_interface_slow (target, iface_class)) return TRUE; } } } else { /*setup_interfaces don't mono_class_init_internal anything*/ /*FIXME this doesn't handle primitive type arrays. ICollection<sbyte> x byte [] won't work because candidate->interfaces, for byte[], won't have IList<sbyte>. A possible way to fix this would be to move that to setup_interfaces from setup_interface_offsets. */ mono_class_setup_interfaces (candidate, error); if (!is_ok (error)) { mono_error_cleanup (error); return FALSE; } int candidate_interface_count = m_class_get_interface_count (candidate); MonoClass **candidate_interfaces = m_class_get_interfaces (candidate); for (i = 0; i < candidate_interface_count; ++i) { if (candidate_interfaces [i] == target) return TRUE; if (is_variant && mono_class_is_variant_compatible_slow (target, candidate_interfaces [i])) return TRUE; if (mono_class_implement_interface_slow (target, candidate_interfaces [i])) return TRUE; } } candidate = m_class_get_parent (candidate); } while (candidate); return FALSE; } /* * Check if @oklass can be assigned to @klass. * This function does the same as mono_class_is_assignable_from_internal but is safe to be used from mono_class_init_internal context. */ gboolean mono_class_is_assignable_from_slow (MonoClass *target, MonoClass *candidate) { if (candidate == target) return TRUE; if (target == mono_defaults.object_class) return TRUE; if (mono_class_has_parent (candidate, target)) return TRUE; /*If target is not an interface there is no need to check them.*/ if (MONO_CLASS_IS_INTERFACE_INTERNAL (target)) return mono_class_implement_interface_slow (target, candidate); if (m_class_is_delegate (target) && mono_class_has_variant_generic_params (target)) return mono_class_is_variant_compatible (target, candidate, FALSE); if (m_class_get_rank (target)) { MonoClass *eclass, *eoclass; if (m_class_get_rank (target) != m_class_get_rank (candidate)) return FALSE; /* vectors vs. one dimensional arrays */ if (m_class_get_byval_arg (target)->type != m_class_get_byval_arg (candidate)->type) return FALSE; eclass = m_class_get_cast_class (target); eoclass = m_class_get_cast_class (candidate); /* * a is b does not imply a[] is b[] when a is a valuetype, and * b is a reference type. */ if (m_class_is_valuetype (eoclass)) { if ((eclass == mono_defaults.enum_class) || (eclass == m_class_get_parent (mono_defaults.enum_class)) || (eclass == mono_defaults.object_class)) return FALSE; } return mono_class_is_assignable_from_slow (eclass, eoclass); } /*FIXME properly handle nullables */ /*FIXME properly handle (M)VAR */ return FALSE; } /** * mono_generic_param_get_base_type: * * Return the base type of the given generic parameter from its constraints. * * Could be another generic parameter, or it could be Object or ValueType. 
*/ MonoClass* mono_generic_param_get_base_type (MonoClass *klass) { MonoType *type = m_class_get_byval_arg (klass); g_assert (mono_type_is_generic_argument (type)); MonoGenericParam *gparam = type->data.generic_param; g_assert (gparam->owner && !gparam->owner->is_anonymous); MonoClass **constraints = mono_generic_container_get_param_info (gparam->owner, gparam->num)->constraints; MonoClass *base_class = mono_defaults.object_class; if (constraints) { int i; for (i = 0; constraints [i]; ++i) { MonoClass *constraint = constraints[i]; if (MONO_CLASS_IS_INTERFACE_INTERNAL (constraint)) continue; MonoType *constraint_type = m_class_get_byval_arg (constraint); if (mono_type_is_generic_argument (constraint_type)) { MonoGenericParam *constraint_param = constraint_type->data.generic_param; MonoGenericParamInfo *constraint_info = mono_generic_param_info (constraint_param); if ((constraint_info->flags & GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT) == 0 && (constraint_info->flags & GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT) == 0) continue; } base_class = constraint; } } if (base_class == mono_defaults.object_class) { MonoGenericParamInfo *gparam_info = mono_generic_param_info (gparam); if ((gparam_info->flags & GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT) != 0) { base_class = mono_class_get_valuetype_class (); } } return base_class; } /** * mono_class_get_cctor: * \param klass A MonoClass pointer * * \returns The static constructor of \p klass if it exists, NULL otherwise. */ MonoMethod* mono_class_get_cctor (MonoClass *klass) { MonoMethod *result = NULL; ERROR_DECL (error); MonoCachedClassInfo cached_info; if (image_is_dynamic (m_class_get_image (klass))) { /* * has_cctor is not set for these classes because mono_class_init_internal () is * not run for them. */ result = mono_class_get_method_from_name_checked (klass, ".cctor", -1, METHOD_ATTRIBUTE_SPECIAL_NAME, error); mono_error_assert_msg_ok (error, "Could not lookup class cctor in dynamic image"); return result; } mono_class_init_internal (klass); if (!m_class_has_cctor (klass)) return result; if (mono_class_is_ginst (klass) && !m_class_get_methods (klass)) { result = mono_class_get_inflated_method (klass, mono_class_get_cctor (mono_class_get_generic_class (klass)->container_class), error); mono_error_assert_msg_ok (error, "Could not lookup inflated class cctor"); /* FIXME do proper error handling */ return result; } if (mono_class_get_cached_class_info (klass, &cached_info)) { result = mono_get_method_checked (m_class_get_image (klass), cached_info.cctor_token, klass, NULL, error); mono_error_assert_msg_ok (error, "Could not lookup class cctor from cached metadata"); return result; } result = mono_class_get_method_from_name_checked (klass, ".cctor", -1, METHOD_ATTRIBUTE_SPECIAL_NAME, error); mono_error_assert_msg_ok (error, "Could not lookup class cctor"); return result; } /** * mono_class_get_finalizer: * \param klass: The MonoClass pointer * * \returns The finalizer method of \p klass if it exists, NULL otherwise. 
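 *
 * A short sketch (assumes \c klass is an initialized class):
 *
 *     MonoMethod *fin = mono_class_get_finalizer (klass);
 *     if (fin)
 *         printf ("%s defines or inherits a finalizer: %s\n",
 *                 m_class_get_name (klass), mono_method_get_name (fin));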
*/ MonoMethod* mono_class_get_finalizer (MonoClass *klass) { MonoCachedClassInfo cached_info; if (!m_class_is_inited (klass)) mono_class_init_internal (klass); if (!mono_class_has_finalizer (klass)) return NULL; if (mono_class_get_cached_class_info (klass, &cached_info)) { ERROR_DECL (error); MonoMethod *result = mono_get_method_checked (cached_info.finalize_image, cached_info.finalize_token, NULL, NULL, error); mono_error_assert_msg_ok (error, "Could not lookup finalizer from cached metadata"); return result; }else { mono_class_setup_vtable (klass); return m_class_get_vtable (klass) [mono_class_get_object_finalize_slot ()]; } } /** * mono_class_needs_cctor_run: * \param klass the MonoClass pointer * \param caller a MonoMethod describing the caller * * Determines whenever the class has a static constructor and whenever it * needs to be called when executing CALLER. */ gboolean mono_class_needs_cctor_run (MonoClass *klass, MonoMethod *caller) { MonoMethod *method; method = mono_class_get_cctor (klass); if (method) return (method == caller) ? FALSE : TRUE; else return FALSE; } /** * mono_class_array_element_size: * \param klass * * \returns The number of bytes an element of type \p klass uses when stored into an array. */ gint32 mono_class_array_element_size (MonoClass *klass) { MonoType *type = m_class_get_byval_arg (klass); handle_enum: switch (type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_BOOLEAN: return 1; case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: return 2; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_R4: return 4; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: case MONO_TYPE_CLASS: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: return TARGET_SIZEOF_VOID_P; case MONO_TYPE_I8: case MONO_TYPE_U8: case MONO_TYPE_R8: return 8; case MONO_TYPE_VALUETYPE: if (m_class_is_enumtype (type->data.klass)) { type = mono_class_enum_basetype_internal (type->data.klass); klass = m_class_get_element_class (klass); goto handle_enum; } return mono_class_value_size (klass, NULL); case MONO_TYPE_GENERICINST: type = m_class_get_byval_arg (type->data.generic_class->container_class); goto handle_enum; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: { int align; return mono_type_size (type, &align); } case MONO_TYPE_VOID: return 0; default: g_error ("unknown type 0x%02x in mono_class_array_element_size", type->type); } return -1; } /** * mono_array_element_size: * \param ac pointer to a \c MonoArrayClass * * \returns The size of single array element. * * LOCKING: Acquires the loader lock. 
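 *
 * For example, the bytes occupied by the elements of an array can be
 * computed as follows (sketch; \c len is a hypothetical element count):
 *
 *     gsize total = (gsize) mono_array_element_size (ac) * (gsize) len;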
*/ gint32 mono_array_element_size (MonoClass *ac) { g_assert (m_class_get_rank (ac)); if (G_UNLIKELY (!m_class_is_size_inited (ac))) { mono_class_setup_fields (ac); } return m_class_get_sizes (ac).element_size; } /** * mono_ldtoken: */ gpointer mono_ldtoken (MonoImage *image, guint32 token, MonoClass **handle_class, MonoGenericContext *context) { gpointer res; MONO_ENTER_GC_UNSAFE; ERROR_DECL (error); res = mono_ldtoken_checked (image, token, handle_class, context, error); mono_error_assert_ok (error); MONO_EXIT_GC_UNSAFE; return res; } gpointer mono_ldtoken_checked (MonoImage *image, guint32 token, MonoClass **handle_class, MonoGenericContext *context, MonoError *error) { error_init (error); if (image_is_dynamic (image)) { MonoClass *tmp_handle_class; gpointer obj = mono_lookup_dynamic_token_class (image, token, TRUE, &tmp_handle_class, context, error); mono_error_assert_ok (error); g_assert (tmp_handle_class); if (handle_class) *handle_class = tmp_handle_class; if (tmp_handle_class == mono_defaults.typehandle_class) return m_class_get_byval_arg ((MonoClass*)obj); else return obj; } switch (token & 0xff000000) { case MONO_TOKEN_TYPE_DEF: case MONO_TOKEN_TYPE_REF: case MONO_TOKEN_TYPE_SPEC: { MonoType *type; MonoClass *klass; if (handle_class) *handle_class = mono_defaults.typehandle_class; type = mono_type_get_checked (image, token, context, error); if (!type) return NULL; klass = mono_class_from_mono_type_internal (type); mono_class_init_internal (klass); if (mono_class_has_failure (klass)) { mono_error_set_for_class_failure (error, klass); return NULL; } /* We return a MonoType* as handle */ return type; } case MONO_TOKEN_FIELD_DEF: { MonoClass *klass; guint32 type = mono_metadata_typedef_from_field (image, mono_metadata_token_index (token)); if (!type) { mono_error_set_bad_image (error, image, "Bad ldtoken %x", token); return NULL; } if (handle_class) *handle_class = mono_defaults.fieldhandle_class; klass = mono_class_get_and_inflate_typespec_checked (image, MONO_TOKEN_TYPE_DEF | type, context, error); if (!klass) return NULL; mono_class_init_internal (klass); return mono_class_get_field (klass, token); } case MONO_TOKEN_METHOD_DEF: case MONO_TOKEN_METHOD_SPEC: { MonoMethod *meth; meth = mono_get_method_checked (image, token, NULL, context, error); if (handle_class) *handle_class = mono_defaults.methodhandle_class; if (!meth) return NULL; return meth; } case MONO_TOKEN_MEMBER_REF: { guint32 cols [MONO_MEMBERREF_SIZE]; const char *sig; mono_metadata_decode_row (&image->tables [MONO_TABLE_MEMBERREF], mono_metadata_token_index (token) - 1, cols, MONO_MEMBERREF_SIZE); sig = mono_metadata_blob_heap (image, cols [MONO_MEMBERREF_SIGNATURE]); mono_metadata_decode_blob_size (sig, &sig); if (*sig == 0x6) { /* it's a field */ MonoClass *klass; MonoClassField *field; field = mono_field_from_token_checked (image, token, &klass, context, error); if (handle_class) *handle_class = mono_defaults.fieldhandle_class; return field; } else { MonoMethod *meth; meth = mono_get_method_checked (image, token, NULL, context, error); if (handle_class) *handle_class = mono_defaults.methodhandle_class; return meth; } } default: mono_error_set_bad_image (error, image, "Bad ldtoken %x", token); } return NULL; } gpointer mono_lookup_dynamic_token (MonoImage *image, guint32 token, MonoGenericContext *context, MonoError *error) { MonoClass *handle_class; error_init (error); return mono_reflection_lookup_dynamic_token (image, token, TRUE, &handle_class, context, error); } gpointer mono_lookup_dynamic_token_class 
(MonoImage *image, guint32 token, gboolean valid_token, MonoClass **handle_class, MonoGenericContext *context, MonoError *error) { return mono_reflection_lookup_dynamic_token (image, token, valid_token, handle_class, context, error); } static MonoGetCachedClassInfo get_cached_class_info = NULL; void mono_install_get_cached_class_info (MonoGetCachedClassInfo func) { get_cached_class_info = func; } gboolean mono_class_get_cached_class_info (MonoClass *klass, MonoCachedClassInfo *res) { if (!get_cached_class_info) return FALSE; else return get_cached_class_info (klass, res); } void mono_install_get_class_from_name (MonoGetClassFromName func) { get_class_from_name = func; } /** * mono_class_get_image: * * Use this method to get the \c MonoImage* where this class came from. * * \returns The image where this class is defined. */ MonoImage* mono_class_get_image (MonoClass *klass) { return m_class_get_image (klass); } /** * mono_class_get_element_class: * \param klass the \c MonoClass to act on * * Use this function to get the element class of an array. * * \returns The element class of an array. */ MonoClass* mono_class_get_element_class (MonoClass *klass) { MonoClass *result; MONO_ENTER_GC_UNSAFE; result = m_class_get_element_class (klass); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_class_is_valuetype: * \param klass the \c MonoClass to act on * * Use this method to determine if the provided \c MonoClass* represents a value type, * or a reference type. * * \returns TRUE if the \c MonoClass represents a \c ValueType, FALSE if it represents a reference type. */ gboolean mono_class_is_valuetype (MonoClass *klass) { gboolean result; MONO_ENTER_GC_UNSAFE; result = m_class_is_valuetype (klass); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_class_is_enum: * \param klass the \c MonoClass to act on * * Use this function to determine if the provided \c MonoClass* represents an enumeration. * * \returns TRUE if the \c MonoClass represents an enumeration. */ gboolean mono_class_is_enum (MonoClass *klass) { gboolean result; MONO_ENTER_GC_UNSAFE; result = m_class_is_enumtype (klass); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_class_enum_basetype_internal: * \param klass the \c MonoClass to act on * * Use this function to get the underlying type for an enumeration value. * * \returns The underlying type representation for an enumeration. */ MonoType* mono_class_enum_basetype_internal (MonoClass *klass) { if (m_class_get_element_class (klass) == klass) /* SRE or broken types */ return NULL; return m_class_get_byval_arg (m_class_get_element_class (klass)); } /** * mono_class_enum_basetype: * \param klass the \c MonoClass to act on * * Use this function to get the underlying type for an enumeration value. * * \returns The underlying type representation for an enumeration. */ MonoType* mono_class_enum_basetype (MonoClass *klass) { MonoType *res; MONO_ENTER_GC_UNSAFE; res = mono_class_enum_basetype_internal (klass); MONO_EXIT_GC_UNSAFE; return res; } /** * mono_class_get_parent * \param klass the \c MonoClass to act on * * \returns The parent class for this class. */ MonoClass* mono_class_get_parent (MonoClass *klass) { MonoClass *result; MONO_ENTER_GC_UNSAFE; result = m_class_get_parent (klass); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_class_get_nesting_type: * \param klass the \c MonoClass to act on * * Use this function to obtain the class that the provided \c MonoClass* is nested on. * * If the return is NULL, this indicates that this class is not nested. 
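 *
 * For example, walking out to the outermost enclosing type (sketch):
 *
 *     MonoClass *outer = klass;
 *     while (mono_class_get_nesting_type (outer))
 *         outer = mono_class_get_nesting_type (outer);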
* * \returns The container type where this type is nested or NULL if this type is not a nested type. */ MonoClass* mono_class_get_nesting_type (MonoClass *klass) { return m_class_get_nested_in (klass); } /** * mono_class_get_rank: * \param klass the MonoClass to act on * * \returns The rank for the array (the number of dimensions). */ int mono_class_get_rank (MonoClass *klass) { return m_class_get_rank (klass); } /** * mono_class_get_name * \param klass the \c MonoClass to act on * * \returns The name of the class. */ const char* mono_class_get_name (MonoClass *klass) { const char *result; MONO_ENTER_GC_UNSAFE; result = m_class_get_name (klass); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_class_get_namespace: * \param klass the \c MonoClass to act on * * \returns The namespace of the class. */ const char* mono_class_get_namespace (MonoClass *klass) { const char *result; MONO_ENTER_GC_UNSAFE; result = m_class_get_name_space (klass); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_class_get_type: * \param klass the \c MonoClass to act on * * This method returns the internal \c MonoType representation for the class. * * \returns The \c MonoType from the class. */ MonoType* mono_class_get_type (MonoClass *klass) { return m_class_get_byval_arg (klass); } /** * mono_class_get_type_token: * \param klass the \c MonoClass to act on * * This method returns type token for the class. * * \returns The type token for the class. */ guint32 mono_class_get_type_token (MonoClass *klass) { return m_class_get_type_token (klass); } /** * mono_class_get_byref_type: * \param klass the \c MonoClass to act on * * */ MonoType* mono_class_get_byref_type (MonoClass *klass) { return m_class_get_this_arg (klass); } /** * mono_class_num_fields: * \param klass the \c MonoClass to act on * * \returns The number of static and instance fields in the class. */ int mono_class_num_fields (MonoClass *klass) { return mono_class_get_field_count (klass); } /** * mono_class_num_methods: * \param klass the \c MonoClass to act on * * \returns The number of methods in the class. */ int mono_class_num_methods (MonoClass *klass) { return mono_class_get_method_count (klass); } /** * mono_class_num_properties * \param klass the \c MonoClass to act on * * \returns The number of properties in the class. */ int mono_class_num_properties (MonoClass *klass) { mono_class_setup_properties (klass); return mono_class_get_property_info (klass)->count; } /** * mono_class_num_events: * \param klass the \c MonoClass to act on * * \returns The number of events in the class. */ int mono_class_num_events (MonoClass *klass) { mono_class_setup_events (klass); return mono_class_get_event_info (klass)->count; } /** * mono_class_get_fields: * \param klass the \c MonoClass to act on * * This routine is an iterator routine for retrieving the fields in a class. * * You must pass a \c gpointer that points to zero and is treated as an opaque handle to * iterate over all of the elements. When no more values are * available, the return value is NULL. * * \returns a \c MonoClassField* on each iteration, or NULL when no more fields are available. 
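 *
 * Typical iteration pattern (sketch):
 *
 *     gpointer iter = NULL;
 *     MonoClassField *field;
 *     while ((field = mono_class_get_fields (klass, &iter)))
 *         printf ("field: %s\n", mono_field_get_name (field));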
 */
MonoClassField*
mono_class_get_fields (MonoClass* klass, gpointer *iter)
{
    MonoClassField *result;
    MONO_ENTER_GC_UNSAFE;
    result = mono_class_get_fields_internal (klass, iter);
    MONO_EXIT_GC_UNSAFE;
    return result;
}

MonoClassField*
mono_class_get_fields_internal (MonoClass *klass, gpointer *iter)
{
    MonoClassField* field;
    if (!iter)
        return NULL;
    if (!*iter) {
        mono_class_setup_fields (klass);
        if (mono_class_has_failure (klass))
            return NULL;
        /* start from the first */
        if (mono_class_get_field_count (klass)) {
            MonoClassField *klass_fields = m_class_get_fields (klass);
            *iter = &klass_fields [0];
            return &klass_fields [0];
        } else {
            /* no fields */
            return NULL;
        }
    }
    field = (MonoClassField *)*iter;
    field++;
    if (field < &m_class_get_fields (klass) [mono_class_get_field_count (klass)]) {
        *iter = field;
        return field;
    }
    return NULL;
}

/**
 * mono_class_get_methods:
 * \param klass the \c MonoClass to act on
 *
 * This routine is an iterator routine for retrieving the methods in a class.
 *
 * You must pass a \c gpointer that points to zero and is treated as an opaque handle to
 * iterate over all of the elements. When no more values are
 * available, the return value is NULL.
 *
 * \returns a \c MonoMethod on each iteration or NULL when no more methods are available.
 */
MonoMethod*
mono_class_get_methods (MonoClass* klass, gpointer *iter)
{
    MonoMethod** method;
    if (!iter)
        return NULL;
    if (!*iter) {
        mono_class_setup_methods (klass);

        MonoMethod **klass_methods = m_class_get_methods (klass);
        /*
         * We can't fail lookup of methods, otherwise the runtime will burst into flames in all sorts of places.
         * FIXME: we should report this error to the caller properly.
         */
        if (!klass_methods)
            return NULL;
        /* start from the first */
        if (mono_class_get_method_count (klass)) {
            *iter = &klass_methods [0];
            return klass_methods [0];
        } else {
            /* no methods */
            return NULL;
        }
    }
    method = (MonoMethod **)*iter;
    method++;
    if (method < &m_class_get_methods (klass) [mono_class_get_method_count (klass)]) {
        *iter = method;
        return *method;
    }
    return NULL;
}

/**
 * mono_class_get_properties:
 * \param klass the \c MonoClass to act on
 *
 * This routine is an iterator routine for retrieving the properties in a class.
 *
 * You must pass a \c gpointer that points to zero and is treated as an opaque handle to
 * iterate over all of the elements. When no more values are
 * available, the return value is NULL.
 *
 * \returns a \c MonoProperty* on each invocation, or NULL when no more are available.
 */
MonoProperty*
mono_class_get_properties (MonoClass* klass, gpointer *iter)
{
    MonoProperty* property;
    if (!iter)
        return NULL;
    if (!*iter) {
        mono_class_setup_properties (klass);
        MonoClassPropertyInfo *info = mono_class_get_property_info (klass);
        /* start from the first */
        if (info->count) {
            *iter = &info->properties [0];
            return (MonoProperty *)*iter;
        } else {
            /* no properties */
            return NULL;
        }
    }
    property = (MonoProperty *)*iter;
    property++;
    MonoClassPropertyInfo *info = mono_class_get_property_info (klass);
    if (property < &info->properties [info->count]) {
        *iter = property;
        return (MonoProperty *)*iter;
    }
    return NULL;
}

/**
 * mono_class_get_events:
 * \param klass the \c MonoClass to act on
 *
 * This routine is an iterator routine for retrieving the events in a class.
 *
 * You must pass a \c gpointer that points to zero and is treated as an opaque handle to
 * iterate over all of the elements. When no more values are
 * available, the return value is NULL.
 *
 * \returns a \c MonoEvent* on each invocation, or NULL when no more are available.
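 *
 * Iteration follows the same pattern as the other iterators in this file (sketch):
 *
 *     gpointer iter = NULL;
 *     MonoEvent *event;
 *     while ((event = mono_class_get_events (klass, &iter)))
 *         printf ("event: %s\n", mono_event_get_name (event));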
*/ MonoEvent* mono_class_get_events (MonoClass* klass, gpointer *iter) { MonoEvent* event; if (!iter) return NULL; if (!*iter) { mono_class_setup_events (klass); MonoClassEventInfo *info = mono_class_get_event_info (klass); /* start from the first */ if (info->count) { *iter = &info->events [0]; return (MonoEvent *)*iter; } else { /* no fields */ return NULL; } } event = (MonoEvent *)*iter; event++; MonoClassEventInfo *info = mono_class_get_event_info (klass); if (event < &info->events [info->count]) { *iter = event; return (MonoEvent *)*iter; } return NULL; } /** * mono_class_get_interfaces * \param klass the \c MonoClass to act on * * This routine is an iterator routine for retrieving the interfaces implemented by this class. * * You must pass a \c gpointer that points to zero and is treated as an opaque handle to * iterate over all of the elements. When no more values are * available, the return value is NULL. * * \returns a \c MonoClass* on each invocation, or NULL when no more are available. */ MonoClass* mono_class_get_interfaces (MonoClass* klass, gpointer *iter) { ERROR_DECL (error); MonoClass** iface; if (!iter) return NULL; if (!*iter) { if (!m_class_is_inited (klass)) mono_class_init_internal (klass); if (!m_class_is_interfaces_inited (klass)) { mono_class_setup_interfaces (klass, error); if (!is_ok (error)) { mono_error_cleanup (error); return NULL; } } /* start from the first */ if (m_class_get_interface_count (klass)) { *iter = &m_class_get_interfaces (klass) [0]; return m_class_get_interfaces (klass) [0]; } else { /* no interface */ return NULL; } } iface = (MonoClass **)*iter; iface++; if (iface < &m_class_get_interfaces (klass) [m_class_get_interface_count (klass)]) { *iter = iface; return *iface; } return NULL; } /** * mono_class_get_nested_types * \param klass the \c MonoClass to act on * * This routine is an iterator routine for retrieving the nested types of a class. * This works only if \p klass is non-generic, or a generic type definition. * * You must pass a \c gpointer that points to zero and is treated as an opaque handle to * iterate over all of the elements. When no more values are * available, the return value is NULL. * * \returns a \c Monoclass* on each invocation, or NULL when no more are available. */ MonoClass* mono_class_get_nested_types (MonoClass* klass, gpointer *iter) { GList *item; if (!iter) return NULL; if (!m_class_is_nested_classes_inited (klass)) mono_class_setup_nested_types (klass); if (!*iter) { GList *nested_classes = mono_class_get_nested_classes_property (klass); /* start from the first */ if (nested_classes) { *iter = nested_classes; return (MonoClass *)nested_classes->data; } else { /* no nested types */ return NULL; } } item = (GList *)*iter; item = item->next; if (item) { *iter = item; return (MonoClass *)item->data; } return NULL; } /** * mono_class_is_delegate * \param klass the \c MonoClass to act on * * \returns TRUE if the \c MonoClass represents a \c System.Delegate. */ mono_bool mono_class_is_delegate (MonoClass *klass) { mono_bool result; MONO_ENTER_GC_UNSAFE; result = m_class_is_delegate (klass); MONO_EXIT_GC_UNSAFE; return result; } /** * mono_class_implements_interface * \param klass The MonoClass to act on * \param interface The interface to check if \p klass implements. * * \returns TRUE if \p klass implements \p interface. 
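 *
 * For example (sketch; assumes corlib is loaded):
 *
 *     MonoClass *idisp = mono_class_from_name (mono_get_corlib (), "System", "IDisposable");
 *     if (idisp && mono_class_implements_interface (klass, idisp))
 *         ; // klass is assignable to IDisposable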
*/ mono_bool mono_class_implements_interface (MonoClass* klass, MonoClass* iface) { mono_bool result; MONO_ENTER_GC_UNSAFE; result = mono_class_is_assignable_from_internal (iface, klass); MONO_EXIT_GC_UNSAFE; return result; } static mono_bool class_implements_interface_ignore_generics (MonoClass* klass, MonoClass* iface) { int i; ERROR_DECL (error); if (mono_class_is_ginst (iface)) iface = mono_class_get_generic_type_definition (iface); while (klass != NULL) { if (mono_class_is_assignable_from_internal (iface, klass)) return TRUE; mono_class_setup_interfaces (klass, error); if (!is_ok (error)) { mono_error_cleanup (error); return FALSE; } MonoClass **klass_interfaces = m_class_get_interfaces (klass); for (i = 0; i < m_class_get_interface_count (klass); i++) { MonoClass *ic = klass_interfaces [i]; if (mono_class_is_ginst (ic)) ic = mono_class_get_generic_type_definition (ic); if (ic == iface) { return TRUE; } } klass = m_class_get_parent (klass); } return FALSE; } /** * mono_field_get_name: * \param field the \c MonoClassField to act on * * \returns The name of the field. */ const char* mono_field_get_name (MonoClassField *field) { return field->name; } /** * mono_field_get_type_internal: * \param field the \c MonoClassField to act on * \returns \c MonoType of the field. */ MonoType* mono_field_get_type_internal (MonoClassField *field) { MonoType *type = field->type; if (type) return type; ERROR_DECL (error); type = mono_field_get_type_checked (field, error); if (!is_ok (error)) { mono_trace_warning (MONO_TRACE_TYPE, "Could not load field's type due to %s", mono_error_get_message (error)); mono_error_cleanup (error); } return type; } /** * mono_field_get_type: * \param field the \c MonoClassField to act on * \returns \c MonoType of the field. */ MonoType* mono_field_get_type (MonoClassField *field) { MonoType *type = field->type; if (type) return type; MONO_ENTER_GC_UNSAFE; type = mono_field_get_type_internal (field); MONO_EXIT_GC_UNSAFE; return type; } /** * mono_field_get_type_checked: * \param field the \c MonoClassField to act on * \param error used to return any error found while retrieving \p field type * * \returns \c MonoType of the field. */ MonoType* mono_field_get_type_checked (MonoClassField *field, MonoError *error) { error_init (error); MonoType *type = field->type; if (type) return type; mono_field_resolve_type (field, error); return field->type; } /** * mono_field_get_parent: * \param field the \c MonoClassField to act on * * \returns \c MonoClass where the field was defined. */ MonoClass* mono_field_get_parent (MonoClassField *field) { return m_field_get_parent (field); } /** * mono_field_get_flags; * \param field the \c MonoClassField to act on * * The metadata flags for a field are encoded using the * \c FIELD_ATTRIBUTE_* constants. See the \c tabledefs.h file for details. * * \returns The flags for the field. */ guint32 mono_field_get_flags (MonoClassField *field) { if (!field->type) return mono_field_resolve_flags (field); return field->type->attrs; } /** * mono_field_get_offset: * \param field the \c MonoClassField to act on * * \returns The field offset. 
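 *
 * A sketch that dumps every field's offset (note that for instance fields the
 * offset is typically relative to the start of the object, header included):
 *
 *     gpointer iter = NULL;
 *     MonoClassField *f;
 *     while ((f = mono_class_get_fields (klass, &iter)))
 *         printf ("%s: %u\n", mono_field_get_name (f), mono_field_get_offset (f));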
*/ guint32 mono_field_get_offset (MonoClassField *field) { mono_class_setup_fields(m_field_get_parent (field)); return field->offset; } const char * mono_field_get_rva (MonoClassField *field, int swizzle) { guint32 rva; int field_index; MonoClass *klass = m_field_get_parent (field); MonoFieldDefaultValue *def_values; g_assert (field->type->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA); def_values = mono_class_get_field_def_values_with_swizzle (klass, swizzle); if (!def_values) { def_values = (MonoFieldDefaultValue *)mono_class_alloc0 (klass, sizeof (MonoFieldDefaultValue) * mono_class_get_field_count (klass)); mono_class_set_field_def_values_with_swizzle (klass, def_values, swizzle); } field_index = mono_field_get_index (field); if (!def_values [field_index].data) { const char *rvaData; if (!image_is_dynamic (m_class_get_image (klass))) { int first_field_idx = mono_class_get_first_field_idx (klass); mono_metadata_field_info (m_class_get_image (m_field_get_parent (field)), first_field_idx + field_index, NULL, &rva, NULL); if (!rva) g_warning ("field %s in %s should have RVA data, but hasn't", mono_field_get_name (field), m_class_get_name (m_field_get_parent (field))); rvaData = mono_image_rva_map (m_class_get_image (m_field_get_parent (field)), rva); } else { rvaData = mono_field_get_data (field); } if (rvaData == NULL) return NULL; if (swizzle != 1) { int dummy; int dataSizeInBytes = mono_type_size (field->type, &dummy); char *swizzledRvaData = mono_class_alloc0 (klass, dataSizeInBytes); #define SWAP(n) { \ guint ## n *data = (guint ## n *) swizzledRvaData; \ guint ## n *src = (guint ## n *) rvaData; \ int i, \ nEnt = (dataSizeInBytes / sizeof(guint ## n)); \ \ for (i = 0; i < nEnt; i++) { \ data[i] = read ## n (&src[i]); \ } \ } if (swizzle == 2) { SWAP (16); } else if (swizzle == 4) { SWAP (32); } else { SWAP (64); } #undef SWAP def_values [field_index].data = swizzledRvaData; } else { def_values [field_index].data = rvaData; } } return def_values [field_index].data; } /** * mono_field_get_data: * \param field the \c MonoClassField to act on * * \returns A pointer to the metadata constant value or to the field * data if it has an RVA flag. */ const char * mono_field_get_data (MonoClassField *field) { if (field->type->attrs & FIELD_ATTRIBUTE_HAS_DEFAULT) { MonoTypeEnum def_type; return mono_class_get_field_default_value (field, &def_type); } else if (field->type->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA) { return mono_field_get_rva (field, 1); } else { return NULL; } } /** * mono_property_get_name: * \param prop the \c MonoProperty to act on * \returns The name of the property */ const char* mono_property_get_name (MonoProperty *prop) { return prop->name; } /** * mono_property_get_set_method * \param prop the \c MonoProperty to act on. * \returns The setter method of the property, a \c MonoMethod. */ MonoMethod* mono_property_get_set_method (MonoProperty *prop) { return prop->set; } /** * mono_property_get_get_method * \param prop the MonoProperty to act on. * \returns The getter method of the property (A \c MonoMethod) */ MonoMethod* mono_property_get_get_method (MonoProperty *prop) { return prop->get; } /** * mono_property_get_parent: * \param prop the \c MonoProperty to act on. * \returns The \c MonoClass where the property was defined. */ MonoClass* mono_property_get_parent (MonoProperty *prop) { return prop->parent; } /** * mono_property_get_flags: * \param prop the \c MonoProperty to act on. * * The metadata flags for a property are encoded using the * \c PROPERTY_ATTRIBUTE_* constants. 
See the \c tabledefs.h file for details. * * \returns The flags for the property. */ guint32 mono_property_get_flags (MonoProperty *prop) { return prop->attrs; } /** * mono_event_get_name: * \param event the MonoEvent to act on * \returns The name of the event. */ const char* mono_event_get_name (MonoEvent *event) { return event->name; } /** * mono_event_get_add_method: * \param event The \c MonoEvent to act on. * \returns The \c add method for the event, a \c MonoMethod. */ MonoMethod* mono_event_get_add_method (MonoEvent *event) { return event->add; } /** * mono_event_get_remove_method: * \param event The \c MonoEvent to act on. * \returns The \c remove method for the event, a \c MonoMethod. */ MonoMethod* mono_event_get_remove_method (MonoEvent *event) { return event->remove; } /** * mono_event_get_raise_method: * \param event The \c MonoEvent to act on. * \returns The \c raise method for the event, a \c MonoMethod. */ MonoMethod* mono_event_get_raise_method (MonoEvent *event) { return event->raise; } /** * mono_event_get_parent: * \param event the MonoEvent to act on. * \returns The \c MonoClass where the event is defined. */ MonoClass* mono_event_get_parent (MonoEvent *event) { return event->parent; } /** * mono_event_get_flags * \param event the \c MonoEvent to act on. * * The metadata flags for an event are encoded using the * \c EVENT_* constants. See the \c tabledefs.h file for details. * * \returns The flags for the event. */ guint32 mono_event_get_flags (MonoEvent *event) { return event->attrs; } /** * mono_class_get_method_from_name: * \param klass where to look for the method * \param name name of the method * \param param_count number of parameters. -1 for any number. * * Obtains a \c MonoMethod with a given name and number of parameters. * It only works if there are no multiple signatures for any given method name. 
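 *
 * A minimal lookup sketch (hypothetical caller code; \c klass is assumed to be a
 * valid, already-resolved class):
 *
 *   MonoMethod *m = mono_class_get_method_from_name (klass, "ToString", 0);
 *   if (m) { /* inspect or invoke m */ }
 *
 * Passing -1 as \p param_count matches any arity, which is only safe when the
 * name is not overloaded.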
*/ MonoMethod * mono_class_get_method_from_name (MonoClass *klass, const char *name, int param_count) { MonoMethod *result; MONO_ENTER_GC_UNSAFE; ERROR_DECL (error); result = mono_class_get_method_from_name_checked (klass, name, param_count, 0, error); mono_error_cleanup (error); MONO_EXIT_GC_UNSAFE; return result; } MonoMethod* mono_find_method_in_metadata (MonoClass *klass, const char *name, int param_count, int flags) { MonoImage *klass_image = m_class_get_image (klass); MonoMethod *res = NULL; int i; /* Search directly in the metadata to avoid calling setup_methods () */ int first_idx = mono_class_get_first_method_idx (klass); int mcount = mono_class_get_method_count (klass); for (i = 0; i < mcount; ++i) { ERROR_DECL (error); guint32 cols [MONO_METHOD_SIZE]; MonoMethod *method; MonoMethodSignature *sig; /* first_idx points into the methodptr table */ mono_metadata_decode_table_row (klass_image, MONO_TABLE_METHOD, first_idx + i, cols, MONO_METHOD_SIZE); if (!strcmp (mono_metadata_string_heap (klass_image, cols [MONO_METHOD_NAME]), name)) { method = mono_get_method_checked (klass_image, MONO_TOKEN_METHOD_DEF | (first_idx + i + 1), klass, NULL, error); if (!method) { mono_error_cleanup (error); /* FIXME don't swallow the error */ continue; } if (param_count == -1) { res = method; break; } sig = mono_method_signature_checked (method, error); if (!sig) { mono_error_cleanup (error); /* FIXME don't swallow the error */ continue; } if (sig->param_count == param_count) { res = method; break; } } } if (G_UNLIKELY (!res && klass_image->has_updates)) { if (mono_class_has_metadata_update_info (klass)) { ERROR_DECL (error); res = mono_metadata_update_find_method_by_name (klass, name, param_count, flags, error); mono_error_cleanup (error); } } return res; } /** * mono_class_get_method_from_name_flags: * \param klass where to look for the method * \param name name of the method * \param param_count number of parameters. -1 for any number. * \param flags flags which must be set in the method * * Obtains a \c MonoMethod with a given name and number of parameters. * It only works if there are no multiple signatures for any given method name. */ MonoMethod * mono_class_get_method_from_name_flags (MonoClass *klass, const char *name, int param_count, int flags) { MonoMethod *method; MONO_ENTER_GC_UNSAFE; ERROR_DECL (error); method = mono_class_get_method_from_name_checked (klass, name, param_count, flags, error); mono_error_cleanup (error); MONO_EXIT_GC_UNSAFE; return method; } /** * mono_class_get_method_from_name_checked: * \param klass where to look for the method * \param name name of the method * \param param_count number of parameters. -1 for any number. * \param flags flags which must be set in the method * \param error * * Obtains a \c MonoMethod with a given name and number of parameters. * It only works if there are no multiple signatures for any given method name. 
*/ MonoMethod * mono_class_get_method_from_name_checked (MonoClass *klass, const char *name, int param_count, int flags, MonoError *error) { MonoMethod *res = NULL; int i; mono_class_init_internal (klass); if (mono_class_is_ginst (klass) && !m_class_get_methods (klass)) { res = mono_class_get_method_from_name_checked (mono_class_get_generic_class (klass)->container_class, name, param_count, flags, error); if (res) res = mono_class_inflate_generic_method_full_checked (res, klass, mono_class_get_context (klass), error); return res; } if (m_class_get_methods (klass) || !MONO_CLASS_HAS_STATIC_METADATA (klass)) { mono_class_setup_methods (klass); /* We can't fail lookup of methods otherwise the runtime will burst in flames in all sorts of places. See mono/tests/array_load_exception.il FIXME we should better report this error to the caller */ MonoMethod **klass_methods = m_class_get_methods (klass); gboolean has_updates = m_class_get_image (klass)->has_updates; if (!klass_methods && !has_updates) return NULL; int mcount = mono_class_get_method_count (klass); for (i = 0; i < mcount; ++i) { MonoMethod *method = klass_methods [i]; if (method->name[0] == name [0] && !strcmp (name, method->name) && (param_count == -1 || mono_method_signature_internal (method)->param_count == param_count) && ((method->flags & flags) == flags)) { res = method; break; } } if (G_UNLIKELY (!res && has_updates && mono_class_has_metadata_update_info (klass))) { res = mono_metadata_update_find_method_by_name (klass, name, param_count, flags, error); } } else { res = mono_find_method_in_metadata (klass, name, param_count, flags); } return res; } gboolean mono_class_has_failure (const MonoClass *klass) { g_assert (klass != NULL); return m_class_has_failure ((MonoClass*)klass) != 0; } /** * mono_class_set_type_load_failure: * \param klass class in which the failure was detected * \param fmt \c printf -style error message string. * * Collect detected failure information in the class for later processing. * The error is stored as a MonoErrorBoxed as with mono_error_set_type_load_class() * Note that only the first failure is kept. * * LOCKING: Acquires the loader lock. * * \returns FALSE if a failure was already set on the class, or TRUE otherwise. */ gboolean mono_class_set_type_load_failure (MonoClass *klass, const char * fmt, ...) { ERROR_DECL (prepare_error); va_list args; if (mono_class_has_failure (klass)) return FALSE; va_start (args, fmt); mono_error_vset_type_load_class (prepare_error, klass, fmt, args); va_end (args); MonoErrorBoxed *box = mono_error_box (prepare_error, m_class_get_image (klass)); mono_error_cleanup (prepare_error); return mono_class_set_failure (klass, box); } /** * mono_class_get_exception_for_failure: * \param klass class in which the failure was detected * * \returns a constructed MonoException that the caller can then throw * using mono_raise_exception - or NULL if no failure is present (or * doesn't result in an exception). 
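 *
 * Typical use is the pattern named above (a hypothetical caller sketch):
 *
 *   MonoException *ex = mono_class_get_exception_for_failure (klass);
 *   if (ex)
 *       mono_raise_exception (ex);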
*/ MonoException* mono_class_get_exception_for_failure (MonoClass *klass) { if (!mono_class_has_failure (klass)) return NULL; ERROR_DECL (unboxed_error); mono_error_set_for_class_failure (unboxed_error, klass); return mono_error_convert_to_exception (unboxed_error); } static gboolean is_nesting_type (MonoClass *outer_klass, MonoClass *inner_klass) { outer_klass = mono_class_get_generic_type_definition (outer_klass); inner_klass = mono_class_get_generic_type_definition (inner_klass); do { if (outer_klass == inner_klass) return TRUE; inner_klass = m_class_get_nested_in (inner_klass); } while (inner_klass); return FALSE; } MonoClass * mono_class_get_generic_type_definition (MonoClass *klass) { MonoGenericClass *gklass = mono_class_try_get_generic_class (klass); return gklass ? gklass->container_class : klass; } /* * Check if @klass is a subtype of @parent ignoring generic instantiations. * * Generic instantiations are ignored for all super types of @klass. * * Visibility checks ignoring generic instantiations. * * Class implementing interface visibility checks ignore generic instantiations */ gboolean mono_class_has_parent_and_ignore_generics (MonoClass *klass, MonoClass *parent) { int i; klass = mono_class_get_generic_type_definition (klass); parent = mono_class_get_generic_type_definition (parent); mono_class_setup_supertypes (klass); for (i = 0; i < m_class_get_idepth (klass); ++i) { if (parent == mono_class_get_generic_type_definition (m_class_get_supertypes (klass) [i])) return TRUE; } if (MONO_CLASS_IS_INTERFACE_INTERNAL (parent) && class_implements_interface_ignore_generics (klass, parent)) return TRUE; return FALSE; } /* * Subtype can only access parent members with family protection if the site object * is subclass of Subtype. For example: * class A { protected int x; } * class B : A { * void valid_access () { * B b; * b.x = 0; * } * void invalid_access () { * A a; * a.x = 0; * } * } * */ static gboolean is_valid_family_access (MonoClass *access_klass, MonoClass *member_klass, MonoClass *context_klass) { if (MONO_CLASS_IS_INTERFACE_INTERNAL (member_klass) && !MONO_CLASS_IS_INTERFACE_INTERNAL (access_klass)) { /* Can happen with default interface methods */ if (!class_implements_interface_ignore_generics (access_klass, member_klass)) return FALSE; } else if (member_klass != access_klass && MONO_CLASS_IS_INTERFACE_INTERNAL (member_klass) && MONO_CLASS_IS_INTERFACE_INTERNAL (access_klass)) { /* Can happen with default interface methods */ if (!mono_interface_implements_interface (access_klass, member_klass)) return FALSE; } else { if (!mono_class_has_parent_and_ignore_generics (access_klass, member_klass)) return FALSE; } if (context_klass == NULL) return TRUE; /*if access_klass is not member_klass context_klass must be type compat*/ if (access_klass != member_klass && !mono_class_has_parent_and_ignore_generics (context_klass, access_klass)) return FALSE; return TRUE; } static gboolean ignores_access_checks_to (MonoAssembly *accessing, MonoAssembly *accessed) { if (!accessing || !accessed) return FALSE; mono_assembly_load_friends (accessing); for (GSList *tmp = accessing->ignores_checks_assembly_names; tmp; tmp = tmp->next) { MonoAssemblyName *victim = (MonoAssemblyName *)tmp->data; if (!victim->name) continue; if (!g_ascii_strcasecmp (accessed->aname.name, victim->name)) return TRUE; } return FALSE; } static gboolean can_access_internals (MonoAssembly *accessing, MonoAssembly* accessed) { GSList *tmp; if (accessing == accessed) return TRUE; if (!accessed || !accessing) return FALSE; 
mono_assembly_load_friends (accessed); for (tmp = accessed->friend_assembly_names; tmp; tmp = tmp->next) { MonoAssemblyName *friend_ = (MonoAssemblyName *)tmp->data; /* Be conservative with checks */ if (!friend_->name) continue; if (g_ascii_strcasecmp (accessing->aname.name, friend_->name)) continue; if (friend_->public_key_token [0]) { if (!accessing->aname.public_key_token [0]) continue; if (!mono_public_tokens_are_equal (friend_->public_key_token, accessing->aname.public_key_token)) continue; } return TRUE; } return ignores_access_checks_to (accessing, accessed); } /* * If klass is a generic type or if it is derived from a generic type, return the * MonoClass of the generic definition * Returns NULL if not found */ static MonoClass* get_generic_definition_class (MonoClass *klass) { while (klass) { MonoGenericClass *gklass = mono_class_try_get_generic_class (klass); if (gklass && gklass->container_class) return gklass->container_class; klass = m_class_get_parent (klass); } return NULL; } static gboolean can_access_instantiation (MonoClass *access_klass, MonoGenericInst *ginst) { int i; for (i = 0; i < ginst->type_argc; ++i) { MonoType *type = ginst->type_argv[i]; switch (type->type) { case MONO_TYPE_SZARRAY: if (!can_access_type (access_klass, type->data.klass)) return FALSE; break; case MONO_TYPE_ARRAY: if (!can_access_type (access_klass, type->data.array->eklass)) return FALSE; break; case MONO_TYPE_PTR: if (!can_access_type (access_klass, mono_class_from_mono_type_internal (type->data.type))) return FALSE; break; case MONO_TYPE_CLASS: case MONO_TYPE_VALUETYPE: case MONO_TYPE_GENERICINST: if (!can_access_type (access_klass, mono_class_from_mono_type_internal (type))) return FALSE; default: break; } } return TRUE; } static gboolean can_access_type (MonoClass *access_klass, MonoClass *member_klass) { int access_level; if (access_klass == member_klass) return TRUE; MonoAssembly *access_klass_assembly = m_class_get_image (access_klass)->assembly; MonoAssembly *member_klass_assembly = m_class_get_image (member_klass)->assembly; if (m_class_get_element_class (access_klass) && !m_class_is_enumtype (access_klass)) { access_klass = m_class_get_element_class (access_klass); access_klass_assembly = m_class_get_image (access_klass)->assembly; } if (m_class_get_element_class (member_klass) && !m_class_is_enumtype (member_klass)) { member_klass = m_class_get_element_class (member_klass); member_klass_assembly = m_class_get_image (member_klass)->assembly; } access_level = mono_class_get_flags (member_klass) & TYPE_ATTRIBUTE_VISIBILITY_MASK; if (mono_type_is_generic_argument (m_class_get_byval_arg (member_klass))) return TRUE; if (mono_class_is_ginst (member_klass) && !can_access_instantiation (access_klass, mono_class_get_generic_class (member_klass)->context.class_inst)) return FALSE; if (is_nesting_type (access_klass, member_klass) || (m_class_get_nested_in (access_klass) && is_nesting_type (m_class_get_nested_in (access_klass), member_klass))) return TRUE; /*Non nested type with nested visibility. 
We just fail it.*/ if (access_level >= TYPE_ATTRIBUTE_NESTED_PRIVATE && access_level <= TYPE_ATTRIBUTE_NESTED_FAM_OR_ASSEM && m_class_get_nested_in (member_klass) == NULL) return FALSE; MonoClass *member_klass_nested_in = m_class_get_nested_in (member_klass); switch (access_level) { case TYPE_ATTRIBUTE_NOT_PUBLIC: return can_access_internals (access_klass_assembly, member_klass_assembly); case TYPE_ATTRIBUTE_PUBLIC: return TRUE; case TYPE_ATTRIBUTE_NESTED_PUBLIC: return member_klass_nested_in && can_access_type (access_klass, member_klass_nested_in); case TYPE_ATTRIBUTE_NESTED_PRIVATE: if (is_nesting_type (member_klass, access_klass) && member_klass_nested_in && can_access_type (access_klass, member_klass_nested_in)) return TRUE; return ignores_access_checks_to (access_klass_assembly, member_klass_assembly); case TYPE_ATTRIBUTE_NESTED_FAMILY: return mono_class_has_parent_and_ignore_generics (access_klass, m_class_get_nested_in (member_klass)); case TYPE_ATTRIBUTE_NESTED_ASSEMBLY: return can_access_internals (access_klass_assembly, member_klass_assembly) && member_klass_nested_in && can_access_type (access_klass, member_klass_nested_in); case TYPE_ATTRIBUTE_NESTED_FAM_AND_ASSEM: return can_access_internals (access_klass_assembly, m_class_get_image (member_klass_nested_in)->assembly) && mono_class_has_parent_and_ignore_generics (access_klass, member_klass_nested_in); case TYPE_ATTRIBUTE_NESTED_FAM_OR_ASSEM: return can_access_internals (access_klass_assembly, m_class_get_image (member_klass_nested_in)->assembly) || mono_class_has_parent_and_ignore_generics (access_klass, member_klass_nested_in); } return FALSE; } /* FIXME: check visibility of type, too */ static gboolean can_access_member (MonoClass *access_klass, MonoClass *member_klass, MonoClass* context_klass, int access_level) { MonoClass *member_generic_def; MonoAssembly *access_klass_assembly = m_class_get_image (access_klass)->assembly; MonoGenericClass *access_gklass = mono_class_try_get_generic_class (access_klass); if (((access_gklass && access_gklass->container_class) || mono_class_is_gtd (access_klass)) && (member_generic_def = get_generic_definition_class (member_klass))) { MonoClass *access_container; if (mono_class_is_gtd (access_klass)) access_container = access_klass; else access_container = access_gklass->container_class; if (can_access_member (access_container, member_generic_def, context_klass, access_level)) return TRUE; } MonoImage *member_klass_image = m_class_get_image (member_klass); /* Partition I 8.5.3.2 */ /* the access level values are the same for fields and methods */ switch (access_level) { case FIELD_ATTRIBUTE_COMPILER_CONTROLLED: /* same compilation unit */ return m_class_get_image (access_klass) == member_klass_image; case FIELD_ATTRIBUTE_PRIVATE: return (access_klass == member_klass) || ignores_access_checks_to (access_klass_assembly, member_klass_image->assembly); case FIELD_ATTRIBUTE_FAM_AND_ASSEM: if (is_valid_family_access (access_klass, member_klass, context_klass) && can_access_internals (access_klass_assembly, member_klass_image->assembly)) return TRUE; return FALSE; case FIELD_ATTRIBUTE_ASSEMBLY: return can_access_internals (access_klass_assembly, member_klass_image->assembly); case FIELD_ATTRIBUTE_FAMILY: if (is_valid_family_access (access_klass, member_klass, context_klass)) return TRUE; return FALSE; case FIELD_ATTRIBUTE_FAM_OR_ASSEM: if (is_valid_family_access (access_klass, member_klass, context_klass)) return TRUE; return can_access_internals (access_klass_assembly, 
member_klass_image->assembly); case FIELD_ATTRIBUTE_PUBLIC: return TRUE; } return FALSE; } /** * mono_method_can_access_field: * \param method Method that will attempt to access the field * \param field the field to access * * Used to determine if a method is allowed to access the specified field. * * \returns TRUE if the given \p method is allowed to access the \p field while following * the accessibility rules of the CLI. */ gboolean mono_method_can_access_field (MonoMethod *method, MonoClassField *field) { /* FIXME: check all overlapping fields */ int can = can_access_member (method->klass, m_field_get_parent (field), NULL, mono_field_get_type_internal (field)->attrs & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK); if (!can) { MonoClass *nested = m_class_get_nested_in (method->klass); while (nested) { can = can_access_member (nested, m_field_get_parent (field), NULL, mono_field_get_type_internal (field)->attrs & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK); if (can) return TRUE; nested = m_class_get_nested_in (nested); } } return can; } static MonoMethod* mono_method_get_method_definition (MonoMethod *method) { while (method->is_inflated) method = ((MonoMethodInflated*)method)->declaring; return method; } /** * mono_method_can_access_method: * \param method Method that will attempt to access the other method * \param called the method that we want to probe for accessibility. * * Used to determine if the \p method is allowed to access the specified \p called method. * * \returns TRUE if the given \p method is allowed to invoke the \p called while following * the accessibility rules of the CLI. */ gboolean mono_method_can_access_method (MonoMethod *method, MonoMethod *called) { method = mono_method_get_method_definition (method); called = mono_method_get_method_definition (called); return mono_method_can_access_method_full (method, called, NULL); } /* * mono_method_can_access_method_full: * @method: The caller method * @called: The called method * @context_klass: The static type on stack of the owner @called object used * * This function must be used with instance calls, as they have more strict family accessibility. * It can be used with static methods, but context_klass should be NULL. 
* * Returns: TRUE if the caller has proper visibility and accessibility to @called */ gboolean mono_method_can_access_method_full (MonoMethod *method, MonoMethod *called, MonoClass *context_klass) { /* Wrappers are exempt from access checks */ if (method->wrapper_type != MONO_WRAPPER_NONE || called->wrapper_type != MONO_WRAPPER_NONE) return TRUE; MonoClass *access_class = method->klass; MonoClass *member_class = called->klass; int can = can_access_member (access_class, member_class, context_klass, called->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK); if (!can) { MonoClass *nested = m_class_get_nested_in (access_class); while (nested) { can = can_access_member (nested, member_class, context_klass, called->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK); if (can) break; nested = m_class_get_nested_in (nested); } } if (!can) return FALSE; can = can_access_type (access_class, member_class); if (!can) { MonoClass *nested = m_class_get_nested_in (access_class); while (nested) { can = can_access_type (nested, member_class); if (can) break; nested = m_class_get_nested_in (nested); } } if (!can) return FALSE; if (called->is_inflated) { MonoMethodInflated * infl = (MonoMethodInflated*)called; if (infl->context.method_inst && !can_access_instantiation (access_class, infl->context.method_inst)) return FALSE; } return TRUE; } /* * mono_method_can_access_field_full: * @method: The caller method * @field: The accessed field * @context_klass: The static type on stack of the owner @field object used * * This function must be used with instance fields, as they have more strict family accessibility. * It can be used with static fields, but context_klass should be NULL. * * Returns: TRUE if the caller has proper visibility and accessibility to @field */ gboolean mono_method_can_access_field_full (MonoMethod *method, MonoClassField *field, MonoClass *context_klass) { MonoClass *access_class = method->klass; MonoClass *member_class = m_field_get_parent (field); /* FIXME: check all overlapping fields */ int can = can_access_member (access_class, member_class, context_klass, field->type->attrs & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK); if (!can) { MonoClass *nested = m_class_get_nested_in (access_class); while (nested) { can = can_access_member (nested, member_class, context_klass, field->type->attrs & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK); if (can) break; nested = m_class_get_nested_in (nested); } } if (!can) return FALSE; can = can_access_type (access_class, member_class); if (!can) { MonoClass *nested = m_class_get_nested_in (access_class); while (nested) { can = can_access_type (nested, member_class); if (can) break; nested = m_class_get_nested_in (nested); } } if (!can) return FALSE; return TRUE; } /* * mono_class_can_access_class: * @source_class: The source class * @target_class: The accessed class * * This function returns whether @target_class is visible to @source_class * * Returns: TRUE if @source_class has proper visibility and accessibility to @target_class */ gboolean mono_class_can_access_class (MonoClass *source_class, MonoClass *target_class) { return can_access_type (source_class, target_class); } /** * mono_type_is_valid_enum_basetype: * \param type The MonoType to check * \returns TRUE if the type can be used as the basetype of an enum */ gboolean mono_type_is_valid_enum_basetype (MonoType * type) { switch (type->type) { case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_BOOLEAN: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I8: case MONO_TYPE_U8: case 
MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_R8: case MONO_TYPE_R4: return TRUE; default: return FALSE; } } /** * mono_class_is_valid_enum: * \param klass An enum class to be validated * * This method verifies the required properties an enum should have. * * FIXME: TypeBuilder enums are allowed to implement interfaces, but since they cannot have methods, only empty interfaces are possible * FIXME: enum types are not allowed to have a cctor, but mono_reflection_create_runtime_class sets has_cctor to 1 for all types * FIXME: TypeBuilder enums can have any kind of static fields, but the spec is very explicit about that (P II 14.3) * * \returns TRUE if the given enum class is valid */ gboolean mono_class_is_valid_enum (MonoClass *klass) { MonoClassField * field; gpointer iter = NULL; gboolean found_base_field = FALSE; g_assert (m_class_is_enumtype (klass)); MonoClass *klass_parent = m_class_get_parent (klass); /* we cannot test against mono_defaults.enum_class, or mcs won't be able to compile the System namespace*/ if (!klass_parent || strcmp (m_class_get_name (klass_parent), "Enum") || strcmp (m_class_get_name_space (klass_parent), "System") ) { return FALSE; } if (!mono_class_is_auto_layout (klass)) return FALSE; while ((field = mono_class_get_fields_internal (klass, &iter))) { if (!(field->type->attrs & FIELD_ATTRIBUTE_STATIC)) { if (found_base_field) return FALSE; found_base_field = TRUE; if (!mono_type_is_valid_enum_basetype (field->type)) return FALSE; } } if (!found_base_field) return FALSE; if (mono_class_get_method_count (klass) > 0) return FALSE; return TRUE; } gboolean mono_generic_class_is_generic_type_definition (MonoGenericClass *gklass) { return gklass->context.class_inst == mono_class_get_generic_container (gklass->container_class)->context.class_inst; } void mono_field_resolve_type (MonoClassField *field, MonoError *error) { MonoClass *klass = m_field_get_parent (field); MonoImage *image = m_class_get_image (klass); MonoClass *gtd = mono_class_is_ginst (klass) ? 
mono_class_get_generic_type_definition (klass) : NULL; MonoType *ftype; int field_idx; if (G_UNLIKELY (m_field_is_from_update (field))) { field_idx = -1; } else { field_idx = field - m_class_get_fields (klass); } error_init (error); if (gtd) { g_assert (field_idx != -1); MonoClassField *gfield = &m_class_get_fields (gtd) [field_idx]; MonoType *gtype = mono_field_get_type_checked (gfield, error); if (!is_ok (error)) { char *full_name = mono_type_get_full_name (gtd); mono_class_set_type_load_failure (klass, "Could not load generic type of field '%s:%s' (%d) due to: %s", full_name, gfield->name, field_idx, mono_error_get_message (error)); g_free (full_name); } ftype = mono_class_inflate_generic_type_no_copy (image, gtype, mono_class_get_context (klass), error); if (!is_ok (error)) { char *full_name = mono_type_get_full_name (klass); mono_class_set_type_load_failure (klass, "Could not load instantiated type of field '%s:%s' (%d) due to: %s", full_name, field->name, field_idx, mono_error_get_message (error)); g_free (full_name); } } else { const char *sig; guint32 cols [MONO_FIELD_SIZE]; MonoGenericContainer *container = NULL; int idx; if (G_UNLIKELY (m_field_is_from_update (field))) { idx = mono_metadata_update_get_field_idx (field) - 1; } else { idx = mono_class_get_first_field_idx (klass) + field_idx; } /*FIXME, in theory we do not lazy load SRE fields*/ g_assert (!image_is_dynamic (image)); if (mono_class_is_gtd (klass)) { container = mono_class_get_generic_container (klass); } else if (gtd) { container = mono_class_get_generic_container (gtd); g_assert (container); } /* first_field_idx and idx point into the fieldptr table */ mono_metadata_decode_table_row (image, MONO_TABLE_FIELD, idx, cols, MONO_FIELD_SIZE); sig = mono_metadata_blob_heap (image, cols [MONO_FIELD_SIGNATURE]); mono_metadata_decode_value (sig, &sig); /* FIELD signature == 0x06 */ g_assert (*sig == 0x06); ftype = mono_metadata_parse_type_checked (image, container, cols [MONO_FIELD_FLAGS], FALSE, sig + 1, &sig, error); if (!ftype) { char *full_name = mono_type_get_full_name (klass); mono_class_set_type_load_failure (klass, "Could not load type of field '%s:%s' (%d) due to: %s", full_name, field->name, field_idx, mono_error_get_message (error)); g_free (full_name); } } mono_memory_barrier (); field->type = ftype; } static guint32 mono_field_resolve_flags (MonoClassField *field) { MonoClass *klass = m_field_get_parent (field); MonoImage *image = m_class_get_image (klass); MonoClass *gtd = mono_class_is_ginst (klass) ? mono_class_get_generic_type_definition (klass) : NULL; int field_idx = field - m_class_get_fields (klass); if (gtd) { MonoClassField *gfield = &m_class_get_fields (gtd) [field_idx]; return mono_field_get_flags (gfield); } else { int idx = mono_class_get_first_field_idx (klass) + field_idx; /*FIXME, in theory we do not lazy load SRE fields*/ g_assert (!image_is_dynamic (image)); return mono_metadata_decode_table_row_col (image, MONO_TABLE_FIELD, idx, MONO_FIELD_FLAGS); } } /** * mono_class_get_fields_lazy: * \param klass the MonoClass to act on * * This routine is an iterator for retrieving the fields in a class. * Only minimal information about fields is loaded. Accessors must be used * for all MonoClassField returned. * * You must pass a gpointer that points to zero and is treated as an opaque handle to * iterate over all of the elements. When no more values are * available, the return value is NULL. * * \returns a \c MonoClassField* on each iteration, or NULL when no more fields are available. 
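 *
 * Iteration sketch (hypothetical usage; \c klass is assumed to be valid):
 *
 *   gpointer iter = NULL;
 *   MonoClassField *field;
 *   while ((field = mono_class_get_fields_lazy (klass, &iter))) {
 *       const char *fname = mono_field_get_name (field);
 *       /* only accessors may be used on a lazily-loaded field */
 *   }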
*/ MonoClassField* mono_class_get_fields_lazy (MonoClass* klass, gpointer *iter) { MonoClassField* field; if (!iter) return NULL; if (!*iter) { mono_class_setup_basic_field_info (klass); MonoClassField *klass_fields = m_class_get_fields (klass); if (!klass_fields) return NULL; /* start from the first */ if (mono_class_get_field_count (klass)) { *iter = &klass_fields [0]; return (MonoClassField *)*iter; } else { /* no fields */ return NULL; } } field = (MonoClassField *)*iter; field++; if (field < &m_class_get_fields (klass) [mono_class_get_field_count (klass)]) { *iter = field; return (MonoClassField *)*iter; } return NULL; } char* mono_class_full_name (MonoClass *klass) { return mono_type_full_name (m_class_get_byval_arg (klass)); } /* Declare all shared lazy type lookup functions */ GENERATE_TRY_GET_CLASS_WITH_CACHE (safehandle, "System.Runtime.InteropServices", "SafeHandle") /** * mono_method_get_base_method: * \param method a method * \param definition if true, get the definition * \param error set on failure * * Given a virtual method associated with a subclass, return the corresponding * method from an ancestor. If \p definition is FALSE, returns the method in the * superclass of the given method. If \p definition is TRUE, return the method * in the ancestor class where it was first declared. The type arguments will * be inflated in the ancestor classes. If the method is not associated with a * class, or isn't virtual, returns the method itself. On failure returns NULL * and sets \p error. */ MonoMethod* mono_method_get_base_method (MonoMethod *method, gboolean definition, MonoError *error) { MonoClass *klass, *parent; MonoGenericContext *generic_inst = NULL; MonoMethod *result = NULL; int slot; if (method->klass == NULL) return method; if (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_CLASS_IS_INTERFACE_INTERNAL (method->klass) || method->flags & METHOD_ATTRIBUTE_NEW_SLOT) return method; slot = mono_method_get_vtable_slot (method); if (slot == -1) return method; klass = method->klass; if (mono_class_is_gtd (klass)) { /* If we get a GTD like Foo`2 replace look instead at its instantiation with its own generic params: Foo`2<!0, !1>. */ /* In particular we want generic_inst to be initialized to <!0, * !1> so that we can inflate parent classes correctly as we go * up the class hierarchy. */ MonoType *ty = mono_class_gtd_get_canonical_inst (klass); g_assert (ty->type == MONO_TYPE_GENERICINST); MonoGenericClass *gklass = ty->data.generic_class; generic_inst = mono_generic_class_get_context (gklass); klass = gklass->container_class; } else if (mono_class_is_ginst (klass)) { generic_inst = mono_class_get_context (klass); klass = mono_class_get_generic_class (klass)->container_class; } retry: if (definition) { /* At the end of the loop, klass points to the eldest class that has this virtual function slot. */ for (parent = m_class_get_parent (klass); parent != NULL; parent = m_class_get_parent (parent)) { /* on entry, klass is either a plain old non-generic class and generic_inst == NULL or klass is the generic container class and generic_inst is the instantiation. when we go to the parent, if the parent is an open constructed type, we need to replace the type parameters by the definitions from the generic_inst, and then take it apart again into the klass and the generic_inst. For cases like this: class C<T> : B<T, int> { public override void Foo () { ... } } class B<U,V> : A<HashMap<U,V>> { public override void Foo () { ... } } class A<X> { public virtual void Foo () { ... 
} } if at each iteration the parent isn't open, we can skip inflating it. if at some iteration the parent isn't generic (after possible inflation), we set generic_inst to NULL; */ MonoGenericContext *parent_inst = NULL; if (mono_class_is_open_constructed_type (m_class_get_byval_arg (parent))) { parent = mono_class_inflate_generic_class_checked (parent, generic_inst, error); return_val_if_nok (error, NULL); } if (mono_class_is_ginst (parent)) { parent_inst = mono_class_get_context (parent); parent = mono_class_get_generic_class (parent)->container_class; } mono_class_setup_vtable (parent); if (m_class_get_vtable_size (parent) <= slot) break; klass = parent; generic_inst = parent_inst; } } else { /* When we get here, possibly after a retry, if generic_inst is * set, then the class is must be a gtd */ g_assert (generic_inst == NULL || mono_class_is_gtd (klass)); klass = m_class_get_parent (klass); if (!klass) return method; if (mono_class_is_open_constructed_type (m_class_get_byval_arg (klass))) { klass = mono_class_inflate_generic_class_checked (klass, generic_inst, error); return_val_if_nok (error, NULL); generic_inst = NULL; } if (mono_class_is_ginst (klass)) { generic_inst = mono_class_get_context (klass); klass = mono_class_get_generic_class (klass)->container_class; } } if (generic_inst) { klass = mono_class_inflate_generic_class_checked (klass, generic_inst, error); return_val_if_nok (error, NULL); generic_inst = NULL; } if (klass == method->klass) return method; /*This is possible if definition == FALSE. * Do it here to be really sure we don't read invalid memory. */ if (slot >= m_class_get_vtable_size (klass)) return method; mono_class_setup_vtable (klass); result = m_class_get_vtable (klass) [slot]; if (result == NULL) { /* It is an abstract method */ gboolean found = FALSE; gpointer iter = NULL; while ((result = mono_class_get_methods (klass, &iter))) { if (result->slot == slot) { found = TRUE; break; } } /* found might be FALSE if we looked in an abstract class * that doesn't override an abstract method of its * parent: * abstract class Base { * public abstract void Foo (); * } * abstract class Derived : Base { } * class Child : Derived { * public override void Foo () { } * } * * if m was Child.Foo and we ask for the base method, * then we get here with klass == Derived and found == FALSE */ /* but it shouldn't be the case that if we're looking * for the definition and didn't find a result; the * loop above should've taken us as far as we could * go! */ g_assert (!(definition && !found)); if (!found) goto retry; } g_assert (result != NULL); return result; } gboolean mono_method_is_constructor (MonoMethod *method) { return ((method->flags & CTOR_REQUIRED_FLAGS) == CTOR_REQUIRED_FLAGS && !(method->flags & CTOR_INVALID_FLAGS) && !strcmp (".ctor", method->name)); } gboolean mono_class_has_default_constructor (MonoClass *klass, gboolean public_only) { MonoMethod *method; int i; mono_class_setup_methods (klass); if (mono_class_has_failure (klass)) return FALSE; int mcount = mono_class_get_method_count (klass); MonoMethod **klass_methods = m_class_get_methods (klass); for (i = 0; i < mcount; ++i) { method = klass_methods [i]; if (mono_method_is_constructor (method) && mono_method_signature_internal (method) && mono_method_signature_internal (method)->param_count == 0 && (!public_only || (method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC)) return TRUE; } return FALSE; }
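The lookup and accessibility helpers above compose naturally. As a minimal sketch, assuming valid `caller` and `klass` values obtained elsewhere (the helper name and the method name are hypothetical, not part of the file above):

/* Resolve a parameterless method by name, but only return it if `caller`
 * may invoke it under the CLI accessibility rules (Partition I 8.5.3.2). */
static MonoMethod *
find_callable_method (MonoMethod *caller, MonoClass *klass, const char *name)
{
	ERROR_DECL (error);
	MonoMethod *m = mono_class_get_method_from_name_checked (klass, name, 0, 0, error);
	mono_error_cleanup (error);
	if (!m)
		return NULL;
	return mono_method_can_access_method (caller, m) ? m : NULL;
}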
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/coreclr/pal/src/libunwind/src/mips/Ginit_local.c
/* libunwind - a platform-independent unwind library Copyright (C) 2008 CodeSourcery This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind_i.h" #include "init.h" #ifdef UNW_REMOTE_ONLY int unw_init_local (unw_cursor_t *cursor, ucontext_t *uc) { return -UNW_EINVAL; } #else /* !UNW_REMOTE_ONLY */ static int unw_init_local_common(unw_cursor_t *cursor, ucontext_t *uc, unsigned use_prev_instr) { struct cursor *c = (struct cursor *) cursor; if (!atomic_load(&tdep_init_done)) tdep_init (); Debug (1, "(cursor=%p)\n", c); c->dwarf.as = unw_local_addr_space; c->dwarf.as_arg = uc; return common_init (c, use_prev_instr); } int unw_init_local(unw_cursor_t *cursor, ucontext_t *uc) { return unw_init_local_common(cursor, uc, 1); } int unw_init_local2 (unw_cursor_t *cursor, ucontext_t *uc, int flag) { if (!flag) { return unw_init_local_common(cursor, uc, 1); } else if (flag == UNW_INIT_SIGNAL_FRAME) { return unw_init_local_common(cursor, uc, 0); } else { return -UNW_EINVAL; } } #endif /* !UNW_REMOTE_ONLY */
/* libunwind - a platform-independent unwind library Copyright (C) 2008 CodeSourcery This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind_i.h" #include "init.h" #ifdef UNW_REMOTE_ONLY int unw_init_local (unw_cursor_t *cursor, ucontext_t *uc) { return -UNW_EINVAL; } #else /* !UNW_REMOTE_ONLY */ static int unw_init_local_common(unw_cursor_t *cursor, ucontext_t *uc, unsigned use_prev_instr) { struct cursor *c = (struct cursor *) cursor; if (!atomic_load(&tdep_init_done)) tdep_init (); Debug (1, "(cursor=%p)\n", c); c->dwarf.as = unw_local_addr_space; c->dwarf.as_arg = uc; return common_init (c, use_prev_instr); } int unw_init_local(unw_cursor_t *cursor, ucontext_t *uc) { return unw_init_local_common(cursor, uc, 1); } int unw_init_local2 (unw_cursor_t *cursor, ucontext_t *uc, int flag) { if (!flag) { return unw_init_local_common(cursor, uc, 1); } else if (flag == UNW_INIT_SIGNAL_FRAME) { return unw_init_local_common(cursor, uc, 0); } else { return -UNW_EINVAL; } } #endif /* !UNW_REMOTE_ONLY */
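For context, `unw_init_local` is the entry point of the canonical local-unwind loop. A minimal sketch using only standard libunwind calls (`unw_getcontext`, `unw_step`, and `unw_get_reg` come from the public API, not from the file above):

#define UNW_LOCAL_ONLY
#include <libunwind.h>
#include <stdio.h>

/* Print the instruction pointer of every frame above the caller. */
static void
show_backtrace (void)
{
  unw_cursor_t cursor;
  unw_context_t uc;
  unw_word_t ip;

  unw_getcontext (&uc);
  unw_init_local (&cursor, &uc);
  while (unw_step (&cursor) > 0)
    {
      unw_get_reg (&cursor, UNW_REG_IP, &ip);
      printf ("ip = %lx\n", (long) ip);
    }
}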
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/native/external/brotli/dec/state.c
/* Copyright 2015 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ #include "./state.h" #include <stdlib.h> /* free, malloc */ #include <brotli/types.h> #include "./huffman.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif BROTLI_BOOL BrotliDecoderStateInit(BrotliDecoderState* s, brotli_alloc_func alloc_func, brotli_free_func free_func, void* opaque) { if (!alloc_func) { s->alloc_func = BrotliDefaultAllocFunc; s->free_func = BrotliDefaultFreeFunc; s->memory_manager_opaque = 0; } else { s->alloc_func = alloc_func; s->free_func = free_func; s->memory_manager_opaque = opaque; } s->error_code = 0; /* BROTLI_DECODER_NO_ERROR */ BrotliInitBitReader(&s->br); s->state = BROTLI_STATE_UNINITED; s->large_window = 0; s->substate_metablock_header = BROTLI_STATE_METABLOCK_HEADER_NONE; s->substate_uncompressed = BROTLI_STATE_UNCOMPRESSED_NONE; s->substate_decode_uint8 = BROTLI_STATE_DECODE_UINT8_NONE; s->substate_read_block_length = BROTLI_STATE_READ_BLOCK_LENGTH_NONE; s->buffer_length = 0; s->loop_counter = 0; s->pos = 0; s->rb_roundtrips = 0; s->partial_pos_out = 0; s->block_type_trees = NULL; s->block_len_trees = NULL; s->ringbuffer = NULL; s->ringbuffer_size = 0; s->new_ringbuffer_size = 0; s->ringbuffer_mask = 0; s->context_map = NULL; s->context_modes = NULL; s->dist_context_map = NULL; s->context_map_slice = NULL; s->dist_context_map_slice = NULL; s->literal_hgroup.codes = NULL; s->literal_hgroup.htrees = NULL; s->insert_copy_hgroup.codes = NULL; s->insert_copy_hgroup.htrees = NULL; s->distance_hgroup.codes = NULL; s->distance_hgroup.htrees = NULL; s->is_last_metablock = 0; s->is_uncompressed = 0; s->is_metadata = 0; s->should_wrap_ringbuffer = 0; s->canny_ringbuffer_allocation = 1; s->window_bits = 0; s->max_distance = 0; s->dist_rb[0] = 16; s->dist_rb[1] = 15; s->dist_rb[2] = 11; s->dist_rb[3] = 4; s->dist_rb_idx = 0; s->block_type_trees = NULL; s->block_len_trees = NULL; s->mtf_upper_bound = 63; s->dictionary = BrotliGetDictionary(); s->transforms = BrotliGetTransforms(); return BROTLI_TRUE; } void BrotliDecoderStateMetablockBegin(BrotliDecoderState* s) { s->meta_block_remaining_len = 0; s->block_length[0] = 1U << 24; s->block_length[1] = 1U << 24; s->block_length[2] = 1U << 24; s->num_block_types[0] = 1; s->num_block_types[1] = 1; s->num_block_types[2] = 1; s->block_type_rb[0] = 1; s->block_type_rb[1] = 0; s->block_type_rb[2] = 1; s->block_type_rb[3] = 0; s->block_type_rb[4] = 1; s->block_type_rb[5] = 0; s->context_map = NULL; s->context_modes = NULL; s->dist_context_map = NULL; s->context_map_slice = NULL; s->literal_htree = NULL; s->dist_context_map_slice = NULL; s->dist_htree_index = 0; s->context_lookup = NULL; s->literal_hgroup.codes = NULL; s->literal_hgroup.htrees = NULL; s->insert_copy_hgroup.codes = NULL; s->insert_copy_hgroup.htrees = NULL; s->distance_hgroup.codes = NULL; s->distance_hgroup.htrees = NULL; } void BrotliDecoderStateCleanupAfterMetablock(BrotliDecoderState* s) { BROTLI_DECODER_FREE(s, s->context_modes); BROTLI_DECODER_FREE(s, s->context_map); BROTLI_DECODER_FREE(s, s->dist_context_map); BROTLI_DECODER_FREE(s, s->literal_hgroup.htrees); BROTLI_DECODER_FREE(s, s->insert_copy_hgroup.htrees); BROTLI_DECODER_FREE(s, s->distance_hgroup.htrees); } void BrotliDecoderStateCleanup(BrotliDecoderState* s) { BrotliDecoderStateCleanupAfterMetablock(s); BROTLI_DECODER_FREE(s, s->ringbuffer); BROTLI_DECODER_FREE(s, s->block_type_trees); } BROTLI_BOOL 
BrotliDecoderHuffmanTreeGroupInit(BrotliDecoderState* s, HuffmanTreeGroup* group, uint32_t alphabet_size_max, uint32_t alphabet_size_limit, uint32_t ntrees) { /* 376 = 256 (1-st level table) + 4 + 7 + 15 + 31 + 63 (2-nd level mix-tables) This number was discovered with an "unlimited"/"enough" calculator; it is actually a wee bit bigger than required in several cases (especially for alphabets with less than 16 symbols). */ const size_t max_table_size = alphabet_size_limit + 376; const size_t code_size = sizeof(HuffmanCode) * ntrees * max_table_size; const size_t htree_size = sizeof(HuffmanCode*) * ntrees; /* Pointer alignment is, hopefully, wider than sizeof(HuffmanCode). */ HuffmanCode** p = (HuffmanCode**)BROTLI_DECODER_ALLOC(s, code_size + htree_size); group->alphabet_size_max = (uint16_t)alphabet_size_max; group->alphabet_size_limit = (uint16_t)alphabet_size_limit; group->num_htrees = (uint16_t)ntrees; group->htrees = p; group->codes = (HuffmanCode*)(&p[ntrees]); return !!p; } #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif
/* Copyright 2015 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ #include "./state.h" #include <stdlib.h> /* free, malloc */ #include <brotli/types.h> #include "./huffman.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif BROTLI_BOOL BrotliDecoderStateInit(BrotliDecoderState* s, brotli_alloc_func alloc_func, brotli_free_func free_func, void* opaque) { if (!alloc_func) { s->alloc_func = BrotliDefaultAllocFunc; s->free_func = BrotliDefaultFreeFunc; s->memory_manager_opaque = 0; } else { s->alloc_func = alloc_func; s->free_func = free_func; s->memory_manager_opaque = opaque; } s->error_code = 0; /* BROTLI_DECODER_NO_ERROR */ BrotliInitBitReader(&s->br); s->state = BROTLI_STATE_UNINITED; s->large_window = 0; s->substate_metablock_header = BROTLI_STATE_METABLOCK_HEADER_NONE; s->substate_uncompressed = BROTLI_STATE_UNCOMPRESSED_NONE; s->substate_decode_uint8 = BROTLI_STATE_DECODE_UINT8_NONE; s->substate_read_block_length = BROTLI_STATE_READ_BLOCK_LENGTH_NONE; s->buffer_length = 0; s->loop_counter = 0; s->pos = 0; s->rb_roundtrips = 0; s->partial_pos_out = 0; s->block_type_trees = NULL; s->block_len_trees = NULL; s->ringbuffer = NULL; s->ringbuffer_size = 0; s->new_ringbuffer_size = 0; s->ringbuffer_mask = 0; s->context_map = NULL; s->context_modes = NULL; s->dist_context_map = NULL; s->context_map_slice = NULL; s->dist_context_map_slice = NULL; s->literal_hgroup.codes = NULL; s->literal_hgroup.htrees = NULL; s->insert_copy_hgroup.codes = NULL; s->insert_copy_hgroup.htrees = NULL; s->distance_hgroup.codes = NULL; s->distance_hgroup.htrees = NULL; s->is_last_metablock = 0; s->is_uncompressed = 0; s->is_metadata = 0; s->should_wrap_ringbuffer = 0; s->canny_ringbuffer_allocation = 1; s->window_bits = 0; s->max_distance = 0; s->dist_rb[0] = 16; s->dist_rb[1] = 15; s->dist_rb[2] = 11; s->dist_rb[3] = 4; s->dist_rb_idx = 0; s->block_type_trees = NULL; s->block_len_trees = NULL; s->mtf_upper_bound = 63; s->dictionary = BrotliGetDictionary(); s->transforms = BrotliGetTransforms(); return BROTLI_TRUE; } void BrotliDecoderStateMetablockBegin(BrotliDecoderState* s) { s->meta_block_remaining_len = 0; s->block_length[0] = 1U << 24; s->block_length[1] = 1U << 24; s->block_length[2] = 1U << 24; s->num_block_types[0] = 1; s->num_block_types[1] = 1; s->num_block_types[2] = 1; s->block_type_rb[0] = 1; s->block_type_rb[1] = 0; s->block_type_rb[2] = 1; s->block_type_rb[3] = 0; s->block_type_rb[4] = 1; s->block_type_rb[5] = 0; s->context_map = NULL; s->context_modes = NULL; s->dist_context_map = NULL; s->context_map_slice = NULL; s->literal_htree = NULL; s->dist_context_map_slice = NULL; s->dist_htree_index = 0; s->context_lookup = NULL; s->literal_hgroup.codes = NULL; s->literal_hgroup.htrees = NULL; s->insert_copy_hgroup.codes = NULL; s->insert_copy_hgroup.htrees = NULL; s->distance_hgroup.codes = NULL; s->distance_hgroup.htrees = NULL; } void BrotliDecoderStateCleanupAfterMetablock(BrotliDecoderState* s) { BROTLI_DECODER_FREE(s, s->context_modes); BROTLI_DECODER_FREE(s, s->context_map); BROTLI_DECODER_FREE(s, s->dist_context_map); BROTLI_DECODER_FREE(s, s->literal_hgroup.htrees); BROTLI_DECODER_FREE(s, s->insert_copy_hgroup.htrees); BROTLI_DECODER_FREE(s, s->distance_hgroup.htrees); } void BrotliDecoderStateCleanup(BrotliDecoderState* s) { BrotliDecoderStateCleanupAfterMetablock(s); BROTLI_DECODER_FREE(s, s->ringbuffer); BROTLI_DECODER_FREE(s, s->block_type_trees); } BROTLI_BOOL 
BrotliDecoderHuffmanTreeGroupInit(BrotliDecoderState* s, HuffmanTreeGroup* group, uint32_t alphabet_size_max, uint32_t alphabet_size_limit, uint32_t ntrees) { /* 376 = 256 (1-st level table) + 4 + 7 + 15 + 31 + 63 (2-nd level mix-tables) This number was discovered with an "unlimited"/"enough" calculator; it is actually a wee bit bigger than required in several cases (especially for alphabets with less than 16 symbols). */ const size_t max_table_size = alphabet_size_limit + 376; const size_t code_size = sizeof(HuffmanCode) * ntrees * max_table_size; const size_t htree_size = sizeof(HuffmanCode*) * ntrees; /* Pointer alignment is, hopefully, wider than sizeof(HuffmanCode). */ HuffmanCode** p = (HuffmanCode**)BROTLI_DECODER_ALLOC(s, code_size + htree_size); group->alphabet_size_max = (uint16_t)alphabet_size_max; group->alphabet_size_limit = (uint16_t)alphabet_size_limit; group->num_htrees = (uint16_t)ntrees; group->htrees = p; group->codes = (HuffmanCode*)(&p[ntrees]); return !!p; } #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif
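`BrotliDecoderStateInit` and `BrotliDecoderHuffmanTreeGroupInit` above are internal; callers normally reach them through the public decoder API. A one-shot decode sketch, assuming the output capacity is known up front (`input`, `output`, and the size parameters are hypothetical caller-provided values):

#include <brotli/decode.h>
#include <stddef.h>
#include <stdint.h>

/* Returns 1 on success; *output_size carries the buffer capacity in and
 * the number of decoded bytes out. */
static int
decode_one_shot (const uint8_t* input, size_t input_size,
                 uint8_t* output, size_t* output_size) {
  BrotliDecoderResult r =
      BrotliDecoderDecompress(input_size, input, output_size, output);
  return r == BROTLI_DECODER_RESULT_SUCCESS;
}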
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/eglib/giconv.c
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */ /* * Copyright (C) 2011 Jeffrey Stedfast * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <config.h> #include <glib.h> #include <string.h> #include <errno.h> #include "../utils/mono-errno.h" #ifdef _MSC_VER #define FORCE_INLINE(RET_TYPE) __forceinline RET_TYPE #else #define FORCE_INLINE(RET_TYPE) inline RET_TYPE __attribute__((always_inline)) #endif #define UNROLL_DECODE_UTF8 0 #define UNROLL_ENCODE_UTF8 0 static int decode_utf32be (char *inbuf, size_t inleft, gunichar *outchar); static int encode_utf32be (gunichar c, char *outbuf, size_t outleft); static int decode_utf32le (char *inbuf, size_t inleft, gunichar *outchar); static int encode_utf32le (gunichar c, char *outbuf, size_t outleft); static int decode_utf16be (char *inbuf, size_t inleft, gunichar *outchar); static int encode_utf16be (gunichar c, char *outbuf, size_t outleft); static int decode_utf16le (char *inbuf, size_t inleft, gunichar *outchar); static int encode_utf16le (gunichar c, char *outbuf, size_t outleft); static FORCE_INLINE (int) decode_utf8 (char *inbuf, size_t inleft, gunichar *outchar); static int encode_utf8 (gunichar c, char *outbuf, size_t outleft); static int decode_latin1 (char *inbuf, size_t inleft, gunichar *outchar); static int encode_latin1 (gunichar c, char *outbuf, size_t outleft); #if G_BYTE_ORDER == G_LITTLE_ENDIAN #define decode_utf32 decode_utf32le #define encode_utf32 encode_utf32le #define decode_utf16 decode_utf16le #define encode_utf16 encode_utf16le #else #define decode_utf32 decode_utf32be #define encode_utf32 encode_utf32be #define decode_utf16 decode_utf16be #define encode_utf16 encode_utf16be #endif /* * Unicode encoders and decoders */ static FORCE_INLINE (uint32_t) read_uint32_endian (unsigned char *inptr, unsigned endian) { if (endian == G_LITTLE_ENDIAN) return (inptr[3] << 24) | (inptr[2] << 16) | (inptr[1] << 8) | inptr[0]; return (inptr[0] << 24) | (inptr[1] << 16) | (inptr[2] << 8) | inptr[3]; } static int decode_utf32_endian (char *inbuf, size_t inleft, gunichar *outchar, unsigned endian) { unsigned char *inptr = (unsigned char *) inbuf; gunichar c; if (inleft < 4) { mono_set_errno (EINVAL); return -1; } c = read_uint32_endian (inptr, endian); if (c >= 0xd800 && c < 0xe000) { mono_set_errno (EILSEQ); return -1; } else if (c >= 0x110000) { mono_set_errno (EILSEQ); return -1; } *outchar = c; return 4; } static int decode_utf32be (char *inbuf, size_t inleft, gunichar *outchar) { return decode_utf32_endian 
(inbuf, inleft, outchar, G_BIG_ENDIAN); } static int decode_utf32le (char *inbuf, size_t inleft, gunichar *outchar) { return decode_utf32_endian (inbuf, inleft, outchar, G_LITTLE_ENDIAN); } static int encode_utf32be (gunichar c, char *outbuf, size_t outleft) { unsigned char *outptr = (unsigned char *) outbuf; if (outleft < 4) { mono_set_errno (E2BIG); return -1; } outptr[0] = (c >> 24) & 0xff; outptr[1] = (c >> 16) & 0xff; outptr[2] = (c >> 8) & 0xff; outptr[3] = c & 0xff; return 4; } static int encode_utf32le (gunichar c, char *outbuf, size_t outleft) { unsigned char *outptr = (unsigned char *) outbuf; if (outleft < 4) { mono_set_errno (E2BIG); return -1; } outptr[0] = c & 0xff; outptr[1] = (c >> 8) & 0xff; outptr[2] = (c >> 16) & 0xff; outptr[3] = (c >> 24) & 0xff; return 4; } static FORCE_INLINE (uint16_t) read_uint16_endian (unsigned char *inptr, unsigned endian) { if (endian == G_LITTLE_ENDIAN) return (inptr[1] << 8) | inptr[0]; return (inptr[0] << 8) | inptr[1]; } static FORCE_INLINE (int) decode_utf16_endian (char *inbuf, size_t inleft, gunichar *outchar, unsigned endian) { unsigned char *inptr = (unsigned char *) inbuf; gunichar2 c; gunichar u; if (inleft < 2) { mono_set_errno (E2BIG); return -1; } u = read_uint16_endian (inptr, endian); if (u < 0xd800) { /* 0x0000 -> 0xd7ff */ *outchar = u; return 2; } else if (u < 0xdc00) { /* 0xd800 -> 0xdbff */ if (inleft < 4) { mono_set_errno (EINVAL); return -2; } c = read_uint16_endian (inptr + 2, endian); if (c < 0xdc00 || c > 0xdfff) { mono_set_errno (EILSEQ); return -2; } u = ((u - 0xd800) << 10) + (c - 0xdc00) + 0x0010000UL; *outchar = u; return 4; } else if (u < 0xe000) { /* 0xdc00 -> 0xdfff */ mono_set_errno (EILSEQ); return -1; } else { /* 0xe000 -> 0xffff */ *outchar = u; return 2; } } static int decode_utf16be (char *inbuf, size_t inleft, gunichar *outchar) { return decode_utf16_endian (inbuf, inleft, outchar, G_BIG_ENDIAN); } static int decode_utf16le (char *inbuf, size_t inleft, gunichar *outchar) { return decode_utf16_endian (inbuf, inleft, outchar, G_LITTLE_ENDIAN); } static FORCE_INLINE (void) write_uint16_endian (unsigned char *outptr, uint16_t c, unsigned endian) { if (endian == G_LITTLE_ENDIAN) { outptr[0] = c & 0xff; outptr[1] = (c >> 8) & 0xff; return; } outptr[0] = (c >> 8) & 0xff; outptr[1] = c & 0xff; } static FORCE_INLINE (int) encode_utf16_endian (gunichar c, char *outbuf, size_t outleft, unsigned endian) { unsigned char *outptr = (unsigned char *) outbuf; gunichar2 ch; gunichar c2; if (c < 0x10000) { if (outleft < 2) { mono_set_errno (E2BIG); return -1; } write_uint16_endian (outptr, c, endian); return 2; } else { if (outleft < 4) { mono_set_errno (E2BIG); return -1; } c2 = c - 0x10000; ch = (gunichar2) ((c2 >> 10) + 0xd800); write_uint16_endian (outptr, ch, endian); ch = (gunichar2) ((c2 & 0x3ff) + 0xdc00); write_uint16_endian (outptr + 2, ch, endian); return 4; } } static int encode_utf16be (gunichar c, char *outbuf, size_t outleft) { return encode_utf16_endian (c, outbuf, outleft, G_BIG_ENDIAN); } static int encode_utf16le (gunichar c, char *outbuf, size_t outleft) { return encode_utf16_endian (c, outbuf, outleft, G_LITTLE_ENDIAN); } static FORCE_INLINE (int) decode_utf8 (char *inbuf, size_t inleft, gunichar *outchar) { unsigned char *inptr = (unsigned char *) inbuf; gunichar u; int n, i; u = *inptr; if (u < 0x80) { /* simple ascii case */ *outchar = u; return 1; } else if (u < 0xc2) { mono_set_errno (EILSEQ); return -1; } else if (u < 0xe0) { u &= 0x1f; n = 2; } else if (u < 0xf0) { u &= 0x0f; n = 3; } else if (u 
< 0xf8) { u &= 0x07; n = 4; } else if (u < 0xfc) { u &= 0x03; n = 5; } else if (u < 0xfe) { u &= 0x01; n = 6; } else { mono_set_errno (EILSEQ); return -1; } if (n > inleft) { mono_set_errno (EINVAL); return -1; } #if UNROLL_DECODE_UTF8 switch (n) { case 6: u = (u << 6) | (*++inptr ^ 0x80); case 5: u = (u << 6) | (*++inptr ^ 0x80); case 4: u = (u << 6) | (*++inptr ^ 0x80); case 3: u = (u << 6) | (*++inptr ^ 0x80); case 2: u = (u << 6) | (*++inptr ^ 0x80); } #else for (i = 1; i < n; i++) u = (u << 6) | (*++inptr ^ 0x80); #endif *outchar = u; return n; } static int encode_utf8 (gunichar c, char *outbuf, size_t outleft) { unsigned char *outptr = (unsigned char *) outbuf; int base, n, i; if (c < 0x80) { outptr[0] = c; return 1; } else if (c < 0x800) { base = 192; n = 2; } else if (c < 0x10000) { base = 224; n = 3; } else if (c < 0x200000) { base = 240; n = 4; } else if (c < 0x4000000) { base = 248; n = 5; } else { base = 252; n = 6; } if (outleft < n) { mono_set_errno (E2BIG); return -1; } #if UNROLL_ENCODE_UTF8 switch (n) { case 6: outptr[5] = (c & 0x3f) | 0x80; c >>= 6; case 5: outptr[4] = (c & 0x3f) | 0x80; c >>= 6; case 4: outptr[3] = (c & 0x3f) | 0x80; c >>= 6; case 3: outptr[2] = (c & 0x3f) | 0x80; c >>= 6; case 2: outptr[1] = (c & 0x3f) | 0x80; c >>= 6; case 1: outptr[0] = c | base; } #else for (i = n - 1; i > 0; i--) { outptr[i] = (c & 0x3f) | 0x80; c >>= 6; } outptr[0] = c | base; #endif return n; } static int decode_latin1 (char *inbuf, size_t inleft, gunichar *outchar) { *outchar = (unsigned char) *inbuf; return 1; } static int encode_latin1 (gunichar c, char *outbuf, size_t outleft) { if (outleft < 1) { mono_set_errno (E2BIG); return -1; } if (c > 0xff) { mono_set_errno (EILSEQ); return -1; } *outbuf = (char) c; return 1; } /* * Simple conversion API */ static gpointer error_quark = (gpointer)"ConvertError"; gpointer g_convert_error_quark (void) { return error_quark; } /* * Unicode conversion */ /** * An explanation of the conversion can be found at: * http://home.tiscali.nl/t876506/utf8tbl.html * **/ gint g_unichar_to_utf8 (gunichar c, gchar *outbuf) { int base, n, i; if (c < 0x80) { base = 0; n = 1; } else if (c < 0x800) { base = 192; n = 2; } else if (c < 0x10000) { base = 224; n = 3; } else if (c < 0x200000) { base = 240; n = 4; } else if (c < 0x4000000) { base = 248; n = 5; } else if (c < 0x80000000) { base = 252; n = 6; } else { return -1; } if (outbuf != NULL) { for (i = n - 1; i > 0; i--) { /* mask off 6 bits worth and add 128 */ outbuf[i] = (c & 0x3f) | 0x80; c >>= 6; } /* first character has a different base */ outbuf[0] = c | base; } return n; } static FORCE_INLINE (int) g_unichar_to_utf16 (gunichar c, gunichar2 *outbuf) { gunichar c2; if (c < 0xd800) { if (outbuf) *outbuf = (gunichar2) c; return 1; } else if (c < 0xe000) { return -1; } else if (c < 0x10000) { if (outbuf) *outbuf = (gunichar2) c; return 1; } else if (c < 0x110000) { if (outbuf) { c2 = c - 0x10000; outbuf[0] = (gunichar2) ((c2 >> 10) + 0xd800); outbuf[1] = (gunichar2) ((c2 & 0x3ff) + 0xdc00); } return 2; } else { return -1; } } gunichar * g_utf8_to_ucs4_fast (const gchar *str, glong len, glong *items_written) { gunichar *outbuf, *outptr; char *inptr; glong n, i; g_return_val_if_fail (str != NULL, NULL); n = g_utf8_strlen (str, len); if (items_written) *items_written = n; outptr = outbuf = g_malloc ((n + 1) * sizeof (gunichar)); inptr = (char *) str; for (i = 0; i < n; i++) { *outptr++ = g_utf8_get_char (inptr); inptr = g_utf8_next_char (inptr); } *outptr = 0; return outbuf; } static gunichar2 * 
eg_utf8_to_utf16_general (const gchar *str, glong len, glong *items_read, glong *items_written, gboolean include_nuls, gboolean replace_invalid_codepoints, GCustomAllocator custom_alloc_func, gpointer custom_alloc_data, GError **err) { gunichar2 *outbuf, *outptr; size_t outlen = 0; size_t inleft; char *inptr; gunichar c; int u, n; g_return_val_if_fail (str != NULL, NULL); if (len < 0) { if (include_nuls) { g_set_error (err, G_CONVERT_ERROR, G_CONVERT_ERROR_FAILED, "Conversions with embedded nulls must pass the string length"); return NULL; } len = strlen (str); } inptr = (char *) str; inleft = len; while (inleft > 0) { if ((n = decode_utf8 (inptr, inleft, &c)) < 0) goto error; if (c == 0 && !include_nuls) break; if ((u = g_unichar_to_utf16 (c, NULL)) < 0) { if (replace_invalid_codepoints) { u = 2; } else { mono_set_errno (EILSEQ); goto error; } } outlen += u; inleft -= n; inptr += n; } if (items_read) *items_read = inptr - str; if (items_written) *items_written = outlen; if (G_LIKELY (!custom_alloc_func)) outptr = outbuf = g_malloc ((outlen + 1) * sizeof (gunichar2)); else outptr = outbuf = (gunichar2 *)custom_alloc_func ((outlen + 1) * sizeof (gunichar2), custom_alloc_data); if (G_UNLIKELY (custom_alloc_func && !outbuf)) { mono_set_errno (ENOMEM); goto error; } inptr = (char *) str; inleft = len; while (inleft > 0) { if ((n = decode_utf8 (inptr, inleft, &c)) < 0) break; if (c == 0 && !include_nuls) break; u = g_unichar_to_utf16 (c, outptr); if ((u < 0) && replace_invalid_codepoints) { outptr[0] = 0xFFFD; outptr[1] = 0xFFFD; u = 2; } outptr += u; inleft -= n; inptr += n; } *outptr = '\0'; return outbuf; error: if (errno == ENOMEM) { g_set_error (err, G_CONVERT_ERROR, G_CONVERT_ERROR_NO_MEMORY, "Allocation failed."); } else if (errno == EILSEQ) { g_set_error (err, G_CONVERT_ERROR, G_CONVERT_ERROR_ILLEGAL_SEQUENCE, "Illegal byte sequence encountered in the input."); } else if (items_read) { /* partial input is ok if we can let our caller know... 
*/ } else { g_set_error (err, G_CONVERT_ERROR, G_CONVERT_ERROR_PARTIAL_INPUT, "Partial byte sequence encountered in the input."); } if (items_read) *items_read = inptr - str; if (items_written) *items_written = 0; return NULL; } gunichar2 * g_utf8_to_utf16 (const gchar *str, glong len, glong *items_read, glong *items_written, GError **err) { return eg_utf8_to_utf16_general (str, len, items_read, items_written, FALSE, FALSE, NULL, NULL, err); } gunichar2 * g_utf8_to_utf16_custom_alloc (const gchar *str, glong len, glong *items_read, glong *items_written, GCustomAllocator custom_alloc_func, gpointer custom_alloc_data, GError **err) { return eg_utf8_to_utf16_general (str, len, items_read, items_written, FALSE, FALSE, custom_alloc_func, custom_alloc_data, err); } gunichar2 * eg_utf8_to_utf16_with_nuls (const gchar *str, glong len, glong *items_read, glong *items_written, GError **err) { return eg_utf8_to_utf16_general (str, len, items_read, items_written, TRUE, FALSE, NULL, NULL, err); } gunichar2 * eg_wtf8_to_utf16 (const gchar *str, glong len, glong *items_read, glong *items_written, GError **err) { return eg_utf8_to_utf16_general (str, len, items_read, items_written, TRUE, TRUE, NULL, NULL, err); } gunichar * g_utf8_to_ucs4 (const gchar *str, glong len, glong *items_read, glong *items_written, GError **err) { gunichar *outbuf, *outptr; size_t outlen = 0; size_t inleft; char *inptr; gunichar c; int n; g_return_val_if_fail (str != NULL, NULL); if (len < 0) len = strlen (str); inptr = (char *) str; inleft = len; while (inleft > 0) { if ((n = decode_utf8 (inptr, inleft, &c)) < 0) { if (errno == EILSEQ) { g_set_error (err, G_CONVERT_ERROR, G_CONVERT_ERROR_ILLEGAL_SEQUENCE, "Illegal byte sequence encountered in the input."); } else if (items_read) { /* partial input is ok if we can let our caller know... */ break; } else { g_set_error (err, G_CONVERT_ERROR, G_CONVERT_ERROR_PARTIAL_INPUT, "Partial byte sequence encountered in the input."); } if (items_read) *items_read = inptr - str; if (items_written) *items_written = 0; return NULL; } else if (c == 0) break; outlen += 4; inleft -= n; inptr += n; } if (items_written) *items_written = outlen / 4; if (items_read) *items_read = inptr - str; outptr = outbuf = g_malloc (outlen + 4); inptr = (char *) str; inleft = len; while (inleft > 0) { if ((n = decode_utf8 (inptr, inleft, &c)) < 0) break; else if (c == 0) break; *outptr++ = c; inleft -= n; inptr += n; } *outptr = 0; return outbuf; } static gchar * eg_utf16_to_utf8_general (const gunichar2 *str, glong len, glong *items_read, glong *items_written, GCustomAllocator custom_alloc_func, gpointer custom_alloc_data, GError **err) { char *inptr, *outbuf, *outptr; size_t outlen = 0; size_t inleft; gunichar c; int n; g_return_val_if_fail (str != NULL, NULL); if (len < 0) { len = 0; while (str[len]) len++; } inptr = (char *) str; inleft = len * 2; while (inleft > 0) { if ((n = decode_utf16 (inptr, inleft, &c)) < 0) { if (n == -2 && inleft > 2) { /* This means that the first UTF-16 char was read, but second failed */ inleft -= 2; inptr += 2; } if (errno == EILSEQ) { g_set_error (err, G_CONVERT_ERROR, G_CONVERT_ERROR_ILLEGAL_SEQUENCE, "Illegal byte sequence encountered in the input."); } else if (items_read) { /* partial input is ok if we can let our caller know... 
*/ break; } else { g_set_error (err, G_CONVERT_ERROR, G_CONVERT_ERROR_PARTIAL_INPUT, "Partial byte sequence encountered in the input."); } if (items_read) *items_read = (inptr - (char *) str) / 2; if (items_written) *items_written = 0; return NULL; } else if (c == 0) break; outlen += g_unichar_to_utf8 (c, NULL); inleft -= n; inptr += n; } if (items_read) *items_read = (inptr - (char *) str) / 2; if (items_written) *items_written = outlen; if (G_LIKELY (!custom_alloc_func)) outptr = outbuf = g_malloc (outlen + 1); else outptr = outbuf = (char *)custom_alloc_func (outlen + 1, custom_alloc_data); if (G_UNLIKELY (custom_alloc_func && !outbuf)) { g_set_error (err, G_CONVERT_ERROR, G_CONVERT_ERROR_NO_MEMORY, "Allocation failed."); if (items_written) *items_written = 0; return NULL; } inptr = (char *) str; inleft = len * 2; while (inleft > 0) { if ((n = decode_utf16 (inptr, inleft, &c)) < 0) break; else if (c == 0) break; outptr += g_unichar_to_utf8 (c, outptr); inleft -= n; inptr += n; } *outptr = '\0'; return outbuf; } gchar * g_utf16_to_utf8 (const gunichar2 *str, glong len, glong *items_read, glong *items_written, GError **err) { return eg_utf16_to_utf8_general (str, len, items_read, items_written, NULL, NULL, err); } gchar * g_utf16_to_utf8_custom_alloc (const gunichar2 *str, glong len, glong *items_read, glong *items_written, GCustomAllocator custom_alloc_func, gpointer custom_alloc_data, GError **err) { return eg_utf16_to_utf8_general (str, len, items_read, items_written, custom_alloc_func, custom_alloc_data, err); } gunichar * g_utf16_to_ucs4 (const gunichar2 *str, glong len, glong *items_read, glong *items_written, GError **err) { gunichar *outbuf, *outptr; size_t outlen = 0; size_t inleft; char *inptr; gunichar c; int n; g_return_val_if_fail (str != NULL, NULL); if (len < 0) { len = 0; while (str[len]) len++; } inptr = (char *) str; inleft = len * 2; while (inleft > 0) { if ((n = decode_utf16 (inptr, inleft, &c)) < 0) { if (n == -2 && inleft > 2) { /* This means that the first UTF-16 char was read, but second failed */ inleft -= 2; inptr += 2; } if (errno == EILSEQ) { g_set_error (err, G_CONVERT_ERROR, G_CONVERT_ERROR_ILLEGAL_SEQUENCE, "Illegal byte sequence encountered in the input."); } else if (items_read) { /* partial input is ok if we can let our caller know... 
*/ break; } else { g_set_error (err, G_CONVERT_ERROR, G_CONVERT_ERROR_PARTIAL_INPUT, "Partial byte sequence encountered in the input."); } if (items_read) *items_read = (inptr - (char *) str) / 2; if (items_written) *items_written = 0; return NULL; } else if (c == 0) break; outlen += 4; inleft -= n; inptr += n; } if (items_read) *items_read = (inptr - (char *) str) / 2; if (items_written) *items_written = outlen / 4; outptr = outbuf = g_malloc (outlen + 4); inptr = (char *) str; inleft = len * 2; while (inleft > 0) { if ((n = decode_utf16 (inptr, inleft, &c)) < 0) break; else if (c == 0) break; *outptr++ = c; inleft -= n; inptr += n; } *outptr = 0; return outbuf; } gchar * g_ucs4_to_utf8 (const gunichar *str, glong len, glong *items_read, glong *items_written, GError **err) { char *outbuf, *outptr; size_t outlen = 0; glong i; int n; g_return_val_if_fail (str != NULL, NULL); if (len < 0) { for (i = 0; str[i] != 0; i++) { if ((n = g_unichar_to_utf8 (str[i], NULL)) < 0) { g_set_error (err, G_CONVERT_ERROR, G_CONVERT_ERROR_ILLEGAL_SEQUENCE, "Illegal byte sequence encountered in the input."); if (items_written) *items_written = 0; if (items_read) *items_read = i; return NULL; } outlen += n; } } else { for (i = 0; i < len && str[i] != 0; i++) { if ((n = g_unichar_to_utf8 (str[i], NULL)) < 0) { g_set_error (err, G_CONVERT_ERROR, G_CONVERT_ERROR_ILLEGAL_SEQUENCE, "Illegal byte sequence encountered in the input."); if (items_written) *items_written = 0; if (items_read) *items_read = i; return NULL; } outlen += n; } } len = i; outptr = outbuf = g_malloc (outlen + 1); for (i = 0; i < len; i++) outptr += g_unichar_to_utf8 (str[i], outptr); *outptr = 0; if (items_written) *items_written = outlen; if (items_read) *items_read = i; return outbuf; } gunichar2 * g_ucs4_to_utf16 (const gunichar *str, glong len, glong *items_read, glong *items_written, GError **err) { gunichar2 *outbuf, *outptr; size_t outlen = 0; glong i; int n; g_return_val_if_fail (str != NULL, NULL); if (len < 0) { for (i = 0; str[i] != 0; i++) { if ((n = g_unichar_to_utf16 (str[i], NULL)) < 0) { g_set_error (err, G_CONVERT_ERROR, G_CONVERT_ERROR_ILLEGAL_SEQUENCE, "Illegal byte sequence encountered in the input."); if (items_written) *items_written = 0; if (items_read) *items_read = i; return NULL; } outlen += n; } } else { for (i = 0; i < len && str[i] != 0; i++) { if ((n = g_unichar_to_utf16 (str[i], NULL)) < 0) { g_set_error (err, G_CONVERT_ERROR, G_CONVERT_ERROR_ILLEGAL_SEQUENCE, "Illegal byte sequence encountered in the input."); if (items_written) *items_written = 0; if (items_read) *items_read = i; return NULL; } outlen += n; } } len = i; outptr = outbuf = g_malloc ((outlen + 1) * sizeof (gunichar2)); for (i = 0; i < len; i++) outptr += g_unichar_to_utf16 (str[i], outptr); *outptr = 0; if (items_written) *items_written = outlen; if (items_read) *items_read = i; return outbuf; } gpointer g_fixed_buffer_custom_allocator (gsize req_size, gpointer custom_alloc_data) { GFixedBufferCustomAllocatorData *fixed_buffer_custom_alloc_data = (GFixedBufferCustomAllocatorData *)custom_alloc_data; if (!fixed_buffer_custom_alloc_data) return NULL; fixed_buffer_custom_alloc_data->req_buffer_size = req_size; if (req_size > fixed_buffer_custom_alloc_data->buffer_size) return NULL; return fixed_buffer_custom_alloc_data->buffer; }
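/* Illustrative sketch, not part of the file above: a standalone program that
 * applies the same base/length arithmetic used by g_unichar_to_utf8 to
 * U+20AC (the euro sign), which falls in the c < 0x10000 bucket (base 224,
 * n = 3). The names here are hypothetical; only the bit manipulation mirrors
 * the encoder above. */
#include <stdio.h>

int
main (void)
{
	unsigned int c = 0x20AC;
	unsigned char out[3];

	out[2] = (c & 0x3f) | 0x80; c >>= 6;	/* low six bits -> 0xAC */
	out[1] = (c & 0x3f) | 0x80; c >>= 6;	/* next six bits -> 0x82 */
	out[0] = c | 224;			/* remaining bits plus base -> 0xE2 */

	printf ("%02X %02X %02X\n", out[0], out[1], out[2]);	/* prints: E2 82 AC */
	return 0;
}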
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./docs/design/coreclr/profiling/davbr-blog-archive/Debugging - SOS and IDs.md
*This blog post originally appeared on David Broman's blog on 12/18/2007* In this debugging post, I'll talk about the various IDs the profiling API exposes to your profiler, and how you can use SOS to give you more information about the IDs. As usual, this post assumes you're using CLR 2.x. ## S.O.What Now? SOS.DLL is a debugger extension DLL that ships with the CLR. You'll find it sitting alongside mscorwks.dll. While originally written as an extension to the windbg family of debuggers, Visual Studio can also load and use SOS. If you search the MSDN blogs for "SOS" you'll find lots of info on it. I'm not going to repeat all that's out there, but I'll give you a quick primer on getting it loaded. In windbg, you'll need mscorwks.dll to load first, and then you can load SOS. Often, I don't need SOS until well into my debugging session, at which point mscorwks.dll has already been loaded anyway. However, there are some cases where you'd like SOS loaded at the first possible moment, so you can use some of its commands early (like !bpmd to set a breakpoint on a managed method). So a surefire way to get SOS loaded ASAP is to have the debugger break when mscorwks gets loaded (e.g., "sxe ld mscorwks"). Once mscorwks is loaded, you can load SOS using the .loadby command: ``` 0:000\> sxe ld mscorwks 0:000\> g ModLoad: 79e70000 7a3ff000 C:\Windows\Microsoft.NET\Framework\v2.0.50727\mscorwks.dll eax=00000000 ebx=00000000 ecx=00000000 edx=00000000 esi=7efdd000 edi=20000000 eip=77a1a9fa esp=002fea38 ebp=002fea78 iopl=0 nv up ei pl nz na po nc cs=0023 ss=002b ds=002b es=002b fs=0053 gs=002b efl=00000202 ntdll!NtMapViewOfSection+0x12: 77a1a9fa c22800 ret 28h 0:000\> .loadby sos mscorwks ``` With SOS loaded, you can now use its commands to inspect the various IDs that the profiling API passes to your profiler. Note: The following contains implementation details of the runtime. While these details are useful as a debugging aid, your profiler code cannot make assumptions about them. These implementation details are subject to change at whim. ## FunctionID Walkthrough For starters, take a look at FunctionIDs. Your profiler receives a FunctionID anytime you hit a callback that needs to, well, identify a function! For example, when it's time to JIT, the CLR issues JITCompilationStarted (assuming your profiler subscribed to that callback), and one of the parameters to the callback is a FunctionID. You can then use that FunctionID in later calls your profiler makes back into the CLR, such as GetFunctionInfo2. As far as your profiler is concerned, a FunctionID is just an opaque number. It has no meaning in itself; it's merely a handle you can pass back into the CLR to refer to the function. Under the covers, however, a FunctionID is actually a pointer to an internal CLR data structure called a MethodDesc. I must warn you again that you cannot rely on this information when coding your profiler. The CLR team reserves the right to change the underlying meaning of a FunctionID to be something radically different in later versions. This info is for entertainment and debugging purposes only! Ok, so FunctionID = (MethodDesc \*). How does that help you? SOS just so happens to have a command to inspect MethodDescs: !dumpmd. So if you're in a debugger looking at your profiler code that's operating on a FunctionID, it can be beneficial to you to find out which function that FunctionID actually refers to. In the example below, the debugger will break in my profiler's JITCompilationStarted callback and look at the FunctionID. 
It's assumed that you've already loaded SOS as per above. ``` 0:000\> bu UnitTestSampleProfiler!SampleCallbackImpl::JITCompilationStarted 0:000\> g ... ``` ``` Breakpoint 0 hit eax=00c133f8 ebx=00000000 ecx=10001218 edx=00000001 esi=002fec74 edi=00000000 eip=10003fc0 esp=002fec64 ebp=002feca4 iopl=0 nv up ei pl nz na po nc cs=0023 ss=002b ds=002b es=002b fs=0053 gs=002b efl=00000202 UnitTestSampleProfiler!SampleCallbackImpl::JITCompilationStarted: 10003fc0 55 push ebp ``` The debugger is now sitting at the beginning of my profiler's JITCompilationStarted callback. Let's take a look at the parameters. ``` 0:000\> dv this = 0x00c133f8 functionID = 0x1e3170 fIsSafeToBlock = 1 ``` Aha, that's the FunctionID about to get JITted. Now use SOS to see what that function really is. ``` 0:000\> !dumpmd 0x1e3170 Method Name: test.Class1.Main(System.String[]) Class: 001e1288 MethodTable: 001e3180 mdToken: 06000001 Module: 001e2d8c IsJitted: no m\_CodeOrIL: ffffffff ``` Lots of juicy info here, though the Method Name typically is what helps me the most in my debugging sessions. mdToken tells us the metadata token for this method. MethodTable tells us where another internal CLR data structure is stored that contains information about the class containing the function. In fact, the profiling API's ClassID is simply a MethodTable \*. [Note: the "Class: 001e1288" in the output above is very different from the MethodTable, and thus different from the profiling API's ClassID. Don't let the name fool you!] So we could go and inspect a bit further by dumping information about the MethodTable: ``` 0:000\> !dumpmt 0x001e3180 EEClass: 001e1288 Module: 001e2d8c Name: test.Class1 mdToken: 02000002 (C:\proj\HelloWorld\Class1.exe) BaseSize: 0xc ComponentSize: 0x0 Number of IFaces in IFaceMap: 0 Slots in VTable: 6 ``` And of course, !dumpmt can be used anytime you come across a ClassID and want more info on it. [ Update 12/29/2011 In the original posting, I neglected to mention that there are cases where ClassIDs are not actually MethodTable \*'s, and thus cannot be inspected via !dumpmt. The most common cases are some kinds of arrays, though there are other cases as well, such as function pointers, byrefs, and others. In these cases, if you look at the ClassID value in a debugger, you'll see that it's not pointer-aligned. Some of the low-order bits may be intentionally set by the CLR to distinguish these ClassIDs from MethodTable pointers. Although !dumpmt cannot be used on these ClassIDs, you can safely call profiling API methods such as IsArrayClass or GetClassIDInfo(2) on them. ] ## IDs and their Dumpers Now that you see how this works, you'll need to know how the profiling IDs relate to the various SOS commands that dump info on them: | **ID** | **Internal CLR Structure** | **SOS command** | | AssemblyID | Assembly \* | !DumpAssembly | | AppDomainID | AppDomain \* | !DumpDomain | | ModuleID | Module \* | !DumpModule | | ClassID | MethodTable \* | !DumpMT | | ThreadID | Thread \* | !Threads (see note) | | FunctionID | MethodDesc \* | !DumpMD | | ObjectID | Object \* (i.e., a managed object) | !DumpObject | Note: !Threads takes no arguments, but simply dumps info on all threads that have ever run managed code. If you use "!Threads -special" you get to see other special threads separated out explicitly, including threads that perform GC in server-mode, the finalizer thread, and the debugger helper thread. ## More Useful SOS Commands It would probably be quicker to list what _isn't_ useful! 
I encourage you to do a !help to see what's included. Here's a sampling of what I commonly use: !u is a nice SOS analog to the windbg command "u". While the latter gives you a no-frills disassembly, !u works nicely for managed code, including spanning the disassembly from start to finish, and converting metadata tokens to names. !bpmd lets you place a breakpoint on a managed method. Just specify the module name and the fully-qualified method name. For example: ``` !bpmd MyModule.exe MyNamespace.MyClass.Foo ``` If the method hasn't jitted yet, no worries. A "pending" breakpoint is placed. If your profiler performs IL rewriting, then using !bpmd on startup to set a managed breakpoint can be a handy way to break into the debugger just before your instrumented code will run (which, in turn, is typically just after your instrumented code has been jitted). This can help you in reproducing and diagnosing issues your profiler may run into when instrumenting particular functions (due to something interesting about the signature, generics, etc.). !PrintException: If you use this without arguments you get to see a pretty-printing of the last outstanding managed exception on the thread; or specify a particular Exception object's address. Ok, that about does it for SOS. Hopefully this info can help you track down problems a little faster, or better yet, perhaps this can help you step through and verify your code before problems arise.
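To tie the walkthrough back to the profiler side, here's a minimal sketch (not from the original post) of what the JITCompilationStarted callback the debugger broke into above might look like in plain C, using the C-style COM vtable calling convention. The `g_info` variable is a hypothetical ICorProfilerInfo2 \* assumed to have been captured during Initialize; the only point being illustrated is that the FunctionID stays opaque and is simply handed back to the CLR:

```
/* Hypothetical sketch: g_info is assumed to be an ICorProfilerInfo2*
 * captured in the profiler's Initialize callback. */
HRESULT STDMETHODCALLTYPE
JITCompilationStarted (ICorProfilerCallback2 *self, FunctionID functionId, BOOL fIsSafeToBlock)
{
    ClassID classId;
    ModuleID moduleId;
    mdToken token;

    /* Never dereference functionId yourself (it's a MethodDesc* only as an
     * implementation detail); ask the CLR to resolve it instead. */
    return g_info->lpVtbl->GetFunctionInfo2 (g_info, functionId, 0 /* no COR_PRF_FRAME_INFO */,
                                             &classId, &moduleId, &token, 0, NULL, NULL);
}
```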
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/mono/mono/tests/libtest.c
#include <config.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <glib.h> #include <gmodule.h> #include <errno.h> #include <time.h> #include <math.h> #include <setjmp.h> #include <signal.h> #include "../utils/mono-errno.h" #include "../utils/mono-compiler.h" #ifndef HOST_WIN32 #include <dlfcn.h> #endif #ifdef WIN32 #include <windows.h> #include "initguid.h" #else #include <pthread.h> #endif #ifdef __cplusplus extern "C" { #endif #ifdef WIN32 #define STDCALL __stdcall #else #define STDCALL #define __thiscall /* nothing */ #endif #ifdef __GNUC__ #pragma GCC diagnostic ignored "-Wmissing-prototypes" #endif #ifdef WIN32 extern __declspec(dllimport) void __stdcall CoTaskMemFree(void *ptr); #endif typedef int (STDCALL *SimpleDelegate) (int a); #if defined(WIN32) && defined (_MSC_VER) #define LIBTEST_API __declspec(dllexport) #elif defined(__GNUC__) #define LIBTEST_API __attribute__ ((__visibility__ ("default"))) #else #define LIBTEST_API #endif static void marshal_free (void *ptr) { #ifdef WIN32 CoTaskMemFree (ptr); #else g_free (ptr); #endif } static void* marshal_alloc (gsize size) { #ifdef WIN32 return CoTaskMemAlloc (size); #else return g_malloc (size); #endif } static void* marshal_alloc0 (gsize size) { #ifdef WIN32 void* ptr = CoTaskMemAlloc (size); memset(ptr, 0, size); return ptr; #else return g_malloc0 (size); #endif } static char* marshal_strdup (const char *str) { #ifdef WIN32 if (!str) return NULL; char *buf = (char *) CoTaskMemAlloc (strlen (str) + 1); return strcpy (buf, str); #else return g_strdup (str); #endif } static gunichar2* marshal_bstr_alloc(const gchar* str) { #ifdef WIN32 gunichar2* temp = g_utf8_to_utf16 (str, -1, NULL, NULL, NULL); gunichar2* ret = SysAllocString (temp); g_free (temp); return ret; #else gchar* ret = NULL; int slen = strlen (str); gunichar2* temp; /* allocate len + 1 utf16 characters plus 4 byte integer for length*/ ret = (gchar *)g_malloc ((slen + 1) * sizeof(gunichar2) + sizeof(guint32)); if (ret == NULL) return NULL; temp = g_utf8_to_utf16 (str, -1, NULL, NULL, NULL); memcpy (ret + sizeof(guint32), temp, slen * sizeof(gunichar2)); * ((guint32 *) ret) = slen * sizeof(gunichar2); ret [4 + slen * sizeof(gunichar2)] = 0; ret [5 + slen * sizeof(gunichar2)] = 0; return (gunichar2*)(ret + 4); #endif } #define marshal_new0(type,size) ((type *) marshal_alloc0 (sizeof (type)* (size))) LIBTEST_API int STDCALL mono_cominterop_is_supported (void) { #if defined(TARGET_X86) || defined(TARGET_AMD64) return 1; #endif return 0; } LIBTEST_API unsigned short* STDCALL test_lpwstr_marshal (unsigned short* chars, int length) { int i = 0; unsigned short *res; res = (unsigned short *)marshal_alloc (2 * (length + 1)); // printf("test_lpwstr_marshal()\n"); while ( i < length ) { // printf("X|%u|\n", chars[i]); res [i] = chars[i]; i++; } res [i] = 0; return res; } LIBTEST_API void STDCALL test_lpwstr_marshal_out (unsigned short** chars) { int i = 0; const char abc[] = "ABC"; glong len = strlen(abc); *chars = (unsigned short *)marshal_alloc (2 * (len + 1)); while ( i < len ) { (*chars) [i] = abc[i]; i++; } (*chars) [i] = 0; } typedef struct { int b; int a; int c; } union_test_1_type; LIBTEST_API int STDCALL mono_union_test_1 (union_test_1_type u1) { // printf ("Got values %d %d %d\n", u1.b, u1.a, u1.c); return u1.a + u1.b + u1.c; } LIBTEST_API int STDCALL mono_return_int (int a) { // printf ("Got value %d\n", a); return a; } LIBTEST_API float STDCALL mono_test_marshal_pass_return_float (float f) { return f + 1.0; } struct ss { int i; }; LIBTEST_API int 
STDCALL mono_return_int_ss (struct ss a) { // printf ("Got value %d\n", a.i); return a.i; } LIBTEST_API struct ss STDCALL mono_return_ss (struct ss a) { // printf ("Got value %d\n", a.i); a.i++; return a; } struct sc1 { char c[1]; }; LIBTEST_API struct sc1 STDCALL mono_return_sc1 (struct sc1 a) { // printf ("Got value %d\n", a.c[0]); a.c[0]++; return a; } struct sc3 { char c[3]; }; LIBTEST_API struct sc3 STDCALL mono_return_sc3 (struct sc3 a) { // printf ("Got values %d %d %d\n", a.c[0], a.c[1], a.c[2]); a.c[0]++; a.c[1] += 2; a.c[2] += 3; return a; } struct sc5 { char c[5]; }; LIBTEST_API struct sc5 STDCALL mono_return_sc5 (struct sc5 a) { // printf ("Got values %d %d %d %d %d\n", a.c[0], a.c[1], a.c[2], a.c[3], a.c[4]); a.c[0]++; a.c[1] += 2; a.c[2] += 3; a.c[3] += 4; a.c[4] += 5; return a; } union su { int i1; int i2; }; LIBTEST_API int STDCALL mono_return_int_su (union su a) { // printf ("Got value %d\n", a.i1); return a.i1; } struct FI { float f1; float f2; float f3; }; struct NestedFloat { struct FI fi; float f4; }; LIBTEST_API struct NestedFloat STDCALL mono_return_nested_float (void) { struct NestedFloat f; f.fi.f1 = 1.0; f.fi.f2 = 2.0; f.fi.f3 = 3.0; f.f4 = 4.0; return f; } struct Scalar4 { double val[4]; }; struct Rect { int x; int y; int width; int height; }; LIBTEST_API char * STDCALL mono_return_struct_4_double (void *ptr, struct Rect rect, struct Scalar4 sc4, int a, int b, int c) { char *buffer = (char *)marshal_alloc (1024 * sizeof (char)); sprintf (buffer, "sc4 = {%.1f, %.1f, %.1f, %.1f }, a=%x, b=%x, c=%x\n", (float) sc4.val [0], (float) sc4.val [1], (float) sc4.val [2], (float) sc4.val [3], a, b, c); return buffer; } LIBTEST_API int STDCALL mono_test_many_int_arguments (int a, int b, int c, int d, int e, int f, int g, int h, int i, int j); LIBTEST_API short STDCALL mono_test_many_short_arguments (short a, short b, short c, short d, short e, short f, short g, short h, short i, short j); LIBTEST_API char STDCALL mono_test_many_char_arguments (char a, char b, char c, char d, char e, char f, char g, char h, char i, char j); LIBTEST_API int STDCALL mono_test_many_int_arguments (int a, int b, int c, int d, int e, int f, int g, int h, int i, int j) { return a + b + c + d + e + f + g + h + i + j; } LIBTEST_API short STDCALL mono_test_many_short_arguments (short a, short b, short c, short d, short e, short f, short g, short h, short i, short j) { return a + b + c + d + e + f + g + h + i + j; } LIBTEST_API char STDCALL mono_test_many_byte_arguments (char a, char b, char c, char d, char e, char f, char g, char h, char i, char j) { return a + b + c + d + e + f + g + h + i + j; } LIBTEST_API float STDCALL mono_test_many_float_arguments (float a, float b, float c, float d, float e, float f, float g, float h, float i, float j) { return a + b + c + d + e + f + g + h + i + j; } LIBTEST_API double STDCALL mono_test_many_double_arguments (double a, double b, double c, double d, double e, double f, double g, double h, double i, double j) { return a + b + c + d + e + f + g + h + i + j; } LIBTEST_API double STDCALL mono_test_split_double_arguments (double a, double b, float c, double d, double e) { return a + b + c + d + e; } LIBTEST_API int STDCALL mono_test_puts_static (char *s) { // printf ("TEST %s\n", s); return 1; } typedef int (STDCALL *SimpleDelegate3) (int a, int b); LIBTEST_API int STDCALL mono_invoke_delegate (SimpleDelegate3 delegate) { int res; // printf ("start invoke %p\n", delegate); res = delegate (2, 3); // printf ("end invoke\n"); return res; } LIBTEST_API int STDCALL 
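#if 0
/* A minimal native-side sketch (names are illustrative): any C function with
 * a matching signature can stand in for a marshalled delegate when smoke
 * testing the delegate-invoking entry points above. mono_invoke_delegate ()
 * calls its argument with (2, 3): */
static int STDCALL
example_add (int a, int b)
{
	return a + b;
}

static void
example_smoke_invoke_delegate (void)
{
	g_assert (mono_invoke_delegate (example_add) == 5);
}
#endif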
mono_invoke_simple_delegate (SimpleDelegate d) { return d (4); } LIBTEST_API int STDCALL mono_test_marshal_char (short a1) { if (a1 == 'a') return 0; return 1; } LIBTEST_API void STDCALL mono_test_marshal_char_array (gunichar2 *s) { const char m[] = "abcdef"; gunichar2* s2; glong len; s2 = g_utf8_to_utf16 (m, -1, NULL, &len, NULL); len = (len * 2) + 2; memcpy (s, s2, len); g_free (s2); } LIBTEST_API int STDCALL mono_test_marshal_ansi_char_array (char *s) { const char m[] = "abcdef"; if (strncmp ("qwer", s, 4)) return 1; memcpy (s, m, sizeof (m)); return 0; } LIBTEST_API int STDCALL mono_test_marshal_unicode_char_array (gunichar2 *s) { const char m[] = "abcdef"; const char expected[] = "qwer"; gunichar2 *s1, *s2; glong len1, len2; s1 = g_utf8_to_utf16 (m, -1, NULL, &len1, NULL); s2 = g_utf8_to_utf16 (expected, -1, NULL, &len2, NULL); len1 = (len1 * 2); len2 = (len2 * 2); if (memcmp (s, s2, len2)) return 1; memcpy (s, s1, len1); return 0; } LIBTEST_API int STDCALL mono_test_empty_pinvoke (int i) { return i; } LIBTEST_API int STDCALL mono_test_marshal_bool_byref (int a, int *b, int c) { int res = *b; *b = 1; return res; } LIBTEST_API int STDCALL mono_test_marshal_bool_in_as_I1_U1 (char bTrue, char bFalse) { if (!bTrue) return 1; if (bFalse) return 2; return 0; } LIBTEST_API int STDCALL mono_test_marshal_bool_out_as_I1_U1 (char* bTrue, char* bFalse) { if (!bTrue || !bFalse) return 3; *bTrue = 1; *bFalse = 0; return 0; } LIBTEST_API int STDCALL mono_test_marshal_bool_ref_as_I1_U1 (char* bTrue, char* bFalse) { if (!bTrue || !bFalse) return 4; if (!(*bTrue)) return 5; if (*bFalse) return 6; *bFalse = 1; *bTrue = 0; return 0; } LIBTEST_API int STDCALL mono_test_marshal_array (int *a1) { int i, sum = 0; for (i = 0; i < 50; i++) sum += a1 [i]; return sum; } LIBTEST_API int STDCALL mono_test_marshal_inout_array (int *a1) { int i, sum = 0; for (i = 0; i < 50; i++) { sum += a1 [i]; a1 [i] = 50 - a1 [i]; } return sum; } LIBTEST_API int /* cdecl */ mono_test_marshal_inout_array_cdecl (int *a1) { return mono_test_marshal_inout_array (a1); } LIBTEST_API int STDCALL mono_test_marshal_out_array (int *a1) { int i; for (i = 0; i < 50; i++) { a1 [i] = i; } return 0; } LIBTEST_API int STDCALL mono_test_marshal_out_byref_array_out_size_param (int **out_arr, int *out_len) { int *arr; int i, len; len = 4; arr = (gint32 *)marshal_alloc (sizeof (gint32) * len); for (i = 0; i < len; ++i) arr [i] = i; *out_arr = arr; *out_len = len; return 0; } LIBTEST_API int STDCALL mono_test_marshal_out_lparray_out_size_param (int *arr, int *out_len) { int i, len; len = 4; for (i = 0; i < len; ++i) arr [i] = i; *out_len = len; return 0; } LIBTEST_API int STDCALL mono_test_marshal_inout_nonblittable_array (gunichar2 *a1) { int i, sum = 0; for (i = 0; i < 10; i++) { a1 [i] = 'F'; } return sum; } typedef struct { int a; int b; int c; const char *d; gunichar2 *d2; } simplestruct; typedef struct { double x; double y; } point; LIBTEST_API simplestruct STDCALL mono_test_return_vtype (int i) { simplestruct res; static gunichar2 test2 [] = { 'T', 'E', 'S', 'T', '2', 0 }; res.a = 0; res.b = 1; res.c = 0; res.d = "TEST"; res.d2 = test2; return res; } LIBTEST_API void STDCALL mono_test_delegate_struct (void) { // printf ("TEST\n"); } typedef char* (STDCALL *ReturnStringDelegate) (const char *s); LIBTEST_API char * STDCALL mono_test_return_string (ReturnStringDelegate func) { char *res; // printf ("mono_test_return_string\n"); res = func ("TEST"); marshal_free (res); // printf ("got string: %s\n", res); return marshal_strdup ("12345"); } typedef 
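#if 0
/* Ownership sketch (illustrative name): a conforming ReturnStringDelegate
 * must return memory that mono_test_return_string () above can release with
 * marshal_free (), i.e. memory from the matching allocator family
 * (CoTaskMemAlloc on Windows, g_malloc elsewhere): */
static char * STDCALL
example_return_string (const char *s)
{
	return marshal_strdup (s); /* paired with the marshal_free () in the caller */
}
#endif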
int (STDCALL *RefVTypeDelegate) (int a, simplestruct *ss, int b); LIBTEST_API int STDCALL mono_test_ref_vtype (int a, simplestruct *ss, int b, RefVTypeDelegate func) { if (a == 1 && b == 2 && ss->a == 0 && ss->b == 1 && ss->c == 0 && !strcmp (ss->d, "TEST1")) { ss->a = 1; ss->b = 0; ss->c = 1; ss->d = "TEST2"; return func (a, ss, b); } return 1; } typedef int (STDCALL *OutVTypeDelegate) (int a, simplestruct *ss, int b); LIBTEST_API int STDCALL mono_test_marshal_out_struct (int a, simplestruct *ss, int b, OutVTypeDelegate func) { /* Check that the input pointer is ignored */ ss->d = (const char *)0x12345678; func (a, ss, b); if (ss->a && ss->b && ss->c && !strcmp (ss->d, "TEST3")) return 0; else return 1; } typedef int (STDCALL *InVTypeDelegate) (int a, simplestruct *ss, int b); LIBTEST_API int STDCALL mono_test_marshal_in_struct (int a, simplestruct *ss, int b, InVTypeDelegate func) { simplestruct ss2; int res; memcpy (&ss2, ss, sizeof (simplestruct)); res = func (a, ss, b); if (res) { printf ("mono_test_marshal_in_struct () failed: %d\n", res); return 1; } /* Check that no modifications is made to the struct */ if (ss2.a == ss->a && ss2.b == ss->b && ss2.c == ss->c && ss2.d == ss->d) return 0; else return 1; } typedef struct { int a; SimpleDelegate func, func2, func3; } DelegateStruct; LIBTEST_API DelegateStruct STDCALL mono_test_marshal_delegate_struct (DelegateStruct ds) { DelegateStruct res; res.a = ds.func (ds.a) + ds.func2 (ds.a) + (ds.func3 == NULL ? 0 : 1); res.func = ds.func; res.func2 = ds.func2; res.func3 = NULL; return res; } LIBTEST_API int STDCALL mono_test_marshal_byref_struct (simplestruct *ss, int a, int b, int c, char *d) { gboolean res = (ss->a == a && ss->b == b && ss->c == c && strcmp (ss->d, d) == 0); marshal_free ((char*)ss->d); ss->a = !ss->a; ss->b = !ss->b; ss->c = !ss->c; ss->d = marshal_strdup ("DEF"); return res ? 0 : 1; } typedef struct { int a; int b; int c; char *d; unsigned char e; double f; unsigned char g; guint64 h; } simplestruct2; LIBTEST_API int STDCALL mono_test_marshal_struct2 (simplestruct2 ss) { if (ss.a == 0 && ss.b == 1 && ss.c == 0 && !strcmp (ss.d, "TEST") && ss.e == 99 && ss.f == 1.5 && ss.g == 42 && ss.h == (guint64)123) return 0; return 1; } /* on HP some of the struct should be on the stack and not in registers */ LIBTEST_API int STDCALL mono_test_marshal_struct2_2 (int i, int j, int k, simplestruct2 ss) { if (i != 10 || j != 11 || k != 12) return 1; if (ss.a == 0 && ss.b == 1 && ss.c == 0 && !strcmp (ss.d, "TEST") && ss.e == 99 && ss.f == 1.5 && ss.g == 42 && ss.h == (guint64)123) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_lpstruct (simplestruct *ss) { if (ss->a == 0 && ss->b == 1 && ss->c == 0 && !strcmp (ss->d, "TEST")) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_lpstruct_blittable (point *p) { if (p->x == 1.0 && p->y == 2.0) return 0; else return 1; } LIBTEST_API int STDCALL mono_test_marshal_struct_array (simplestruct2 *ss) { if (! (ss[0].a == 0 && ss[0].b == 1 && ss[0].c == 0 && !strcmp (ss[0].d, "TEST") && ss[0].e == 99 && ss[0].f == 1.5 && ss[0].g == 42 && ss[0].h == (guint64)123)) return 1; if (! 
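#if 0
/* Layout probe (illustrative): simplestruct2 above mixes 1-, 4- and 8-byte
 * fields, so padding -- not just field order -- determines the native size
 * that the managed LayoutKind.Sequential counterpart has to match: */
#include <stddef.h>
static void
example_dump_simplestruct2_layout (void)
{
	printf ("sizeof (simplestruct2) = %u, offsetof (f) = %u\n",
	        (unsigned) sizeof (simplestruct2),
	        (unsigned) offsetof (simplestruct2, f));
}
#endif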
(ss[1].a == 0 && ss[1].b == 0 && ss[1].c == 0 && !strcmp (ss[1].d, "TEST2") && ss[1].e == 100 && ss[1].f == 2.5 && ss[1].g == 43 && ss[1].h == (guint64)124)) return 1; return 0; } typedef struct long_align_struct { gint32 a; gint64 b; gint64 c; } long_align_struct; LIBTEST_API int STDCALL mono_test_marshal_long_align_struct_array (long_align_struct *ss) { return ss[0].a + ss[0].b + ss[0].c + ss[1].a + ss[1].b + ss[1].c; } LIBTEST_API simplestruct2 * STDCALL mono_test_marshal_class (int i, int j, int k, simplestruct2 *ss, int l) { simplestruct2 *res; if (!ss) return NULL; if (i != 10 || j != 11 || k != 12 || l != 14) return NULL; if (! (ss->a == 0 && ss->b == 1 && ss->c == 0 && !strcmp (ss->d, "TEST") && ss->e == 99 && ss->f == 1.5 && ss->g == 42 && ss->h == (guint64)123)) return NULL; res = marshal_new0 (simplestruct2, 1); memcpy (res, ss, sizeof (simplestruct2)); res->d = marshal_strdup ("TEST"); return res; } LIBTEST_API int STDCALL mono_test_marshal_byref_class (simplestruct2 **ssp) { simplestruct2 *ss = *ssp; simplestruct2 *res; if (! (ss->a == 0 && ss->b == 1 && ss->c == 0 && !strcmp (ss->d, "TEST") && ss->e == 99 && ss->f == 1.5 && ss->g == 42 && ss->h == (guint64)123)) return 1; res = marshal_new0 (simplestruct2, 1); memcpy (res, ss, sizeof (simplestruct2)); res->d = marshal_strdup ("TEST-RES"); *ssp = res; return 0; } MONO_DISABLE_WARNING (4172) // returning address of local static void * get_sp (void) { int i; void *p; /* Yes, this is correct, we are only trying to determine the value of the stack here */ p = &i; return p; } MONO_RESTORE_WARNING LIBTEST_API int STDCALL reliable_delegate (int a) { return a; } /* * Checks whether get_sp() works as expected. It doesn't work with gcc-2.95.3 on linux. */ static gboolean is_get_sp_reliable (void) { void *sp1, *sp2; reliable_delegate(1); sp1 = get_sp(); reliable_delegate(1); sp2 = get_sp(); return sp1 == sp2; } LIBTEST_API int STDCALL mono_test_marshal_delegate (SimpleDelegate delegate) { void *sp1, *sp2; /* Check that the delegate wrapper is stdcall */ delegate (2); sp1 = get_sp (); delegate (2); sp2 = get_sp (); if (is_get_sp_reliable()) g_assert (sp1 == sp2); return delegate (2); } static int STDCALL inc_cb (int i) { return i + 1; } LIBTEST_API int STDCALL mono_test_marshal_out_delegate (SimpleDelegate *delegate) { *delegate = inc_cb; return 0; } LIBTEST_API SimpleDelegate STDCALL mono_test_marshal_return_delegate (SimpleDelegate delegate) { return delegate; } typedef int (STDCALL *DelegateByrefDelegate) (void *); LIBTEST_API int STDCALL mono_test_marshal_delegate_ref_delegate (DelegateByrefDelegate del) { int (STDCALL *ptr) (int i); del (&ptr); return ptr (54); } static int STDCALL return_plus_one (int i) { return i + 1; } LIBTEST_API SimpleDelegate STDCALL mono_test_marshal_return_delegate_2 (void) { return return_plus_one; } typedef simplestruct (STDCALL *SimpleDelegate2) (simplestruct ss); static gboolean is_utf16_equals (gunichar2 *s1, const char *s2) { char *s; int res; s = g_utf16_to_utf8 (s1, -1, NULL, NULL, NULL); res = strcmp (s, s2); g_free (s); return res == 0; } LIBTEST_API int STDCALL mono_test_marshal_struct (simplestruct ss) { if (ss.a == 0 && ss.b == 1 && ss.c == 0 && !strcmp (ss.d, "TEST") && is_utf16_equals (ss.d2, "OK")) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_delegate2 (SimpleDelegate2 delegate) { simplestruct ss, res; ss.a = 0; ss.b = 1; ss.c = 0; ss.d = "TEST"; ss.d2 = g_utf8_to_utf16 ("TEST2", -1, NULL, NULL, NULL); res = delegate (ss); if (! 
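#if 0
/* What the get_sp ()/is_get_sp_reliable () machinery above is for, as a
 * stand-alone probe (illustrative): under stdcall the callee pops its own
 * arguments, so two identical calls must leave the stack pointer unchanged;
 * a cdecl/stdcall mismatch shows up as a drifting stack pointer. */
static void
example_probe_callee_pops_args (SimpleDelegate d)
{
	void *before, *after;
	d (1);
	before = get_sp ();
	d (1);
	after = get_sp ();
	if (before != after)
		printf ("calling-convention mismatch suspected\n");
}
#endif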
(res.a && !res.b && res.c && !strcmp (res.d, "TEST-RES") && is_utf16_equals (res.d2, "TEST2-RES"))) return 1; return 0; } typedef simplestruct* (STDCALL *SimpleDelegate4) (simplestruct *ss); LIBTEST_API int STDCALL mono_test_marshal_delegate4 (SimpleDelegate4 delegate) { simplestruct ss; simplestruct *res; ss.a = 0; ss.b = 1; ss.c = 0; ss.d = "TEST"; /* Check argument */ res = delegate (&ss); if (!res) return 1; /* Check return value */ if (! (!res->a && res->b && !res->c && !strcmp (res->d, "TEST"))) return 2; /* Check NULL argument and NULL result */ res = delegate (NULL); if (res) return 3; return 0; } typedef int (STDCALL *SimpleDelegate5) (simplestruct **ss); LIBTEST_API int STDCALL mono_test_marshal_delegate5 (SimpleDelegate5 delegate) { simplestruct ss; int res; simplestruct *ptr; ss.a = 0; ss.b = 1; ss.c = 0; ss.d = "TEST"; ptr = &ss; res = delegate (&ptr); if (res != 0) return 1; if (!(ptr->a && !ptr->b && ptr->c && !strcmp (ptr->d, "RES"))) return 2; return 0; } LIBTEST_API int STDCALL mono_test_marshal_delegate6 (SimpleDelegate5 delegate) { delegate (NULL); return 0; } typedef int (STDCALL *SimpleDelegate7) (simplestruct **ss); LIBTEST_API int STDCALL mono_test_marshal_delegate7 (SimpleDelegate7 delegate) { int res; simplestruct *ptr; /* Check that the input pointer is ignored */ ptr = (simplestruct *)0x12345678; res = delegate (&ptr); if (res != 0) return 1; if (!(ptr->a && !ptr->b && ptr->c && !strcmp (ptr->d, "RES"))) return 2; return 0; } typedef int (STDCALL *InOutByvalClassDelegate) (simplestruct *ss); LIBTEST_API int STDCALL mono_test_marshal_inout_byval_class_delegate (InOutByvalClassDelegate delegate) { int res; simplestruct ss; ss.a = FALSE; ss.b = TRUE; ss.c = FALSE; ss.d = g_strdup_printf ("%s", "FOO"); res = delegate (&ss); if (res != 0) return 1; if (!(ss.a && !ss.b && ss.c && !strcmp (ss.d, "RES"))) return 2; return 0; } typedef int (STDCALL *SimpleDelegate8) (gunichar2 *s); LIBTEST_API int STDCALL mono_test_marshal_delegate8 (SimpleDelegate8 delegate, gunichar2 *s) { return delegate (s); } typedef int (STDCALL *return_int_fnt) (int i); typedef int (STDCALL *SimpleDelegate9) (return_int_fnt d); LIBTEST_API int STDCALL mono_test_marshal_delegate9 (SimpleDelegate9 delegate, gpointer ftn) { return delegate ((return_int_fnt)ftn); } static int STDCALL return_self (int i) { return i; } LIBTEST_API int STDCALL mono_test_marshal_delegate10 (SimpleDelegate9 delegate) { return delegate (return_self); } typedef int (STDCALL *PrimitiveByrefDelegate) (int *i); LIBTEST_API int STDCALL mono_test_marshal_primitive_byref_delegate (PrimitiveByrefDelegate delegate) { int i = 1; int res = delegate (&i); if (res != 0) return res; if (i != 2) return 2; return 0; } typedef int (STDCALL *return_int_delegate) (int i); typedef return_int_delegate (STDCALL *ReturnDelegateDelegate) (void); LIBTEST_API int STDCALL mono_test_marshal_return_delegate_delegate (ReturnDelegateDelegate d) { return (d ()) (55); } typedef int (STDCALL *VirtualDelegate) (int); LIBTEST_API int STDCALL mono_test_marshal_virtual_delegate (VirtualDelegate del) { return del (42); } typedef char* (STDCALL *IcallDelegate) (const char *); LIBTEST_API int STDCALL mono_test_marshal_icall_delegate (IcallDelegate del) { char *res = del ("ABC"); return strcmp (res, "ABC") == 0 ? 
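#if 0
/* Contract sketch for PrimitiveByrefDelegate above (illustrative name): the
 * managed callback is expected to observe i == 1, overwrite it with 2 and
 * return 0 -- a native stand-in with the same behaviour: */
static int STDCALL
example_increment_byref (int *i)
{
	if (*i != 1)
		return 1;
	*i = 2;
	return 0;
}
#endif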
0 : 1; } typedef char* (STDCALL *NullableReturnDelegate) (void); LIBTEST_API void STDCALL mono_test_marshal_nullable_ret_delegate (NullableReturnDelegate del) { del (); } LIBTEST_API int STDCALL mono_test_marshal_stringbuilder (char *s, int n) { const char m[] = "This is my message. Isn't it nice?"; if (strcmp (s, "ABCD") != 0) return 1; memcpy(s, m, n); s [n] = '\0'; return 0; } LIBTEST_API int STDCALL mono_test_marshal_stringbuilder_append (char *s, int length) { const char out_sentinel[] = "CSHARP_"; const char out_len = strlen (out_sentinel); for (int i=0; i < length; i++) { s [i] = out_sentinel [i % out_len]; } s [length] = '\0'; return 0; } LIBTEST_API int STDCALL mono_test_marshal_stringbuilder_default (char *s, int n) { const char m[] = "This is my message. Isn't it nice?"; memcpy(s, m, n); s [n] = '\0'; return 0; } LIBTEST_API int STDCALL mono_test_marshal_stringbuilder_unicode (gunichar2 *s, int n) { const char m[] = "This is my message. Isn't it nice?"; gunichar2* s2; glong len; s2 = g_utf8_to_utf16 (m, -1, NULL, &len, NULL); len = (len * 2) + 2; if (len > (n * 2)) len = n * 2; memcpy (s, s2, len); g_free (s2); return 0; } LIBTEST_API void STDCALL mono_test_marshal_stringbuilder_out (char **s) { const char m[] = "This is my message. Isn't it nice?"; char *str; str = (char *)marshal_alloc (strlen (m) + 1); memcpy (str, m, strlen (m) + 1); *s = str; } LIBTEST_API int STDCALL mono_test_marshal_stringbuilder_out_unicode (gunichar2 **s) { const char m[] = "This is my message. Isn't it nice?"; gunichar2 *s2; glong len; s2 = g_utf8_to_utf16 (m, -1, NULL, &len, NULL); len = (len * 2) + 2; *s = (gunichar2 *)marshal_alloc (len); memcpy (*s, s2, len); g_free (s2); return 0; } LIBTEST_API int STDCALL mono_test_marshal_stringbuilder_ref (char **s) { const char m[] = "This is my message. Isn't it nice?"; char *str; if (strcmp (*s, "ABC")) return 1; str = (char *)marshal_alloc (strlen (m) + 1); memcpy (str, m, strlen (m) + 1); *s = str; return 0; } LIBTEST_API void STDCALL mono_test_marshal_stringbuilder_utf16_tolower (short *s, int n) { for (int i = 0; i < n; i++) s[i] = tolower(s[i]); } #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wc++-compat" #endif /* * Standard C and C++ doesn't allow empty structs, empty structs will always have a size of 1 byte. * GCC have an extension to allow empty structs, https://gcc.gnu.org/onlinedocs/gcc/Empty-Structures.html. * This cause a little dilemma since runtime build using none GCC compiler will not be compatible with * GCC build C libraries and the other way around. On platforms where empty structs has size of 1 byte * it must be represented in call and cannot be dropped. On Windows x64 structs will always be represented in the call * meaning that an empty struct must have a representation in the callee in order to correctly follow the ABI used by the * C/C++ standard and the runtime. */ typedef struct { #if !defined(__GNUC__) || defined(TARGET_WIN32) char a; #endif } EmptyStruct; #ifdef __GNUC__ #pragma GCC diagnostic pop #endif LIBTEST_API int STDCALL mono_test_marshal_empty_string_array (char **array) { return (array == NULL) ? 
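#if 0
/* Buffer contract behind the StringBuilder tests above (sketch; name is
 * illustrative): the runtime passes a caller-allocated buffer sized to the
 * StringBuilder's capacity n, and the native side may write at most n chars
 * plus the terminator (the tests above store s [n] = '\0', so n + 1 bytes of
 * room are assumed). A conforming filler: */
static void
example_fill_capped (char *buf, int capacity, const char *src)
{
	int i;
	for (i = 0; i < capacity && src [i]; i++)
		buf [i] = src [i];
	buf [i] = '\0';
}
#endif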
0 : 1; } LIBTEST_API int STDCALL mono_test_marshal_string_array (char **array) { if (strcmp (array [0], "ABC")) return 1; if (strcmp (array [1], "DEF")) return 2; if (array [2] != NULL) return 3; return 0; } LIBTEST_API int STDCALL mono_test_marshal_byref_string_array (char ***array) { if (*array == NULL) return 0; if (strcmp ((*array) [0], "Alpha")) return 2; if (strcmp ((*array) [1], "Beta")) return 2; if (strcmp ((*array) [2], "Gamma")) return 2; return 1; } LIBTEST_API int STDCALL mono_test_marshal_stringbuilder_array (char **array) { if (strcmp (array [0], "ABC")) return 1; if (strcmp (array [1], "DEF")) return 2; strcpy (array [0], "DEF"); strcpy (array [1], "ABC"); return 0; } LIBTEST_API int STDCALL mono_test_marshal_unicode_string_array (gunichar2 **array, char **array2) { GError *gerror = NULL; char *s; s = g_utf16_to_utf8 (array [0], -1, NULL, NULL, &gerror); if (strcmp (s, "ABC")) { g_free (s); return 1; } else g_free (s); s = g_utf16_to_utf8 (array [1], -1, NULL, NULL, &gerror); if (strcmp (s, "DEF")) { g_free (s); return 2; } else g_free (s); if (strcmp (array2 [0], "ABC")) return 3; if (strcmp (array2 [1], "DEF")) return 4; return 0; } /* this does not work on Redhat gcc 2.96 */ LIBTEST_API int STDCALL mono_test_empty_struct (int a, EmptyStruct es, int b) { // printf ("mono_test_empty_struct %d %d\n", a, b); // Intel icc on ia64 passes 'es' in 2 registers #if defined(__ia64) && defined(__INTEL_COMPILER) return 0; #else if (a == 1 && b == 2) return 0; return 1; #endif } LIBTEST_API EmptyStruct STDCALL mono_test_return_empty_struct (int a) { EmptyStruct s; memset (&s, 0, sizeof (s)); #if !(defined(__i386__) && defined(__clang__)) /* https://bugzilla.xamarin.com/show_bug.cgi?id=58901 */ g_assert (a == 42); #endif return s; } typedef struct { char a[100]; } ByValStrStruct; LIBTEST_API ByValStrStruct * STDCALL mono_test_byvalstr_gen (void) { ByValStrStruct *ret; ret = (ByValStrStruct *)malloc (sizeof (ByValStrStruct)); memset(ret, 'a', sizeof(ByValStrStruct)-1); ret->a[sizeof(ByValStrStruct)-1] = 0; return ret; } LIBTEST_API int STDCALL mono_test_byvalstr_check (ByValStrStruct* data, char* correctString) { int ret; ret = strcmp(data->a, correctString); // printf ("T1: %s\n", data->a); // printf ("T2: %s\n", correctString); /* we need g_free because the allocation was performed by mono_test_byvalstr_gen */ g_free (data); return (ret != 0); } typedef struct { guint16 a[4]; int flag; } ByValStrStruct_Unicode; LIBTEST_API int STDCALL mono_test_byvalstr_check_unicode (ByValStrStruct_Unicode *ref, int test) { if (ref->flag != 0x1234abcd){ printf ("overwritten data"); return 1; } if (test == 1 || test == 3){ if (ref->a [0] != '1' || ref->a [1] != '2' || ref->a [2] != '3') return 1; return 0; } if (test == 2){ if (ref->a [0] != '1' || ref->a [1] != '2') return 1; return 0; } return 10; } LIBTEST_API int STDCALL NameManglingAnsi (char *data) { return data [0] + data [1] + data [2]; } LIBTEST_API int STDCALL NameManglingAnsiA (char *data) { g_assert_not_reached (); } LIBTEST_API int STDCALL NameManglingAnsiW (char *data) { g_assert_not_reached (); } LIBTEST_API int STDCALL NameManglingAnsi2A (char *data) { return data [0] + data [1] + data [2]; } LIBTEST_API int STDCALL NameManglingAnsi2W (char *data) { g_assert_not_reached (); } LIBTEST_API int STDCALL NameManglingUnicode (char *data) { g_assert_not_reached (); } LIBTEST_API int STDCALL NameManglingUnicodeW (gunichar2 *data) { return data [0] + data [1] + data [2]; } LIBTEST_API int STDCALL NameManglingUnicode2 (gunichar2 *data) { return 
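#if 0
/* What the NameMangling* exports above exercise (sketch; helper name is made
 * up): A/W export-name decoration. For CharSet.Ansi the exact name is
 * resolved before the "A"-suffixed one, while for CharSet.Unicode the
 * "W"-suffixed name is preferred -- which is why NameManglingUnicode above
 * must never be reached. The probing itself reduces to: */
static gpointer
example_probe_symbol (GModule *mod, const char *first_choice, const char *fallback)
{
	gpointer addr = NULL;
	if (!g_module_symbol (mod, first_choice, &addr))
		g_module_symbol (mod, fallback, &addr);
	return addr;
}
#endif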
data [0] + data [1] + data [2]; } LIBTEST_API int STDCALL NameManglingAutoW (char *data) { #ifdef WIN32 return (data [0] + data [1] + data [2]) == 131 ? 0 : 1; #else g_assert_not_reached (); #endif } LIBTEST_API int STDCALL NameManglingAuto (char *data) { #ifndef WIN32 return (data [0] + data [1] + data [2]) == 198 ? 0 : 1; #else g_assert_not_reached (); #endif } typedef int (STDCALL *intcharFunc)(const char*); LIBTEST_API void STDCALL callFunction (intcharFunc f) { f ("ABC"); } typedef struct { const char* str; int i; } SimpleObj; LIBTEST_API int STDCALL class_marshal_test0 (SimpleObj *obj1) { // printf ("class_marshal_test0 %s %d\n", obj1->str, obj1->i); if (strcmp(obj1->str, "T1")) return -1; if (obj1->i != 4) return -2; return 0; } LIBTEST_API int STDCALL class_marshal_test4 (SimpleObj *obj1) { if (obj1) return -1; return 0; } LIBTEST_API void STDCALL class_marshal_test1 (SimpleObj **obj1) { SimpleObj *res = (SimpleObj *)malloc (sizeof (SimpleObj)); res->str = marshal_strdup ("ABC"); res->i = 5; *obj1 = res; } LIBTEST_API int STDCALL class_marshal_test2 (SimpleObj **obj1) { // printf ("class_marshal_test2 %s %d\n", (*obj1)->str, (*obj1)->i); if (strcmp((*obj1)->str, "ABC")) return -1; if ((*obj1)->i != 5) return -2; return 0; } LIBTEST_API int STDCALL string_marshal_test0 (char *str) { if (strcmp (str, "TEST0")) return -1; return 0; } LIBTEST_API void STDCALL string_marshal_test1 (const char **str) { *str = marshal_strdup ("TEST1"); } LIBTEST_API int STDCALL string_marshal_test2 (char **str) { // printf ("string_marshal_test2 %s\n", *str); if (strcmp (*str, "TEST1")) return -1; *str = marshal_strdup ("TEST2"); return 0; } LIBTEST_API int STDCALL string_marshal_test3 (char *str) { if (str) return -1; return 0; } typedef struct { int a; int b; } BlittableClass; LIBTEST_API BlittableClass* STDCALL TestBlittableClass (BlittableClass *vl) { BlittableClass *res; // printf ("TestBlittableClass %d %d\n", vl->a, vl->b); if (vl) { vl->a++; vl->b++; res = marshal_new0 (BlittableClass, 1); memcpy (res, vl, sizeof (BlittableClass)); } else { res = marshal_new0 (BlittableClass, 1); res->a = 42; res->b = 43; } return res; } typedef struct OSVERSIONINFO_STRUCT { int a; int b; } OSVERSIONINFO_STRUCT; LIBTEST_API int STDCALL MyGetVersionEx (OSVERSIONINFO_STRUCT *osvi) { // printf ("GOT %d %d\n", osvi->a, osvi->b); osvi->a += 1; osvi->b += 1; return osvi->a + osvi->b; } LIBTEST_API int STDCALL BugGetVersionEx (int a, int b, int c, int d, int e, int f, int g, int h, OSVERSIONINFO_STRUCT *osvi) { // printf ("GOT %d %d\n", osvi->a, osvi->b); osvi->a += 1; osvi->b += 1; return osvi->a + osvi->b; } LIBTEST_API int STDCALL mono_test_marshal_point (point pt) { // printf("point %g %g\n", pt.x, pt.y); if (pt.x == 1.25 && pt.y == 3.5) return 0; return 1; } typedef struct { int x; double y; } mixed_point; LIBTEST_API int STDCALL mono_test_marshal_mixed_point (mixed_point pt) { // printf("mixed point %d %g\n", pt.x, pt.y); if (pt.x == 5 && pt.y == 6.75) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_mixed_point_2 (mixed_point *pt) { if (pt->x != 5 || pt->y != 6.75) return 1; pt->x = 10; pt->y = 12.35; return 0; } LIBTEST_API int STDCALL marshal_test_ref_bool(int i, char *b1, short *b2, int *b3) { int res = 1; if (*b1 != 0 && *b1 != 1) return 1; if (*b2 != 0 && *b2 != -1) /* variant_bool */ return 1; if (*b3 != 0 && *b3 != 1) return 1; if (i == ((*b1 << 2) | (-*b2 << 1) | *b3)) res = 0; *b1 = !*b1; *b2 = ~*b2; *b3 = !*b3; return res; } struct BoolStruct { int i; char b1; short b2; /* variant_bool */ 
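#if 0
/* Three native boolean encodings are in play around here (sketch; name is
 * illustrative): a 1-byte flag (0/1), the 2-byte VARIANT_BOOL (0 or -1, all
 * bits set for true) and the 4-byte Win32-style BOOL (0/1). Normalizing any
 * of them: */
static int
example_native_bool_to_int (int width, const void *p)
{
	switch (width) {
	case 1: return *(const char *) p != 0;
	case 2: return *(const short *) p != 0;
	default: return *(const int *) p != 0;
	}
}
#endif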
int b3; }; LIBTEST_API int STDCALL marshal_test_bool_struct(struct BoolStruct *s) { int res = 1; if (s->b1 != 0 && s->b1 != 1) return 1; if (s->b2 != 0 && s->b2 != -1) return 1; if (s->b3 != 0 && s->b3 != 1) return 1; if (s->i == ((s->b1 << 2) | (-s->b2 << 1) | s->b3)) res = 0; s->b1 = !s->b1; s->b2 = ~s->b2; s->b3 = !s->b3; return res; } typedef struct { gint64 l; } LongStruct2; typedef struct { int i; LongStruct2 l; } LongStruct; LIBTEST_API int STDCALL mono_test_marshal_long_struct (LongStruct *s) { return s->i + s->l.l; } LIBTEST_API void STDCALL mono_test_last_error (int err) { #ifdef WIN32 SetLastError (err); /* * Make sure argument register used calling SetLastError * get's cleaned before returning back to caller. This is done to ensure * we don't get a undetected failure if error is preserved in register * on return since we read back value directly when doing p/invoke with SetLastError = true * into first argument register and then pass it to Mono function setting value in TLS. * If there is a codegen bug reading last error or the code has been incorrectly eliminated * this test could still succeed since expected error code could be left in argument register. * Below code just do something that shouldn't touch last error and won't be optimized away * but will change the argument registers to something different than err. */ char buffer[256] = { 0 }; char value[] = "Dummy"; strncpy (buffer, value, STRING_LENGTH (value)); #else mono_set_errno (err); #endif } LIBTEST_API int STDCALL mono_test_asany (void *ptr, int what) { switch (what) { case 1: return (*(int*)ptr == 5) ? 0 : 1; case 2: return strcmp ((const char*)ptr, "ABC") == 0 ? 0 : 1; case 3: { simplestruct2 ss = *(simplestruct2*)ptr; if (ss.a == 0 && ss.b == 1 && ss.c == 0 && !strcmp (ss.d, "TEST") && ss.e == 99 && ss.f == 1.5 && ss.g == 42 && ss.h == (guint64)123) return 0; else return 1; } case 4: { GError *gerror = NULL; char *s; s = g_utf16_to_utf8 ((const gunichar2 *)ptr, -1, NULL, NULL, &gerror); if (!s) return 1; if (!strcmp (s, "ABC")) { g_free (s); return 0; } else { g_free (s); return 1; } } case 5: { return (*(intptr_t*)ptr == 5) ? 0 : 1; } default: g_assert_not_reached (); } return 1; } typedef struct { int i; int j; int k; char *s; } AsAnyStruct; LIBTEST_API int STDCALL mono_test_marshal_asany_in (void* ptr) { AsAnyStruct *asAny = (AsAnyStruct *)ptr; int res = asAny->i + asAny->j + asAny->k; return res; } LIBTEST_API int STDCALL mono_test_marshal_asany_inout (void* ptr) { AsAnyStruct *asAny = (AsAnyStruct *)ptr; int res = asAny->i + asAny->j + asAny->k; marshal_free (asAny->s); asAny->i = 10; asAny->j = 20; asAny->k = 30; asAny->s = 0; return res; } LIBTEST_API int STDCALL mono_test_marshal_asany_out (void* ptr) { AsAnyStruct *asAny = (AsAnyStruct *)ptr; int res = asAny->i + asAny->j + asAny->k; asAny->i = 10; asAny->j = 20; asAny->k = 30; asAny->s = 0; return res; } /* * AMD64 marshalling tests. 
*/ typedef struct amd64_struct1 { int i; int j; int k; int l; } amd64_struct1; LIBTEST_API amd64_struct1 STDCALL mono_test_marshal_amd64_pass_return_struct1 (amd64_struct1 s) { s.i ++; s.j ++; s.k ++; s.l ++; return s; } LIBTEST_API amd64_struct1 STDCALL mono_test_marshal_amd64_pass_return_struct1_many_args (amd64_struct1 s, int i1, int i2, int i3, int i4, int i5, int i6, int i7, int i8) { s.i ++; s.j ++; s.k ++; s.l += 1 + i1 + i2 + i3 + i4 + i5 + i6 + i7 + i8; return s; } typedef struct amd64_struct2 { int i; int j; } amd64_struct2; LIBTEST_API amd64_struct2 STDCALL mono_test_marshal_amd64_pass_return_struct2 (amd64_struct2 s) { s.i ++; s.j ++; return s; } typedef struct amd64_struct3 { int i; } amd64_struct3; LIBTEST_API amd64_struct3 STDCALL mono_test_marshal_amd64_pass_return_struct3 (amd64_struct3 s) { s.i ++; return s; } typedef struct amd64_struct4 { double d1, d2; } amd64_struct4; LIBTEST_API amd64_struct4 STDCALL mono_test_marshal_amd64_pass_return_struct4 (amd64_struct4 s) { s.d1 ++; s.d2 ++; return s; } /* * IA64 marshalling tests. */ typedef struct test_struct5 { float d1, d2; } test_struct5; LIBTEST_API test_struct5 STDCALL mono_test_marshal_ia64_pass_return_struct5 (double d1, double d2, test_struct5 s, int i, double d3, double d4) { s.d1 += d1 + d2 + i; s.d2 += d3 + d4 + i; return s; } typedef struct test_struct6 { double d1, d2; } test_struct6; LIBTEST_API test_struct6 STDCALL mono_test_marshal_ia64_pass_return_struct6 (double d1, double d2, test_struct6 s, int i, double d3, double d4) { s.d1 += d1 + d2 + i; s.d2 += d3 + d4; return s; } static guint32 custom_res [2]; LIBTEST_API void* STDCALL mono_test_marshal_pass_return_custom (int i, guint32 *ptr, int j) { /* ptr will be freed by CleanupNative, so make a copy */ custom_res [0] = 0; /* not allocated by AllocHGlobal */ custom_res [1] = ptr [1]; return &custom_res; } LIBTEST_API int STDCALL mono_test_marshal_pass_out_custom (int i, guint32 **ptr, int j) { custom_res [0] = 0; custom_res [1] = i + j + 10; *ptr = custom_res; return 0; } LIBTEST_API int STDCALL mono_test_marshal_pass_inout_custom (int i, guint32 *ptr, int j) { ptr [0] = 0; ptr [1] = i + ptr [1] + j; return 0; } LIBTEST_API int STDCALL mono_test_marshal_pass_out_byval_custom (int i, guint32 *ptr, int j) { return ptr == NULL ? 0 : 1; } LIBTEST_API int STDCALL mono_test_marshal_pass_byref_custom (int i, guint32 **ptr, int j) { (*ptr)[1] += i + j; return 0; } LIBTEST_API void* STDCALL mono_test_marshal_pass_return_custom2 (int i, guint32 *ptr, int j) { g_assert_not_reached (); return NULL; } LIBTEST_API void* STDCALL mono_test_marshal_pass_return_custom_null (int i, guint32 *ptr, int j) { g_assert (ptr == NULL); return NULL; } typedef void *(STDCALL *PassReturnPtrDelegate) (void *ptr); LIBTEST_API int STDCALL mono_test_marshal_pass_return_custom_in_delegate (PassReturnPtrDelegate del) { guint32 buf [2]; guint32 res; guint32 *ptr; buf [0] = 0; buf [1] = 10; ptr = (guint32 *)del (&buf); res = ptr [1]; #ifdef WIN32 /* FIXME: Freed with FreeHGlobal */ #else g_free (ptr); #endif return res; } LIBTEST_API int STDCALL mono_test_marshal_pass_return_custom_null_in_delegate (PassReturnPtrDelegate del) { void *ptr = del (NULL); return (ptr == NULL) ? 
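#if 0
/* Sketch of the two-slot blob used by the ICustomMarshaler tests above
 * (assumption: slot 0 is an ownership flag checked on the managed side --
 * the source comment "not allocated by AllocHGlobal" suggests CleanupNative
 * must not free such a buffer -- and slot 1 carries the payload): */
static void
example_make_custom_blob (guint32 *blob, guint32 payload)
{
	blob [0] = 0;       /* ownership flag, mirrors custom_res [0] above */
	blob [1] = payload; /* value the managed marshaler round-trips */
}
#endif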
15 : 0; } typedef void (STDCALL *CustomOutParamDelegate) (void **pptr); LIBTEST_API int STDCALL mono_test_marshal_custom_out_param_delegate (CustomOutParamDelegate del) { void* pptr = (void*)del; del (&pptr); if(pptr != NULL) return 1; return 0; } typedef int (STDCALL *ReturnEnumDelegate) (int e); LIBTEST_API int STDCALL mono_test_marshal_return_enum_delegate (ReturnEnumDelegate func) { return func (1); } typedef struct { int a, b, c; gint64 d; } BlittableStruct; typedef BlittableStruct (STDCALL *SimpleDelegate10) (BlittableStruct ss); LIBTEST_API int STDCALL mono_test_marshal_blittable_struct_delegate (SimpleDelegate10 delegate) { BlittableStruct ss, res; ss.a = 1; ss.b = 2; ss.c = 3; ss.d = 55; res = delegate (ss); if (! ((res.a == -1) && (res.b == -2) && (res.c == -3) && (res.d == -55))) return 1; return 0; } LIBTEST_API int STDCALL mono_test_stdcall_name_mangling (int a, int b, int c) { return a + b + c; } LIBTEST_API int mono_test_stdcall_mismatch_1 (int a, int b, int c) { return a + b + c; } LIBTEST_API int STDCALL mono_test_stdcall_mismatch_2 (int a, int b, int c) { return a + b + c; } /* * PASSING AND RETURNING SMALL STRUCTURES FROM DELEGATES TESTS */ typedef struct { int i; } SmallStruct1; typedef SmallStruct1 (STDCALL *SmallStructDelegate1) (SmallStruct1 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate1 (SmallStructDelegate1 delegate) { SmallStruct1 ss, res; ss.i = 1; res = delegate (ss); if (! (res.i == -1)) return 1; return 0; } typedef struct { gint16 i, j; } SmallStruct2; typedef SmallStruct2 (STDCALL *SmallStructDelegate2) (SmallStruct2 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate2 (SmallStructDelegate2 delegate) { SmallStruct2 ss, res; ss.i = 2; ss.j = 3; res = delegate (ss); if (! ((res.i == -2) && (res.j == -3))) return 1; return 0; } typedef struct { gint16 i; gint8 j; } SmallStruct3; typedef SmallStruct3 (STDCALL *SmallStructDelegate3) (SmallStruct3 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate3 (SmallStructDelegate3 delegate) { SmallStruct3 ss, res; ss.i = 1; ss.j = 2; res = delegate (ss); if (! ((res.i == -1) && (res.j == -2))) return 1; return 0; } typedef struct { gint16 i; } SmallStruct4; typedef SmallStruct4 (STDCALL *SmallStructDelegate4) (SmallStruct4 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate4 (SmallStructDelegate4 delegate) { SmallStruct4 ss, res; ss.i = 1; res = delegate (ss); if (! (res.i == -1)) return 1; return 0; } typedef struct { gint64 i; } SmallStruct5; typedef SmallStruct5 (STDCALL *SmallStructDelegate5) (SmallStruct5 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate5 (SmallStructDelegate5 delegate) { SmallStruct5 ss, res; ss.i = 5; res = delegate (ss); if (! (res.i == -5)) return 1; return 0; } typedef struct { int i, j; } SmallStruct6; typedef SmallStruct6 (STDCALL *SmallStructDelegate6) (SmallStruct6 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate6 (SmallStructDelegate6 delegate) { SmallStruct6 ss, res; ss.i = 1; ss.j = 2; res = delegate (ss); if (! ((res.i == -1) && (res.j == -2))) return 1; return 0; } typedef struct { int i; gint16 j; } SmallStruct7; typedef SmallStruct7 (STDCALL *SmallStructDelegate7) (SmallStruct7 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate7 (SmallStructDelegate7 delegate) { SmallStruct7 ss, res; ss.i = 1; ss.j = 2; res = delegate (ss); if (! 
((res.i == -1) && (res.j == -2))) return 1; return 0; } typedef struct { float i; } SmallStruct8; typedef SmallStruct8 (STDCALL *SmallStructDelegate8) (SmallStruct8 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate8 (SmallStructDelegate8 delegate) { SmallStruct8 ss, res; ss.i = 1.0; res = delegate (ss); if (! ((res.i == -1.0))) return 1; return 0; } typedef struct { double i; } SmallStruct9; typedef SmallStruct9 (STDCALL *SmallStructDelegate9) (SmallStruct9 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate9 (SmallStructDelegate9 delegate) { SmallStruct9 ss, res; ss.i = 1.0; res = delegate (ss); if (! ((res.i == -1.0))) return 1; return 0; } typedef struct { float i, j; } SmallStruct10; typedef SmallStruct10 (STDCALL *SmallStructDelegate10) (SmallStruct10 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate10 (SmallStructDelegate10 delegate) { SmallStruct10 ss, res; ss.i = 1.0; ss.j = 2.0; res = delegate (ss); if (! ((res.i == -1.0) && (res.j == -2.0))) return 1; return 0; } typedef struct { float i; int j; } SmallStruct11; typedef SmallStruct11 (STDCALL *SmallStructDelegate11) (SmallStruct11 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate11 (SmallStructDelegate11 delegate) { SmallStruct11 ss, res; ss.i = 1.0; ss.j = 2; res = delegate (ss); if (! ((res.i == -1.0) && (res.j == -2))) return 1; return 0; } typedef int (STDCALL *ArrayDelegate) (int i, char *j, void *arr); LIBTEST_API int STDCALL mono_test_marshal_array_delegate (void *arr, int len, ArrayDelegate del) { return del (len, NULL, arr); } typedef int (STDCALL *ArrayDelegateLong) (gint64 i, char *j, void *arr); LIBTEST_API int STDCALL mono_test_marshal_array_delegate_long (void *arr, gint64 len, ArrayDelegateLong del) { return del (len, NULL, arr); } LIBTEST_API int STDCALL mono_test_marshal_out_array_delegate (int *arr, int len, ArrayDelegate del) { del (len, NULL, arr); if ((arr [0] != 1) || (arr [1] != 2)) return 1; else return 0; } typedef gunichar2* (STDCALL *UnicodeStringDelegate) (gunichar2 *message); LIBTEST_API int STDCALL mono_test_marshal_return_unicode_string_delegate (UnicodeStringDelegate del) { const char m[] = "abcdef"; gunichar2 *s2, *res; glong len; s2 = g_utf8_to_utf16 (m, -1, NULL, &len, NULL); res = del (s2); marshal_free (res); return 0; } LIBTEST_API int STDCALL mono_test_marshal_out_string_array_delegate (char **arr, int len, ArrayDelegate del) { del (len, NULL, arr); if (!strcmp (arr [0], "ABC") && !strcmp (arr [1], "DEF")) return 0; else return 1; } typedef int (*CdeclDelegate) (int i, int j); LIBTEST_API int STDCALL mono_test_marshal_cdecl_delegate (CdeclDelegate del) { int i; for (i = 0; i < 1000; ++i) del (1, 2); return 0; } typedef char** (STDCALL *ReturnStringArrayDelegate) (int i); LIBTEST_API int STDCALL mono_test_marshal_return_string_array_delegate (ReturnStringArrayDelegate d) { char **arr = d (2); int res; if (arr == NULL) return 3; if (strcmp (arr [0], "ABC") || strcmp (arr [1], "DEF")) res = 1; else res = 0; marshal_free (arr); return res; } typedef int (STDCALL *ByrefStringDelegate) (char **s); LIBTEST_API int STDCALL mono_test_marshal_byref_string_delegate (ByrefStringDelegate d) { char *s = (char*)"ABC"; int res; res = d (&s); if (res != 0) return res; if (!strcmp (s, "DEF")) res = 0; else res = 2; marshal_free (s); return res; } LIBTEST_API int STDCALL add_delegate (int i, int j) { return i + j; } LIBTEST_API gpointer STDCALL mono_test_marshal_return_fnptr (void) { return (gpointer)&add_delegate; } LIBTEST_API int 
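#if 0
/* ArrayDelegate above receives (length, unused string, opaque array); the
 * element type is whatever the managed test declared, so the native side
 * only relays the pointer. A native stand-in summing an int payload
 * (illustrative): */
static int STDCALL
example_sum_int_array (int len, char *unused, void *arr)
{
	int i, sum = 0;
	for (i = 0; i < len; i++)
		sum += ((int *) arr) [i];
	return sum;
}
#endif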
STDCALL mono_xr (int code) { printf ("codigo %x\n", code); return code + 1234; } typedef struct { int handle; } HandleRef; LIBTEST_API HandleRef STDCALL mono_xr_as_handle (int code) { HandleRef ref; memset (&ref, 0, sizeof (ref)); return ref; } typedef struct { int a; void *handle1; void *handle2; int b; } HandleStructs; LIBTEST_API int STDCALL mono_safe_handle_struct_ref (HandleStructs *x) { printf ("Dingus Ref! \n"); printf ("Values: %d %d %p %p\n", x->a, x->b, x->handle1, x->handle2); if (x->a != 1234) return 1; if (x->b != 8743) return 2; if (x->handle1 != (void*) 0x7080feed) return 3; if (x->handle2 != (void*) 0x1234abcd) return 4; return 0xf00d; } LIBTEST_API int STDCALL mono_safe_handle_struct (HandleStructs x) { printf ("Dingus Standard! \n"); printf ("Values: %d %d %p %p\n", x.a, x.b, x.handle1, x.handle2); if (x.a != 1234) return 1; if (x.b != 8743) return 2; if (x.handle1 != (void*) 0x7080feed) return 3; if (x.handle2 != (void*) 0x1234abcd) return 4; return 0xf00f; } typedef struct { void *a; } TrivialHandle; LIBTEST_API int STDCALL mono_safe_handle_struct_simple (TrivialHandle x) { printf ("The value is %p\n", x.a); return ((int)(gsize)x.a) * 2; } LIBTEST_API int STDCALL mono_safe_handle_return (void) { return 0x1000f00d; } LIBTEST_API void STDCALL mono_safe_handle_ref (void **handle) { if (*handle != 0){ *handle = (void *) 0x800d; return; } *handle = (void *) 0xbad; } LIBTEST_API void* STDCALL mono_safe_handle_ref_nomod (void **handle) { return *handle; } LIBTEST_API double STDCALL mono_test_marshal_date_time (double d, double *d2) { *d2 = d; return d; } /* * COM INTEROP TESTS */ #ifndef WIN32 typedef struct { guint32 a; guint16 b; guint16 c; guint8 d[8]; } GUID; typedef const GUID *REFIID; typedef struct IDispatch IDispatch; typedef struct { int (STDCALL *QueryInterface)(IDispatch *iface, REFIID iid, gpointer *out); int (STDCALL *AddRef)(IDispatch *iface); int (STDCALL *Release)(IDispatch *iface); int (STDCALL *GetTypeInfoCount)(IDispatch *iface, unsigned int *count); int (STDCALL *GetTypeInfo)(IDispatch *iface, unsigned int index, unsigned int lcid, gpointer *out); int (STDCALL *GetIDsOfNames)(IDispatch *iface, REFIID iid, gpointer names, unsigned int count, unsigned int lcid, gpointer ids); int (STDCALL *Invoke)(IDispatch *iface, unsigned int dispid, REFIID iid, unsigned int lcid, unsigned short flags, gpointer params, gpointer result, gpointer excepinfo, gpointer err_arg); } IDispatchVtbl; struct IDispatch { const IDispatchVtbl *lpVtbl; }; typedef struct { guint16 vt; guint16 wReserved1; guint16 wReserved2; guint16 wReserved3; union { gint64 llVal; gint32 lVal; guint8 bVal; gint16 iVal; float fltVal; double dblVal; gint16 boolVal; gunichar2* bstrVal; gint8 cVal; guint16 uiVal; guint32 ulVal; guint64 ullVal; gpointer byref; struct { gpointer pvRecord; gpointer pRecInfo; }; }; } VARIANT; typedef enum { VARIANT_TRUE = -1, VARIANT_FALSE = 0 } VariantBool; typedef enum { VT_EMPTY = 0, VT_NULL = 1, VT_I2 = 2, VT_I4 = 3, VT_R4 = 4, VT_R8 = 5, VT_CY = 6, VT_DATE = 7, VT_BSTR = 8, VT_DISPATCH = 9, VT_ERROR = 10, VT_BOOL = 11, VT_VARIANT = 12, VT_UNKNOWN = 13, VT_DECIMAL = 14, VT_I1 = 16, VT_UI1 = 17, VT_UI2 = 18, VT_UI4 = 19, VT_I8 = 20, VT_UI8 = 21, VT_INT = 22, VT_UINT = 23, VT_VOID = 24, VT_HRESULT = 25, VT_PTR = 26, VT_SAFEARRAY = 27, VT_CARRAY = 28, VT_USERDEFINED = 29, VT_LPSTR = 30, VT_LPWSTR = 31, VT_RECORD = 36, VT_FILETIME = 64, VT_BLOB = 65, VT_STREAM = 66, VT_STORAGE = 67, VT_STREAMED_OBJECT = 68, VT_STORED_OBJECT = 69, VT_BLOB_OBJECT = 70, VT_CF = 71, VT_CLSID = 72, 
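#if 0
/* Reading a VARIANT as defined above (sketch; name is illustrative): the
 * 16-bit 'vt' tag selects the active union member, and VT_BYREF OR-ed into
 * the tag redirects through the 'byref' pointer instead: */
static gint32
example_variant_get_i4 (const VARIANT *v)
{
	if (v->vt == (VT_I4 | VT_BYREF))
		return *(const gint32 *) v->byref;
	g_assert (v->vt == VT_I4);
	return v->lVal;
}
#endif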
VT_VECTOR = 4096, VT_ARRAY = 8192, VT_BYREF = 16384 } VarEnum; void VariantInit(VARIANT* vt) { vt->vt = VT_EMPTY; } #define S_OK 0 #endif LIBTEST_API int STDCALL mono_test_marshal_bstr_in(gunichar2* bstr) { gint32 result = 0; gchar* bstr_utf8 = g_utf16_to_utf8 (bstr, -1, NULL, NULL, NULL); result = strcmp("mono_test_marshal_bstr_in", bstr_utf8); g_free(bstr_utf8); if (result == 0) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_bstr_out(gunichar2** bstr) { *bstr = marshal_bstr_alloc ("mono_test_marshal_bstr_out"); return 0; } LIBTEST_API int STDCALL mono_test_marshal_bstr_in_null(gunichar2* bstr) { if (!bstr) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_bstr_out_null(gunichar2** bstr) { *bstr = NULL; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_sbyte(VARIANT variant) { if (variant.vt == VT_I1 && variant.cVal == 100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_byte(VARIANT variant) { if (variant.vt == VT_UI1 && variant.bVal == 100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_short(VARIANT variant) { if (variant.vt == VT_I2 && variant.iVal == 314) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_ushort(VARIANT variant) { if (variant.vt == VT_UI2 && variant.uiVal == 314) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_int(VARIANT variant) { if (variant.vt == VT_I4 && variant.lVal == 314) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_uint(VARIANT variant) { if (variant.vt == VT_UI4 && variant.ulVal == 314) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_long(VARIANT variant) { if (variant.vt == VT_I8 && variant.llVal == 314) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_ulong(VARIANT variant) { if (variant.vt == VT_UI8 && variant.ullVal == 314) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_float(VARIANT variant) { if (variant.vt == VT_R4 && (variant.fltVal - 3.14)/3.14 < .001) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_double(VARIANT variant) { if (variant.vt == VT_R8 && (variant.dblVal - 3.14)/3.14 < .001) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_bstr(VARIANT variant) { gint32 result = 0; gchar* bstr_utf8 = g_utf16_to_utf8 (variant.bstrVal, -1, NULL, NULL, NULL); result = strcmp("PI", bstr_utf8); g_free(bstr_utf8); if (variant.vt == VT_BSTR && !result) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_bool_true (VARIANT variant) { if (variant.vt == VT_BOOL && variant.boolVal == VARIANT_TRUE) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_bool_false (VARIANT variant) { if (variant.vt == VT_BOOL && variant.boolVal == VARIANT_FALSE) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_sbyte(VARIANT* variant) { variant->vt = VT_I1; variant->cVal = 100; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_sbyte_byref(VARIANT* variant) { variant->vt = VT_I1|VT_BYREF; variant->byref = marshal_alloc(1); *((gint8*)variant->byref) = 100; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_byte(VARIANT* variant) { variant->vt = VT_UI1; variant->bVal = 100; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_byte_byref(VARIANT* variant) { variant->vt = VT_UI1|VT_BYREF; variant->byref = marshal_alloc(1); *((gint8*)variant->byref) = 100; return 0; 
} LIBTEST_API int STDCALL mono_test_marshal_variant_out_short(VARIANT* variant) { variant->vt = VT_I2; variant->iVal = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_short_byref(VARIANT* variant) { variant->vt = VT_I2|VT_BYREF; variant->byref = marshal_alloc(2); *((gint16*)variant->byref) = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_ushort(VARIANT* variant) { variant->vt = VT_UI2; variant->uiVal = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_ushort_byref(VARIANT* variant) { variant->vt = VT_UI2|VT_BYREF; variant->byref = marshal_alloc(2); *((guint16*)variant->byref) = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_int(VARIANT* variant) { variant->vt = VT_I4; variant->lVal = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_int_byref(VARIANT* variant) { variant->vt = VT_I4|VT_BYREF; variant->byref = marshal_alloc(4); *((gint32*)variant->byref) = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_uint(VARIANT* variant) { variant->vt = VT_UI4; variant->ulVal = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_uint_byref(VARIANT* variant) { variant->vt = VT_UI4|VT_BYREF; variant->byref = marshal_alloc(4); *((guint32*)variant->byref) = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_long(VARIANT* variant) { variant->vt = VT_I8; variant->llVal = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_long_byref(VARIANT* variant) { variant->vt = VT_I8|VT_BYREF; variant->byref = marshal_alloc(8); *((gint64*)variant->byref) = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_ulong(VARIANT* variant) { variant->vt = VT_UI8; variant->ullVal = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_ulong_byref(VARIANT* variant) { variant->vt = VT_UI8|VT_BYREF; variant->byref = marshal_alloc(8); *((guint64*)variant->byref) = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_float(VARIANT* variant) { variant->vt = VT_R4; variant->fltVal = 3.14; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_float_byref(VARIANT* variant) { variant->vt = VT_R4|VT_BYREF; variant->byref = marshal_alloc(4); *((float*)variant->byref) = 3.14; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_double(VARIANT* variant) { variant->vt = VT_R8; variant->dblVal = 3.14; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_double_byref(VARIANT* variant) { variant->vt = VT_R8|VT_BYREF; variant->byref = marshal_alloc(8); *((double*)variant->byref) = 3.14; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_bstr(VARIANT* variant) { variant->vt = VT_BSTR; variant->bstrVal = marshal_bstr_alloc("PI"); return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_bstr_byref(VARIANT* variant) { variant->vt = VT_BSTR|VT_BYREF; variant->byref = marshal_alloc(sizeof(gpointer)); *((gunichar**)variant->byref) = (gunichar*)marshal_bstr_alloc("PI"); return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_bool_true (VARIANT* variant) { variant->vt = VT_BOOL; variant->boolVal = VARIANT_TRUE; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_bool_true_byref (VARIANT* variant) { variant->vt = VT_BOOL|VT_BYREF; variant->byref = marshal_alloc(2); *((gint16*)variant->byref) = VARIANT_TRUE; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_bool_false (VARIANT* variant) { variant->vt = VT_BOOL; 
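#if 0
/* The *_byref writers above all follow one pattern (sketch; name is
 * illustrative): allocate the payload with marshal_alloc () so the interop
 * layer can free it, point 'byref' at it, and OR VT_BYREF into the tag: */
static void
example_variant_set_byref_i4 (VARIANT *v, gint32 value)
{
	v->vt = VT_I4 | VT_BYREF;
	v->byref = marshal_alloc (sizeof (gint32));
	*(gint32 *) v->byref = value;
}
#endif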
variant->boolVal = VARIANT_FALSE; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_bool_false_byref (VARIANT* variant) { variant->vt = VT_BOOL|VT_BYREF; variant->byref = marshal_alloc(2); *((gint16*)variant->byref) = VARIANT_FALSE; return 0; } typedef int (STDCALL *VarFunc) (int vt, VARIANT variant); typedef int (STDCALL *VarRefFunc) (int vt, VARIANT* variant); LIBTEST_API int STDCALL mono_test_marshal_variant_in_sbyte_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_I1; vt.cVal = -100; return func (VT_I1, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_byte_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_UI1; vt.bVal = 100; return func (VT_UI1, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_short_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_I2; vt.iVal = -100; return func (VT_I2, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_ushort_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_UI2; vt.uiVal = 100; return func (VT_UI2, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_int_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_I4; vt.lVal = -100; return func (VT_I4, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_uint_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_UI4; vt.ulVal = 100; return func (VT_UI4, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_long_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_I8; vt.llVal = -100; return func (VT_I8, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_ulong_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_UI8; vt.ullVal = 100; return func (VT_UI8, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_float_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_R4; vt.fltVal = 3.14; return func (VT_R4, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_double_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_R8; vt.dblVal = 3.14; return func (VT_R8, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_bstr_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_BSTR; vt.bstrVal = marshal_bstr_alloc("PI"); return func (VT_BSTR, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_bool_true_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_BOOL; vt.boolVal = VARIANT_TRUE; return func (VT_BOOL, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_bool_false_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_BOOL; vt.boolVal = VARIANT_FALSE; return func (VT_BOOL, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_out_sbyte_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_I1, &vt); if (vt.vt == VT_I1 && vt.cVal == -100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_byte_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_UI1, &vt); if (vt.vt == VT_UI1 && vt.bVal == 100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_short_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_I2, &vt); if (vt.vt == VT_I2 && vt.iVal == -100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_ushort_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_UI2, &vt); if (vt.vt == VT_UI2 && vt.uiVal == 100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_int_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_I4, &vt); if (vt.vt == VT_I4 && vt.lVal == -100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_uint_unmanaged(VarRefFunc 
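#if 0
/* Shape of the *_out_*_unmanaged checks that follow (sketch): hand a
 * VariantInit ()-ed VARIANT to the managed callback, then validate both the
 * tag and the payload. Note that VariantInit () here only resets the tag to
 * VT_EMPTY; it does not zero the union. */
static int
example_check_out_variant_tag (VarRefFunc func, int expected_vt)
{
	VARIANT vt;
	VariantInit (&vt);
	func (expected_vt, &vt);
	return vt.vt == expected_vt ? 0 : 1;
}
#endif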
func) { VARIANT vt; VariantInit (&vt); func (VT_UI4, &vt); if (vt.vt == VT_UI4 && vt.ulVal == 100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_long_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_I8, &vt); if (vt.vt == VT_I8 && vt.llVal == -100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_ulong_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_UI8, &vt); if (vt.vt == VT_UI8 && vt.ullVal == 100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_float_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_R4, &vt); if (vt.vt == VT_R4 && fabs (vt.fltVal - 3.14f) < 1e-10) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_double_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_R8, &vt); if (vt.vt == VT_R8 && fabs (vt.dblVal - 3.14) < 1e-10) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_bstr_unmanaged(VarRefFunc func) { VARIANT vt; gchar* bstr_utf8; gint32 result = 0; VariantInit (&vt); func (VT_BSTR, &vt); bstr_utf8 = g_utf16_to_utf8 (vt.bstrVal, -1, NULL, NULL, NULL); result = strcmp("PI", bstr_utf8); g_free(bstr_utf8); if (vt.vt == VT_BSTR && !result) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_bool_true_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_BOOL, &vt); if (vt.vt == VT_BOOL && vt.boolVal == VARIANT_TRUE) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_bool_false_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_BOOL, &vt); if (vt.vt == VT_BOOL && vt.boolVal == VARIANT_FALSE) return 0; return 1; } typedef struct _StructWithVariant { VARIANT data; } StructWithVariant; typedef int (STDCALL *CheckStructWithVariantFunc) (StructWithVariant sv); LIBTEST_API int STDCALL mono_test_marshal_struct_with_variant_in_unmanaged(CheckStructWithVariantFunc func) { StructWithVariant sv; sv.data.vt = VT_I4; sv.data.lVal = -123; return func(sv); } LIBTEST_API int STDCALL mono_test_marshal_struct_with_variant_out_unmanaged (StructWithVariant sv) { if (sv.data.vt != VT_I4) return 1; if (sv.data.lVal != -123) return 2; return 0; } typedef struct _StructWithBstr { gunichar2* data; } StructWithBstr; typedef int (STDCALL *CheckStructWithBstrFunc) (StructWithBstr sb); LIBTEST_API int STDCALL mono_test_marshal_struct_with_bstr_in_unmanaged(CheckStructWithBstrFunc func) { StructWithBstr sb; sb.data = marshal_bstr_alloc("this is a test string"); return func(sb); } LIBTEST_API int STDCALL mono_test_marshal_struct_with_bstr_out_unmanaged (StructWithBstr sb) { char *s = g_utf16_to_utf8 (sb.data, g_utf16_len (sb.data), NULL, NULL, NULL); gboolean same = !strcmp (s, "this is a test string"); g_free (s); if (!same) return 1; return 0; } typedef struct MonoComObject MonoComObject; typedef struct MonoDefItfObject MonoDefItfObject; typedef struct { int (STDCALL *QueryInterface)(MonoDefItfObject* pUnk, gpointer riid, gpointer* ppv); int (STDCALL *AddRef)(MonoDefItfObject* pUnk); int (STDCALL *Release)(MonoDefItfObject* pUnk); int (STDCALL *Method)(MonoDefItfObject* pUnk, int *value); } MonoDefItf; typedef struct { int (STDCALL *QueryInterface)(MonoComObject* pUnk, gpointer riid, gpointer* ppv); int (STDCALL *AddRef)(MonoComObject* pUnk); int (STDCALL *Release)(MonoComObject* pUnk); int (STDCALL *get_ITest)(MonoComObject* pUnk, MonoComObject* *ppUnk); int (STDCALL *SByteIn)(MonoComObject* pUnk, char a); int
(STDCALL *ByteIn)(MonoComObject* pUnk, unsigned char a); int (STDCALL *ShortIn)(MonoComObject* pUnk, short a); int (STDCALL *UShortIn)(MonoComObject* pUnk, unsigned short a); int (STDCALL *IntIn)(MonoComObject* pUnk, int a); int (STDCALL *UIntIn)(MonoComObject* pUnk, unsigned int a); int (STDCALL *LongIn)(MonoComObject* pUnk, gint64 a); int (STDCALL *ULongIn)(MonoComObject* pUnk, guint64 a); int (STDCALL *FloatIn)(MonoComObject* pUnk, float a); int (STDCALL *DoubleIn)(MonoComObject* pUnk, double a); int (STDCALL *ITestIn)(MonoComObject* pUnk, MonoComObject* pUnk2); int (STDCALL *ITestOut)(MonoComObject* pUnk, MonoComObject* *ppUnk); int (STDCALL *Return22NoICall)(MonoComObject* pUnk); int (STDCALL *IntOut)(MonoComObject* pUnk, int *a); int (STDCALL *ArrayIn)(MonoComObject* pUnk, void *array); int (STDCALL *ArrayIn2)(MonoComObject* pUnk, void *array); int (STDCALL *ArrayIn3)(MonoComObject* pUnk, void *array); int (STDCALL *ArrayOut)(MonoComObject* pUnk, guint32 *array, guint32 *result); int (STDCALL *GetDefInterface1)(MonoComObject* pUnk, MonoDefItfObject **iface); int (STDCALL *GetDefInterface2)(MonoComObject* pUnk, MonoDefItfObject **iface); } MonoIUnknown; struct MonoComObject { MonoIUnknown* vtbl; int m_ref; }; struct MonoDefItfObject { MonoDefItf* vtbl; }; static GUID IID_ITest = {0, 0, 0, {0,0,0,0,0,0,0,1}}; static GUID IID_IMonoUnknown = {0, 0, 0, {0xc0,0,0,0,0,0,0,0x46}}; static GUID IID_IMonoDispatch = {0x00020400, 0, 0, {0xc0,0,0,0,0,0,0,0x46}}; static GUID IID_INotImplemented = {0x12345678, 0, 0, {0x9a, 0xbc, 0xde, 0xf0, 0, 0, 0, 0}}; LIBTEST_API int STDCALL MonoQueryInterface(MonoComObject* pUnk, gpointer riid, gpointer* ppv) { *ppv = NULL; if (!memcmp(riid, &IID_IMonoUnknown, sizeof(GUID))) { *ppv = pUnk; return S_OK; } else if (!memcmp(riid, &IID_ITest, sizeof(GUID))) { *ppv = pUnk; return S_OK; } else if (!memcmp(riid, &IID_IMonoDispatch, sizeof(GUID))) { *ppv = pUnk; return S_OK; } return 0x80004002; //E_NOINTERFACE; } LIBTEST_API int STDCALL MonoAddRef(MonoComObject* pUnk) { return ++(pUnk->m_ref); } LIBTEST_API int STDCALL MonoRelease(MonoComObject* pUnk) { return --(pUnk->m_ref); } LIBTEST_API int STDCALL SByteIn(MonoComObject* pUnk, char a) { return S_OK; } LIBTEST_API int STDCALL ByteIn(MonoComObject* pUnk, unsigned char a) { return S_OK; } LIBTEST_API int STDCALL ShortIn(MonoComObject* pUnk, short a) { return S_OK; } LIBTEST_API int STDCALL UShortIn(MonoComObject* pUnk, unsigned short a) { return S_OK; } LIBTEST_API int STDCALL IntIn(MonoComObject* pUnk, int a) { return S_OK; } LIBTEST_API int STDCALL UIntIn(MonoComObject* pUnk, unsigned int a) { return S_OK; } LIBTEST_API int STDCALL LongIn(MonoComObject* pUnk, gint64 a) { return S_OK; } LIBTEST_API int STDCALL ULongIn(MonoComObject* pUnk, guint64 a) { return S_OK; } LIBTEST_API int STDCALL FloatIn(MonoComObject* pUnk, float a) { return S_OK; } LIBTEST_API int STDCALL DoubleIn(MonoComObject* pUnk, double a) { return S_OK; } LIBTEST_API int STDCALL ITestIn(MonoComObject* pUnk, MonoComObject *pUnk2) { return S_OK; } LIBTEST_API int STDCALL ITestOut(MonoComObject* pUnk, MonoComObject* *ppUnk) { return S_OK; } LIBTEST_API int STDCALL Return22NoICall(MonoComObject* pUnk) { return 22; } LIBTEST_API int STDCALL IntOut(MonoComObject* pUnk, int *a) { return S_OK; } LIBTEST_API int STDCALL ArrayIn(MonoComObject* pUnk, void *array) { return S_OK; } LIBTEST_API int STDCALL ArrayIn2(MonoComObject* pUnk, void *array) { return S_OK; } LIBTEST_API int STDCALL ArrayIn3(MonoComObject* pUnk, void *array) { return S_OK; } LIBTEST_API int 
STDCALL ArrayOut(MonoComObject* pUnk, guint32 *array, guint32 *result) { return S_OK; } LIBTEST_API int STDCALL GetDefInterface1(MonoComObject* pUnk, MonoDefItfObject **obj) { return S_OK; } LIBTEST_API int STDCALL GetDefInterface2(MonoComObject* pUnk, MonoDefItfObject **obj) { return S_OK; } static void create_com_object (MonoComObject** pOut); LIBTEST_API int STDCALL get_ITest(MonoComObject* pUnk, MonoComObject* *ppUnk) { create_com_object (ppUnk); return S_OK; } static void create_com_object (MonoComObject** pOut) { *pOut = marshal_new0 (MonoComObject, 1); (*pOut)->vtbl = marshal_new0 (MonoIUnknown, 1); (*pOut)->m_ref = 1; (*pOut)->vtbl->QueryInterface = MonoQueryInterface; (*pOut)->vtbl->AddRef = MonoAddRef; (*pOut)->vtbl->Release = MonoRelease; (*pOut)->vtbl->SByteIn = SByteIn; (*pOut)->vtbl->ByteIn = ByteIn; (*pOut)->vtbl->ShortIn = ShortIn; (*pOut)->vtbl->UShortIn = UShortIn; (*pOut)->vtbl->IntIn = IntIn; (*pOut)->vtbl->UIntIn = UIntIn; (*pOut)->vtbl->LongIn = LongIn; (*pOut)->vtbl->ULongIn = ULongIn; (*pOut)->vtbl->FloatIn = FloatIn; (*pOut)->vtbl->DoubleIn = DoubleIn; (*pOut)->vtbl->ITestIn = ITestIn; (*pOut)->vtbl->ITestOut = ITestOut; (*pOut)->vtbl->get_ITest = get_ITest; (*pOut)->vtbl->Return22NoICall = Return22NoICall; (*pOut)->vtbl->IntOut = IntOut; (*pOut)->vtbl->ArrayIn = ArrayIn; (*pOut)->vtbl->ArrayIn2 = ArrayIn2; (*pOut)->vtbl->ArrayIn3 = ArrayIn3; (*pOut)->vtbl->ArrayOut = ArrayOut; (*pOut)->vtbl->GetDefInterface1 = GetDefInterface1; (*pOut)->vtbl->GetDefInterface2 = GetDefInterface2; } static MonoComObject* same_object = NULL; LIBTEST_API int STDCALL mono_test_marshal_com_object_create(MonoComObject* *pUnk) { create_com_object (pUnk); if (!same_object) same_object = *pUnk; return 0; } LIBTEST_API int STDCALL mono_test_marshal_com_object_same(MonoComObject* *pUnk) { *pUnk = same_object; return 0; } LIBTEST_API int STDCALL mono_test_marshal_com_object_destroy(MonoComObject *pUnk) { int ref = --(pUnk->m_ref); g_free(pUnk->vtbl); g_free(pUnk); return ref; } LIBTEST_API int STDCALL mono_test_marshal_com_object_ref_count(MonoComObject *pUnk) { return pUnk->m_ref; } LIBTEST_API int STDCALL mono_test_marshal_ccw_itest (MonoComObject *pUnk) { int hr = 0; MonoComObject* pTest; if (!pUnk) return 1; hr = pUnk->vtbl->SByteIn (pUnk, -100); if (hr != 0) return 2; hr = pUnk->vtbl->ByteIn (pUnk, 100); if (hr != 0) return 3; hr = pUnk->vtbl->ShortIn (pUnk, -100); if (hr != 0) return 4; hr = pUnk->vtbl->UShortIn (pUnk, 100); if (hr != 0) return 5; hr = pUnk->vtbl->IntIn (pUnk, -100); if (hr != 0) return 6; hr = pUnk->vtbl->UIntIn (pUnk, 100); if (hr != 0) return 7; hr = pUnk->vtbl->LongIn (pUnk, -100); if (hr != 0) return 8; hr = pUnk->vtbl->ULongIn (pUnk, 100); if (hr != 0) return 9; hr = pUnk->vtbl->FloatIn (pUnk, 3.14f); if (hr != 0) return 10; hr = pUnk->vtbl->DoubleIn (pUnk, 3.14); if (hr != 0) return 11; hr = pUnk->vtbl->ITestIn (pUnk, pUnk); if (hr != 0) return 12; hr = pUnk->vtbl->ITestOut (pUnk, &pTest); if (hr != 0) return 13; return 0; } // Xamarin-47560 LIBTEST_API int STDCALL mono_test_marshal_array_ccw_itest (int count, MonoComObject ** ppUnk) { int hr = 0; if (!ppUnk) return 1; if (count < 1) return 2; if (!ppUnk[0]) return 3; hr = ppUnk[0]->vtbl->SByteIn (ppUnk[0], -100); if (hr != 0) return 4; return 0; } LIBTEST_API int STDCALL mono_test_marshal_retval_ccw_itest (MonoComObject *pUnk, int test_null) { int hr = 0, i = 0; if (!pUnk) return 1; hr = pUnk->vtbl->IntOut (pUnk, &i); if (hr != 0) return 2; if (i != 33) return 3; if (test_null) { hr = pUnk->vtbl->IntOut (pUnk, 
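/* when test_null is set, the second IntOut call below deliberately passes NULL as the [out] parameter to exercise the CCW's handling of a null out pointer */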
NULL); if (hr != 0) return 4; } return 0; } LIBTEST_API int STDCALL mono_test_default_interface_ccw (MonoComObject *pUnk) { MonoDefItfObject *obj; int ret, value; ret = pUnk->vtbl->GetDefInterface1(pUnk, &obj); if (ret) return 1; value = 0; ret = obj->vtbl->Method(obj, &value); obj->vtbl->Release(obj); if (ret) return 2; if (value != 1) return 3; ret = pUnk->vtbl->GetDefInterface2(pUnk, &obj); if (ret) return 4; ret = obj->vtbl->Method(obj, &value); obj->vtbl->Release(obj); if (ret) return 5; if (value != 2) return 6; return 0; } /* * mono_method_get_unmanaged_thunk tests */ #if defined(__GNUC__) && ((defined(__i386__) && (defined(__linux__) || defined (__APPLE__)) || defined (__FreeBSD__) || defined(__OpenBSD__)) || (defined(__ppc__) && defined(__APPLE__))) #define ALIGN(size) __attribute__ ((__aligned__(size))) #else #define ALIGN(size) #endif /* thunks.cs:TestStruct */ typedef struct _TestStruct { int A; double B; } TestStruct; /* Searches for mono symbols in all loaded modules */ static gpointer lookup_mono_symbol (const char *symbol_name) { gpointer symbol = NULL; GModule *mod = g_module_open (NULL, G_MODULE_BIND_LAZY); g_assert (mod != NULL); const gboolean success = g_module_symbol (mod, symbol_name, &symbol); g_assertf (success, "%s", symbol_name); return success ? symbol : NULL; } LIBTEST_API gpointer STDCALL mono_test_marshal_lookup_symbol (const char *symbol_name) { #ifndef HOST_WIN32 return dlsym (RTLD_DEFAULT, symbol_name); #else // This isn't really proper, but it should work return lookup_mono_symbol (symbol_name); #endif } // FIXME use runtime headers #define MONO_BEGIN_EFRAME { void *__dummy; void *__region_cookie = mono_threads_enter_gc_unsafe_region ? mono_threads_enter_gc_unsafe_region (&__dummy) : NULL; #define MONO_END_EFRAME if (mono_threads_exit_gc_unsafe_region) mono_threads_exit_gc_unsafe_region (__region_cookie, &__dummy); } /** * test_method_thunk: * * @test_id: the test number * @test_method_handle: MonoMethod* of the C# test method * @create_object_method_handle: MonoMethod* of thunks.cs:Test.CreateObject */ LIBTEST_API int STDCALL test_method_thunk (int test_id, gpointer test_method_handle, gpointer create_object_method_handle) { int ret = 0; // FIXME use runtime headers gpointer (*mono_method_get_unmanaged_thunk)(gpointer) = (gpointer (*)(gpointer))lookup_mono_symbol ("mono_method_get_unmanaged_thunk"); // FIXME use runtime headers gpointer (*mono_string_new_wrapper)(const char *) = (gpointer (*)(const char *))lookup_mono_symbol ("mono_string_new_wrapper"); // FIXME use runtime headers char *(*mono_string_to_utf8)(gpointer) = (char *(*)(gpointer))lookup_mono_symbol ("mono_string_to_utf8"); // FIXME use runtime headers gpointer (*mono_object_unbox)(gpointer) = (gpointer (*)(gpointer))lookup_mono_symbol ("mono_object_unbox"); // FIXME use runtime headers gpointer (*mono_threads_enter_gc_unsafe_region) (gpointer) = (gpointer (*)(gpointer))lookup_mono_symbol ("mono_threads_enter_gc_unsafe_region"); // FIXME use runtime headers void (*mono_threads_exit_gc_unsafe_region) (gpointer, gpointer) = (void (*)(gpointer, gpointer))lookup_mono_symbol ("mono_threads_exit_gc_unsafe_region"); gpointer test_method, ex = NULL; gpointer (STDCALL *CreateObject)(gpointer*); MONO_BEGIN_EFRAME; if (!mono_method_get_unmanaged_thunk) { ret = 1; goto done; } test_method = mono_method_get_unmanaged_thunk (test_method_handle); if (!test_method) { ret = 2; goto done; } CreateObject = (gpointer (STDCALL *)(gpointer *))mono_method_get_unmanaged_thunk (create_object_method_handle); if 
(!CreateObject) { ret = 3; goto done; } switch (test_id) { case 0: { /* thunks.cs:Test.Test0 */ void (STDCALL *F)(gpointer *) = (void (STDCALL *)(gpointer *))test_method; F (&ex); break; } case 1: { /* thunks.cs:Test.Test1 */ int (STDCALL *F)(gpointer *) = (int (STDCALL *)(gpointer *))test_method; if (F (&ex) != 42) { ret = 4; goto done; } break; } case 2: { /* thunks.cs:Test.Test2 */ gpointer (STDCALL *F)(gpointer, gpointer*) = (gpointer (STDCALL *)(gpointer, gpointer *))test_method; gpointer str = mono_string_new_wrapper ("foo"); if (str != F (str, &ex)) { ret = 4; goto done; } break; } case 3: { /* thunks.cs:Test.Test3 */ gpointer (STDCALL *F)(gpointer, gpointer, gpointer*); gpointer obj; gpointer str; F = (gpointer (STDCALL *)(gpointer, gpointer, gpointer *))test_method; obj = CreateObject (&ex); str = mono_string_new_wrapper ("bar"); if (str != F (obj, str, &ex)) { ret = 4; goto done; } break; } case 4: { /* thunks.cs:Test.Test4 */ int (STDCALL *F)(gpointer, gpointer, int, gpointer*); gpointer obj; gpointer str; F = (int (STDCALL *)(gpointer, gpointer, int, gpointer *))test_method; obj = CreateObject (&ex); str = mono_string_new_wrapper ("bar"); if (42 != F (obj, str, 42, &ex)) { ret = 4; goto done; } break; } case 5: { /* thunks.cs:Test.Test5 */ int (STDCALL *F)(gpointer, gpointer, int, gpointer*); gpointer obj; gpointer str; F = (int (STDCALL *)(gpointer, gpointer, int, gpointer *))test_method; obj = CreateObject (&ex); str = mono_string_new_wrapper ("bar"); F (obj, str, 42, &ex); if (!ex) { ret = 4; goto done; } break; } case 6: { /* thunks.cs:Test.Test6 */ int (STDCALL *F)(gpointer, guint8, gint16, gint32, gint64, float, double, gpointer, gpointer*); gpointer obj; gpointer str = mono_string_new_wrapper ("Test6"); int res; F = (int (STDCALL *)(gpointer, guint8, gint16, gint32, gint64, float, double, gpointer, gpointer *))test_method; obj = CreateObject (&ex); res = F (obj, 254, 32700, -245378, 6789600, 3.1415, 3.1415, str, &ex); if (ex) { ret = 4; goto done; } if (!res) { ret = 5; goto done; } break; } case 7: { /* thunks.cs:Test.Test7 */ gint64 (STDCALL *F)(gpointer*) = (gint64 (STDCALL *)(gpointer *))test_method; if (F (&ex) != G_MAXINT64) { ret = 4; goto done; } break; } case 8: { /* thunks.cs:Test.Test8 */ void (STDCALL *F)(guint8*, gint16*, gint32*, gint64*, float*, double*, gpointer*, gpointer*); guint8 a1; gint16 a2; gint32 a3; gint64 a4; float a5; double a6; gpointer a7; F = (void (STDCALL *)(guint8 *, gint16 *, gint32 *, gint64 *, float *, double *, gpointer *, gpointer *))test_method; F (&a1, &a2, &a3, &a4, &a5, &a6, &a7, &ex); if (ex) { ret = 4; goto done; } if (!(a1 == 254 && a2 == 32700 && a3 == -245378 && a4 == 6789600 && (fabs (a5 - 3.1415) < 0.001) && (fabs (a6 - 3.1415) < 0.001) && strcmp (mono_string_to_utf8 (a7), "Test8") == 0)){ ret = 5; goto done; } break; } case 9: { /* thunks.cs:Test.Test9 */ void (STDCALL *F)(guint8*, gint16*, gint32*, gint64*, float*, double*, gpointer*, gpointer*); guint8 a1; gint16 a2; gint32 a3; gint64 a4; float a5; double a6; gpointer a7; F = (void (STDCALL *)(guint8 *, gint16 *, gint32 *, gint64 *, float *, double *, gpointer *, gpointer *))test_method; F (&a1, &a2, &a3, &a4, &a5, &a6, &a7, &ex); if (!ex) { ret = 4; goto done; } break; } case 10: { /* thunks.cs:Test.Test10 */ void (STDCALL *F)(gpointer*, gpointer*); gpointer obj1, obj2; obj1 = obj2 = CreateObject (&ex); if (ex) { ret = 4; goto done; } F = (void (STDCALL *)(gpointer *, gpointer *))test_method; F (&obj1, &ex); if (ex) { ret = 5; goto done; } if (obj1 == obj2) { ret = 6; 
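/* Test10 receives obj1 by reference and must replace it with a different object; if obj1 still equals obj2, the byref write-back was lost */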
goto done; } break; } case 100: { /* thunks.cs:TestStruct.Test0 */ int (STDCALL *F)(gpointer*, gpointer*); gpointer obj; TestStruct *a1; int res; obj = CreateObject (&ex); if (ex) { ret = 4; goto done; } if (!obj) { ret = 5; goto done; } a1 = (TestStruct *)mono_object_unbox (obj); if (!a1) { ret = 6; goto done; } a1->A = 42; a1->B = 3.1415; F = (int (STDCALL *)(gpointer *, gpointer *))test_method; res = F ((gpointer *)obj, &ex); if (ex) { ret = 7; goto done; } if (!res) { ret = 8; goto done; } /* check whether the call was really by value */ if (a1->A != 42 || a1->B != 3.1415) { ret = 9; goto done; } break; } case 101: { /* thunks.cs:TestStruct.Test1 */ void (STDCALL *F)(gpointer, gpointer*); TestStruct *a1; gpointer obj; obj = CreateObject (&ex); if (ex) { ret = 4; goto done; } if (!obj) { ret = 5; goto done; } a1 = (TestStruct *)mono_object_unbox (obj); if (!a1) { ret = 6; goto done; } F = (void (STDCALL *)(gpointer, gpointer *))test_method; F (obj, &ex); if (ex) { ret = 7; goto done; } if (a1->A != 42) { ret = 8; goto done; } if (!(fabs (a1->B - 3.1415) < 0.001)) { ret = 9; goto done; } break; } case 102: { /* thunks.cs:TestStruct.Test2 */ gpointer (STDCALL *F)(gpointer*); TestStruct *a1; gpointer obj; F = (gpointer (STDCALL *)(gpointer *))test_method; obj = F (&ex); if (ex) { ret = 4; goto done; } if (!obj) { ret = 5; goto done; } a1 = (TestStruct *)mono_object_unbox (obj); if (a1->A != 42) { ret = 5; goto done; } if (!(fabs (a1->B - 3.1415) < 0.001)) { ret = 6; goto done; } break; } case 103: { /* thunks.cs:TestStruct.Test3 */ void (STDCALL *F)(gpointer, gpointer*); TestStruct *a1; gpointer obj; obj = CreateObject (&ex); if (ex) { ret = 4; goto done; } if (!obj) { ret = 5; goto done; } a1 = (TestStruct *)mono_object_unbox (obj); if (!a1) { ret = 6; goto done; } a1->A = 42; a1->B = 3.1415; F = (void (STDCALL *)(gpointer, gpointer *))test_method; F (obj, &ex); if (ex) { ret = 4; goto done; } if (a1->A != 1) { ret = 5; goto done; } if (a1->B != 17) { ret = 6; goto done; } break; } default: ret = 9; } done: MONO_END_EFRAME; return ret; } typedef struct { char a; } winx64_struct1; LIBTEST_API int STDCALL mono_test_Winx64_struct1_in (winx64_struct1 var) { if (var.a != 123) return 1; return 0; } typedef struct { char a; char b; } winx64_struct2; LIBTEST_API int STDCALL mono_test_Winx64_struct2_in (winx64_struct2 var) { if (var.a != 4) return 1; if (var.b != 5) return 2; return 0; } typedef struct { char a; char b; short c; } winx64_struct3; LIBTEST_API int STDCALL mono_test_Winx64_struct3_in (winx64_struct3 var) { if (var.a != 4) return 1; if (var.b != 5) return 2; if (var.c != 0x1234) return 3; return 0; } typedef struct { char a; char b; short c; unsigned int d; } winx64_struct4; LIBTEST_API int STDCALL mono_test_Winx64_struct4_in (winx64_struct4 var) { if (var.a != 4) return 1; if (var.b != 5) return 2; if (var.c != 0x1234) return 3; if (var.d != 0x87654321) return 4; return 0; } typedef struct { char a; char b; char c; } winx64_struct5; LIBTEST_API int STDCALL mono_test_Winx64_struct5_in (winx64_struct5 var) { if (var.a != 4) return 1; if (var.b != 5) return 2; if (var.c != 6) return 3; return 0; } typedef struct { winx64_struct1 a; short b; char c; } winx64_struct6; LIBTEST_API int STDCALL mono_test_Winx64_struct6_in (winx64_struct6 var) { if (var.a.a != 4) return 1; if (var.b != 5) return 2; if (var.c != 6) return 3; return 0; } LIBTEST_API int STDCALL mono_test_Winx64_structs_in1 (winx64_struct1 var1, winx64_struct2 var2, winx64_struct3 var3, winx64_struct4 var4) { if (var1.a != 123) 
return 1; if (var2.a != 4) return 2; if (var2.b != 5) return 3; if (var3.a != 4) return 4; if (var3.b != 5) return 5; if (var3.c != 0x1234) return 6; if (var4.a != 4) return 7; if (var4.b != 5) return 8; if (var4.c != 0x1234) return 9; if (var4.d != 0x87654321) return 10; return 0; } LIBTEST_API int STDCALL mono_test_Winx64_structs_in2 (winx64_struct1 var1, winx64_struct1 var2, winx64_struct1 var3, winx64_struct1 var4, winx64_struct1 var5) { if (var1.a != 1) return 1; if (var2.a != 2) return 2; if (var3.a != 3) return 3; if (var4.a != 4) return 4; if (var5.a != 5) return 5; return 0; } LIBTEST_API int STDCALL mono_test_Winx64_structs_in3 (winx64_struct1 var1, winx64_struct5 var2, winx64_struct1 var3, winx64_struct5 var4, winx64_struct1 var5, winx64_struct5 var6) { if (var1.a != 1) return 1; if (var2.a != 2) return 2; if (var2.b != 3) return 3; if (var2.c != 4) return 4; if (var3.a != 5) return 5; if (var4.a != 6) return 6; if (var4.b != 7) return 7; if (var4.c != 8) return 8; if (var5.a != 9) return 9; if (var6.a != 10) return 10; if (var6.b != 11) return 11; if (var6.c != 12) return 12; return 0; } LIBTEST_API winx64_struct1 STDCALL mono_test_Winx64_struct1_ret (void) { winx64_struct1 ret; ret.a = 123; return ret; } LIBTEST_API winx64_struct2 STDCALL mono_test_Winx64_struct2_ret (void) { winx64_struct2 ret; ret.a = 4; ret.b = 5; return ret; } LIBTEST_API winx64_struct3 STDCALL mono_test_Winx64_struct3_ret (void) { winx64_struct3 ret; ret.a = 4; ret.b = 5; ret.c = 0x1234; return ret; } LIBTEST_API winx64_struct4 STDCALL mono_test_Winx64_struct4_ret (void) { winx64_struct4 ret; ret.a = 4; ret.b = 5; ret.c = 0x1234; ret.d = 0x87654321; return ret; } LIBTEST_API winx64_struct5 STDCALL mono_test_Winx64_struct5_ret (void) { winx64_struct5 ret; ret.a = 4; ret.b = 5; ret.c = 6; return ret; } LIBTEST_API winx64_struct1 STDCALL mono_test_Winx64_struct1_ret_5_args (char a, char b, char c, char d, char e) { winx64_struct1 ret; ret.a = a + b + c + d + e; return ret; } LIBTEST_API winx64_struct5 STDCALL mono_test_Winx64_struct5_ret6_args (char a, char b, char c, char d, char e) { winx64_struct5 ret; ret.a = a + b; ret.b = c + d; ret.c = e; return ret; } typedef struct { float a; float b; } winx64_floatStruct; LIBTEST_API int STDCALL mono_test_Winx64_floatStruct (winx64_floatStruct a) { if (a.a > 5.6 || a.a < 5.4) return 1; if (a.b > 9.6 || a.b < 9.4) return 2; return 0; } typedef struct { double a; } winx64_doubleStruct; LIBTEST_API int STDCALL mono_test_Winx64_doubleStruct (winx64_doubleStruct a) { if (a.a > 5.6 || a.a < 5.4) return 1; return 0; } typedef int (STDCALL *managed_struct1_delegate) (winx64_struct1 a); LIBTEST_API int STDCALL mono_test_managed_Winx64_struct1_in(managed_struct1_delegate func) { winx64_struct1 val; val.a = 5; return func (val); } typedef int (STDCALL *managed_struct5_delegate) (winx64_struct5 a); LIBTEST_API int STDCALL mono_test_managed_Winx64_struct5_in(managed_struct5_delegate func) { winx64_struct5 val; val.a = 5; val.b = 0x10; val.c = (char)0x99; return func (val); } typedef int (STDCALL *managed_struct1_struct5_delegate) (winx64_struct1 a, winx64_struct5 b, winx64_struct1 c, winx64_struct5 d, winx64_struct1 e, winx64_struct5 f); LIBTEST_API int STDCALL mono_test_managed_Winx64_struct1_struct5_in(managed_struct1_struct5_delegate func) { winx64_struct1 a, c, e; winx64_struct5 b, d, f; a.a = 1; b.a = 2; b.b = 3; b.c = 4; c.a = 5; d.a = 6; d.b = 7; d.c = 8; e.a = 9; f.a = 10; f.b = 11; f.c = 12; return func (a, b, c, d, e, f); } typedef winx64_struct1 (STDCALL
*managed_struct1_ret_delegate) (void); LIBTEST_API int STDCALL mono_test_Winx64_struct1_ret_managed (managed_struct1_ret_delegate func) { winx64_struct1 ret; ret = func (); if (ret.a != 0x45) return 1; return 0; } typedef winx64_struct5 (STDCALL *managed_struct5_ret_delegate) (void); LIBTEST_API int STDCALL mono_test_Winx64_struct5_ret_managed (managed_struct5_ret_delegate func) { winx64_struct5 ret; ret = func (); if (ret.a != 0x12) return 1; if (ret.b != 0x34) return 2; if (ret.c != 0x56) return 3; return 0; } LIBTEST_API int STDCALL mono_test_marshal_bool_in (int arg, unsigned int expected, unsigned int bDefaultMarsh, unsigned int bBoolCustMarsh, char bI1CustMarsh, unsigned char bU1CustMarsh, unsigned short bVBCustMarsh) { switch (arg) { case 1: if (bDefaultMarsh != expected) return 1; break; case 2: if (bBoolCustMarsh != expected) return 2; break; case 3: if (bI1CustMarsh != expected) return 3; break; case 4: if (bU1CustMarsh != expected) return 4; break; case 5: if (bVBCustMarsh != expected) return 5; break; default: return 999; } return 0; } LIBTEST_API int STDCALL mono_test_marshal_bool_out (int arg, unsigned int testVal, unsigned int* bDefaultMarsh, unsigned int* bBoolCustMarsh, char* bI1CustMarsh, unsigned char* bU1CustMarsh, unsigned short* bVBCustMarsh) { switch (arg) { case 1: if (!bDefaultMarsh) return 1; *bDefaultMarsh = testVal; break; case 2: if (!bBoolCustMarsh) return 2; *bBoolCustMarsh = testVal; break; case 3: if (!bI1CustMarsh) return 3; *bI1CustMarsh = (char)testVal; break; case 4: if (!bU1CustMarsh) return 4; *bU1CustMarsh = (unsigned char)testVal; break; case 5: if (!bVBCustMarsh) return 5; *bVBCustMarsh = (unsigned short)testVal; break; default: return 999; } return 0; } LIBTEST_API int STDCALL mono_test_marshal_bool_ref (int arg, unsigned int expected, unsigned int testVal, unsigned int* bDefaultMarsh, unsigned int* bBoolCustMarsh, char* bI1CustMarsh, unsigned char* bU1CustMarsh, unsigned short* bVBCustMarsh) { switch (arg) { case 1: if (!bDefaultMarsh) return 1; if (*bDefaultMarsh != expected) return 2; *bDefaultMarsh = testVal; break; case 2: if (!bBoolCustMarsh) return 3; if (*bBoolCustMarsh != expected) return 4; *bBoolCustMarsh = testVal; break; case 3: if (!bI1CustMarsh) return 5; if (*bI1CustMarsh != expected) return 6; *bI1CustMarsh = (char)testVal; break; case 4: if (!bU1CustMarsh) return 7; if (*bU1CustMarsh != expected) return 8; *bU1CustMarsh = (unsigned char)testVal; break; case 5: if (!bVBCustMarsh) return 9; if (*bVBCustMarsh != expected) return 10; *bVBCustMarsh = (unsigned short)testVal; break; default: return 999; } return 0; } typedef int (STDCALL *MarshalBoolInDelegate) (int arg, unsigned int expected, unsigned int bDefaultMarsh, unsigned int bBoolCustMarsh, char bI1CustMarsh, unsigned char bU1CustMarsh, unsigned short bVBCustMarsh); LIBTEST_API int STDCALL mono_test_managed_marshal_bool_in (int arg, unsigned int expected, unsigned int testVal, MarshalBoolInDelegate pfcn) { if (!pfcn) return 0x9900; switch (arg) { case 1: return pfcn (arg, expected, testVal, 0, 0, 0, 0); case 2: return pfcn (arg, expected, 0, testVal, 0, 0, 0); case 3: return pfcn (arg, expected, 0, 0, testVal, 0, 0); case 4: return pfcn (arg, expected, 0, 0, 0, testVal, 0); case 5: return pfcn (arg, expected, 0, 0, 0, 0, testVal); default: return 0x9800; } return 0; } typedef int (STDCALL *MarshalBoolOutDelegate) (int arg, unsigned int expected, unsigned int* bDefaultMarsh, unsigned int* bBoolCustMarsh, char* bI1CustMarsh, unsigned char* bU1CustMarsh, unsigned short* bVBCustMarsh);
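/* Illustrative sketch only (hypothetical, not referenced by the managed tests): a native
   callback matching MarshalBoolInDelegate, showing the shape of the delegate that
   mono_test_managed_marshal_bool_in expects the managed side to supply. 'arg' selects
   which of the five bool-marshaling slots carries the test value; 0 signals a match. */
#if 0
static int STDCALL
sample_marshal_bool_in_cb (int arg, unsigned int expected, unsigned int bDefaultMarsh,
			   unsigned int bBoolCustMarsh, char bI1CustMarsh,
			   unsigned char bU1CustMarsh, unsigned short bVBCustMarsh)
{
	switch (arg) {
	case 1: return bDefaultMarsh == expected ? 0 : 1;
	case 2: return bBoolCustMarsh == expected ? 0 : 2;
	case 3: return (unsigned int)bI1CustMarsh == expected ? 0 : 3;
	case 4: return (unsigned int)bU1CustMarsh == expected ? 0 : 4;
	case 5: return (unsigned int)bVBCustMarsh == expected ? 0 : 5;
	default: return 999;
	}
}
#endif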
LIBTEST_API int STDCALL mono_test_managed_marshal_bool_out (int arg, unsigned int expected, unsigned int testVal, MarshalBoolOutDelegate pfcn) { int ret; unsigned int lDefaultMarsh, lBoolCustMarsh; char lI1CustMarsh = 0; unsigned char lU1CustMarsh = 0; unsigned short lVBCustMarsh = 0; lDefaultMarsh = lBoolCustMarsh = 0; if (!pfcn) return 0x9900; switch (arg) { case 1: { unsigned int ltVal = 0; ret = pfcn (arg, testVal, &ltVal, &lBoolCustMarsh, &lI1CustMarsh, &lU1CustMarsh, &lVBCustMarsh); if (ret) return 0x0100 + ret; if (expected != ltVal) return 0x0200; break; } case 2: { unsigned int ltVal = 0; ret = pfcn (arg, testVal, &lDefaultMarsh, &ltVal, &lI1CustMarsh, &lU1CustMarsh, &lVBCustMarsh); if (ret) return 0x0300 + ret; if (expected != ltVal) return 0x0400; break; } case 3: { char ltVal = 0; ret = pfcn (arg, testVal, &lDefaultMarsh, &lBoolCustMarsh, &ltVal, &lU1CustMarsh, &lVBCustMarsh); if (ret) return 0x0500 + ret; if (expected != ltVal) return 0x0600; break; } case 4: { unsigned char ltVal = 0; ret = pfcn (arg, testVal, &lDefaultMarsh, &lBoolCustMarsh, &lI1CustMarsh, &ltVal, &lVBCustMarsh); if (ret) return 0x0700 + ret; if (expected != ltVal) return 0x0800; break; } case 5: { unsigned short ltVal = 0; ret = pfcn (arg, testVal, &lDefaultMarsh, &lBoolCustMarsh, &lI1CustMarsh, &lU1CustMarsh, &ltVal); if (ret) return 0x0900 + ret; if (expected != ltVal) return 0x1000; break; } default: return 0x9800; } return 0; } typedef int (STDCALL *MarshalBoolRefDelegate) (int arg, unsigned int expected, unsigned int testVal, unsigned int* bDefaultMarsh, unsigned int* bBoolCustMarsh, char* bI1CustMarsh, unsigned char* bU1CustMarsh, unsigned short* bVBCustMarsh); LIBTEST_API int STDCALL mono_test_managed_marshal_bool_ref (int arg, unsigned int expected, unsigned int testVal, unsigned int outExpected, unsigned int outTestVal, MarshalBoolRefDelegate pfcn) { int ret; unsigned int lDefaultMarsh, lBoolCustMarsh; char lI1CustMarsh = 0; unsigned char lU1CustMarsh = 0; unsigned short lVBCustMarsh = 0; lDefaultMarsh = lBoolCustMarsh = 0; if (!pfcn) return 0x9900; switch (arg) { case 1: { unsigned int ltestVal = testVal; ret = pfcn (arg, expected, outTestVal, &ltestVal, &lBoolCustMarsh, &lI1CustMarsh, &lU1CustMarsh, &lVBCustMarsh); if (ret) return 0x0100 + ret; if (outExpected != ltestVal) return 0x0200; break; } case 2: { unsigned int ltestVal = testVal; ret = pfcn (arg, expected, outTestVal, &lDefaultMarsh, &ltestVal, &lI1CustMarsh, &lU1CustMarsh, &lVBCustMarsh); if (ret) return 0x0300 + ret; if (outExpected != ltestVal) return 0x0400; break; } case 3: { char ltestVal = testVal; ret = pfcn (arg, expected, outTestVal, &lDefaultMarsh, &lBoolCustMarsh, &ltestVal, &lU1CustMarsh, &lVBCustMarsh); if (ret) return 0x0500 + ret; if (outExpected != ltestVal) return 0x0600; break; } case 4: { unsigned char ltestVal = testVal; ret = pfcn (arg, expected, outTestVal, &lDefaultMarsh, &lBoolCustMarsh, &lI1CustMarsh, &ltestVal, &lVBCustMarsh); if (ret) return 0x0700 + ret; if (outExpected != ltestVal) return 0x0800; break; } case 5: { unsigned short ltestVal = testVal; ret = pfcn (arg, expected, outTestVal, &lDefaultMarsh, &lBoolCustMarsh, &lI1CustMarsh, &lU1CustMarsh, &ltestVal); if (ret) return 0x0900 + ret; if (outExpected != ltestVal) return 0x1000; break; } default: return 0x9800; } return 0; } #ifdef WIN32 LIBTEST_API int STDCALL mono_test_marshal_safearray_out_1dim_vt_bstr_empty (SAFEARRAY** safearray) { /* Create an empty one-dimensional array of variants */ SAFEARRAY *pSA; SAFEARRAYBOUND dimensions [1]; dimensions 
[0].lLbound = 0; dimensions [0].cElements = 0; pSA = SafeArrayCreate (VT_VARIANT, 1, dimensions); *safearray = pSA; return S_OK; } LIBTEST_API int STDCALL mono_test_marshal_safearray_out_1dim_vt_bstr (SAFEARRAY** safearray) { /* Create a one-dimensional array of 10 variants filled with "0" to "9" */ SAFEARRAY *pSA; SAFEARRAYBOUND dimensions [1]; long i; gchar buffer [20]; HRESULT hr = S_OK; long indices [1]; dimensions [0].lLbound = 0; dimensions [0].cElements = 10; pSA= SafeArrayCreate (VT_VARIANT, 1, dimensions); for (i= dimensions [0].lLbound; i< (dimensions [0].cElements + dimensions [0].lLbound); i++) { VARIANT vOut; VariantInit (&vOut); vOut.vt = VT_BSTR; _ltoa (i,buffer,10); vOut.bstrVal= marshal_bstr_alloc (buffer); indices [0] = i; if ((hr = SafeArrayPutElement (pSA, indices, &vOut)) != S_OK) { VariantClear (&vOut); SafeArrayDestroy (pSA); return hr; } VariantClear (&vOut); } *safearray = pSA; return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_out_2dim_vt_i4 (SAFEARRAY** safearray) { /* Create a two-dimensional array of 4x3 variants filled with 11, 12, 13, etc. */ SAFEARRAY *pSA; SAFEARRAYBOUND dimensions [2]; long i, j; HRESULT hr = S_OK; long indices [2]; dimensions [0].lLbound = 0; dimensions [0].cElements = 4; dimensions [1].lLbound = 0; dimensions [1].cElements = 3; pSA= SafeArrayCreate(VT_VARIANT, 2, dimensions); for (i= dimensions [0].lLbound; i< (dimensions [0].cElements + dimensions [0].lLbound); i++) { for (j= dimensions [1].lLbound; j< (dimensions [1].cElements + dimensions [1].lLbound); j++) { VARIANT vOut; VariantInit (&vOut); vOut.vt = VT_I4; vOut.lVal = (i+1)*10+(j+1); indices [0] = i; indices [1] = j; if ((hr = SafeArrayPutElement (pSA, indices, &vOut)) != S_OK) { VariantClear (&vOut); SafeArrayDestroy (pSA); return hr; } VariantClear (&vOut); // does a deep destroy of source VARIANT } } *safearray = pSA; return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_out_4dim_vt_i4 (SAFEARRAY** safearray) { /* Create a four-dimensional array of 10x3x6x7 variants filled with their indices */ /* Also use non zero lower bounds */ SAFEARRAY *pSA; SAFEARRAYBOUND dimensions [4]; long i; HRESULT hr = S_OK; VARIANT *pData; dimensions [0].lLbound = 15; dimensions [0].cElements = 10; dimensions [1].lLbound = 20; dimensions [1].cElements = 3; dimensions [2].lLbound = 5; dimensions [2].cElements = 6; dimensions [3].lLbound = 12; dimensions [3].cElements = 7; pSA= SafeArrayCreate (VT_VARIANT, 4, dimensions); SafeArrayAccessData (pSA, (void **)&pData); for (i= 0; i< 10*3*6*7; i++) { VariantInit(&pData [i]); pData [i].vt = VT_I4; pData [i].lVal = i; } SafeArrayUnaccessData (pSA); *safearray = pSA; return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_byval_1dim_empty (SAFEARRAY* safearray) { /* Check that array is one dimensional and empty */ UINT dim; long lbound, ubound; dim = SafeArrayGetDim (safearray); if (dim != 1) return 1; SafeArrayGetLBound (safearray, 1, &lbound); SafeArrayGetUBound (safearray, 1, &ubound); if ((lbound > 0) || (ubound > 0)) return 1; return 0; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_byval_1dim_vt_i4 (SAFEARRAY* safearray) { /* Check that array is one dimensional containing integers from 1 to 10 */ UINT dim; long lbound, ubound; VARIANT *pData; long i; int result=0; dim = SafeArrayGetDim (safearray); if (dim != 1) return 1; SafeArrayGetLBound (safearray, 1, &lbound); SafeArrayGetUBound (safearray, 1, &ubound); if ((lbound != 0) || (ubound != 9)) return 1; SafeArrayAccessData (safearray, (void **)&pData); for 
(i= lbound; i <= ubound; i++) { if ((VariantChangeType (&pData [i], &pData [i], VARIANT_NOUSEROVERRIDE, VT_I4) != S_OK) || (pData [i].lVal != i + 1)) result = 1; } SafeArrayUnaccessData (safearray); return result; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_byval_1dim_vt_mixed (SAFEARRAY* safearray) { /* Check that array is one dimensional containing integers mixed with strings from 0 to 12 */ UINT dim; long lbound, ubound; VARIANT *pData; long i; long indices [1]; VARIANT element; int result=0; VariantInit (&element); dim = SafeArrayGetDim (safearray); if (dim != 1) return 1; SafeArrayGetLBound (safearray, 1, &lbound); SafeArrayGetUBound (safearray, 1, &ubound); if ((lbound != 0) || (ubound != 12)) return 1; SafeArrayAccessData (safearray, (void **)&pData); for (i= lbound; i <= ubound; i++) { if ((i%2 == 0) && (pData [i].vt != VT_I4)) result = 1; if ((i%2 == 1) && (pData [i].vt != VT_BSTR)) result = 1; if ((VariantChangeType (&pData [i], &pData [i], VARIANT_NOUSEROVERRIDE, VT_I4) != S_OK) || (pData [i].lVal != i)) result = 1; } SafeArrayUnaccessData (safearray); /* Change the first element of the array to verify that [in] parameters are not marshalled back to the managed side */ indices [0] = 0; element.vt = VT_I4; element.lVal = 333; SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); return result; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_byval_2dim_vt_i4 (SAFEARRAY* safearray) { /* Check that array is a two-dimensional 2x4 array of VT_I4 integers holding 10*(i+1)+(j+1) at index [i,j] */ UINT dim; long lbound1, ubound1, lbound2, ubound2; long i, j, failed; long indices [2]; VARIANT element; VariantInit (&element); dim = SafeArrayGetDim (safearray); if (dim != 2) return 1; SafeArrayGetLBound (safearray, 1, &lbound1); SafeArrayGetUBound (safearray, 1, &ubound1); if ((lbound1 != 0) || (ubound1 != 1)) return 1; SafeArrayGetLBound (safearray, 2, &lbound2); SafeArrayGetUBound (safearray, 2, &ubound2); if ((lbound2 != 0) || (ubound2 != 3)) { return 1; } for (i= lbound1; i <= ubound1; i++) { indices [0] = i; for (j= lbound2; j <= ubound2; j++) { indices [1] = j; if (SafeArrayGetElement (safearray, indices, &element) != S_OK) return 1; failed = ((element.vt != VT_I4) || (element.lVal != 10*(i+1)+(j+1))); VariantClear (&element); if (failed) return 1; } } /* Change the first element of the array to verify that [in] parameters are not marshalled back to the managed side */ indices [0] = 0; indices [1] = 0; element.vt = VT_I4; element.lVal = 333; SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); return 0; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_byval_3dim_vt_bstr (SAFEARRAY* safearray) { /* Check that array is a three-dimensional 2x2x3 array of BSTRs that convert to 100*(i+1)+10*(j+1)+(k+1) at index [i,j,k] */ UINT dim; long lbound1, ubound1, lbound2, ubound2, lbound3, ubound3; long i, j, k, failed; long indices [3]; VARIANT element; VariantInit (&element); dim = SafeArrayGetDim (safearray); if (dim != 3) return 1; SafeArrayGetLBound (safearray, 1, &lbound1); SafeArrayGetUBound (safearray, 1, &ubound1); if ((lbound1 != 0) || (ubound1 != 1)) return 1; SafeArrayGetLBound (safearray, 2, &lbound2); SafeArrayGetUBound (safearray, 2, &ubound2); if ((lbound2 != 0) || (ubound2 != 1)) return 1; SafeArrayGetLBound (safearray, 3, &lbound3); SafeArrayGetUBound (safearray, 3, &ubound3); if ((lbound3 != 0) || (ubound3 != 2)) return 1; for (i= lbound1; i <= ubound1; i++) { indices [0] = i; for (j= lbound2; j <= ubound2; j++) { indices [1] = j; for (k= lbound3; k
<= ubound3; k++) { indices [2] = k; if (SafeArrayGetElement (safearray, indices, &element) != S_OK) return 1; failed = ((element.vt != VT_BSTR) || (VariantChangeType (&element, &element, VARIANT_NOUSEROVERRIDE, VT_I4) != S_OK) || (element.lVal != 100*(i+1)+10*(j+1)+(k+1))); VariantClear (&element); if (failed) return 1; } } } /* Change the first element of the array to verify that [in] parameters are not marshalled back to the managed side */ indices [0] = 0; indices [1] = 0; indices [2] = 0; element.vt = VT_BSTR; element.bstrVal = SysAllocString(L"Should not be copied"); SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); return 0; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_byref_3dim_vt_bstr (SAFEARRAY** safearray) { return mono_test_marshal_safearray_in_byval_3dim_vt_bstr (*safearray); } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_out_byref_1dim_empty (SAFEARRAY** safearray) { /* Check that the input array is what is expected and change it so the caller can check */ /* correct marshalling back to managed code */ UINT dim; long lbound, ubound; SAFEARRAYBOUND dimensions [1]; long i; wchar_t buffer [20]; HRESULT hr = S_OK; long indices [1]; /* Check that in array is one dimensional and empty */ dim = SafeArrayGetDim (*safearray); if (dim != 1) { return 1; } SafeArrayGetLBound (*safearray, 1, &lbound); SafeArrayGetUBound (*safearray, 1, &ubound); if ((lbound > 0) || (ubound > 0)) { return 1; } /* Re-dimension the array and return a one-dimensional array of 8 variants filled with "0" to "7" */ dimensions [0].lLbound = 0; dimensions [0].cElements = 8; hr = SafeArrayRedim (*safearray, dimensions); if (hr != S_OK) return 1; for (i= dimensions [0].lLbound; i< (dimensions [0].lLbound + dimensions [0].cElements); i++) { VARIANT vOut; VariantInit (&vOut); vOut.vt = VT_BSTR; _ltow (i,buffer,10); vOut.bstrVal = SysAllocString (buffer); indices [0] = i; if ((hr = SafeArrayPutElement (*safearray, indices, &vOut)) != S_OK) { VariantClear (&vOut); SafeArrayDestroy (*safearray); return hr; } VariantClear (&vOut); } return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_out_byref_3dim_vt_bstr (SAFEARRAY** safearray) { /* Check that the input array is what is expected and change it so the caller can check */ /* correct marshalling back to managed code */ UINT dim; long lbound1, ubound1, lbound2, ubound2, lbound3, ubound3; SAFEARRAYBOUND dimensions [1]; long i, j, k, failed; wchar_t buffer [20]; HRESULT hr = S_OK; long indices [3]; VARIANT element; VariantInit (&element); /* Check that in array is three dimensional and contains the expected values */ dim = SafeArrayGetDim (*safearray); if (dim != 3) return 1; SafeArrayGetLBound (*safearray, 1, &lbound1); SafeArrayGetUBound (*safearray, 1, &ubound1); if ((lbound1 != 0) || (ubound1 != 1)) return 1; SafeArrayGetLBound (*safearray, 2, &lbound2); SafeArrayGetUBound (*safearray, 2, &ubound2); if ((lbound2 != 0) || (ubound2 != 1)) return 1; SafeArrayGetLBound (*safearray, 3, &lbound3); SafeArrayGetUBound (*safearray, 3, &ubound3); if ((lbound3 != 0) || (ubound3 != 2)) return 1; for (i= lbound1; i <= ubound1; i++) { indices [0] = i; for (j= lbound2; j <= ubound2; j++) { indices [1] = j; for (k= lbound3; k <= ubound3; k++) { indices [2] = k; if (SafeArrayGetElement (*safearray, indices, &element) != S_OK) return 1; failed = ((element.vt != VT_BSTR) || (VariantChangeType (&element, &element, VARIANT_NOUSEROVERRIDE, VT_I4) != S_OK) || (element.lVal != 100*(i+1)+10*(j+1)+(k+1))); VariantClear (&element); if 
(failed) return 1; } } } hr = SafeArrayDestroy (*safearray); if (hr != S_OK) return 1; /* Return a new one-dimensional array of 8 variants filled with "0" to "7" */ dimensions [0].lLbound = 0; dimensions [0].cElements = 8; *safearray = SafeArrayCreate (VT_VARIANT, 1, dimensions); for (i= dimensions [0].lLbound; i< (dimensions [0].lLbound + dimensions [0].cElements); i++) { VARIANT vOut; VariantInit (&vOut); vOut.vt = VT_BSTR; _ltow (i,buffer,10); vOut.bstrVal = SysAllocString (buffer); indices [0] = i; if ((hr = SafeArrayPutElement (*safearray, indices, &vOut)) != S_OK) { VariantClear (&vOut); SafeArrayDestroy (*safearray); return hr; } VariantClear (&vOut); } return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_out_byref_1dim_vt_i4 (SAFEARRAY** safearray) { /* Check that the input array is what is expected and change it so the caller can check */ /* correct marshalling back to managed code */ UINT dim; long lbound1, ubound1; long i, failed; HRESULT hr = S_OK; long indices [1]; VARIANT element; VariantInit (&element); /* Check that in array is one dimensional and contains the expected value */ dim = SafeArrayGetDim (*safearray); if (dim != 1) return 1; SafeArrayGetLBound (*safearray, 1, &lbound1); SafeArrayGetUBound (*safearray, 1, &ubound1); if ((lbound1 != 0) || (ubound1 != 0)) return 1; for (i= lbound1; i <= ubound1; i++) { indices [0] = i; if (SafeArrayGetElement (*safearray, indices, &element) != S_OK) return 1; failed = (element.vt != VT_I4) || (element.lVal != i+1); VariantClear (&element); if (failed) return 1; } /* Change one of the elements of the array to verify that [out] parameter is marshalled back to the managed side */ indices [0] = 0; element.vt = VT_I4; element.lVal = -1; SafeArrayPutElement (*safearray, indices, &element); VariantClear (&element); return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_out_byval_1dim_vt_i4 (SAFEARRAY* safearray) { /* Check that the input array is what is expected and change it so the caller can check */ /* correct marshalling back to managed code */ UINT dim; long lbound1, ubound1; SAFEARRAYBOUND dimensions [1]; long i, failed; HRESULT hr = S_OK; long indices [1]; VARIANT element; VariantInit (&element); /* Check that in array is one dimensional and contains the expected value */ dim = SafeArrayGetDim (safearray); if (dim != 1) return 1; SafeArrayGetLBound (safearray, 1, &lbound1); SafeArrayGetUBound (safearray, 1, &ubound1); if ((lbound1 != 0) || (ubound1 != 0)) return 1; for (i= lbound1; i <= ubound1; i++) { indices [0] = i; if (SafeArrayGetElement (safearray, indices, &element) != S_OK) return 1; failed = (element.vt != VT_I4) || (element.lVal != i+1); VariantClear (&element); if (failed) return 1; } /* Change the array to verify how [out] parameter is marshalled back to the managed side */ /* Redimension the array */ dimensions [0].lLbound = lbound1; dimensions [0].cElements = 2; hr = SafeArrayRedim(safearray, dimensions); indices [0] = 0; element.vt = VT_I4; element.lVal = 12345; SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); indices [0] = 1; element.vt = VT_I4; element.lVal = -12345; SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_out_byval_3dim_vt_bstr (SAFEARRAY* safearray) { /* Check that the input array is what is expected and change it so the caller can check */ /* correct marshalling back to managed code */ UINT dim; long lbound1, ubound1, lbound2,
ubound2, lbound3, ubound3; long i, j, k, failed; HRESULT hr = S_OK; long indices [3]; VARIANT element; VariantInit (&element); /* Check that in array is three dimensional and contains the expected values */ dim = SafeArrayGetDim (safearray); if (dim != 3) return 1; SafeArrayGetLBound (safearray, 1, &lbound1); SafeArrayGetUBound (safearray, 1, &ubound1); if ((lbound1 != 0) || (ubound1 != 1)) return 1; SafeArrayGetLBound (safearray, 2, &lbound2); SafeArrayGetUBound (safearray, 2, &ubound2); if ((lbound2 != 0) || (ubound2 != 1)) return 1; SafeArrayGetLBound (safearray, 3, &lbound3); SafeArrayGetUBound (safearray, 3, &ubound3); if ((lbound3 != 0) || (ubound3 != 2)) return 1; for (i= lbound1; i <= ubound1; i++) { indices [0] = i; for (j= lbound2; j <= ubound2; j++) { indices [1] = j; for (k= lbound3; k <= ubound3; k++) { indices [2] = k; if (SafeArrayGetElement (safearray, indices, &element) != S_OK) return 1; failed = ((element.vt != VT_BSTR) || (VariantChangeType (&element, &element, VARIANT_NOUSEROVERRIDE, VT_I4) != S_OK) || (element.lVal != 100*(i+1)+10*(j+1)+(k+1))); VariantClear (&element); if (failed) return 1; } } } /* Change the elements of the array to verify that [out] parameter is marshalled back to the managed side */ indices [0] = 1; indices [1] = 1; indices [2] = 2; element.vt = VT_I4; element.lVal = 333; SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); indices [0] = 1; indices [1] = 1; indices [2] = 1; element.vt = VT_I4; element.lVal = 111; SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); indices [0] = 0; indices [1] = 1; indices [2] = 0; element.vt = VT_BSTR; element.bstrVal = marshal_bstr_alloc("ABCDEFG"); SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_mixed( SAFEARRAY *safearray1, SAFEARRAY **safearray2, SAFEARRAY *safearray3, SAFEARRAY **safearray4 ) { HRESULT hr = S_OK; /* Initialize out parameters */ *safearray2 = NULL; /* array1: Check that in array is one dimensional and contains the expected value */ hr = mono_test_marshal_safearray_in_out_byval_1dim_vt_i4 (safearray1); /* array2: Fill in with some values to check on the managed side */ if (hr == S_OK) hr = mono_test_marshal_safearray_out_1dim_vt_bstr (safearray2); /* array3: Check that in array is one dimensional and contains the expected value */ if (hr == S_OK) hr = mono_test_marshal_safearray_in_byval_1dim_vt_mixed(safearray3); /* array4: Check input values and fill in with some values to check on the managed side */ if (hr == S_OK) hr = mono_test_marshal_safearray_in_out_byref_3dim_vt_bstr(safearray4); return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_ccw(MonoComObject *pUnk) { SAFEARRAY *array; VARIANT var; long index; int ret; array = SafeArrayCreateVector(VT_VARIANT, 0, 2); var.vt = VT_BSTR; var.bstrVal = marshal_bstr_alloc("Test"); index = 0; SafeArrayPutElement(array, &index, &var); var.vt = VT_I4; var.intVal = 2345; index = 1; SafeArrayPutElement(array, &index, &var); ret = pUnk->vtbl->ArrayIn (pUnk, (void *)array); if (!ret) ret = pUnk->vtbl->ArrayIn2 (pUnk, (void *)array); if (!ret) ret = pUnk->vtbl->ArrayIn3 (pUnk, (void *)array); SafeArrayDestroy(array); return ret; } LIBTEST_API int STDCALL mono_test_marshal_lparray_out_ccw(MonoComObject *pUnk) { guint32 array, result; int ret; ret = pUnk->vtbl->ArrayOut (pUnk, &array, &result); if (ret) return ret; if (array != 55) return 1; if (result != 1) return 2; ret = pUnk->vtbl->ArrayOut (pUnk, 
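/* the second ArrayOut call below passes a NULL array pointer; the CCW is expected to report a zero element count in that case */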
NULL, &result); if (ret) return ret; if (result != 0) return 3; return 0; } #endif static int call_managed_res; static void call_managed (gpointer arg) { SimpleDelegate del = (SimpleDelegate)arg; call_managed_res = del (42); } LIBTEST_API int STDCALL mono_test_marshal_thread_attach (SimpleDelegate del) { #ifdef WIN32 return 43; #else int res; pthread_t t; res = pthread_create (&t, NULL, (gpointer (*)(gpointer))call_managed, (gpointer)del); g_assert (res == 0); pthread_join (t, NULL); return call_managed_res; #endif } typedef struct { char arr [4 * 1024]; } LargeStruct; typedef int (STDCALL *LargeStructDelegate) (LargeStruct *s); static void call_managed_large_vt (gpointer arg) { LargeStructDelegate del = (LargeStructDelegate)arg; LargeStruct s; call_managed_res = del (&s); } LIBTEST_API int STDCALL mono_test_marshal_thread_attach_large_vt (SimpleDelegate del) { #ifdef WIN32 return 43; #else int res; pthread_t t; res = pthread_create (&t, NULL, (gpointer (*)(gpointer))call_managed_large_vt, (gpointer)del); g_assert (res == 0); pthread_join (t, NULL); return call_managed_res; #endif } typedef int (STDCALL *Callback) (void); static Callback callback; LIBTEST_API void STDCALL mono_test_marshal_set_callback (Callback cb) { callback = cb; } LIBTEST_API int STDCALL mono_test_marshal_call_callback (void) { return callback (); } LIBTEST_API int STDCALL mono_test_marshal_lpstr (char *str) { return strcmp ("ABC", str); } LIBTEST_API int STDCALL mono_test_marshal_lpwstr (gunichar2 *str) { char *s; int res; s = g_utf16_to_utf8 (str, -1, NULL, NULL, NULL); res = strcmp ("ABC", s); g_free (s); return res; } LIBTEST_API char* STDCALL mono_test_marshal_return_lpstr (void) { char *res = (char *)marshal_alloc (4); strcpy (res, "XYZ"); return res; } LIBTEST_API gunichar2* STDCALL mono_test_marshal_return_lpwstr (void) { gunichar2 *res = (gunichar2 *)marshal_alloc (8); gunichar2* tmp = g_utf8_to_utf16 ("XYZ", -1, NULL, NULL, NULL); memcpy (res, tmp, 8); g_free (tmp); return res; } typedef #if defined (HOST_WIN32) && defined (HOST_X86) && defined (__GNUC__) // Workaround gcc ABI bug. It returns the struct in ST0 instead of edx:eax. // Mono and Visual C++ agree. union #else struct #endif { double d; } SingleDoubleStruct; LIBTEST_API SingleDoubleStruct STDCALL mono_test_marshal_return_single_double_struct (void) { SingleDoubleStruct res = {3.0}; return res; } LIBTEST_API int STDCALL mono_test_has_thiscall_globals (void) { // Visual C++ does not accept __thiscall on global functions, only // member function and function pointers. Gcc accepts it also on global functions. 
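	// Hence the check below reports global __thiscall support only for
	// non-MSVC (MinGW/gcc-style) x86 Windows builds; MSVC builds rely on
	// the function-pointer variants tested via mono_test_has_thiscall_pointers.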
#if defined (HOST_X86) && defined (HOST_WIN32) && !defined (_MSC_VER) return 1; #else return 0; #endif } LIBTEST_API int STDCALL mono_test_has_thiscall_pointers (void) { #if defined (HOST_X86) && defined (HOST_WIN32) return 1; #else return 0; #endif } LIBTEST_API int #ifndef _MSC_VER __thiscall #endif _mono_test_native_thiscall1 (int arg) { return arg; } LIBTEST_API int #ifndef _MSC_VER __thiscall #endif _mono_test_native_thiscall2 (int arg, int arg2) { return arg + (arg2^1); } LIBTEST_API int #ifndef _MSC_VER __thiscall #endif _mono_test_native_thiscall3 (int arg, int arg2, int arg3) { return arg + (arg2^1) + (arg3^2); } typedef int ( #ifndef _MSC_VER __thiscall #endif *ThiscallFunction)(int arg, int arg2); LIBTEST_API ThiscallFunction STDCALL mono_test_get_native_thiscall2 (void) { return _mono_test_native_thiscall2; } LIBTEST_API int STDCALL _mono_test_managed_thiscall1 (int (__thiscall*fn)(int), int arg) { return fn(arg); } LIBTEST_API int STDCALL _mono_test_managed_thiscall2 (int (__thiscall*fn)(int,int), int arg, int arg2) { return fn(arg, arg2); } LIBTEST_API int STDCALL _mono_test_managed_thiscall3 (int (__thiscall*fn)(int,int,int), int arg, int arg2, int arg3) { return fn(arg, arg2, arg3); } typedef struct { char f1; } sbyte1; LIBTEST_API sbyte1 STDCALL mono_return_sbyte1 (sbyte1 s1, int addend) { if (s1.f1 != 1) { fprintf(stderr, "mono_return_sbyte1 s1.f1: got %d but expected %d\n", s1.f1, 1); } s1.f1+=addend; return s1; } typedef struct { char f1,f2; } sbyte2; LIBTEST_API sbyte2 STDCALL mono_return_sbyte2 (sbyte2 s2, int addend) { if (s2.f1 != 1) { fprintf(stderr, "mono_return_sbyte2 s2.f1: got %d but expected %d\n", s2.f1, 1); } if (s2.f2 != 2) { fprintf(stderr, "mono_return_sbyte2 s2.f2: got %d but expected %d\n", s2.f2, 2); } s2.f1+=addend; s2.f2+=addend; return s2; } typedef struct { char f1,f2,f3; } sbyte3; LIBTEST_API sbyte3 STDCALL mono_return_sbyte3 (sbyte3 s3, int addend) { if (s3.f1 != 1) { fprintf(stderr, "mono_return_sbyte3 s3.f1: got %d but expected %d\n", s3.f1, 1); } if (s3.f2 != 2) { fprintf(stderr, "mono_return_sbyte3 s3.f2: got %d but expected %d\n", s3.f2, 2); } if (s3.f3 != 3) { fprintf(stderr, "mono_return_sbyte3 s3.f3: got %d but expected %d\n", s3.f3, 3); } s3.f1+=addend; s3.f2+=addend; s3.f3+=addend; return s3; } typedef struct { char f1,f2,f3,f4; } sbyte4; LIBTEST_API sbyte4 STDCALL mono_return_sbyte4 (sbyte4 s4, int addend) { if (s4.f1 != 1) { fprintf(stderr, "mono_return_sbyte4 s4.f1: got %d but expected %d\n", s4.f1, 1); } if (s4.f2 != 2) { fprintf(stderr, "mono_return_sbyte4 s4.f2: got %d but expected %d\n", s4.f2, 2); } if (s4.f3 != 3) { fprintf(stderr, "mono_return_sbyte4 s4.f3: got %d but expected %d\n", s4.f3, 3); } if (s4.f4 != 4) { fprintf(stderr, "mono_return_sbyte4 s4.f4: got %d but expected %d\n", s4.f4, 4); } s4.f1+=addend; s4.f2+=addend; s4.f3+=addend; s4.f4+=addend; return s4; } typedef struct { char f1,f2,f3,f4,f5; } sbyte5; LIBTEST_API sbyte5 STDCALL mono_return_sbyte5 (sbyte5 s5, int addend) { if (s5.f1 != 1) { fprintf(stderr, "mono_return_sbyte5 s5.f1: got %d but expected %d\n", s5.f1, 1); } if (s5.f2 != 2) { fprintf(stderr, "mono_return_sbyte5 s5.f2: got %d but expected %d\n", s5.f2, 2); } if (s5.f3 != 3) { fprintf(stderr, "mono_return_sbyte5 s5.f3: got %d but expected %d\n", s5.f3, 3); } if (s5.f4 != 4) { fprintf(stderr, "mono_return_sbyte5 s5.f4: got %d but expected %d\n", s5.f4, 4); } if (s5.f5 != 5) { fprintf(stderr, "mono_return_sbyte5 s5.f5: got %d but expected %d\n", s5.f5, 5); } s5.f1+=addend; s5.f2+=addend; s5.f3+=addend; 
s5.f4+=addend; s5.f5+=addend; return s5; } typedef struct { char f1,f2,f3,f4,f5,f6; } sbyte6; LIBTEST_API sbyte6 STDCALL mono_return_sbyte6 (sbyte6 s6, int addend) { if (s6.f1 != 1) { fprintf(stderr, "mono_return_sbyte6 s6.f1: got %d but expected %d\n", s6.f1, 1); } if (s6.f2 != 2) { fprintf(stderr, "mono_return_sbyte6 s6.f2: got %d but expected %d\n", s6.f2, 2); } if (s6.f3 != 3) { fprintf(stderr, "mono_return_sbyte6 s6.f3: got %d but expected %d\n", s6.f3, 3); } if (s6.f4 != 4) { fprintf(stderr, "mono_return_sbyte6 s6.f4: got %d but expected %d\n", s6.f4, 4); } if (s6.f5 != 5) { fprintf(stderr, "mono_return_sbyte6 s6.f5: got %d but expected %d\n", s6.f5, 5); } if (s6.f6 != 6) { fprintf(stderr, "mono_return_sbyte6 s6.f6: got %d but expected %d\n", s6.f6, 6); } s6.f1+=addend; s6.f2+=addend; s6.f3+=addend; s6.f4+=addend; s6.f5+=addend; s6.f6+=addend; return s6; } typedef struct { char f1,f2,f3,f4,f5,f6,f7; } sbyte7; LIBTEST_API sbyte7 STDCALL mono_return_sbyte7 (sbyte7 s7, int addend) { if (s7.f1 != 1) { fprintf(stderr, "mono_return_sbyte7 s7.f1: got %d but expected %d\n", s7.f1, 1); } if (s7.f2 != 2) { fprintf(stderr, "mono_return_sbyte7 s7.f2: got %d but expected %d\n", s7.f2, 2); } if (s7.f3 != 3) { fprintf(stderr, "mono_return_sbyte7 s7.f3: got %d but expected %d\n", s7.f3, 3); } if (s7.f4 != 4) { fprintf(stderr, "mono_return_sbyte7 s7.f4: got %d but expected %d\n", s7.f4, 4); } if (s7.f5 != 5) { fprintf(stderr, "mono_return_sbyte7 s7.f5: got %d but expected %d\n", s7.f5, 5); } if (s7.f6 != 6) { fprintf(stderr, "mono_return_sbyte7 s7.f6: got %d but expected %d\n", s7.f6, 6); } if (s7.f7 != 7) { fprintf(stderr, "mono_return_sbyte7 s7.f7: got %d but expected %d\n", s7.f7, 7); } s7.f1+=addend; s7.f2+=addend; s7.f3+=addend; s7.f4+=addend; s7.f5+=addend; s7.f6+=addend; s7.f7+=addend; return s7; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8; } sbyte8; LIBTEST_API sbyte8 STDCALL mono_return_sbyte8 (sbyte8 s8, int addend) { if (s8.f1 != 1) { fprintf(stderr, "mono_return_sbyte8 s8.f1: got %d but expected %d\n", s8.f1, 1); } if (s8.f2 != 2) { fprintf(stderr, "mono_return_sbyte8 s8.f2: got %d but expected %d\n", s8.f2, 2); } if (s8.f3 != 3) { fprintf(stderr, "mono_return_sbyte8 s8.f3: got %d but expected %d\n", s8.f3, 3); } if (s8.f4 != 4) { fprintf(stderr, "mono_return_sbyte8 s8.f4: got %d but expected %d\n", s8.f4, 4); } if (s8.f5 != 5) { fprintf(stderr, "mono_return_sbyte8 s8.f5: got %d but expected %d\n", s8.f5, 5); } if (s8.f6 != 6) { fprintf(stderr, "mono_return_sbyte8 s8.f6: got %d but expected %d\n", s8.f6, 6); } if (s8.f7 != 7) { fprintf(stderr, "mono_return_sbyte8 s8.f7: got %d but expected %d\n", s8.f7, 7); } if (s8.f8 != 8) { fprintf(stderr, "mono_return_sbyte8 s8.f8: got %d but expected %d\n", s8.f8, 8); } s8.f1+=addend; s8.f2+=addend; s8.f3+=addend; s8.f4+=addend; s8.f5+=addend; s8.f6+=addend; s8.f7+=addend; s8.f8+=addend; return s8; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9; } sbyte9; LIBTEST_API sbyte9 STDCALL mono_return_sbyte9 (sbyte9 s9, int addend) { if (s9.f1 != 1) { fprintf(stderr, "mono_return_sbyte9 s9.f1: got %d but expected %d\n", s9.f1, 1); } if (s9.f2 != 2) { fprintf(stderr, "mono_return_sbyte9 s9.f2: got %d but expected %d\n", s9.f2, 2); } if (s9.f3 != 3) { fprintf(stderr, "mono_return_sbyte9 s9.f3: got %d but expected %d\n", s9.f3, 3); } if (s9.f4 != 4) { fprintf(stderr, "mono_return_sbyte9 s9.f4: got %d but expected %d\n", s9.f4, 4); } if (s9.f5 != 5) { fprintf(stderr, "mono_return_sbyte9 s9.f5: got %d but expected %d\n", s9.f5, 5); } if (s9.f6 != 6) { 
fprintf(stderr, "mono_return_sbyte9 s9.f6: got %d but expected %d\n", s9.f6, 6); } if (s9.f7 != 7) { fprintf(stderr, "mono_return_sbyte9 s9.f7: got %d but expected %d\n", s9.f7, 7); } if (s9.f8 != 8) { fprintf(stderr, "mono_return_sbyte9 s9.f8: got %d but expected %d\n", s9.f8, 8); } if (s9.f9 != 9) { fprintf(stderr, "mono_return_sbyte9 s9.f9: got %d but expected %d\n", s9.f9, 9); } s9.f1+=addend; s9.f2+=addend; s9.f3+=addend; s9.f4+=addend; s9.f5+=addend; s9.f6+=addend; s9.f7+=addend; s9.f8+=addend; s9.f9+=addend; return s9; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10; } sbyte10; LIBTEST_API sbyte10 STDCALL mono_return_sbyte10 (sbyte10 s10, int addend) { if (s10.f1 != 1) { fprintf(stderr, "mono_return_sbyte10 s10.f1: got %d but expected %d\n", s10.f1, 1); } if (s10.f2 != 2) { fprintf(stderr, "mono_return_sbyte10 s10.f2: got %d but expected %d\n", s10.f2, 2); } if (s10.f3 != 3) { fprintf(stderr, "mono_return_sbyte10 s10.f3: got %d but expected %d\n", s10.f3, 3); } if (s10.f4 != 4) { fprintf(stderr, "mono_return_sbyte10 s10.f4: got %d but expected %d\n", s10.f4, 4); } if (s10.f5 != 5) { fprintf(stderr, "mono_return_sbyte10 s10.f5: got %d but expected %d\n", s10.f5, 5); } if (s10.f6 != 6) { fprintf(stderr, "mono_return_sbyte10 s10.f6: got %d but expected %d\n", s10.f6, 6); } if (s10.f7 != 7) { fprintf(stderr, "mono_return_sbyte10 s10.f7: got %d but expected %d\n", s10.f7, 7); } if (s10.f8 != 8) { fprintf(stderr, "mono_return_sbyte10 s10.f8: got %d but expected %d\n", s10.f8, 8); } if (s10.f9 != 9) { fprintf(stderr, "mono_return_sbyte10 s10.f9: got %d but expected %d\n", s10.f9, 9); } if (s10.f10 != 10) { fprintf(stderr, "mono_return_sbyte10 s10.f10: got %d but expected %d\n", s10.f10, 10); } s10.f1+=addend; s10.f2+=addend; s10.f3+=addend; s10.f4+=addend; s10.f5+=addend; s10.f6+=addend; s10.f7+=addend; s10.f8+=addend; s10.f9+=addend; s10.f10+=addend; return s10; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11; } sbyte11; LIBTEST_API sbyte11 STDCALL mono_return_sbyte11 (sbyte11 s11, int addend) { if (s11.f1 != 1) { fprintf(stderr, "mono_return_sbyte11 s11.f1: got %d but expected %d\n", s11.f1, 1); } if (s11.f2 != 2) { fprintf(stderr, "mono_return_sbyte11 s11.f2: got %d but expected %d\n", s11.f2, 2); } if (s11.f3 != 3) { fprintf(stderr, "mono_return_sbyte11 s11.f3: got %d but expected %d\n", s11.f3, 3); } if (s11.f4 != 4) { fprintf(stderr, "mono_return_sbyte11 s11.f4: got %d but expected %d\n", s11.f4, 4); } if (s11.f5 != 5) { fprintf(stderr, "mono_return_sbyte11 s11.f5: got %d but expected %d\n", s11.f5, 5); } if (s11.f6 != 6) { fprintf(stderr, "mono_return_sbyte11 s11.f6: got %d but expected %d\n", s11.f6, 6); } if (s11.f7 != 7) { fprintf(stderr, "mono_return_sbyte11 s11.f7: got %d but expected %d\n", s11.f7, 7); } if (s11.f8 != 8) { fprintf(stderr, "mono_return_sbyte11 s11.f8: got %d but expected %d\n", s11.f8, 8); } if (s11.f9 != 9) { fprintf(stderr, "mono_return_sbyte11 s11.f9: got %d but expected %d\n", s11.f9, 9); } if (s11.f10 != 10) { fprintf(stderr, "mono_return_sbyte11 s11.f10: got %d but expected %d\n", s11.f10, 10); } if (s11.f11 != 11) { fprintf(stderr, "mono_return_sbyte11 s11.f11: got %d but expected %d\n", s11.f11, 11); } s11.f1+=addend; s11.f2+=addend; s11.f3+=addend; s11.f4+=addend; s11.f5+=addend; s11.f6+=addend; s11.f7+=addend; s11.f8+=addend; s11.f9+=addend; s11.f10+=addend; s11.f11+=addend; return s11; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12; } sbyte12; LIBTEST_API sbyte12 STDCALL mono_return_sbyte12 (sbyte12 s12, int addend) 
{ if (s12.f1 != 1) { fprintf(stderr, "mono_return_sbyte12 s12.f1: got %d but expected %d\n", s12.f1, 1); } if (s12.f2 != 2) { fprintf(stderr, "mono_return_sbyte12 s12.f2: got %d but expected %d\n", s12.f2, 2); } if (s12.f3 != 3) { fprintf(stderr, "mono_return_sbyte12 s12.f3: got %d but expected %d\n", s12.f3, 3); } if (s12.f4 != 4) { fprintf(stderr, "mono_return_sbyte12 s12.f4: got %d but expected %d\n", s12.f4, 4); } if (s12.f5 != 5) { fprintf(stderr, "mono_return_sbyte12 s12.f5: got %d but expected %d\n", s12.f5, 5); } if (s12.f6 != 6) { fprintf(stderr, "mono_return_sbyte12 s12.f6: got %d but expected %d\n", s12.f6, 6); } if (s12.f7 != 7) { fprintf(stderr, "mono_return_sbyte12 s12.f7: got %d but expected %d\n", s12.f7, 7); } if (s12.f8 != 8) { fprintf(stderr, "mono_return_sbyte12 s12.f8: got %d but expected %d\n", s12.f8, 8); } if (s12.f9 != 9) { fprintf(stderr, "mono_return_sbyte12 s12.f9: got %d but expected %d\n", s12.f9, 9); } if (s12.f10 != 10) { fprintf(stderr, "mono_return_sbyte12 s12.f10: got %d but expected %d\n", s12.f10, 10); } if (s12.f11 != 11) { fprintf(stderr, "mono_return_sbyte12 s12.f11: got %d but expected %d\n", s12.f11, 11); } if (s12.f12 != 12) { fprintf(stderr, "mono_return_sbyte12 s12.f12: got %d but expected %d\n", s12.f12, 12); } s12.f1+=addend; s12.f2+=addend; s12.f3+=addend; s12.f4+=addend; s12.f5+=addend; s12.f6+=addend; s12.f7+=addend; s12.f8+=addend; s12.f9+=addend; s12.f10+=addend; s12.f11+=addend; s12.f12+=addend; return s12; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13; } sbyte13; LIBTEST_API sbyte13 STDCALL mono_return_sbyte13 (sbyte13 s13, int addend) { if (s13.f1 != 1) { fprintf(stderr, "mono_return_sbyte13 s13.f1: got %d but expected %d\n", s13.f1, 1); } if (s13.f2 != 2) { fprintf(stderr, "mono_return_sbyte13 s13.f2: got %d but expected %d\n", s13.f2, 2); } if (s13.f3 != 3) { fprintf(stderr, "mono_return_sbyte13 s13.f3: got %d but expected %d\n", s13.f3, 3); } if (s13.f4 != 4) { fprintf(stderr, "mono_return_sbyte13 s13.f4: got %d but expected %d\n", s13.f4, 4); } if (s13.f5 != 5) { fprintf(stderr, "mono_return_sbyte13 s13.f5: got %d but expected %d\n", s13.f5, 5); } if (s13.f6 != 6) { fprintf(stderr, "mono_return_sbyte13 s13.f6: got %d but expected %d\n", s13.f6, 6); } if (s13.f7 != 7) { fprintf(stderr, "mono_return_sbyte13 s13.f7: got %d but expected %d\n", s13.f7, 7); } if (s13.f8 != 8) { fprintf(stderr, "mono_return_sbyte13 s13.f8: got %d but expected %d\n", s13.f8, 8); } if (s13.f9 != 9) { fprintf(stderr, "mono_return_sbyte13 s13.f9: got %d but expected %d\n", s13.f9, 9); } if (s13.f10 != 10) { fprintf(stderr, "mono_return_sbyte13 s13.f10: got %d but expected %d\n", s13.f10, 10); } if (s13.f11 != 11) { fprintf(stderr, "mono_return_sbyte13 s13.f11: got %d but expected %d\n", s13.f11, 11); } if (s13.f12 != 12) { fprintf(stderr, "mono_return_sbyte13 s13.f12: got %d but expected %d\n", s13.f12, 12); } if (s13.f13 != 13) { fprintf(stderr, "mono_return_sbyte13 s13.f13: got %d but expected %d\n", s13.f13, 13); } s13.f1+=addend; s13.f2+=addend; s13.f3+=addend; s13.f4+=addend; s13.f5+=addend; s13.f6+=addend; s13.f7+=addend; s13.f8+=addend; s13.f9+=addend; s13.f10+=addend; s13.f11+=addend; s13.f12+=addend; s13.f13+=addend; return s13; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14; } sbyte14; LIBTEST_API sbyte14 STDCALL mono_return_sbyte14 (sbyte14 s14, int addend) { if (s14.f1 != 1) { fprintf(stderr, "mono_return_sbyte14 s14.f1: got %d but expected %d\n", s14.f1, 1); } if (s14.f2 != 2) { fprintf(stderr, 
"mono_return_sbyte14 s14.f2: got %d but expected %d\n", s14.f2, 2); } if (s14.f3 != 3) { fprintf(stderr, "mono_return_sbyte14 s14.f3: got %d but expected %d\n", s14.f3, 3); } if (s14.f4 != 4) { fprintf(stderr, "mono_return_sbyte14 s14.f4: got %d but expected %d\n", s14.f4, 4); } if (s14.f5 != 5) { fprintf(stderr, "mono_return_sbyte14 s14.f5: got %d but expected %d\n", s14.f5, 5); } if (s14.f6 != 6) { fprintf(stderr, "mono_return_sbyte14 s14.f6: got %d but expected %d\n", s14.f6, 6); } if (s14.f7 != 7) { fprintf(stderr, "mono_return_sbyte14 s14.f7: got %d but expected %d\n", s14.f7, 7); } if (s14.f8 != 8) { fprintf(stderr, "mono_return_sbyte14 s14.f8: got %d but expected %d\n", s14.f8, 8); } if (s14.f9 != 9) { fprintf(stderr, "mono_return_sbyte14 s14.f9: got %d but expected %d\n", s14.f9, 9); } if (s14.f10 != 10) { fprintf(stderr, "mono_return_sbyte14 s14.f10: got %d but expected %d\n", s14.f10, 10); } if (s14.f11 != 11) { fprintf(stderr, "mono_return_sbyte14 s14.f11: got %d but expected %d\n", s14.f11, 11); } if (s14.f12 != 12) { fprintf(stderr, "mono_return_sbyte14 s14.f12: got %d but expected %d\n", s14.f12, 12); } if (s14.f13 != 13) { fprintf(stderr, "mono_return_sbyte14 s14.f13: got %d but expected %d\n", s14.f13, 13); } if (s14.f14 != 14) { fprintf(stderr, "mono_return_sbyte14 s14.f14: got %d but expected %d\n", s14.f14, 14); } s14.f1+=addend; s14.f2+=addend; s14.f3+=addend; s14.f4+=addend; s14.f5+=addend; s14.f6+=addend; s14.f7+=addend; s14.f8+=addend; s14.f9+=addend; s14.f10+=addend; s14.f11+=addend; s14.f12+=addend; s14.f13+=addend; s14.f14+=addend; return s14; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15; } sbyte15; LIBTEST_API sbyte15 STDCALL mono_return_sbyte15 (sbyte15 s15, int addend) { if (s15.f1 != 1) { fprintf(stderr, "mono_return_sbyte15 s15.f1: got %d but expected %d\n", s15.f1, 1); } if (s15.f2 != 2) { fprintf(stderr, "mono_return_sbyte15 s15.f2: got %d but expected %d\n", s15.f2, 2); } if (s15.f3 != 3) { fprintf(stderr, "mono_return_sbyte15 s15.f3: got %d but expected %d\n", s15.f3, 3); } if (s15.f4 != 4) { fprintf(stderr, "mono_return_sbyte15 s15.f4: got %d but expected %d\n", s15.f4, 4); } if (s15.f5 != 5) { fprintf(stderr, "mono_return_sbyte15 s15.f5: got %d but expected %d\n", s15.f5, 5); } if (s15.f6 != 6) { fprintf(stderr, "mono_return_sbyte15 s15.f6: got %d but expected %d\n", s15.f6, 6); } if (s15.f7 != 7) { fprintf(stderr, "mono_return_sbyte15 s15.f7: got %d but expected %d\n", s15.f7, 7); } if (s15.f8 != 8) { fprintf(stderr, "mono_return_sbyte15 s15.f8: got %d but expected %d\n", s15.f8, 8); } if (s15.f9 != 9) { fprintf(stderr, "mono_return_sbyte15 s15.f9: got %d but expected %d\n", s15.f9, 9); } if (s15.f10 != 10) { fprintf(stderr, "mono_return_sbyte15 s15.f10: got %d but expected %d\n", s15.f10, 10); } if (s15.f11 != 11) { fprintf(stderr, "mono_return_sbyte15 s15.f11: got %d but expected %d\n", s15.f11, 11); } if (s15.f12 != 12) { fprintf(stderr, "mono_return_sbyte15 s15.f12: got %d but expected %d\n", s15.f12, 12); } if (s15.f13 != 13) { fprintf(stderr, "mono_return_sbyte15 s15.f13: got %d but expected %d\n", s15.f13, 13); } if (s15.f14 != 14) { fprintf(stderr, "mono_return_sbyte15 s15.f14: got %d but expected %d\n", s15.f14, 14); } if (s15.f15 != 15) { fprintf(stderr, "mono_return_sbyte15 s15.f15: got %d but expected %d\n", s15.f15, 15); } s15.f1+=addend; s15.f2+=addend; s15.f3+=addend; s15.f4+=addend; s15.f5+=addend; s15.f6+=addend; s15.f7+=addend; s15.f8+=addend; s15.f9+=addend; s15.f10+=addend; s15.f11+=addend; 
s15.f12+=addend; s15.f13+=addend; s15.f14+=addend; s15.f15+=addend; return s15; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16; } sbyte16; LIBTEST_API sbyte16 STDCALL mono_return_sbyte16 (sbyte16 s16, int addend) { if (s16.f1 != 1) { fprintf(stderr, "mono_return_sbyte16 s16.f1: got %d but expected %d\n", s16.f1, 1); } if (s16.f2 != 2) { fprintf(stderr, "mono_return_sbyte16 s16.f2: got %d but expected %d\n", s16.f2, 2); } if (s16.f3 != 3) { fprintf(stderr, "mono_return_sbyte16 s16.f3: got %d but expected %d\n", s16.f3, 3); } if (s16.f4 != 4) { fprintf(stderr, "mono_return_sbyte16 s16.f4: got %d but expected %d\n", s16.f4, 4); } if (s16.f5 != 5) { fprintf(stderr, "mono_return_sbyte16 s16.f5: got %d but expected %d\n", s16.f5, 5); } if (s16.f6 != 6) { fprintf(stderr, "mono_return_sbyte16 s16.f6: got %d but expected %d\n", s16.f6, 6); } if (s16.f7 != 7) { fprintf(stderr, "mono_return_sbyte16 s16.f7: got %d but expected %d\n", s16.f7, 7); } if (s16.f8 != 8) { fprintf(stderr, "mono_return_sbyte16 s16.f8: got %d but expected %d\n", s16.f8, 8); } if (s16.f9 != 9) { fprintf(stderr, "mono_return_sbyte16 s16.f9: got %d but expected %d\n", s16.f9, 9); } if (s16.f10 != 10) { fprintf(stderr, "mono_return_sbyte16 s16.f10: got %d but expected %d\n", s16.f10, 10); } if (s16.f11 != 11) { fprintf(stderr, "mono_return_sbyte16 s16.f11: got %d but expected %d\n", s16.f11, 11); } if (s16.f12 != 12) { fprintf(stderr, "mono_return_sbyte16 s16.f12: got %d but expected %d\n", s16.f12, 12); } if (s16.f13 != 13) { fprintf(stderr, "mono_return_sbyte16 s16.f13: got %d but expected %d\n", s16.f13, 13); } if (s16.f14 != 14) { fprintf(stderr, "mono_return_sbyte16 s16.f14: got %d but expected %d\n", s16.f14, 14); } if (s16.f15 != 15) { fprintf(stderr, "mono_return_sbyte16 s16.f15: got %d but expected %d\n", s16.f15, 15); } if (s16.f16 != 16) { fprintf(stderr, "mono_return_sbyte16 s16.f16: got %d but expected %d\n", s16.f16, 16); } s16.f1+=addend; s16.f2+=addend; s16.f3+=addend; s16.f4+=addend; s16.f5+=addend; s16.f6+=addend; s16.f7+=addend; s16.f8+=addend; s16.f9+=addend; s16.f10+=addend; s16.f11+=addend; s16.f12+=addend; s16.f13+=addend; s16.f14+=addend; s16.f15+=addend; s16.f16+=addend; return s16; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17; } sbyte17; LIBTEST_API sbyte17 STDCALL mono_return_sbyte17 (sbyte17 s17, int addend) { if (s17.f1 != 1) { fprintf(stderr, "mono_return_sbyte17 s17.f1: got %d but expected %d\n", s17.f1, 1); } if (s17.f2 != 2) { fprintf(stderr, "mono_return_sbyte17 s17.f2: got %d but expected %d\n", s17.f2, 2); } if (s17.f3 != 3) { fprintf(stderr, "mono_return_sbyte17 s17.f3: got %d but expected %d\n", s17.f3, 3); } if (s17.f4 != 4) { fprintf(stderr, "mono_return_sbyte17 s17.f4: got %d but expected %d\n", s17.f4, 4); } if (s17.f5 != 5) { fprintf(stderr, "mono_return_sbyte17 s17.f5: got %d but expected %d\n", s17.f5, 5); } if (s17.f6 != 6) { fprintf(stderr, "mono_return_sbyte17 s17.f6: got %d but expected %d\n", s17.f6, 6); } if (s17.f7 != 7) { fprintf(stderr, "mono_return_sbyte17 s17.f7: got %d but expected %d\n", s17.f7, 7); } if (s17.f8 != 8) { fprintf(stderr, "mono_return_sbyte17 s17.f8: got %d but expected %d\n", s17.f8, 8); } if (s17.f9 != 9) { fprintf(stderr, "mono_return_sbyte17 s17.f9: got %d but expected %d\n", s17.f9, 9); } if (s17.f10 != 10) { fprintf(stderr, "mono_return_sbyte17 s17.f10: got %d but expected %d\n", s17.f10, 10); } if (s17.f11 != 11) { fprintf(stderr, "mono_return_sbyte17 s17.f11: got %d but expected 
%d\n", s17.f11, 11); } if (s17.f12 != 12) { fprintf(stderr, "mono_return_sbyte17 s17.f12: got %d but expected %d\n", s17.f12, 12); } if (s17.f13 != 13) { fprintf(stderr, "mono_return_sbyte17 s17.f13: got %d but expected %d\n", s17.f13, 13); } if (s17.f14 != 14) { fprintf(stderr, "mono_return_sbyte17 s17.f14: got %d but expected %d\n", s17.f14, 14); } if (s17.f15 != 15) { fprintf(stderr, "mono_return_sbyte17 s17.f15: got %d but expected %d\n", s17.f15, 15); } if (s17.f16 != 16) { fprintf(stderr, "mono_return_sbyte17 s17.f16: got %d but expected %d\n", s17.f16, 16); } if (s17.f17 != 17) { fprintf(stderr, "mono_return_sbyte17 s17.f17: got %d but expected %d\n", s17.f17, 17); } s17.f1+=addend; s17.f2+=addend; s17.f3+=addend; s17.f4+=addend; s17.f5+=addend; s17.f6+=addend; s17.f7+=addend; s17.f8+=addend; s17.f9+=addend; s17.f10+=addend; s17.f11+=addend; s17.f12+=addend; s17.f13+=addend; s17.f14+=addend; s17.f15+=addend; s17.f16+=addend; s17.f17+=addend; return s17; } typedef struct { struct { char f1; } nested1; char f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15; struct { char f16; } nested2; } sbyte16_nested; LIBTEST_API sbyte16_nested STDCALL mono_return_sbyte16_nested (sbyte16_nested sn16, int addend) { if (sn16.nested1.f1 != 1) { fprintf(stderr, "mono_return_sbyte16_nested sn16.nested1.f1: got %d but expected %d\n", sn16.nested1.f1, 1); } if (sn16.f2 != 2) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f2: got %d but expected %d\n", sn16.f2, 2); } if (sn16.f3 != 3) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f3: got %d but expected %d\n", sn16.f3, 3); } if (sn16.f4 != 4) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f4: got %d but expected %d\n", sn16.f4, 4); } if (sn16.f5 != 5) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f5: got %d but expected %d\n", sn16.f5, 5); } if (sn16.f6 != 6) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f6: got %d but expected %d\n", sn16.f6, 6); } if (sn16.f7 != 7) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f7: got %d but expected %d\n", sn16.f7, 7); } if (sn16.f8 != 8) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f8: got %d but expected %d\n", sn16.f8, 8); } if (sn16.f9 != 9) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f9: got %d but expected %d\n", sn16.f9, 9); } if (sn16.f10 != 10) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f10: got %d but expected %d\n", sn16.f10, 10); } if (sn16.f11 != 11) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f11: got %d but expected %d\n", sn16.f11, 11); } if (sn16.f12 != 12) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f12: got %d but expected %d\n", sn16.f12, 12); } if (sn16.f13 != 13) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f13: got %d but expected %d\n", sn16.f13, 13); } if (sn16.f14 != 14) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f14: got %d but expected %d\n", sn16.f14, 14); } if (sn16.f15 != 15) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f15: got %d but expected %d\n", sn16.f15, 15); } if (sn16.nested2.f16 != 16) { fprintf(stderr, "mono_return_sbyte16_nested sn16.nested2.f16: got %d but expected %d\n", sn16.nested2.f16, 16); } sn16.nested1.f1+=addend; sn16.f2+=addend; sn16.f3+=addend; sn16.f4+=addend; sn16.f5+=addend; sn16.f6+=addend; sn16.f7+=addend; sn16.f8+=addend; sn16.f9+=addend; sn16.f10+=addend; sn16.f11+=addend; sn16.f12+=addend; sn16.f13+=addend; sn16.f14+=addend; sn16.f15+=addend; sn16.nested2.f16+=addend; return sn16; } typedef struct { short f1; } short1; LIBTEST_API short1 STDCALL mono_return_short1 (short1 s1, int 
typedef struct { short f1; } short1;

LIBTEST_API short1 STDCALL
mono_return_short1 (short1 s1, int addend)
{
    if (s1.f1 != 1) { fprintf(stderr, "mono_return_short1 s1.f1: got %d but expected %d\n", s1.f1, 1); }
    s1.f1+=addend;
    return s1;
}

typedef struct { short f1,f2; } short2;

LIBTEST_API short2 STDCALL
mono_return_short2 (short2 s2, int addend)
{
    if (s2.f1 != 1) { fprintf(stderr, "mono_return_short2 s2.f1: got %d but expected %d\n", s2.f1, 1); }
    if (s2.f2 != 2) { fprintf(stderr, "mono_return_short2 s2.f2: got %d but expected %d\n", s2.f2, 2); }
    s2.f1+=addend; s2.f2+=addend;
    return s2;
}

typedef struct { short f1,f2,f3; } short3;

LIBTEST_API short3 STDCALL
mono_return_short3 (short3 s3, int addend)
{
    if (s3.f1 != 1) { fprintf(stderr, "mono_return_short3 s3.f1: got %d but expected %d\n", s3.f1, 1); }
    if (s3.f2 != 2) { fprintf(stderr, "mono_return_short3 s3.f2: got %d but expected %d\n", s3.f2, 2); }
    if (s3.f3 != 3) { fprintf(stderr, "mono_return_short3 s3.f3: got %d but expected %d\n", s3.f3, 3); }
    s3.f1+=addend; s3.f2+=addend; s3.f3+=addend;
    return s3;
}

typedef struct { short f1,f2,f3,f4; } short4;

LIBTEST_API short4 STDCALL
mono_return_short4 (short4 s4, int addend)
{
    if (s4.f1 != 1) { fprintf(stderr, "mono_return_short4 s4.f1: got %d but expected %d\n", s4.f1, 1); }
    if (s4.f2 != 2) { fprintf(stderr, "mono_return_short4 s4.f2: got %d but expected %d\n", s4.f2, 2); }
    if (s4.f3 != 3) { fprintf(stderr, "mono_return_short4 s4.f3: got %d but expected %d\n", s4.f3, 3); }
    if (s4.f4 != 4) { fprintf(stderr, "mono_return_short4 s4.f4: got %d but expected %d\n", s4.f4, 4); }
    s4.f1+=addend; s4.f2+=addend; s4.f3+=addend; s4.f4+=addend;
    return s4;
}

typedef struct { short f1,f2,f3,f4,f5; } short5;

LIBTEST_API short5 STDCALL
mono_return_short5 (short5 s5, int addend)
{
    if (s5.f1 != 1) { fprintf(stderr, "mono_return_short5 s5.f1: got %d but expected %d\n", s5.f1, 1); }
    if (s5.f2 != 2) { fprintf(stderr, "mono_return_short5 s5.f2: got %d but expected %d\n", s5.f2, 2); }
    if (s5.f3 != 3) { fprintf(stderr, "mono_return_short5 s5.f3: got %d but expected %d\n", s5.f3, 3); }
    if (s5.f4 != 4) { fprintf(stderr, "mono_return_short5 s5.f4: got %d but expected %d\n", s5.f4, 4); }
    if (s5.f5 != 5) { fprintf(stderr, "mono_return_short5 s5.f5: got %d but expected %d\n", s5.f5, 5); }
    s5.f1+=addend; s5.f2+=addend; s5.f3+=addend; s5.f4+=addend; s5.f5+=addend;
    return s5;
}

typedef struct { short f1,f2,f3,f4,f5,f6; } short6;

LIBTEST_API short6 STDCALL
mono_return_short6 (short6 s6, int addend)
{
    if (s6.f1 != 1) { fprintf(stderr, "mono_return_short6 s6.f1: got %d but expected %d\n", s6.f1, 1); }
    if (s6.f2 != 2) { fprintf(stderr, "mono_return_short6 s6.f2: got %d but expected %d\n", s6.f2, 2); }
    if (s6.f3 != 3) { fprintf(stderr, "mono_return_short6 s6.f3: got %d but expected %d\n", s6.f3, 3); }
    if (s6.f4 != 4) { fprintf(stderr, "mono_return_short6 s6.f4: got %d but expected %d\n", s6.f4, 4); }
    if (s6.f5 != 5) { fprintf(stderr, "mono_return_short6 s6.f5: got %d but expected %d\n", s6.f5, 5); }
    if (s6.f6 != 6) { fprintf(stderr, "mono_return_short6 s6.f6: got %d but expected %d\n", s6.f6, 6); }
    s6.f1+=addend; s6.f2+=addend; s6.f3+=addend; s6.f4+=addend; s6.f5+=addend; s6.f6+=addend;
    return s6;
}

typedef struct { short f1,f2,f3,f4,f5,f6,f7; } short7;

LIBTEST_API short7 STDCALL
mono_return_short7 (short7 s7, int addend)
{
    if (s7.f1 != 1) { fprintf(stderr, "mono_return_short7 s7.f1: got %d but expected %d\n", s7.f1, 1); }
    if (s7.f2 != 2) { fprintf(stderr, "mono_return_short7 s7.f2: got %d but expected %d\n", s7.f2, 2); }
    if (s7.f3 != 3) { fprintf(stderr, "mono_return_short7 s7.f3: got %d but expected %d\n", s7.f3, 3); }
    if (s7.f4 != 4) { fprintf(stderr, "mono_return_short7 s7.f4: got %d but expected %d\n", s7.f4, 4); }
    if (s7.f5 != 5) { fprintf(stderr, "mono_return_short7 s7.f5: got %d but expected %d\n", s7.f5, 5); }
    if (s7.f6 != 6) { fprintf(stderr, "mono_return_short7 s7.f6: got %d but expected %d\n", s7.f6, 6); }
    if (s7.f7 != 7) { fprintf(stderr, "mono_return_short7 s7.f7: got %d but expected %d\n", s7.f7, 7); }
    s7.f1+=addend; s7.f2+=addend; s7.f3+=addend; s7.f4+=addend; s7.f5+=addend; s7.f6+=addend; s7.f7+=addend;
    return s7;
}

typedef struct { short f1,f2,f3,f4,f5,f6,f7,f8; } short8;

LIBTEST_API short8 STDCALL
mono_return_short8 (short8 s8, int addend)
{
    if (s8.f1 != 1) { fprintf(stderr, "mono_return_short8 s8.f1: got %d but expected %d\n", s8.f1, 1); }
    if (s8.f2 != 2) { fprintf(stderr, "mono_return_short8 s8.f2: got %d but expected %d\n", s8.f2, 2); }
    if (s8.f3 != 3) { fprintf(stderr, "mono_return_short8 s8.f3: got %d but expected %d\n", s8.f3, 3); }
    if (s8.f4 != 4) { fprintf(stderr, "mono_return_short8 s8.f4: got %d but expected %d\n", s8.f4, 4); }
    if (s8.f5 != 5) { fprintf(stderr, "mono_return_short8 s8.f5: got %d but expected %d\n", s8.f5, 5); }
    if (s8.f6 != 6) { fprintf(stderr, "mono_return_short8 s8.f6: got %d but expected %d\n", s8.f6, 6); }
    if (s8.f7 != 7) { fprintf(stderr, "mono_return_short8 s8.f7: got %d but expected %d\n", s8.f7, 7); }
    if (s8.f8 != 8) { fprintf(stderr, "mono_return_short8 s8.f8: got %d but expected %d\n", s8.f8, 8); }
    s8.f1+=addend; s8.f2+=addend; s8.f3+=addend; s8.f4+=addend; s8.f5+=addend; s8.f6+=addend; s8.f7+=addend; s8.f8+=addend;
    return s8;
}

typedef struct { short f1,f2,f3,f4,f5,f6,f7,f8,f9; } short9;

LIBTEST_API short9 STDCALL
mono_return_short9 (short9 s9, int addend)
{
    if (s9.f1 != 1) { fprintf(stderr, "mono_return_short9 s9.f1: got %d but expected %d\n", s9.f1, 1); }
    if (s9.f2 != 2) { fprintf(stderr, "mono_return_short9 s9.f2: got %d but expected %d\n", s9.f2, 2); }
    if (s9.f3 != 3) { fprintf(stderr, "mono_return_short9 s9.f3: got %d but expected %d\n", s9.f3, 3); }
    if (s9.f4 != 4) { fprintf(stderr, "mono_return_short9 s9.f4: got %d but expected %d\n", s9.f4, 4); }
    if (s9.f5 != 5) { fprintf(stderr, "mono_return_short9 s9.f5: got %d but expected %d\n", s9.f5, 5); }
    if (s9.f6 != 6) { fprintf(stderr, "mono_return_short9 s9.f6: got %d but expected %d\n", s9.f6, 6); }
    if (s9.f7 != 7) { fprintf(stderr, "mono_return_short9 s9.f7: got %d but expected %d\n", s9.f7, 7); }
    if (s9.f8 != 8) { fprintf(stderr, "mono_return_short9 s9.f8: got %d but expected %d\n", s9.f8, 8); }
    if (s9.f9 != 9) { fprintf(stderr, "mono_return_short9 s9.f9: got %d but expected %d\n", s9.f9, 9); }
    s9.f1+=addend; s9.f2+=addend; s9.f3+=addend; s9.f4+=addend; s9.f5+=addend; s9.f6+=addend; s9.f7+=addend; s9.f8+=addend; s9.f9+=addend;
    return s9;
}

typedef struct { struct { short f1; } nested1; short f2,f3,f4,f5,f6,f7; struct { short f8; } nested2; } short8_nested;

LIBTEST_API short8_nested STDCALL
mono_return_short8_nested (short8_nested sn8, int addend)
{
    if (sn8.nested1.f1 != 1) { fprintf(stderr, "mono_return_short8_nested sn8.nested1.f1: got %d but expected %d\n", sn8.nested1.f1, 1); }
    if (sn8.f2 != 2) { fprintf(stderr, "mono_return_short8_nested sn8.f2: got %d but expected %d\n", sn8.f2, 2); }
    if (sn8.f3 != 3) { fprintf(stderr, "mono_return_short8_nested sn8.f3: got %d but expected %d\n", sn8.f3, 3); }
    if (sn8.f4 != 4) { fprintf(stderr, "mono_return_short8_nested sn8.f4: got %d but expected %d\n", sn8.f4, 4); }
    if (sn8.f5 != 5) { fprintf(stderr, "mono_return_short8_nested sn8.f5: got %d but expected %d\n", sn8.f5, 5); }
    if (sn8.f6 != 6) { fprintf(stderr, "mono_return_short8_nested sn8.f6: got %d but expected %d\n", sn8.f6, 6); }
    if (sn8.f7 != 7) { fprintf(stderr, "mono_return_short8_nested sn8.f7: got %d but expected %d\n", sn8.f7, 7); }
    if (sn8.nested2.f8 != 8) { fprintf(stderr, "mono_return_short8_nested sn8.nested2.f8: got %d but expected %d\n", sn8.nested2.f8, 8); }
    sn8.nested1.f1+=addend; sn8.f2+=addend; sn8.f3+=addend; sn8.f4+=addend; sn8.f5+=addend; sn8.f6+=addend; sn8.f7+=addend; sn8.nested2.f8+=addend;
    return sn8;
}

typedef struct { int f1; } int1;

LIBTEST_API int1 STDCALL
mono_return_int1 (int1 s1, int addend)
{
    if (s1.f1 != 1) { fprintf(stderr, "mono_return_int1 s1.f1: got %d but expected %d\n", s1.f1, 1); }
    s1.f1+=addend;
    return s1;
}

typedef struct { int f1,f2; } int2;

LIBTEST_API int2 STDCALL
mono_return_int2 (int2 s2, int addend)
{
    if (s2.f1 != 1) { fprintf(stderr, "mono_return_int2 s2.f1: got %d but expected %d\n", s2.f1, 1); }
    if (s2.f2 != 2) { fprintf(stderr, "mono_return_int2 s2.f2: got %d but expected %d\n", s2.f2, 2); }
    s2.f1+=addend; s2.f2+=addend;
    return s2;
}

typedef struct { int f1,f2,f3; } int3;

LIBTEST_API int3 STDCALL
mono_return_int3 (int3 s3, int addend)
{
    if (s3.f1 != 1) { fprintf(stderr, "mono_return_int3 s3.f1: got %d but expected %d\n", s3.f1, 1); }
    if (s3.f2 != 2) { fprintf(stderr, "mono_return_int3 s3.f2: got %d but expected %d\n", s3.f2, 2); }
    if (s3.f3 != 3) { fprintf(stderr, "mono_return_int3 s3.f3: got %d but expected %d\n", s3.f3, 3); }
    s3.f1+=addend; s3.f2+=addend; s3.f3+=addend;
    return s3;
}

typedef struct { int f1,f2,f3,f4; } int4;

LIBTEST_API int4 STDCALL
mono_return_int4 (int4 s4, int addend)
{
    if (s4.f1 != 1) { fprintf(stderr, "mono_return_int4 s4.f1: got %d but expected %d\n", s4.f1, 1); }
    if (s4.f2 != 2) { fprintf(stderr, "mono_return_int4 s4.f2: got %d but expected %d\n", s4.f2, 2); }
    if (s4.f3 != 3) { fprintf(stderr, "mono_return_int4 s4.f3: got %d but expected %d\n", s4.f3, 3); }
    if (s4.f4 != 4) { fprintf(stderr, "mono_return_int4 s4.f4: got %d but expected %d\n", s4.f4, 4); }
    s4.f1+=addend; s4.f2+=addend; s4.f3+=addend; s4.f4+=addend;
    return s4;
}

typedef struct { int f1,f2,f3,f4,f5; } int5;

LIBTEST_API int5 STDCALL
mono_return_int5 (int5 s5, int addend)
{
    if (s5.f1 != 1) { fprintf(stderr, "mono_return_int5 s5.f1: got %d but expected %d\n", s5.f1, 1); }
    if (s5.f2 != 2) { fprintf(stderr, "mono_return_int5 s5.f2: got %d but expected %d\n", s5.f2, 2); }
    if (s5.f3 != 3) { fprintf(stderr, "mono_return_int5 s5.f3: got %d but expected %d\n", s5.f3, 3); }
    if (s5.f4 != 4) { fprintf(stderr, "mono_return_int5 s5.f4: got %d but expected %d\n", s5.f4, 4); }
    if (s5.f5 != 5) { fprintf(stderr, "mono_return_int5 s5.f5: got %d but expected %d\n", s5.f5, 5); }
    s5.f1+=addend; s5.f2+=addend; s5.f3+=addend; s5.f4+=addend; s5.f5+=addend;
    return s5;
}

typedef struct { struct { int f1; } nested1; int f2,f3; struct { int f4; } nested2; } int4_nested;

LIBTEST_API int4_nested STDCALL
mono_return_int4_nested (int4_nested sn4, int addend)
{
    if (sn4.nested1.f1 != 1) { fprintf(stderr, "mono_return_int4_nested sn4.nested1.f1: got %d but expected %d\n", sn4.nested1.f1, 1); }
    if (sn4.f2 != 2) { fprintf(stderr, "mono_return_int4_nested sn4.f2: got %d but expected %d\n", sn4.f2, 2); }
    if (sn4.f3 != 3) { fprintf(stderr, "mono_return_int4_nested sn4.f3: got %d but expected %d\n", sn4.f3, 3); }
    if (sn4.nested2.f4 != 4) { fprintf(stderr, "mono_return_int4_nested sn4.nested2.f4: got %d but expected %d\n", sn4.nested2.f4, 4); }
    sn4.nested1.f1+=addend; sn4.f2+=addend; sn4.f3+=addend; sn4.nested2.f4+=addend;
    return sn4;
}
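/*
 * Note: the short/int variants above, like the char variants before them,
 * walk a range of struct sizes, presumably to straddle the points at which
 * different ABIs switch from register passing to a hidden return buffer.
 * A minimal hypothetical round-trip check, for illustration only:
 */
#if 0
static int
sanity_check_int2 (void)
{
    int2 s = { 1, 2 };
    int2 r = mono_return_int2 (s, 10);
    return r.f1 == 11 && r.f2 == 12;
}
#endif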
typedef struct { float f1; } float1;

LIBTEST_API float1 STDCALL
mono_return_float1 (float1 s1, int addend)
{
    if (s1.f1 != 1) { fprintf(stderr, "mono_return_float1 s1.f1: got %f but expected %d\n", s1.f1, 1); }
    s1.f1+=addend;
    return s1;
}

typedef struct { float f1,f2; } float2;

LIBTEST_API float2 STDCALL
mono_return_float2 (float2 s2, int addend)
{
    if (s2.f1 != 1) { fprintf(stderr, "mono_return_float2 s2.f1: got %f but expected %d\n", s2.f1, 1); }
    if (s2.f2 != 2) { fprintf(stderr, "mono_return_float2 s2.f2: got %f but expected %d\n", s2.f2, 2); }
    s2.f1+=addend; s2.f2+=addend;
    return s2;
}

typedef struct { float f1,f2,f3; } float3;

LIBTEST_API float3 STDCALL
mono_return_float3 (float3 s3, int addend)
{
    if (s3.f1 != 1) { fprintf(stderr, "mono_return_float3 s3.f1: got %f but expected %d\n", s3.f1, 1); }
    if (s3.f2 != 2) { fprintf(stderr, "mono_return_float3 s3.f2: got %f but expected %d\n", s3.f2, 2); }
    if (s3.f3 != 3) { fprintf(stderr, "mono_return_float3 s3.f3: got %f but expected %d\n", s3.f3, 3); }
    s3.f1+=addend; s3.f2+=addend; s3.f3+=addend;
    return s3;
}

typedef struct { float f1,f2,f3,f4; } float4;

LIBTEST_API float4 STDCALL
mono_return_float4 (float4 s4, int addend)
{
    if (s4.f1 != 1) { fprintf(stderr, "mono_return_float4 s4.f1: got %f but expected %d\n", s4.f1, 1); }
    if (s4.f2 != 2) { fprintf(stderr, "mono_return_float4 s4.f2: got %f but expected %d\n", s4.f2, 2); }
    if (s4.f3 != 3) { fprintf(stderr, "mono_return_float4 s4.f3: got %f but expected %d\n", s4.f3, 3); }
    if (s4.f4 != 4) { fprintf(stderr, "mono_return_float4 s4.f4: got %f but expected %d\n", s4.f4, 4); }
    s4.f1+=addend; s4.f2+=addend; s4.f3+=addend; s4.f4+=addend;
    return s4;
}

typedef struct { float f1,f2,f3,f4,f5; } float5;

LIBTEST_API float5 STDCALL
mono_return_float5 (float5 s5, int addend)
{
    if (s5.f1 != 1) { fprintf(stderr, "mono_return_float5 s5.f1: got %f but expected %d\n", s5.f1, 1); }
    if (s5.f2 != 2) { fprintf(stderr, "mono_return_float5 s5.f2: got %f but expected %d\n", s5.f2, 2); }
    if (s5.f3 != 3) { fprintf(stderr, "mono_return_float5 s5.f3: got %f but expected %d\n", s5.f3, 3); }
    if (s5.f4 != 4) { fprintf(stderr, "mono_return_float5 s5.f4: got %f but expected %d\n", s5.f4, 4); }
    if (s5.f5 != 5) { fprintf(stderr, "mono_return_float5 s5.f5: got %f but expected %d\n", s5.f5, 5); }
    s5.f1+=addend; s5.f2+=addend; s5.f3+=addend; s5.f4+=addend; s5.f5+=addend;
    return s5;
}

typedef struct { float f1,f2,f3,f4,f5,f6; } float6;

LIBTEST_API float6 STDCALL
mono_return_float6 (float6 s6, int addend)
{
    if (s6.f1 != 1) { fprintf(stderr, "mono_return_float6 s6.f1: got %f but expected %d\n", s6.f1, 1); }
    if (s6.f2 != 2) { fprintf(stderr, "mono_return_float6 s6.f2: got %f but expected %d\n", s6.f2, 2); }
    if (s6.f3 != 3) { fprintf(stderr, "mono_return_float6 s6.f3: got %f but expected %d\n", s6.f3, 3); }
    if (s6.f4 != 4) { fprintf(stderr, "mono_return_float6 s6.f4: got %f but expected %d\n", s6.f4, 4); }
    if (s6.f5 != 5) { fprintf(stderr, "mono_return_float6 s6.f5: got %f but expected %d\n", s6.f5, 5); }
    if (s6.f6 != 6) { fprintf(stderr, "mono_return_float6 s6.f6: got %f but expected %d\n", s6.f6, 6); }
    s6.f1+=addend; s6.f2+=addend; s6.f3+=addend; s6.f4+=addend; s6.f5+=addend; s6.f6+=addend;
    return s6;
}

typedef struct { float f1,f2,f3,f4,f5,f6,f7; } float7;

LIBTEST_API float7 STDCALL
mono_return_float7 (float7 s7, int addend)
{
    if (s7.f1 != 1) { fprintf(stderr, "mono_return_float7 s7.f1: got %f but expected %d\n", s7.f1, 1); }
    if (s7.f2 != 2) { fprintf(stderr, "mono_return_float7 s7.f2: got %f but expected %d\n", s7.f2, 2); }
    if (s7.f3 != 3) { fprintf(stderr, "mono_return_float7 s7.f3: got %f but expected %d\n", s7.f3, 3); }
    if (s7.f4 != 4) { fprintf(stderr, "mono_return_float7 s7.f4: got %f but expected %d\n", s7.f4, 4); }
    if (s7.f5 != 5) { fprintf(stderr, "mono_return_float7 s7.f5: got %f but expected %d\n", s7.f5, 5); }
    if (s7.f6 != 6) { fprintf(stderr, "mono_return_float7 s7.f6: got %f but expected %d\n", s7.f6, 6); }
    if (s7.f7 != 7) { fprintf(stderr, "mono_return_float7 s7.f7: got %f but expected %d\n", s7.f7, 7); }
    s7.f1+=addend; s7.f2+=addend; s7.f3+=addend; s7.f4+=addend; s7.f5+=addend; s7.f6+=addend; s7.f7+=addend;
    return s7;
}

typedef struct { float f1,f2,f3,f4,f5,f6,f7,f8; } float8;

LIBTEST_API float8 STDCALL
mono_return_float8 (float8 s8, int addend)
{
    if (s8.f1 != 1) { fprintf(stderr, "mono_return_float8 s8.f1: got %f but expected %d\n", s8.f1, 1); }
    if (s8.f2 != 2) { fprintf(stderr, "mono_return_float8 s8.f2: got %f but expected %d\n", s8.f2, 2); }
    if (s8.f3 != 3) { fprintf(stderr, "mono_return_float8 s8.f3: got %f but expected %d\n", s8.f3, 3); }
    if (s8.f4 != 4) { fprintf(stderr, "mono_return_float8 s8.f4: got %f but expected %d\n", s8.f4, 4); }
    if (s8.f5 != 5) { fprintf(stderr, "mono_return_float8 s8.f5: got %f but expected %d\n", s8.f5, 5); }
    if (s8.f6 != 6) { fprintf(stderr, "mono_return_float8 s8.f6: got %f but expected %d\n", s8.f6, 6); }
    if (s8.f7 != 7) { fprintf(stderr, "mono_return_float8 s8.f7: got %f but expected %d\n", s8.f7, 7); }
    if (s8.f8 != 8) { fprintf(stderr, "mono_return_float8 s8.f8: got %f but expected %d\n", s8.f8, 8); }
    s8.f1+=addend; s8.f2+=addend; s8.f3+=addend; s8.f4+=addend; s8.f5+=addend; s8.f6+=addend; s8.f7+=addend; s8.f8+=addend;
    return s8;
}

typedef struct { float f1,f2,f3,f4,f5,f6,f7,f8,f9; } float9;

LIBTEST_API float9 STDCALL
mono_return_float9 (float9 s9, int addend)
{
    if (s9.f1 != 1) { fprintf(stderr, "mono_return_float9 s9.f1: got %f but expected %d\n", s9.f1, 1); }
    if (s9.f2 != 2) { fprintf(stderr, "mono_return_float9 s9.f2: got %f but expected %d\n", s9.f2, 2); }
    if (s9.f3 != 3) { fprintf(stderr, "mono_return_float9 s9.f3: got %f but expected %d\n", s9.f3, 3); }
    if (s9.f4 != 4) { fprintf(stderr, "mono_return_float9 s9.f4: got %f but expected %d\n", s9.f4, 4); }
    if (s9.f5 != 5) { fprintf(stderr, "mono_return_float9 s9.f5: got %f but expected %d\n", s9.f5, 5); }
    if (s9.f6 != 6) { fprintf(stderr, "mono_return_float9 s9.f6: got %f but expected %d\n", s9.f6, 6); }
    if (s9.f7 != 7) { fprintf(stderr, "mono_return_float9 s9.f7: got %f but expected %d\n", s9.f7, 7); }
    if (s9.f8 != 8) { fprintf(stderr, "mono_return_float9 s9.f8: got %f but expected %d\n", s9.f8, 8); }
    if (s9.f9 != 9) { fprintf(stderr, "mono_return_float9 s9.f9: got %f but expected %d\n", s9.f9, 9); }
    s9.f1+=addend; s9.f2+=addend; s9.f3+=addend; s9.f4+=addend; s9.f5+=addend; s9.f6+=addend; s9.f7+=addend; s9.f8+=addend; s9.f9+=addend;
    return s9;
}

typedef struct { struct { float f1; } nested1; float f2,f3; struct { float f4; } nested2; } float4_nested;

LIBTEST_API float4_nested STDCALL
mono_return_float4_nested (float4_nested sn4, int addend)
{
    if (sn4.nested1.f1 != 1) { fprintf(stderr, "mono_return_float4_nested sn4.nested1.f1: got %f but expected %d\n", sn4.nested1.f1, 1); }
    if (sn4.f2 != 2) { fprintf(stderr, "mono_return_float4_nested sn4.f2: got %f but expected %d\n", sn4.f2, 2); }
    if (sn4.f3 != 3) { fprintf(stderr, "mono_return_float4_nested sn4.f3: got %f but expected %d\n", sn4.f3, 3); }
    if (sn4.nested2.f4 != 4) { fprintf(stderr, "mono_return_float4_nested sn4.nested2.f4: got %f but expected %d\n", sn4.nested2.f4, 4); }
    sn4.nested1.f1+=addend; sn4.f2+=addend; sn4.f3+=addend; sn4.nested2.f4+=addend;
    return sn4;
}

typedef struct { double f1; } double1;

LIBTEST_API double1 STDCALL
mono_return_double1 (double1 s1, int addend)
{
    if (s1.f1 != 1) { fprintf(stderr, "mono_return_double1 s1.f1: got %f but expected %d\n", s1.f1, 1); }
    s1.f1+=addend;
    return s1;
}

typedef struct { double f1,f2; } double2;

LIBTEST_API double2 STDCALL
mono_return_double2 (double2 s2, int addend)
{
    if (s2.f1 != 1) { fprintf(stderr, "mono_return_double2 s2.f1: got %f but expected %d\n", s2.f1, 1); }
    if (s2.f2 != 2) { fprintf(stderr, "mono_return_double2 s2.f2: got %f but expected %d\n", s2.f2, 2); }
    s2.f1+=addend; s2.f2+=addend;
    return s2;
}

typedef struct { double f1,f2,f3; } double3;

LIBTEST_API double3 STDCALL
mono_return_double3 (double3 s3, int addend)
{
    if (s3.f1 != 1) { fprintf(stderr, "mono_return_double3 s3.f1: got %f but expected %d\n", s3.f1, 1); }
    if (s3.f2 != 2) { fprintf(stderr, "mono_return_double3 s3.f2: got %f but expected %d\n", s3.f2, 2); }
    if (s3.f3 != 3) { fprintf(stderr, "mono_return_double3 s3.f3: got %f but expected %d\n", s3.f3, 3); }
    s3.f1+=addend; s3.f2+=addend; s3.f3+=addend;
    return s3;
}

typedef struct { double f1,f2,f3,f4; } double4;

LIBTEST_API double4 STDCALL
mono_return_double4 (double4 s4, int addend)
{
    if (s4.f1 != 1) { fprintf(stderr, "mono_return_double4 s4.f1: got %f but expected %d\n", s4.f1, 1); }
    if (s4.f2 != 2) { fprintf(stderr, "mono_return_double4 s4.f2: got %f but expected %d\n", s4.f2, 2); }
    if (s4.f3 != 3) { fprintf(stderr, "mono_return_double4 s4.f3: got %f but expected %d\n", s4.f3, 3); }
    if (s4.f4 != 4) { fprintf(stderr, "mono_return_double4 s4.f4: got %f but expected %d\n", s4.f4, 4); }
    s4.f1+=addend; s4.f2+=addend; s4.f3+=addend; s4.f4+=addend;
    return s4;
}

typedef struct { double f1,f2,f3,f4,f5; } double5;

LIBTEST_API double5 STDCALL
mono_return_double5 (double5 s5, int addend)
{
    if (s5.f1 != 1) { fprintf(stderr, "mono_return_double5 s5.f1: got %f but expected %d\n", s5.f1, 1); }
    if (s5.f2 != 2) { fprintf(stderr, "mono_return_double5 s5.f2: got %f but expected %d\n", s5.f2, 2); }
    if (s5.f3 != 3) { fprintf(stderr, "mono_return_double5 s5.f3: got %f but expected %d\n", s5.f3, 3); }
    if (s5.f4 != 4) { fprintf(stderr, "mono_return_double5 s5.f4: got %f but expected %d\n", s5.f4, 4); }
    if (s5.f5 != 5) { fprintf(stderr, "mono_return_double5 s5.f5: got %f but expected %d\n", s5.f5, 5); }
    s5.f1+=addend; s5.f2+=addend; s5.f3+=addend; s5.f4+=addend; s5.f5+=addend;
    return s5;
}

typedef struct { double f1,f2,f3,f4,f5,f6; } double6;

LIBTEST_API double6 STDCALL
mono_return_double6 (double6 s6, int addend)
{
    if (s6.f1 != 1) { fprintf(stderr, "mono_return_double6 s6.f1: got %f but expected %d\n", s6.f1, 1); }
    if (s6.f2 != 2) { fprintf(stderr, "mono_return_double6 s6.f2: got %f but expected %d\n", s6.f2, 2); }
    if (s6.f3 != 3) { fprintf(stderr, "mono_return_double6 s6.f3: got %f but expected %d\n", s6.f3, 3); }
    if (s6.f4 != 4) { fprintf(stderr, "mono_return_double6 s6.f4: got %f but expected %d\n", s6.f4, 4); }
    if (s6.f5 != 5) { fprintf(stderr, "mono_return_double6 s6.f5: got %f but expected %d\n", s6.f5, 5); }
    if (s6.f6 != 6) { fprintf(stderr, "mono_return_double6 s6.f6: got %f but expected %d\n", s6.f6, 6); }
    s6.f1+=addend; s6.f2+=addend; s6.f3+=addend; s6.f4+=addend; s6.f5+=addend; s6.f6+=addend;
    return s6;
}

typedef struct { double f1,f2,f3,f4,f5,f6,f7; } double7;

LIBTEST_API double7 STDCALL
mono_return_double7 (double7 s7, int addend)
{
    if (s7.f1 != 1) { fprintf(stderr, "mono_return_double7 s7.f1: got %f but expected %d\n", s7.f1, 1); }
    if (s7.f2 != 2) { fprintf(stderr, "mono_return_double7 s7.f2: got %f but expected %d\n", s7.f2, 2); }
    if (s7.f3 != 3) { fprintf(stderr, "mono_return_double7 s7.f3: got %f but expected %d\n", s7.f3, 3); }
    if (s7.f4 != 4) { fprintf(stderr, "mono_return_double7 s7.f4: got %f but expected %d\n", s7.f4, 4); }
    if (s7.f5 != 5) { fprintf(stderr, "mono_return_double7 s7.f5: got %f but expected %d\n", s7.f5, 5); }
    if (s7.f6 != 6) { fprintf(stderr, "mono_return_double7 s7.f6: got %f but expected %d\n", s7.f6, 6); }
    if (s7.f7 != 7) { fprintf(stderr, "mono_return_double7 s7.f7: got %f but expected %d\n", s7.f7, 7); }
    s7.f1+=addend; s7.f2+=addend; s7.f3+=addend; s7.f4+=addend; s7.f5+=addend; s7.f6+=addend; s7.f7+=addend;
    return s7;
}

typedef struct { double f1,f2,f3,f4,f5,f6,f7,f8; } double8;

LIBTEST_API double8 STDCALL
mono_return_double8 (double8 s8, int addend)
{
    if (s8.f1 != 1) { fprintf(stderr, "mono_return_double8 s8.f1: got %f but expected %d\n", s8.f1, 1); }
    if (s8.f2 != 2) { fprintf(stderr, "mono_return_double8 s8.f2: got %f but expected %d\n", s8.f2, 2); }
    if (s8.f3 != 3) { fprintf(stderr, "mono_return_double8 s8.f3: got %f but expected %d\n", s8.f3, 3); }
    if (s8.f4 != 4) { fprintf(stderr, "mono_return_double8 s8.f4: got %f but expected %d\n", s8.f4, 4); }
    if (s8.f5 != 5) { fprintf(stderr, "mono_return_double8 s8.f5: got %f but expected %d\n", s8.f5, 5); }
    if (s8.f6 != 6) { fprintf(stderr, "mono_return_double8 s8.f6: got %f but expected %d\n", s8.f6, 6); }
    if (s8.f7 != 7) { fprintf(stderr, "mono_return_double8 s8.f7: got %f but expected %d\n", s8.f7, 7); }
    if (s8.f8 != 8) { fprintf(stderr, "mono_return_double8 s8.f8: got %f but expected %d\n", s8.f8, 8); }
    s8.f1+=addend; s8.f2+=addend; s8.f3+=addend; s8.f4+=addend; s8.f5+=addend; s8.f6+=addend; s8.f7+=addend; s8.f8+=addend;
    return s8;
}

typedef struct { double f1,f2,f3,f4,f5,f6,f7,f8,f9; } double9;

LIBTEST_API double9 STDCALL
mono_return_double9 (double9 s9, int addend)
{
    if (s9.f1 != 1) { fprintf(stderr, "mono_return_double9 s9.f1: got %f but expected %d\n", s9.f1, 1); }
    if (s9.f2 != 2) { fprintf(stderr, "mono_return_double9 s9.f2: got %f but expected %d\n", s9.f2, 2); }
    if (s9.f3 != 3) { fprintf(stderr, "mono_return_double9 s9.f3: got %f but expected %d\n", s9.f3, 3); }
    if (s9.f4 != 4) { fprintf(stderr, "mono_return_double9 s9.f4: got %f but expected %d\n", s9.f4, 4); }
    if (s9.f5 != 5) { fprintf(stderr, "mono_return_double9 s9.f5: got %f but expected %d\n", s9.f5, 5); }
    if (s9.f6 != 6) { fprintf(stderr, "mono_return_double9 s9.f6: got %f but expected %d\n", s9.f6, 6); }
    if (s9.f7 != 7) { fprintf(stderr, "mono_return_double9 s9.f7: got %f but expected %d\n", s9.f7, 7); }
    if (s9.f8 != 8) { fprintf(stderr, "mono_return_double9 s9.f8: got %f but expected %d\n", s9.f8, 8); }
    if (s9.f9 != 9) { fprintf(stderr, "mono_return_double9 s9.f9: got %f but expected %d\n", s9.f9, 9); }
    s9.f1+=addend; s9.f2+=addend; s9.f3+=addend; s9.f4+=addend; s9.f5+=addend; s9.f6+=addend; s9.f7+=addend; s9.f8+=addend; s9.f9+=addend;
    return s9;
}
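/*
 * The float/double aggregates above additionally cover homogeneous
 * floating-point aggregate classification on ABIs that pass such structs
 * in FP registers. Hypothetical illustration of the contract, not part of
 * the test suite:
 */
#if 0
static int
sanity_check_float4 (void)
{
    float4 s = { 1, 2, 3, 4 };
    float4 r = mono_return_float4 (s, 10);
    return r.f1 == 11 && r.f4 == 14;
}
#endif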
typedef struct { struct { double f1; } nested1; struct { double f2; } nested2; } double2_nested;

LIBTEST_API double2_nested STDCALL
mono_return_double2_nested (double2_nested sn2, int addend)
{
    if (sn2.nested1.f1 != 1) { fprintf(stderr, "mono_return_double2_nested sn2.nested1.f1: got %f but expected %d\n", sn2.nested1.f1, 1); }
    if (sn2.nested2.f2 != 2) { fprintf(stderr, "mono_return_double2_nested sn2.nested2.f2: got %f but expected %d\n", sn2.nested2.f2, 2); }
    sn2.nested1.f1+=addend; sn2.nested2.f2+=addend;
    return sn2;
}

typedef struct { double f1[4]; } double_array4;

LIBTEST_API double_array4 STDCALL
mono_return_double_array4 (double_array4 sa4, int addend)
{
    if (sa4.f1[0] != 1) { fprintf(stderr, "mono_return_double_array4 sa4.f1[0]: got %f but expected %d\n", sa4.f1[0], 1); }
    if (sa4.f1[1] != 2) { fprintf(stderr, "mono_return_double_array4 sa4.f1[1]: got %f but expected %d\n", sa4.f1[1], 2); }
    if (sa4.f1[2] != 3) { fprintf(stderr, "mono_return_double_array4 sa4.f1[2]: got %f but expected %d\n", sa4.f1[2], 3); }
    if (sa4.f1[3] != 4) { fprintf(stderr, "mono_return_double_array4 sa4.f1[3]: got %f but expected %d\n", sa4.f1[3], 4); }
    sa4.f1[0]+=addend; sa4.f1[1]+=addend; sa4.f1[2]+=addend; sa4.f1[3]+=addend;
    return sa4;
}

typedef struct { int array [3]; } FixedArrayStruct;

LIBTEST_API int STDCALL
mono_test_marshal_fixed_array (FixedArrayStruct s)
{
    return s.array [0] + s.array [1] + s.array [2];
}

typedef struct { char array [16]; char c; } FixedBufferChar;

LIBTEST_API int STDCALL
mono_test_marshal_fixed_buffer_char (FixedBufferChar *s)
{
    if (!(s->array [0] == 'A' && s->array [1] == 'B' && s->array [2] == 'C' && s->c == 'D'))
        return 1;
    s->array [0] = 'E';
    s->array [1] = 'F';
    s->c = 'G';
    return 0;
}

typedef struct { short array [16]; short c; } FixedBufferUnicode;

LIBTEST_API int STDCALL
mono_test_marshal_fixed_buffer_unicode (FixedBufferUnicode *s)
{
    if (!(s->array [0] == 'A' && s->array [1] == 'B' && s->array [2] == 'C' && s->c == 'D'))
        return 1;
    s->array [0] = 'E';
    s->array [1] = 'F';
    s->c = 'G';
    return 0;
}

const int NSTRINGS = 6;

//test strings
const char *utf8Strings[] = {
    "Managed",
    "Sîne klâwen durh die wolken sint geslagen",
    "काचं शक्नोम्यत्तुम् । नोपहिनस्ति माम्",
    "我能吞下玻璃而不伤身体",
    "ღმერთსი შემვედრე,შემვედრე, ნუთუ კვლა დამხსნას შემვედრე,სოფლისა შემვედრე, შემვედრე,შემვედრე,შემვედრე,შრომასა, ცეცხლს, წყალსა და მიწასა, ჰაერთა თანა მრომასა; მომცნეს ფრთენი და აღვფრინდე, მივჰხვდე მას ჩემსა ნდომასა, დღისით და ღამით ვჰხედვიდე მზისა ელვათა კრთომაასაშემვედრე,შემვედრე,",
    "Τη γλώσσα μου έδωσαν ελληνική",
    "\0"
};

LIBTEST_API char *
build_return_string(const char* pReturn)
{
    char *ret = 0;
    if (pReturn == 0 || *pReturn == 0)
        return ret;

    size_t strLength = strlen(pReturn);
    ret = (char *)(marshal_alloc (sizeof(char)* (strLength + 1)));
    memcpy(ret, pReturn, strLength);
    ret [strLength] = '\0';
    return ret;
}

LIBTEST_API char *
StringParameterInOut(/*[In,Out]*/ char *s, int index)
{
    // return a copy
    return build_return_string(s);
}

LIBTEST_API void
StringParameterRefOut(/*out*/ char **s, int index)
{
    char *pszTextutf8 = (char*)utf8Strings[index];
    size_t strLength = strlen(pszTextutf8);
    *s = (char *)(marshal_alloc (sizeof(char)* (strLength + 1)));
    memcpy(*s, pszTextutf8, strLength);
    (*s)[strLength] = '\0';
}

LIBTEST_API void
StringParameterRef(/*ref*/ char **s, int index)
{
    char *pszTextutf8 = (char*)utf8Strings[index];
    size_t strLength = strlen(pszTextutf8);
    // do byte-by-byte validation of the incoming string
    size_t szLen = strlen(*s);
    for (size_t i = 0; i < szLen; i++) {
        if ((*s)[i] != pszTextutf8[i]) {
            printf("[in] managed string does not match native string\n");
            abort ();
        }
    }
    if (*s) {
        marshal_free (*s);
    }
    // overwrite the original
    *s = (char *)(marshal_alloc (sizeof(char)* (strLength + 1)));
    memcpy(*s, pszTextutf8, strLength);
    (*s)[strLength] = '\0';
}

LIBTEST_API void
StringBuilderParameterInOut(/*[In,Out] StringBuilder*/ char *s, int index)
{
    // if string.empty
    if (s == 0 || *s == 0)
        return;

    char *pszTextutf8 = (char*)utf8Strings[index];
    // do byte-by-byte validation of the incoming string
    size_t szLen = strlen(s);
    for (size_t i = 0; i < szLen; i++) {
        if (s[i] != pszTextutf8[i]) {
            printf("[in] managed string does not match native string\n");
            abort ();
        }
    }
    // modify the string in place
    size_t outLen = strlen(pszTextutf8);
    for (size_t i = 0; i < outLen; i++) {
        s[i] = pszTextutf8[i];
    }
    s[outLen] = '\0';
}

//out string builder
LIBTEST_API void
StringBuilderParameterOut(/*[Out] StringBuilder*/ char *s, int index)
{
    char *pszTextutf8 = (char*)utf8Strings[index];
    printf ("SBPO: Receiving %s\n", s);
    // modify the string in place
    size_t outLen = strlen(pszTextutf8);
    for (size_t i = 0; i < outLen; i++) {
        s[i] = pszTextutf8[i];
    }
    s[outLen] = '\0';
}

LIBTEST_API char *
StringParameterOut(/*[Out]*/ char *s, int index)
{
    // return a copy
    return build_return_string(s);
}

// Utf8 field
typedef struct FieldWithUtf8 {
    char *pFirst;
    int index;
} FieldWithUtf8;

//utf8 struct field
LIBTEST_API void
TestStructWithUtf8Field(struct FieldWithUtf8 fieldStruct)
{
    char *pszManagedutf8 = fieldStruct.pFirst;
    int stringIndex = fieldStruct.index;
    char *pszNative = 0;
    size_t outLen = 0;

    if (pszManagedutf8 == 0 || *pszManagedutf8 == 0)
        return;

    pszNative = (char*)utf8Strings[stringIndex];
    outLen = strlen(pszNative);
    // do byte-by-byte comparison
    for (size_t i = 0; i < outLen; i++) {
        if (pszNative[i] != pszManagedutf8[i]) {
            printf("Native and managed strings do not match.\n");
            abort ();
        }
    }
}

typedef void (* Callback2)(char *text, int index);

LIBTEST_API void
Utf8DelegateAsParameter(Callback2 managedCallback)
{
    for (int i = 0; i < NSTRINGS; ++i) {
        char *pszNative = 0;
        pszNative = (char*)utf8Strings[i];
        managedCallback(pszNative, i);
    }
}

LIBTEST_API char*
StringBuilderParameterReturn(int index)
{
    char *pszTextutf8 = (char*)utf8Strings[index];
    size_t strLength = strlen(pszTextutf8);
    char * ret = (char *)(marshal_alloc (sizeof(char)* (strLength + 1)));
    memcpy(ret, pszTextutf8, strLength);
    ret[strLength] = '\0';
    return ret;
}

LIBTEST_API int STDCALL
mono_test_marshal_pointer_array (int *arr[])
{
    int i;

    for (i = 0; i < 10; ++i) {
        if (*arr [i] != -1)
            return 1;
    }
    return 0;
}

#ifndef WIN32
typedef void (*NativeToManagedExceptionRethrowFunc) (void);

void *
mono_test_native_to_managed_exception_rethrow_thread (void *arg)
{
    NativeToManagedExceptionRethrowFunc func = (NativeToManagedExceptionRethrowFunc) arg;
    func ();
    return NULL;
}

LIBTEST_API void STDCALL
mono_test_native_to_managed_exception_rethrow (NativeToManagedExceptionRethrowFunc func)
{
    pthread_t t;
    pthread_create (&t, NULL, mono_test_native_to_managed_exception_rethrow_thread, (gpointer)func);
    pthread_join (t, NULL);
}
#endif
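/*
 * Illustrative sketch only: Utf8DelegateAsParameter above invokes a
 * managed delegate once per test string. A native consumer with the same
 * shape could look like the following; verify_utf8 and drive_utf8_callback
 * are hypothetical names, not part of the test suite.
 */
#if 0
static void
verify_utf8 (char *text, int index)
{
    if (strcmp (text, utf8Strings [index]) != 0)
        abort ();
}

static void
drive_utf8_callback (void)
{
    Utf8DelegateAsParameter (verify_utf8);
}
#endif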
typedef void (*VoidVoidCallback) (void);
typedef void (*MonoFtnPtrEHCallback) (guint32 gchandle);

typedef void *MonoDomain;
typedef void *MonoAssembly;
typedef void *MonoImage;
typedef void *MonoClass;
typedef void *MonoMethod;
typedef void *MonoThread;

typedef long long MonoObject;
typedef MonoObject MonoException;
typedef int32_t mono_bool;

static int sym_inited = 0;
static void (*sym_mono_install_ftnptr_eh_callback) (MonoFtnPtrEHCallback);
static MonoObject* (*sym_mono_gchandle_get_target) (guint32 gchandle);
static guint32 (*sym_mono_gchandle_new) (MonoObject *, mono_bool pinned);
static void (*sym_mono_gchandle_free) (guint32 gchandle);
static void (*sym_mono_raise_exception) (MonoException *ex);
static void (*sym_mono_domain_unload) (gpointer);
static void (*sym_mono_threads_exit_gc_safe_region_unbalanced) (gpointer, gpointer *);
static void (*null_function_ptr) (void);

static MonoDomain *(*sym_mono_get_root_domain) (void);
static MonoDomain *(*sym_mono_domain_get) (void);
static mono_bool (*sym_mono_domain_set) (MonoDomain *, mono_bool /*force */);
static MonoAssembly *(*sym_mono_domain_assembly_open) (MonoDomain *, const char*);
static MonoImage *(*sym_mono_assembly_get_image) (MonoAssembly *);
static MonoClass *(*sym_mono_class_from_name) (MonoImage *, const char *, const char *);
static MonoMethod *(*sym_mono_class_get_method_from_name) (MonoClass *, const char *, int /* arg_count */);
static MonoThread *(*sym_mono_thread_attach) (MonoDomain *);
static void (*sym_mono_thread_detach) (MonoThread *);
static MonoObject *(*sym_mono_runtime_invoke) (MonoMethod *, void*, void**, MonoObject**);

// SYM_LOOKUP(mono_runtime_invoke)
// expands to
//   sym_mono_runtime_invoke = g_cast (lookup_mono_symbol ("mono_runtime_invoke"));
//
// (the g_cast is necessary for C++ builds)
#define SYM_LOOKUP(name) do { \
    sym_##name = g_cast (lookup_mono_symbol (#name)); \
    } while (0)

static void
mono_test_init_symbols (void)
{
    if (sym_inited)
        return;

    SYM_LOOKUP (mono_install_ftnptr_eh_callback);
    SYM_LOOKUP (mono_gchandle_get_target);
    SYM_LOOKUP (mono_gchandle_new);
    SYM_LOOKUP (mono_gchandle_free);
    SYM_LOOKUP (mono_raise_exception);
    SYM_LOOKUP (mono_domain_unload);
    SYM_LOOKUP (mono_threads_exit_gc_safe_region_unbalanced);
    SYM_LOOKUP (mono_get_root_domain);
    SYM_LOOKUP (mono_domain_get);
    SYM_LOOKUP (mono_domain_set);
    SYM_LOOKUP (mono_domain_assembly_open);
    SYM_LOOKUP (mono_assembly_get_image);
    SYM_LOOKUP (mono_class_from_name);
    SYM_LOOKUP (mono_class_get_method_from_name);
    SYM_LOOKUP (mono_thread_attach);
    SYM_LOOKUP (mono_thread_detach);
    SYM_LOOKUP (mono_runtime_invoke);

    sym_inited = 1;
}

#ifndef TARGET_WASM
static jmp_buf test_jmp_buf;
static guint32 test_gchandle;

static void
mono_test_longjmp_callback (guint32 gchandle)
{
    test_gchandle = gchandle;
    longjmp (test_jmp_buf, 1);
}

LIBTEST_API void STDCALL
mono_test_setjmp_and_call (VoidVoidCallback managedCallback, intptr_t *out_handle)
{
    mono_test_init_symbols ();
    if (setjmp (test_jmp_buf) == 0) {
        *out_handle = 0;
        sym_mono_install_ftnptr_eh_callback (mono_test_longjmp_callback);
        managedCallback ();
        *out_handle = 0; /* Do not expect to return here */
    } else {
        sym_mono_install_ftnptr_eh_callback (NULL);
        *out_handle = test_gchandle;
    }
}
#endif

LIBTEST_API void STDCALL
mono_test_marshal_bstr (void *ptr)
{
}

static void (*mono_test_capture_throw_callback) (guint32 gchandle, guint32 *exception_out);

static void
mono_test_ftnptr_eh_callback (guint32 gchandle)
{
    guint32 exception_handle = 0;

    g_assert (gchandle != 0);
    MonoObject *exc = sym_mono_gchandle_get_target (gchandle);
    sym_mono_gchandle_free (gchandle);

    guint32 handle = sym_mono_gchandle_new (exc, FALSE);
    mono_test_capture_throw_callback (handle, &exception_handle);
    sym_mono_gchandle_free (handle);

    g_assert (exception_handle != 0);
    exc = sym_mono_gchandle_get_target (exception_handle);
    sym_mono_gchandle_free (exception_handle);

    sym_mono_raise_exception (exc);
    g_error ("mono_raise_exception should not return");
}

LIBTEST_API void STDCALL
mono_test_setup_ftnptr_eh_callback (VoidVoidCallback managed_entry, void (*capture_throw_callback) (guint32, guint32 *))
{
    mono_test_init_symbols ();
    mono_test_capture_throw_callback = capture_throw_callback;
    sym_mono_install_ftnptr_eh_callback (mono_test_ftnptr_eh_callback);
    managed_entry ();
}
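/*
 * Illustrative flow only (hypothetical, for documentation): a managed
 * test is expected to pair the setup/cleanup entry points roughly as
 *
 *     mono_test_setup_ftnptr_eh_callback (entry, capture);
 *     ... entry () throws; capture () records the exception handle ...
 *     mono_test_cleanup_ftptr_eh_callback ();
 *
 * so that the installed callback never outlives the test.
 */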
LIBTEST_API void STDCALL
mono_test_cleanup_ftptr_eh_callback (void)
{
    mono_test_init_symbols ();
    sym_mono_install_ftnptr_eh_callback (NULL);
}

LIBTEST_API int STDCALL
mono_test_cominterop_ccw_queryinterface (MonoComObject *pUnk)
{
    void *pp;
    int hr = pUnk->vtbl->QueryInterface (pUnk, &IID_INotImplemented, &pp);

    // Return true if we can't get INotImplemented
    return pUnk == NULL && hr == S_OK;
}

typedef struct ccw_qi_shared_data {
    MonoComObject *pUnk;
    int i;
} ccw_qi_shared_data;

static void*
ccw_qi_foreign_thread (void *arg)
{
    ccw_qi_shared_data *shared = (ccw_qi_shared_data *)arg;
    void *pp;
    MonoComObject *pUnk = shared->pUnk;
    int hr = pUnk->vtbl->QueryInterface (pUnk, &IID_ITest, &pp);
    shared->i = (hr == S_OK) ? 0 : 43;
    return NULL;
}

LIBTEST_API int STDCALL
mono_test_cominterop_ccw_queryinterface_foreign_thread (MonoComObject *pUnk)
{
#ifdef WIN32
    return 0;
#else
    pthread_t t;
    ccw_qi_shared_data *shared = (ccw_qi_shared_data *)malloc (sizeof (ccw_qi_shared_data));
    if (!shared)
        abort ();
    shared->pUnk = pUnk;
    shared->i = 1;
    int res = pthread_create (&t, NULL, ccw_qi_foreign_thread, (void*)shared);
    g_assert (res == 0);
    pthread_join (t, NULL);
    int result = shared->i;
    free (shared);
    return result;
#endif
}

static void*
ccw_itest_foreign_thread (void *arg)
{
    ccw_qi_shared_data *shared = (ccw_qi_shared_data *)arg;
    MonoComObject *pUnk = shared->pUnk;
    int hr = pUnk->vtbl->SByteIn (pUnk, -100);
    shared->i = (hr == S_OK) ? 0 : 12;
    return NULL;
}

LIBTEST_API int STDCALL
mono_test_cominterop_ccw_itest_foreign_thread (MonoComObject *pUnk)
{
#ifdef WIN32
    return 0;
#else
    pthread_t t;
    ccw_qi_shared_data *shared = (ccw_qi_shared_data *)malloc (sizeof (ccw_qi_shared_data));
    if (!shared)
        abort ();
    shared->pUnk = pUnk;
    shared->i = 1;
    int res = pthread_create (&t, NULL, ccw_itest_foreign_thread, (void*)shared);
    g_assert (res == 0);
    pthread_join (t, NULL);
    int result = shared->i;
    free (shared);
    return result;
#endif
}

LIBTEST_API void STDCALL
mono_test_MerpCrashSnprintf (void)
{
    fprintf (stderr, "Before overwrite\n");

    char buff [1] = { '\0' };
    char overflow [1] = { 'a' }; // Not null-terminated
    g_snprintf (buff, sizeof(buff) * 10, "THISSHOULDOVERRUNTERRIBLY%s", overflow);
    g_snprintf ((char *) GINT_TO_POINTER(-1), sizeof(buff) * 10, "THISSHOULDOVERRUNTERRIBLY%s", overflow);
}

LIBTEST_API void STDCALL
mono_test_MerpCrashDladdr (void)
{
#ifndef HOST_WIN32
    dlopen (GINT_TO_POINTER(-1), -1);
#endif
}

LIBTEST_API void STDCALL
mono_test_MerpCrashMalloc (void)
{
    gpointer x = g_malloc (sizeof(gpointer));
    g_free (x);
    // Double free
    g_free (x);
}

LIBTEST_API void STDCALL
mono_test_MerpCrashNullFp (void)
{
    null_function_ptr ();
}

LIBTEST_API void STDCALL
mono_test_MerpCrashDomainUnload (void)
{
    mono_test_init_symbols ();
    sym_mono_domain_unload (GINT_TO_POINTER (-1));
}

LIBTEST_API void STDCALL
mono_test_MerpCrashUnbalancedGCSafe (void)
{
    mono_test_init_symbols ();
    gpointer foo = GINT_TO_POINTER (-1);
    gpointer bar = GINT_TO_POINTER (-2);
    sym_mono_threads_exit_gc_safe_region_unbalanced (foo, &bar);
}

LIBTEST_API void STDCALL
mono_test_MerpCrashUnhandledExceptionHook (void)
{
    g_assert_not_reached ();
}

LIBTEST_API void STDCALL
mono_test_MerpCrashSignalTerm (void)
{
    raise (SIGTERM);
}

// for the rest of the signal tests, we use SIGTERM as a fallback
LIBTEST_API void STDCALL
mono_test_MerpCrashSignalAbrt (void)
{
#if defined (SIGABRT)
    raise (SIGABRT);
#else
    raise (SIGTERM);
#endif
}

LIBTEST_API void STDCALL
mono_test_MerpCrashSignalFpe (void)
{
#if defined (SIGFPE)
    raise (SIGFPE);
#else
    raise (SIGTERM);
#endif
}

LIBTEST_API void STDCALL
mono_test_MerpCrashSignalBus (void)
{
#if defined (SIGBUS)
    raise (SIGBUS);
#else
    raise (SIGTERM);
#endif
}
LIBTEST_API void STDCALL
mono_test_MerpCrashSignalSegv (void)
{
#if defined (SIGSEGV)
    raise (SIGSEGV);
#else
    raise (SIGTERM);
#endif
}

LIBTEST_API void STDCALL
mono_test_MerpCrashSignalIll (void)
{
#if defined (SIGILL)
    raise (SIGILL);
#else
    raise (SIGTERM);
#endif
}

typedef struct _TestAutoDual _TestAutoDual;

typedef struct {
    int (STDCALL *QueryInterface)(_TestAutoDual *iface, REFIID iid, gpointer *out);
    int (STDCALL *AddRef)(_TestAutoDual *iface);
    int (STDCALL *Release)(_TestAutoDual *iface);
    int (STDCALL *GetTypeInfoCount)(_TestAutoDual *iface, unsigned int *count);
    int (STDCALL *GetTypeInfo)(_TestAutoDual *iface, unsigned int index, unsigned int lcid, gpointer *out);
    int (STDCALL *GetIDsOfNames)(_TestAutoDual *iface, REFIID iid, gpointer names, unsigned int count, unsigned int lcid, gpointer ids);
    int (STDCALL *Invoke)(_TestAutoDual *iface, unsigned int dispid, REFIID iid, unsigned int lcid, unsigned short flags, gpointer params, gpointer result, gpointer excepinfo, gpointer err_arg);
    int (STDCALL *ToString)(_TestAutoDual *iface, gpointer string);
    int (STDCALL *Equals)(_TestAutoDual *iface, VARIANT other, short *retval);
    int (STDCALL *GetHashCode)(_TestAutoDual *iface, int *retval);
    int (STDCALL *GetType)(_TestAutoDual *iface, gpointer retval);
    int (STDCALL *parent_method_virtual)(_TestAutoDual *iface, int *retval);
    int (STDCALL *get_parent_property)(_TestAutoDual *iface, int *retval);
    int (STDCALL *parent_method_override)(_TestAutoDual *iface, int *retval);
    int (STDCALL *parent_iface_method)(_TestAutoDual *iface, int *retval);
    int (STDCALL *parent_method)(_TestAutoDual *iface, int *retval);
    int (STDCALL *child_method_virtual)(_TestAutoDual *iface, int *retval);
    int (STDCALL *iface1_method)(_TestAutoDual *iface, int *retval);
    int (STDCALL *iface1_parent_method)(_TestAutoDual *iface, int *retval);
    int (STDCALL *iface2_method)(_TestAutoDual *iface, int *retval);
    int (STDCALL *child_method)(_TestAutoDual *iface, int *retval);
} _TestAutoDualVtbl;

struct _TestAutoDual {
    const _TestAutoDualVtbl *lpVtbl;
};

LIBTEST_API int STDCALL
mono_test_ccw_class_type_auto_dual (_TestAutoDual *iface)
{
    int hr, retval;

    hr = iface->lpVtbl->parent_method_virtual(iface, &retval);
    if (hr != 0) return 1;
    if (retval != 101) return 2;

    hr = iface->lpVtbl->get_parent_property(iface, &retval);
    if (hr != 0) return 3;
    if (retval != 102) return 4;

    hr = iface->lpVtbl->parent_method_override(iface, &retval);
    if (hr != 0) return 5;
    if (retval != 203) return 6;

    hr = iface->lpVtbl->parent_method(iface, &retval);
    if (hr != 0) return 7;
    if (retval != 104) return 8;

    hr = iface->lpVtbl->child_method_virtual(iface, &retval);
    if (hr != 0) return 11;
    if (retval != 106) return 12;

    hr = iface->lpVtbl->iface1_method(iface, &retval);
    if (hr != 0) return 13;
    if (retval != 107) return 14;

    hr = iface->lpVtbl->iface1_parent_method(iface, &retval);
    if (hr != 0) return 15;
    if (retval != 108) return 16;

    hr = iface->lpVtbl->iface2_method(iface, &retval);
    if (hr != 0) return 17;
    if (retval != 109) return 18;

    hr = iface->lpVtbl->child_method(iface, &retval);
    if (hr != 0) return 19;
    if (retval != 110) return 20;

    hr = iface->lpVtbl->parent_iface_method(iface, &retval);
    if (hr != 0) return 23;
    if (retval != 112) return 24;

    return 0;
}

static const GUID IID_IBanana = {0x12345678, 0, 0, {0, 0, 0, 0, 0, 0, 0, 2}};
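/*
 * Note on the hand-written vtables here: the first seven slots
 * (IUnknown plus IDispatch) are fixed by COM; the remaining slots must
 * match the order in which the CCW exposes the managed methods, which is
 * precisely what these class-interface tests are checking.
 */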
typedef struct IBanana IBanana;

typedef struct {
    int (STDCALL *QueryInterface)(IBanana *iface, REFIID iid, gpointer *out);
    int (STDCALL *AddRef)(IBanana *iface);
    int (STDCALL *Release)(IBanana *iface);
    int (STDCALL *GetTypeInfoCount)(IBanana *iface, unsigned int *count);
    int (STDCALL *GetTypeInfo)(IBanana *iface, unsigned int index, unsigned int lcid, gpointer *out);
    int (STDCALL *GetIDsOfNames)(IBanana *iface, REFIID iid, gpointer names, unsigned int count, unsigned int lcid, gpointer ids);
    int (STDCALL *Invoke)(IBanana *iface, unsigned int dispid, REFIID iid, unsigned int lcid, unsigned short flags, gpointer params, gpointer result, gpointer excepinfo, gpointer err_arg);
    int (STDCALL *iface1_method)(IBanana *iface, int *retval);
} IBananaVtbl;

struct IBanana {
    const IBananaVtbl *lpVtbl;
};

LIBTEST_API int STDCALL
mono_test_ccw_class_type_none (IBanana *iface)
{
    int hr, retval;

    hr = iface->lpVtbl->iface1_method(iface, &retval);
    if (hr != 0) return 1;
    if (retval != 3) return 2;
    return 0;
}

LIBTEST_API int STDCALL
mono_test_ccw_class_type_auto_dispatch (IDispatch *disp)
{
    IBanana *banana;
    int hr, retval;

#ifdef __cplusplus
    hr = disp->QueryInterface (IID_IBanana, (void **)&banana);
#else
    hr = disp->lpVtbl->QueryInterface (disp, &IID_IBanana, (void **)&banana);
#endif
    if (hr != 0) return 1;

    hr = banana->lpVtbl->iface1_method(banana, &retval);
    if (hr != 0) return 2;
    if (retval != 3) return 3;

    banana->lpVtbl->Release(banana);
    return 0;
}

static guint8 static_arr[] = { 1, 2, 3, 4 };

LIBTEST_API guint8*
mono_test_marshal_return_array (void)
{
    return static_arr;
}

struct invoke_names {
    char *assm_name;
    char *name_space;
    char *name;
    char *meth_name;
};

static struct invoke_names *
make_invoke_names (const char *assm_name, const char *name_space, const char *name, const char *meth_name)
{
    struct invoke_names *names = (struct invoke_names*) malloc (sizeof (struct invoke_names));
    names->assm_name = strdup (assm_name);
    names->name_space = strdup (name_space);
    names->name = strdup (name);
    names->meth_name = strdup (meth_name);
    return names;
}

static void
destroy_invoke_names (struct invoke_names *n)
{
    free (n->assm_name);
    free (n->name_space);
    free (n->name);
    free (n->meth_name);
    free (n);
}

static void
test_invoke_by_name (struct invoke_names *names)
{
    mono_test_init_symbols ();

    MonoDomain *domain = sym_mono_domain_get ();
    MonoThread *thread = NULL;
    if (!domain) {
        thread = sym_mono_thread_attach (sym_mono_get_root_domain ());
    }
    domain = sym_mono_domain_get ();
    g_assert (domain);

    MonoAssembly *assm = sym_mono_domain_assembly_open (domain, names->assm_name);
    g_assert (assm);
    MonoImage *image = sym_mono_assembly_get_image (assm);
    MonoClass *klass = sym_mono_class_from_name (image, names->name_space, names->name);
    g_assert (klass);
    /* meth_name should be a static method that takes no arguments */
    MonoMethod *method = sym_mono_class_get_method_from_name (klass, names->meth_name, -1);
    g_assert (method);

    MonoObject *args[] = {NULL, };
    sym_mono_runtime_invoke (method, NULL, (void**)args, NULL);

    if (thread)
        sym_mono_thread_detach (thread);
}

#ifndef HOST_WIN32
static void*
invoke_foreign_thread (void* user_data)
{
    struct invoke_names *names = (struct invoke_names*)user_data;
    /*
     * Run a couple of times to check that attach/detach multiple
     * times from the same thread leaves it in a reasonable coop
     * thread state.
*/ for (int i = 0; i < 5; ++i) { test_invoke_by_name (names); sleep (2); } destroy_invoke_names (names); return NULL; } static void* invoke_foreign_delegate (void *user_data) { VoidVoidCallback del = (VoidVoidCallback)user_data; for (int i = 0; i < 5; ++i) { del (); sleep (2); } return NULL; } #endif LIBTEST_API mono_bool STDCALL mono_test_attach_invoke_foreign_thread (const char *assm_name, const char *name_space, const char *name, const char *meth_name, VoidVoidCallback del) { #ifndef HOST_WIN32 if (!del) { struct invoke_names *names = make_invoke_names (assm_name, name_space, name, meth_name); pthread_t t; int res = pthread_create (&t, NULL, invoke_foreign_thread, (void*)names); g_assert (res == 0); pthread_join (t, NULL); return 0; } else { pthread_t t; int res = pthread_create (&t, NULL, invoke_foreign_delegate, del); g_assert (res == 0); pthread_join (t, NULL); return 0; } #else // TODO: Win32 version of this test return 1; #endif } #ifndef HOST_WIN32 struct names_and_mutex { /* if del is NULL, use names, otherwise just call del */ VoidVoidCallback del; struct invoke_names *names; /* mutex to coordinate test and foreign thread */ pthread_mutex_t coord_mutex; pthread_cond_t coord_cond; /* mutex to block the foreign thread */ pthread_mutex_t deadlock_mutex; }; static void* invoke_block_foreign_thread (void *user_data) { // This thread calls into the runtime and then blocks. It should not // prevent the runtime from shutting down. struct names_and_mutex *nm = (struct names_and_mutex *)user_data; if (!nm->del) { test_invoke_by_name (nm->names); } else { nm->del (); } pthread_mutex_lock (&nm->coord_mutex); /* signal the test thread that we called the runtime */ pthread_cond_signal (&nm->coord_cond); pthread_mutex_unlock (&nm->coord_mutex); pthread_mutex_lock (&nm->deadlock_mutex); // blocks forever g_assert_not_reached (); } #endif LIBTEST_API mono_bool STDCALL mono_test_attach_invoke_block_foreign_thread (const char *assm_name, const char *name_space, const char *name, const char *meth_name, VoidVoidCallback del) { #ifndef HOST_WIN32 struct names_and_mutex *nm = malloc (sizeof (struct names_and_mutex)); nm->del = del; if (!del) { struct invoke_names *names = make_invoke_names (assm_name, name_space, name, meth_name); nm->names = names; } else { nm->names = NULL; } pthread_mutex_init (&nm->coord_mutex, NULL); pthread_cond_init (&nm->coord_cond, NULL); pthread_mutex_init (&nm->deadlock_mutex, NULL); pthread_mutex_lock (&nm->deadlock_mutex); // lock the mutex and never unlock it. 
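	/* Spawn the foreign thread, wait until it signals coord_cond
	 * (i.e. it has finished its call into the runtime), then detach it
	 * and return while it stays blocked on deadlock_mutex forever. */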
	pthread_t t;
	int res = pthread_create (&t, NULL, invoke_block_foreign_thread, (void*)nm);
	g_assert (res == 0);
	/* wait for the foreign thread to finish calling the runtime before
	 * detaching it and returning */
	pthread_mutex_lock (&nm->coord_mutex);
	pthread_cond_wait (&nm->coord_cond, &nm->coord_mutex);
	pthread_mutex_unlock (&nm->coord_mutex);
	pthread_detach (t);
	return 0;
#else
	// TODO: Win32 version of this test
	return 1;
#endif
}

static const GUID IID_IDrupe = {0x9f001e6b, 0xa244, 0x3911, {0x88,0xdb, 0xbb,0x2b,0x6d,0x58,0x43,0xaa}};

#ifndef HOST_WIN32
typedef struct IUnknown IUnknown;
typedef struct
{
	int (STDCALL *QueryInterface)(IUnknown *iface, REFIID iid, gpointer *out);
	int (STDCALL *AddRef)(IUnknown *iface);
	int (STDCALL *Release)(IUnknown *iface);
} IUnknownVtbl;

struct IUnknown
{
	const IUnknownVtbl *lpVtbl;
};
#endif

LIBTEST_API int STDCALL
mono_test_ccw_query_interface (IUnknown *iface)
{
	IUnknown *drupe;
	int hr;
#ifdef __cplusplus
	hr = iface->QueryInterface (IID_IDrupe, (void **)&drupe);
#else
	hr = iface->lpVtbl->QueryInterface (iface, &IID_IDrupe, (void **)&drupe);
#endif
	if (hr != 0)
		return 1;
#ifdef __cplusplus
	drupe->Release();
#else
	drupe->lpVtbl->Release(drupe);
#endif
	return 0;
}

#ifdef __cplusplus
} // extern C
#endif
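/*
 * Illustrative sketch only (not part of the test suite, hence #if 0): the
 * calling pattern the CCW tests above use when driving a COM-style
 * interface from plain C. The helper name is hypothetical.
 */
#if 0
static int
query_and_release (IUnknown *iface, REFIID iid)
{
	IUnknown *other;
	/* In C the vtable is reached explicitly through lpVtbl and the
	 * interface pointer is passed back as the first argument. */
	int hr = iface->lpVtbl->QueryInterface (iface, iid, (void **)&other);
	if (hr != 0)
		return hr;
	/* Every successful QueryInterface adds a reference that the
	 * caller must drop. */
	other->lpVtbl->Release (other);
	return 0;
}
#endif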
#include <config.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <glib.h> #include <gmodule.h> #include <errno.h> #include <time.h> #include <math.h> #include <setjmp.h> #include <signal.h> #include "../utils/mono-errno.h" #include "../utils/mono-compiler.h" #ifndef HOST_WIN32 #include <dlfcn.h> #endif #ifdef WIN32 #include <windows.h> #include "initguid.h" #else #include <pthread.h> #endif #ifdef __cplusplus extern "C" { #endif #ifdef WIN32 #define STDCALL __stdcall #else #define STDCALL #define __thiscall /* nothing */ #endif #ifdef __GNUC__ #pragma GCC diagnostic ignored "-Wmissing-prototypes" #endif #ifdef WIN32 extern __declspec(dllimport) void __stdcall CoTaskMemFree(void *ptr); #endif typedef int (STDCALL *SimpleDelegate) (int a); #if defined(WIN32) && defined (_MSC_VER) #define LIBTEST_API __declspec(dllexport) #elif defined(__GNUC__) #define LIBTEST_API __attribute__ ((__visibility__ ("default"))) #else #define LIBTEST_API #endif static void marshal_free (void *ptr) { #ifdef WIN32 CoTaskMemFree (ptr); #else g_free (ptr); #endif } static void* marshal_alloc (gsize size) { #ifdef WIN32 return CoTaskMemAlloc (size); #else return g_malloc (size); #endif } static void* marshal_alloc0 (gsize size) { #ifdef WIN32 void* ptr = CoTaskMemAlloc (size); memset(ptr, 0, size); return ptr; #else return g_malloc0 (size); #endif } static char* marshal_strdup (const char *str) { #ifdef WIN32 if (!str) return NULL; char *buf = (char *) CoTaskMemAlloc (strlen (str) + 1); return strcpy (buf, str); #else return g_strdup (str); #endif } static gunichar2* marshal_bstr_alloc(const gchar* str) { #ifdef WIN32 gunichar2* temp = g_utf8_to_utf16 (str, -1, NULL, NULL, NULL); gunichar2* ret = SysAllocString (temp); g_free (temp); return ret; #else gchar* ret = NULL; int slen = strlen (str); gunichar2* temp; /* allocate len + 1 utf16 characters plus 4 byte integer for length*/ ret = (gchar *)g_malloc ((slen + 1) * sizeof(gunichar2) + sizeof(guint32)); if (ret == NULL) return NULL; temp = g_utf8_to_utf16 (str, -1, NULL, NULL, NULL); memcpy (ret + sizeof(guint32), temp, slen * sizeof(gunichar2)); * ((guint32 *) ret) = slen * sizeof(gunichar2); ret [4 + slen * sizeof(gunichar2)] = 0; ret [5 + slen * sizeof(gunichar2)] = 0; return (gunichar2*)(ret + 4); #endif } #define marshal_new0(type,size) ((type *) marshal_alloc0 (sizeof (type)* (size))) LIBTEST_API int STDCALL mono_cominterop_is_supported (void) { #if defined(TARGET_X86) || defined(TARGET_AMD64) return 1; #endif return 0; } LIBTEST_API unsigned short* STDCALL test_lpwstr_marshal (unsigned short* chars, int length) { int i = 0; unsigned short *res; res = (unsigned short *)marshal_alloc (2 * (length + 1)); // printf("test_lpwstr_marshal()\n"); while ( i < length ) { // printf("X|%u|\n", chars[i]); res [i] = chars[i]; i++; } res [i] = 0; return res; } LIBTEST_API void STDCALL test_lpwstr_marshal_out (unsigned short** chars) { int i = 0; const char abc[] = "ABC"; glong len = strlen(abc); *chars = (unsigned short *)marshal_alloc (2 * (len + 1)); while ( i < len ) { (*chars) [i] = abc[i]; i++; } (*chars) [i] = 0; } typedef struct { int b; int a; int c; } union_test_1_type; LIBTEST_API int STDCALL mono_union_test_1 (union_test_1_type u1) { // printf ("Got values %d %d %d\n", u1.b, u1.a, u1.c); return u1.a + u1.b + u1.c; } LIBTEST_API int STDCALL mono_return_int (int a) { // printf ("Got value %d\n", a); return a; } LIBTEST_API float STDCALL mono_test_marshal_pass_return_float (float f) { return f + 1.0; } struct ss { int i; }; LIBTEST_API int 
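/* The mono_return_* family below passes and returns small aggregates by
 * value to exercise the platform's struct-passing ABI. */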
STDCALL mono_return_int_ss (struct ss a) { // printf ("Got value %d\n", a.i); return a.i; } LIBTEST_API struct ss STDCALL mono_return_ss (struct ss a) { // printf ("Got value %d\n", a.i); a.i++; return a; } struct sc1 { char c[1]; }; LIBTEST_API struct sc1 STDCALL mono_return_sc1 (struct sc1 a) { // printf ("Got value %d\n", a.c[0]); a.c[0]++; return a; } struct sc3 { char c[3]; }; LIBTEST_API struct sc3 STDCALL mono_return_sc3 (struct sc3 a) { // printf ("Got values %d %d %d\n", a.c[0], a.c[1], a.c[2]); a.c[0]++; a.c[1] += 2; a.c[2] += 3; return a; } struct sc5 { char c[5]; }; LIBTEST_API struct sc5 STDCALL mono_return_sc5 (struct sc5 a) { // printf ("Got values %d %d %d %d %d\n", a.c[0], a.c[1], a.c[2], a.c[3], a.c[4]); a.c[0]++; a.c[1] += 2; a.c[2] += 3; a.c[3] += 4; a.c[4] += 5; return a; } union su { int i1; int i2; }; LIBTEST_API int STDCALL mono_return_int_su (union su a) { // printf ("Got value %d\n", a.i1); return a.i1; } struct FI { float f1; float f2; float f3; }; struct NestedFloat { struct FI fi; float f4; }; LIBTEST_API struct NestedFloat STDCALL mono_return_nested_float (void) { struct NestedFloat f; f.fi.f1 = 1.0; f.fi.f2 = 2.0; f.fi.f3 = 3.0; f.f4 = 4.0; return f; } struct Scalar4 { double val[4]; }; struct Rect { int x; int y; int width; int height; }; LIBTEST_API char * STDCALL mono_return_struct_4_double (void *ptr, struct Rect rect, struct Scalar4 sc4, int a, int b, int c) { char *buffer = (char *)marshal_alloc (1024 * sizeof (char)); sprintf (buffer, "sc4 = {%.1f, %.1f, %.1f, %.1f }, a=%x, b=%x, c=%x\n", (float) sc4.val [0], (float) sc4.val [1], (float) sc4.val [2], (float) sc4.val [3], a, b, c); return buffer; } LIBTEST_API int STDCALL mono_test_many_int_arguments (int a, int b, int c, int d, int e, int f, int g, int h, int i, int j); LIBTEST_API short STDCALL mono_test_many_short_arguments (short a, short b, short c, short d, short e, short f, short g, short h, short i, short j); LIBTEST_API char STDCALL mono_test_many_char_arguments (char a, char b, char c, char d, char e, char f, char g, char h, char i, char j); LIBTEST_API int STDCALL mono_test_many_int_arguments (int a, int b, int c, int d, int e, int f, int g, int h, int i, int j) { return a + b + c + d + e + f + g + h + i + j; } LIBTEST_API short STDCALL mono_test_many_short_arguments (short a, short b, short c, short d, short e, short f, short g, short h, short i, short j) { return a + b + c + d + e + f + g + h + i + j; } LIBTEST_API char STDCALL mono_test_many_byte_arguments (char a, char b, char c, char d, char e, char f, char g, char h, char i, char j) { return a + b + c + d + e + f + g + h + i + j; } LIBTEST_API float STDCALL mono_test_many_float_arguments (float a, float b, float c, float d, float e, float f, float g, float h, float i, float j) { return a + b + c + d + e + f + g + h + i + j; } LIBTEST_API double STDCALL mono_test_many_double_arguments (double a, double b, double c, double d, double e, double f, double g, double h, double i, double j) { return a + b + c + d + e + f + g + h + i + j; } LIBTEST_API double STDCALL mono_test_split_double_arguments (double a, double b, float c, double d, double e) { return a + b + c + d + e; } LIBTEST_API int STDCALL mono_test_puts_static (char *s) { // printf ("TEST %s\n", s); return 1; } typedef int (STDCALL *SimpleDelegate3) (int a, int b); LIBTEST_API int STDCALL mono_invoke_delegate (SimpleDelegate3 delegate) { int res; // printf ("start invoke %p\n", delegate); res = delegate (2, 3); // printf ("end invoke\n"); return res; } LIBTEST_API int STDCALL 
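/* invokes the marshalled delegate once with a constant argument */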
mono_invoke_simple_delegate (SimpleDelegate d) { return d (4); } LIBTEST_API int STDCALL mono_test_marshal_char (short a1) { if (a1 == 'a') return 0; return 1; } LIBTEST_API void STDCALL mono_test_marshal_char_array (gunichar2 *s) { const char m[] = "abcdef"; gunichar2* s2; glong len; s2 = g_utf8_to_utf16 (m, -1, NULL, &len, NULL); len = (len * 2) + 2; memcpy (s, s2, len); g_free (s2); } LIBTEST_API int STDCALL mono_test_marshal_ansi_char_array (char *s) { const char m[] = "abcdef"; if (strncmp ("qwer", s, 4)) return 1; memcpy (s, m, sizeof (m)); return 0; } LIBTEST_API int STDCALL mono_test_marshal_unicode_char_array (gunichar2 *s) { const char m[] = "abcdef"; const char expected[] = "qwer"; gunichar2 *s1, *s2; glong len1, len2; s1 = g_utf8_to_utf16 (m, -1, NULL, &len1, NULL); s2 = g_utf8_to_utf16 (expected, -1, NULL, &len2, NULL); len1 = (len1 * 2); len2 = (len2 * 2); if (memcmp (s, s2, len2)) return 1; memcpy (s, s1, len1); return 0; } LIBTEST_API int STDCALL mono_test_empty_pinvoke (int i) { return i; } LIBTEST_API int STDCALL mono_test_marshal_bool_byref (int a, int *b, int c) { int res = *b; *b = 1; return res; } LIBTEST_API int STDCALL mono_test_marshal_bool_in_as_I1_U1 (char bTrue, char bFalse) { if (!bTrue) return 1; if (bFalse) return 2; return 0; } LIBTEST_API int STDCALL mono_test_marshal_bool_out_as_I1_U1 (char* bTrue, char* bFalse) { if (!bTrue || !bFalse) return 3; *bTrue = 1; *bFalse = 0; return 0; } LIBTEST_API int STDCALL mono_test_marshal_bool_ref_as_I1_U1 (char* bTrue, char* bFalse) { if (!bTrue || !bFalse) return 4; if (!(*bTrue)) return 5; if (*bFalse) return 6; *bFalse = 1; *bTrue = 0; return 0; } LIBTEST_API int STDCALL mono_test_marshal_array (int *a1) { int i, sum = 0; for (i = 0; i < 50; i++) sum += a1 [i]; return sum; } LIBTEST_API int STDCALL mono_test_marshal_inout_array (int *a1) { int i, sum = 0; for (i = 0; i < 50; i++) { sum += a1 [i]; a1 [i] = 50 - a1 [i]; } return sum; } LIBTEST_API int /* cdecl */ mono_test_marshal_inout_array_cdecl (int *a1) { return mono_test_marshal_inout_array (a1); } LIBTEST_API int STDCALL mono_test_marshal_out_array (int *a1) { int i; for (i = 0; i < 50; i++) { a1 [i] = i; } return 0; } LIBTEST_API int STDCALL mono_test_marshal_out_byref_array_out_size_param (int **out_arr, int *out_len) { int *arr; int i, len; len = 4; arr = (gint32 *)marshal_alloc (sizeof (gint32) * len); for (i = 0; i < len; ++i) arr [i] = i; *out_arr = arr; *out_len = len; return 0; } LIBTEST_API int STDCALL mono_test_marshal_out_lparray_out_size_param (int *arr, int *out_len) { int i, len; len = 4; for (i = 0; i < len; ++i) arr [i] = i; *out_len = len; return 0; } LIBTEST_API int STDCALL mono_test_marshal_inout_nonblittable_array (gunichar2 *a1) { int i, sum = 0; for (i = 0; i < 10; i++) { a1 [i] = 'F'; } return sum; } typedef struct { int a; int b; int c; const char *d; gunichar2 *d2; } simplestruct; typedef struct { double x; double y; } point; LIBTEST_API simplestruct STDCALL mono_test_return_vtype (int i) { simplestruct res; static gunichar2 test2 [] = { 'T', 'E', 'S', 'T', '2', 0 }; res.a = 0; res.b = 1; res.c = 0; res.d = "TEST"; res.d2 = test2; return res; } LIBTEST_API void STDCALL mono_test_delegate_struct (void) { // printf ("TEST\n"); } typedef char* (STDCALL *ReturnStringDelegate) (const char *s); LIBTEST_API char * STDCALL mono_test_return_string (ReturnStringDelegate func) { char *res; // printf ("mono_test_return_string\n"); res = func ("TEST"); marshal_free (res); // printf ("got string: %s\n", res); return marshal_strdup ("12345"); } typedef 
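/* delegate receiving the struct by reference; the native caller rewrites
 * the fields before invoking it */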
int (STDCALL *RefVTypeDelegate) (int a, simplestruct *ss, int b); LIBTEST_API int STDCALL mono_test_ref_vtype (int a, simplestruct *ss, int b, RefVTypeDelegate func) { if (a == 1 && b == 2 && ss->a == 0 && ss->b == 1 && ss->c == 0 && !strcmp (ss->d, "TEST1")) { ss->a = 1; ss->b = 0; ss->c = 1; ss->d = "TEST2"; return func (a, ss, b); } return 1; } typedef int (STDCALL *OutVTypeDelegate) (int a, simplestruct *ss, int b); LIBTEST_API int STDCALL mono_test_marshal_out_struct (int a, simplestruct *ss, int b, OutVTypeDelegate func) { /* Check that the input pointer is ignored */ ss->d = (const char *)0x12345678; func (a, ss, b); if (ss->a && ss->b && ss->c && !strcmp (ss->d, "TEST3")) return 0; else return 1; } typedef int (STDCALL *InVTypeDelegate) (int a, simplestruct *ss, int b); LIBTEST_API int STDCALL mono_test_marshal_in_struct (int a, simplestruct *ss, int b, InVTypeDelegate func) { simplestruct ss2; int res; memcpy (&ss2, ss, sizeof (simplestruct)); res = func (a, ss, b); if (res) { printf ("mono_test_marshal_in_struct () failed: %d\n", res); return 1; } /* Check that no modifications is made to the struct */ if (ss2.a == ss->a && ss2.b == ss->b && ss2.c == ss->c && ss2.d == ss->d) return 0; else return 1; } typedef struct { int a; SimpleDelegate func, func2, func3; } DelegateStruct; LIBTEST_API DelegateStruct STDCALL mono_test_marshal_delegate_struct (DelegateStruct ds) { DelegateStruct res; res.a = ds.func (ds.a) + ds.func2 (ds.a) + (ds.func3 == NULL ? 0 : 1); res.func = ds.func; res.func2 = ds.func2; res.func3 = NULL; return res; } LIBTEST_API int STDCALL mono_test_marshal_byref_struct (simplestruct *ss, int a, int b, int c, char *d) { gboolean res = (ss->a == a && ss->b == b && ss->c == c && strcmp (ss->d, d) == 0); marshal_free ((char*)ss->d); ss->a = !ss->a; ss->b = !ss->b; ss->c = !ss->c; ss->d = marshal_strdup ("DEF"); return res ? 0 : 1; } typedef struct { int a; int b; int c; char *d; unsigned char e; double f; unsigned char g; guint64 h; } simplestruct2; LIBTEST_API int STDCALL mono_test_marshal_struct2 (simplestruct2 ss) { if (ss.a == 0 && ss.b == 1 && ss.c == 0 && !strcmp (ss.d, "TEST") && ss.e == 99 && ss.f == 1.5 && ss.g == 42 && ss.h == (guint64)123) return 0; return 1; } /* on HP some of the struct should be on the stack and not in registers */ LIBTEST_API int STDCALL mono_test_marshal_struct2_2 (int i, int j, int k, simplestruct2 ss) { if (i != 10 || j != 11 || k != 12) return 1; if (ss.a == 0 && ss.b == 1 && ss.c == 0 && !strcmp (ss.d, "TEST") && ss.e == 99 && ss.f == 1.5 && ss.g == 42 && ss.h == (guint64)123) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_lpstruct (simplestruct *ss) { if (ss->a == 0 && ss->b == 1 && ss->c == 0 && !strcmp (ss->d, "TEST")) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_lpstruct_blittable (point *p) { if (p->x == 1.0 && p->y == 2.0) return 0; else return 1; } LIBTEST_API int STDCALL mono_test_marshal_struct_array (simplestruct2 *ss) { if (! (ss[0].a == 0 && ss[0].b == 1 && ss[0].c == 0 && !strcmp (ss[0].d, "TEST") && ss[0].e == 99 && ss[0].f == 1.5 && ss[0].g == 42 && ss[0].h == (guint64)123)) return 1; if (! 
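/* second array element */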
(ss[1].a == 0 && ss[1].b == 0 && ss[1].c == 0 && !strcmp (ss[1].d, "TEST2") && ss[1].e == 100 && ss[1].f == 2.5 && ss[1].g == 43 && ss[1].h == (guint64)124)) return 1; return 0; } typedef struct long_align_struct { gint32 a; gint64 b; gint64 c; } long_align_struct; LIBTEST_API int STDCALL mono_test_marshal_long_align_struct_array (long_align_struct *ss) { return ss[0].a + ss[0].b + ss[0].c + ss[1].a + ss[1].b + ss[1].c; } LIBTEST_API simplestruct2 * STDCALL mono_test_marshal_class (int i, int j, int k, simplestruct2 *ss, int l) { simplestruct2 *res; if (!ss) return NULL; if (i != 10 || j != 11 || k != 12 || l != 14) return NULL; if (! (ss->a == 0 && ss->b == 1 && ss->c == 0 && !strcmp (ss->d, "TEST") && ss->e == 99 && ss->f == 1.5 && ss->g == 42 && ss->h == (guint64)123)) return NULL; res = marshal_new0 (simplestruct2, 1); memcpy (res, ss, sizeof (simplestruct2)); res->d = marshal_strdup ("TEST"); return res; } LIBTEST_API int STDCALL mono_test_marshal_byref_class (simplestruct2 **ssp) { simplestruct2 *ss = *ssp; simplestruct2 *res; if (! (ss->a == 0 && ss->b == 1 && ss->c == 0 && !strcmp (ss->d, "TEST") && ss->e == 99 && ss->f == 1.5 && ss->g == 42 && ss->h == (guint64)123)) return 1; res = marshal_new0 (simplestruct2, 1); memcpy (res, ss, sizeof (simplestruct2)); res->d = marshal_strdup ("TEST-RES"); *ssp = res; return 0; } MONO_DISABLE_WARNING (4172) // returning address of local static void * get_sp (void) { int i; void *p; /* Yes, this is correct, we are only trying to determine the value of the stack here */ p = &i; return p; } MONO_RESTORE_WARNING LIBTEST_API int STDCALL reliable_delegate (int a) { return a; } /* * Checks whether get_sp() works as expected. It doesn't work with gcc-2.95.3 on linux. */ static gboolean is_get_sp_reliable (void) { void *sp1, *sp2; reliable_delegate(1); sp1 = get_sp(); reliable_delegate(1); sp2 = get_sp(); return sp1 == sp2; } LIBTEST_API int STDCALL mono_test_marshal_delegate (SimpleDelegate delegate) { void *sp1, *sp2; /* Check that the delegate wrapper is stdcall */ delegate (2); sp1 = get_sp (); delegate (2); sp2 = get_sp (); if (is_get_sp_reliable()) g_assert (sp1 == sp2); return delegate (2); } static int STDCALL inc_cb (int i) { return i + 1; } LIBTEST_API int STDCALL mono_test_marshal_out_delegate (SimpleDelegate *delegate) { *delegate = inc_cb; return 0; } LIBTEST_API SimpleDelegate STDCALL mono_test_marshal_return_delegate (SimpleDelegate delegate) { return delegate; } typedef int (STDCALL *DelegateByrefDelegate) (void *); LIBTEST_API int STDCALL mono_test_marshal_delegate_ref_delegate (DelegateByrefDelegate del) { int (STDCALL *ptr) (int i); del (&ptr); return ptr (54); } static int STDCALL return_plus_one (int i) { return i + 1; } LIBTEST_API SimpleDelegate STDCALL mono_test_marshal_return_delegate_2 (void) { return return_plus_one; } typedef simplestruct (STDCALL *SimpleDelegate2) (simplestruct ss); static gboolean is_utf16_equals (gunichar2 *s1, const char *s2) { char *s; int res; s = g_utf16_to_utf8 (s1, -1, NULL, NULL, NULL); res = strcmp (s, s2); g_free (s); return res == 0; } LIBTEST_API int STDCALL mono_test_marshal_struct (simplestruct ss) { if (ss.a == 0 && ss.b == 1 && ss.c == 0 && !strcmp (ss.d, "TEST") && is_utf16_equals (ss.d2, "OK")) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_delegate2 (SimpleDelegate2 delegate) { simplestruct ss, res; ss.a = 0; ss.b = 1; ss.c = 0; ss.d = "TEST"; ss.d2 = g_utf8_to_utf16 ("TEST2", -1, NULL, NULL, NULL); res = delegate (ss); if (! 
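/* the delegate must flip the three boolean fields and return strings
 * carrying a -RES suffix */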
(res.a && !res.b && res.c && !strcmp (res.d, "TEST-RES") && is_utf16_equals (res.d2, "TEST2-RES"))) return 1; return 0; } typedef simplestruct* (STDCALL *SimpleDelegate4) (simplestruct *ss); LIBTEST_API int STDCALL mono_test_marshal_delegate4 (SimpleDelegate4 delegate) { simplestruct ss; simplestruct *res; ss.a = 0; ss.b = 1; ss.c = 0; ss.d = "TEST"; /* Check argument */ res = delegate (&ss); if (!res) return 1; /* Check return value */ if (! (!res->a && res->b && !res->c && !strcmp (res->d, "TEST"))) return 2; /* Check NULL argument and NULL result */ res = delegate (NULL); if (res) return 3; return 0; } typedef int (STDCALL *SimpleDelegate5) (simplestruct **ss); LIBTEST_API int STDCALL mono_test_marshal_delegate5 (SimpleDelegate5 delegate) { simplestruct ss; int res; simplestruct *ptr; ss.a = 0; ss.b = 1; ss.c = 0; ss.d = "TEST"; ptr = &ss; res = delegate (&ptr); if (res != 0) return 1; if (!(ptr->a && !ptr->b && ptr->c && !strcmp (ptr->d, "RES"))) return 2; return 0; } LIBTEST_API int STDCALL mono_test_marshal_delegate6 (SimpleDelegate5 delegate) { delegate (NULL); return 0; } typedef int (STDCALL *SimpleDelegate7) (simplestruct **ss); LIBTEST_API int STDCALL mono_test_marshal_delegate7 (SimpleDelegate7 delegate) { int res; simplestruct *ptr; /* Check that the input pointer is ignored */ ptr = (simplestruct *)0x12345678; res = delegate (&ptr); if (res != 0) return 1; if (!(ptr->a && !ptr->b && ptr->c && !strcmp (ptr->d, "RES"))) return 2; return 0; } typedef int (STDCALL *InOutByvalClassDelegate) (simplestruct *ss); LIBTEST_API int STDCALL mono_test_marshal_inout_byval_class_delegate (InOutByvalClassDelegate delegate) { int res; simplestruct ss; ss.a = FALSE; ss.b = TRUE; ss.c = FALSE; ss.d = g_strdup_printf ("%s", "FOO"); res = delegate (&ss); if (res != 0) return 1; if (!(ss.a && !ss.b && ss.c && !strcmp (ss.d, "RES"))) return 2; return 0; } typedef int (STDCALL *SimpleDelegate8) (gunichar2 *s); LIBTEST_API int STDCALL mono_test_marshal_delegate8 (SimpleDelegate8 delegate, gunichar2 *s) { return delegate (s); } typedef int (STDCALL *return_int_fnt) (int i); typedef int (STDCALL *SimpleDelegate9) (return_int_fnt d); LIBTEST_API int STDCALL mono_test_marshal_delegate9 (SimpleDelegate9 delegate, gpointer ftn) { return delegate ((return_int_fnt)ftn); } static int STDCALL return_self (int i) { return i; } LIBTEST_API int STDCALL mono_test_marshal_delegate10 (SimpleDelegate9 delegate) { return delegate (return_self); } typedef int (STDCALL *PrimitiveByrefDelegate) (int *i); LIBTEST_API int STDCALL mono_test_marshal_primitive_byref_delegate (PrimitiveByrefDelegate delegate) { int i = 1; int res = delegate (&i); if (res != 0) return res; if (i != 2) return 2; return 0; } typedef int (STDCALL *return_int_delegate) (int i); typedef return_int_delegate (STDCALL *ReturnDelegateDelegate) (void); LIBTEST_API int STDCALL mono_test_marshal_return_delegate_delegate (ReturnDelegateDelegate d) { return (d ()) (55); } typedef int (STDCALL *VirtualDelegate) (int); LIBTEST_API int STDCALL mono_test_marshal_virtual_delegate (VirtualDelegate del) { return del (42); } typedef char* (STDCALL *IcallDelegate) (const char *); LIBTEST_API int STDCALL mono_test_marshal_icall_delegate (IcallDelegate del) { char *res = del ("ABC"); return strcmp (res, "ABC") == 0 ? 
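/* 0 == the string round-tripped unchanged */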
0 : 1; } typedef char* (STDCALL *NullableReturnDelegate) (void); LIBTEST_API void STDCALL mono_test_marshal_nullable_ret_delegate (NullableReturnDelegate del) { del (); } LIBTEST_API int STDCALL mono_test_marshal_stringbuilder (char *s, int n) { const char m[] = "This is my message. Isn't it nice?"; if (strcmp (s, "ABCD") != 0) return 1; memcpy(s, m, n); s [n] = '\0'; return 0; } LIBTEST_API int STDCALL mono_test_marshal_stringbuilder_append (char *s, int length) { const char out_sentinel[] = "CSHARP_"; const char out_len = strlen (out_sentinel); for (int i=0; i < length; i++) { s [i] = out_sentinel [i % out_len]; } s [length] = '\0'; return 0; } LIBTEST_API int STDCALL mono_test_marshal_stringbuilder_default (char *s, int n) { const char m[] = "This is my message. Isn't it nice?"; memcpy(s, m, n); s [n] = '\0'; return 0; } LIBTEST_API int STDCALL mono_test_marshal_stringbuilder_unicode (gunichar2 *s, int n) { const char m[] = "This is my message. Isn't it nice?"; gunichar2* s2; glong len; s2 = g_utf8_to_utf16 (m, -1, NULL, &len, NULL); len = (len * 2) + 2; if (len > (n * 2)) len = n * 2; memcpy (s, s2, len); g_free (s2); return 0; } LIBTEST_API void STDCALL mono_test_marshal_stringbuilder_out (char **s) { const char m[] = "This is my message. Isn't it nice?"; char *str; str = (char *)marshal_alloc (strlen (m) + 1); memcpy (str, m, strlen (m) + 1); *s = str; } LIBTEST_API int STDCALL mono_test_marshal_stringbuilder_out_unicode (gunichar2 **s) { const char m[] = "This is my message. Isn't it nice?"; gunichar2 *s2; glong len; s2 = g_utf8_to_utf16 (m, -1, NULL, &len, NULL); len = (len * 2) + 2; *s = (gunichar2 *)marshal_alloc (len); memcpy (*s, s2, len); g_free (s2); return 0; } LIBTEST_API int STDCALL mono_test_marshal_stringbuilder_ref (char **s) { const char m[] = "This is my message. Isn't it nice?"; char *str; if (strcmp (*s, "ABC")) return 1; str = (char *)marshal_alloc (strlen (m) + 1); memcpy (str, m, strlen (m) + 1); *s = str; return 0; } LIBTEST_API void STDCALL mono_test_marshal_stringbuilder_utf16_tolower (short *s, int n) { for (int i = 0; i < n; i++) s[i] = tolower(s[i]); } #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wc++-compat" #endif /* * Standard C and C++ doesn't allow empty structs, empty structs will always have a size of 1 byte. * GCC have an extension to allow empty structs, https://gcc.gnu.org/onlinedocs/gcc/Empty-Structures.html. * This cause a little dilemma since runtime build using none GCC compiler will not be compatible with * GCC build C libraries and the other way around. On platforms where empty structs has size of 1 byte * it must be represented in call and cannot be dropped. On Windows x64 structs will always be represented in the call * meaning that an empty struct must have a representation in the callee in order to correctly follow the ABI used by the * C/C++ standard and the runtime. */ typedef struct { #if !defined(__GNUC__) || defined(TARGET_WIN32) char a; #endif } EmptyStruct; #ifdef __GNUC__ #pragma GCC diagnostic pop #endif LIBTEST_API int STDCALL mono_test_marshal_empty_string_array (char **array) { return (array == NULL) ? 
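/* the test passes only if the array arrived as NULL */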
0 : 1; } LIBTEST_API int STDCALL mono_test_marshal_string_array (char **array) { if (strcmp (array [0], "ABC")) return 1; if (strcmp (array [1], "DEF")) return 2; if (array [2] != NULL) return 3; return 0; } LIBTEST_API int STDCALL mono_test_marshal_byref_string_array (char ***array) { if (*array == NULL) return 0; if (strcmp ((*array) [0], "Alpha")) return 2; if (strcmp ((*array) [1], "Beta")) return 2; if (strcmp ((*array) [2], "Gamma")) return 2; return 1; } LIBTEST_API int STDCALL mono_test_marshal_stringbuilder_array (char **array) { if (strcmp (array [0], "ABC")) return 1; if (strcmp (array [1], "DEF")) return 2; strcpy (array [0], "DEF"); strcpy (array [1], "ABC"); return 0; } LIBTEST_API int STDCALL mono_test_marshal_unicode_string_array (gunichar2 **array, char **array2) { GError *gerror = NULL; char *s; s = g_utf16_to_utf8 (array [0], -1, NULL, NULL, &gerror); if (strcmp (s, "ABC")) { g_free (s); return 1; } else g_free (s); s = g_utf16_to_utf8 (array [1], -1, NULL, NULL, &gerror); if (strcmp (s, "DEF")) { g_free (s); return 2; } else g_free (s); if (strcmp (array2 [0], "ABC")) return 3; if (strcmp (array2 [1], "DEF")) return 4; return 0; } /* this does not work on Redhat gcc 2.96 */ LIBTEST_API int STDCALL mono_test_empty_struct (int a, EmptyStruct es, int b) { // printf ("mono_test_empty_struct %d %d\n", a, b); // Intel icc on ia64 passes 'es' in 2 registers #if defined(__ia64) && defined(__INTEL_COMPILER) return 0; #else if (a == 1 && b == 2) return 0; return 1; #endif } LIBTEST_API EmptyStruct STDCALL mono_test_return_empty_struct (int a) { EmptyStruct s; memset (&s, 0, sizeof (s)); #if !(defined(__i386__) && defined(__clang__)) /* https://bugzilla.xamarin.com/show_bug.cgi?id=58901 */ g_assert (a == 42); #endif return s; } typedef struct { char a[100]; } ByValStrStruct; LIBTEST_API ByValStrStruct * STDCALL mono_test_byvalstr_gen (void) { ByValStrStruct *ret; ret = (ByValStrStruct *)malloc (sizeof (ByValStrStruct)); memset(ret, 'a', sizeof(ByValStrStruct)-1); ret->a[sizeof(ByValStrStruct)-1] = 0; return ret; } LIBTEST_API int STDCALL mono_test_byvalstr_check (ByValStrStruct* data, char* correctString) { int ret; ret = strcmp(data->a, correctString); // printf ("T1: %s\n", data->a); // printf ("T2: %s\n", correctString); /* we need g_free because the allocation was performed by mono_test_byvalstr_gen */ g_free (data); return (ret != 0); } typedef struct { guint16 a[4]; int flag; } ByValStrStruct_Unicode; LIBTEST_API int STDCALL mono_test_byvalstr_check_unicode (ByValStrStruct_Unicode *ref, int test) { if (ref->flag != 0x1234abcd){ printf ("overwritten data"); return 1; } if (test == 1 || test == 3){ if (ref->a [0] != '1' || ref->a [1] != '2' || ref->a [2] != '3') return 1; return 0; } if (test == 2){ if (ref->a [0] != '1' || ref->a [1] != '2') return 1; return 0; } return 10; } LIBTEST_API int STDCALL NameManglingAnsi (char *data) { return data [0] + data [1] + data [2]; } LIBTEST_API int STDCALL NameManglingAnsiA (char *data) { g_assert_not_reached (); } LIBTEST_API int STDCALL NameManglingAnsiW (char *data) { g_assert_not_reached (); } LIBTEST_API int STDCALL NameManglingAnsi2A (char *data) { return data [0] + data [1] + data [2]; } LIBTEST_API int STDCALL NameManglingAnsi2W (char *data) { g_assert_not_reached (); } LIBTEST_API int STDCALL NameManglingUnicode (char *data) { g_assert_not_reached (); } LIBTEST_API int STDCALL NameManglingUnicodeW (gunichar2 *data) { return data [0] + data [1] + data [2]; } LIBTEST_API int STDCALL NameManglingUnicode2 (gunichar2 *data) { return 
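/* sum of the first three UTF-16 code units */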
data [0] + data [1] + data [2]; } LIBTEST_API int STDCALL NameManglingAutoW (char *data) { #ifdef WIN32 return (data [0] + data [1] + data [2]) == 131 ? 0 : 1; #else g_assert_not_reached (); #endif } LIBTEST_API int STDCALL NameManglingAuto (char *data) { #ifndef WIN32 return (data [0] + data [1] + data [2]) == 198 ? 0 : 1; #else g_assert_not_reached (); #endif } typedef int (STDCALL *intcharFunc)(const char*); LIBTEST_API void STDCALL callFunction (intcharFunc f) { f ("ABC"); } typedef struct { const char* str; int i; } SimpleObj; LIBTEST_API int STDCALL class_marshal_test0 (SimpleObj *obj1) { // printf ("class_marshal_test0 %s %d\n", obj1->str, obj1->i); if (strcmp(obj1->str, "T1")) return -1; if (obj1->i != 4) return -2; return 0; } LIBTEST_API int STDCALL class_marshal_test4 (SimpleObj *obj1) { if (obj1) return -1; return 0; } LIBTEST_API void STDCALL class_marshal_test1 (SimpleObj **obj1) { SimpleObj *res = (SimpleObj *)malloc (sizeof (SimpleObj)); res->str = marshal_strdup ("ABC"); res->i = 5; *obj1 = res; } LIBTEST_API int STDCALL class_marshal_test2 (SimpleObj **obj1) { // printf ("class_marshal_test2 %s %d\n", (*obj1)->str, (*obj1)->i); if (strcmp((*obj1)->str, "ABC")) return -1; if ((*obj1)->i != 5) return -2; return 0; } LIBTEST_API int STDCALL string_marshal_test0 (char *str) { if (strcmp (str, "TEST0")) return -1; return 0; } LIBTEST_API void STDCALL string_marshal_test1 (const char **str) { *str = marshal_strdup ("TEST1"); } LIBTEST_API int STDCALL string_marshal_test2 (char **str) { // printf ("string_marshal_test2 %s\n", *str); if (strcmp (*str, "TEST1")) return -1; *str = marshal_strdup ("TEST2"); return 0; } LIBTEST_API int STDCALL string_marshal_test3 (char *str) { if (str) return -1; return 0; } typedef struct { int a; int b; } BlittableClass; LIBTEST_API BlittableClass* STDCALL TestBlittableClass (BlittableClass *vl) { BlittableClass *res; // printf ("TestBlittableClass %d %d\n", vl->a, vl->b); if (vl) { vl->a++; vl->b++; res = marshal_new0 (BlittableClass, 1); memcpy (res, vl, sizeof (BlittableClass)); } else { res = marshal_new0 (BlittableClass, 1); res->a = 42; res->b = 43; } return res; } typedef struct OSVERSIONINFO_STRUCT { int a; int b; } OSVERSIONINFO_STRUCT; LIBTEST_API int STDCALL MyGetVersionEx (OSVERSIONINFO_STRUCT *osvi) { // printf ("GOT %d %d\n", osvi->a, osvi->b); osvi->a += 1; osvi->b += 1; return osvi->a + osvi->b; } LIBTEST_API int STDCALL BugGetVersionEx (int a, int b, int c, int d, int e, int f, int g, int h, OSVERSIONINFO_STRUCT *osvi) { // printf ("GOT %d %d\n", osvi->a, osvi->b); osvi->a += 1; osvi->b += 1; return osvi->a + osvi->b; } LIBTEST_API int STDCALL mono_test_marshal_point (point pt) { // printf("point %g %g\n", pt.x, pt.y); if (pt.x == 1.25 && pt.y == 3.5) return 0; return 1; } typedef struct { int x; double y; } mixed_point; LIBTEST_API int STDCALL mono_test_marshal_mixed_point (mixed_point pt) { // printf("mixed point %d %g\n", pt.x, pt.y); if (pt.x == 5 && pt.y == 6.75) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_mixed_point_2 (mixed_point *pt) { if (pt->x != 5 || pt->y != 6.75) return 1; pt->x = 10; pt->y = 12.35; return 0; } LIBTEST_API int STDCALL marshal_test_ref_bool(int i, char *b1, short *b2, int *b3) { int res = 1; if (*b1 != 0 && *b1 != 1) return 1; if (*b2 != 0 && *b2 != -1) /* variant_bool */ return 1; if (*b3 != 0 && *b3 != 1) return 1; if (i == ((*b1 << 2) | (-*b2 << 1) | *b3)) res = 0; *b1 = !*b1; *b2 = ~*b2; *b3 = !*b3; return res; } struct BoolStruct { int i; char b1; short b2; /* variant_bool */ 
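	/* default bool marshalling: a 4-byte Win32 BOOL */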
int b3; }; LIBTEST_API int STDCALL marshal_test_bool_struct(struct BoolStruct *s) { int res = 1; if (s->b1 != 0 && s->b1 != 1) return 1; if (s->b2 != 0 && s->b2 != -1) return 1; if (s->b3 != 0 && s->b3 != 1) return 1; if (s->i == ((s->b1 << 2) | (-s->b2 << 1) | s->b3)) res = 0; s->b1 = !s->b1; s->b2 = ~s->b2; s->b3 = !s->b3; return res; } typedef struct { gint64 l; } LongStruct2; typedef struct { int i; LongStruct2 l; } LongStruct; LIBTEST_API int STDCALL mono_test_marshal_long_struct (LongStruct *s) { return s->i + s->l.l; } LIBTEST_API void STDCALL mono_test_last_error (int err) { #ifdef WIN32 SetLastError (err); /* * Make sure argument register used calling SetLastError * get's cleaned before returning back to caller. This is done to ensure * we don't get a undetected failure if error is preserved in register * on return since we read back value directly when doing p/invoke with SetLastError = true * into first argument register and then pass it to Mono function setting value in TLS. * If there is a codegen bug reading last error or the code has been incorrectly eliminated * this test could still succeed since expected error code could be left in argument register. * Below code just do something that shouldn't touch last error and won't be optimized away * but will change the argument registers to something different than err. */ char buffer[256] = { 0 }; char value[] = "Dummy"; strncpy (buffer, value, STRING_LENGTH (value)); #else mono_set_errno (err); #endif } LIBTEST_API int STDCALL mono_test_asany (void *ptr, int what) { switch (what) { case 1: return (*(int*)ptr == 5) ? 0 : 1; case 2: return strcmp ((const char*)ptr, "ABC") == 0 ? 0 : 1; case 3: { simplestruct2 ss = *(simplestruct2*)ptr; if (ss.a == 0 && ss.b == 1 && ss.c == 0 && !strcmp (ss.d, "TEST") && ss.e == 99 && ss.f == 1.5 && ss.g == 42 && ss.h == (guint64)123) return 0; else return 1; } case 4: { GError *gerror = NULL; char *s; s = g_utf16_to_utf8 ((const gunichar2 *)ptr, -1, NULL, NULL, &gerror); if (!s) return 1; if (!strcmp (s, "ABC")) { g_free (s); return 0; } else { g_free (s); return 1; } } case 5: { return (*(intptr_t*)ptr == 5) ? 0 : 1; } default: g_assert_not_reached (); } return 1; } typedef struct { int i; int j; int k; char *s; } AsAnyStruct; LIBTEST_API int STDCALL mono_test_marshal_asany_in (void* ptr) { AsAnyStruct *asAny = (AsAnyStruct *)ptr; int res = asAny->i + asAny->j + asAny->k; return res; } LIBTEST_API int STDCALL mono_test_marshal_asany_inout (void* ptr) { AsAnyStruct *asAny = (AsAnyStruct *)ptr; int res = asAny->i + asAny->j + asAny->k; marshal_free (asAny->s); asAny->i = 10; asAny->j = 20; asAny->k = 30; asAny->s = 0; return res; } LIBTEST_API int STDCALL mono_test_marshal_asany_out (void* ptr) { AsAnyStruct *asAny = (AsAnyStruct *)ptr; int res = asAny->i + asAny->j + asAny->k; asAny->i = 10; asAny->j = 20; asAny->k = 30; asAny->s = 0; return res; } /* * AMD64 marshalling tests. 
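 * The amd64_struct1..4 round-trips below exercise the main struct
 * classification cases of the SysV AMD64 ABI: a 16-byte all-int struct
 * (two INTEGER eightbytes), two ints packed into one eightbyte, a single
 * int, and a struct of two doubles passed in SSE registers.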
*/ typedef struct amd64_struct1 { int i; int j; int k; int l; } amd64_struct1; LIBTEST_API amd64_struct1 STDCALL mono_test_marshal_amd64_pass_return_struct1 (amd64_struct1 s) { s.i ++; s.j ++; s.k ++; s.l ++; return s; } LIBTEST_API amd64_struct1 STDCALL mono_test_marshal_amd64_pass_return_struct1_many_args (amd64_struct1 s, int i1, int i2, int i3, int i4, int i5, int i6, int i7, int i8) { s.i ++; s.j ++; s.k ++; s.l += 1 + i1 + i2 + i3 + i4 + i5 + i6 + i7 + i8; return s; } typedef struct amd64_struct2 { int i; int j; } amd64_struct2; LIBTEST_API amd64_struct2 STDCALL mono_test_marshal_amd64_pass_return_struct2 (amd64_struct2 s) { s.i ++; s.j ++; return s; } typedef struct amd64_struct3 { int i; } amd64_struct3; LIBTEST_API amd64_struct3 STDCALL mono_test_marshal_amd64_pass_return_struct3 (amd64_struct3 s) { s.i ++; return s; } typedef struct amd64_struct4 { double d1, d2; } amd64_struct4; LIBTEST_API amd64_struct4 STDCALL mono_test_marshal_amd64_pass_return_struct4 (amd64_struct4 s) { s.d1 ++; s.d2 ++; return s; } /* * IA64 marshalling tests. */ typedef struct test_struct5 { float d1, d2; } test_struct5; LIBTEST_API test_struct5 STDCALL mono_test_marshal_ia64_pass_return_struct5 (double d1, double d2, test_struct5 s, int i, double d3, double d4) { s.d1 += d1 + d2 + i; s.d2 += d3 + d4 + i; return s; } typedef struct test_struct6 { double d1, d2; } test_struct6; LIBTEST_API test_struct6 STDCALL mono_test_marshal_ia64_pass_return_struct6 (double d1, double d2, test_struct6 s, int i, double d3, double d4) { s.d1 += d1 + d2 + i; s.d2 += d3 + d4; return s; } static guint32 custom_res [2]; LIBTEST_API void* STDCALL mono_test_marshal_pass_return_custom (int i, guint32 *ptr, int j) { /* ptr will be freed by CleanupNative, so make a copy */ custom_res [0] = 0; /* not allocated by AllocHGlobal */ custom_res [1] = ptr [1]; return &custom_res; } LIBTEST_API int STDCALL mono_test_marshal_pass_out_custom (int i, guint32 **ptr, int j) { custom_res [0] = 0; custom_res [1] = i + j + 10; *ptr = custom_res; return 0; } LIBTEST_API int STDCALL mono_test_marshal_pass_inout_custom (int i, guint32 *ptr, int j) { ptr [0] = 0; ptr [1] = i + ptr [1] + j; return 0; } LIBTEST_API int STDCALL mono_test_marshal_pass_out_byval_custom (int i, guint32 *ptr, int j) { return ptr == NULL ? 0 : 1; } LIBTEST_API int STDCALL mono_test_marshal_pass_byref_custom (int i, guint32 **ptr, int j) { (*ptr)[1] += i + j; return 0; } LIBTEST_API void* STDCALL mono_test_marshal_pass_return_custom2 (int i, guint32 *ptr, int j) { g_assert_not_reached (); return NULL; } LIBTEST_API void* STDCALL mono_test_marshal_pass_return_custom_null (int i, guint32 *ptr, int j) { g_assert (ptr == NULL); return NULL; } typedef void *(STDCALL *PassReturnPtrDelegate) (void *ptr); LIBTEST_API int STDCALL mono_test_marshal_pass_return_custom_in_delegate (PassReturnPtrDelegate del) { guint32 buf [2]; guint32 res; guint32 *ptr; buf [0] = 0; buf [1] = 10; ptr = (guint32 *)del (&buf); res = ptr [1]; #ifdef WIN32 /* FIXME: Freed with FreeHGlobal */ #else g_free (ptr); #endif return res; } LIBTEST_API int STDCALL mono_test_marshal_pass_return_custom_null_in_delegate (PassReturnPtrDelegate del) { void *ptr = del (NULL); return (ptr == NULL) ? 
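/* 15 signals that the null value round-tripped */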
15 : 0; } typedef void (STDCALL *CustomOutParamDelegate) (void **pptr); LIBTEST_API int STDCALL mono_test_marshal_custom_out_param_delegate (CustomOutParamDelegate del) { void* pptr = (void*)del; del (&pptr); if(pptr != NULL) return 1; return 0; } typedef int (STDCALL *ReturnEnumDelegate) (int e); LIBTEST_API int STDCALL mono_test_marshal_return_enum_delegate (ReturnEnumDelegate func) { return func (1); } typedef struct { int a, b, c; gint64 d; } BlittableStruct; typedef BlittableStruct (STDCALL *SimpleDelegate10) (BlittableStruct ss); LIBTEST_API int STDCALL mono_test_marshal_blittable_struct_delegate (SimpleDelegate10 delegate) { BlittableStruct ss, res; ss.a = 1; ss.b = 2; ss.c = 3; ss.d = 55; res = delegate (ss); if (! ((res.a == -1) && (res.b == -2) && (res.c == -3) && (res.d == -55))) return 1; return 0; } LIBTEST_API int STDCALL mono_test_stdcall_name_mangling (int a, int b, int c) { return a + b + c; } LIBTEST_API int mono_test_stdcall_mismatch_1 (int a, int b, int c) { return a + b + c; } LIBTEST_API int STDCALL mono_test_stdcall_mismatch_2 (int a, int b, int c) { return a + b + c; } /* * PASSING AND RETURNING SMALL STRUCTURES FROM DELEGATES TESTS */ typedef struct { int i; } SmallStruct1; typedef SmallStruct1 (STDCALL *SmallStructDelegate1) (SmallStruct1 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate1 (SmallStructDelegate1 delegate) { SmallStruct1 ss, res; ss.i = 1; res = delegate (ss); if (! (res.i == -1)) return 1; return 0; } typedef struct { gint16 i, j; } SmallStruct2; typedef SmallStruct2 (STDCALL *SmallStructDelegate2) (SmallStruct2 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate2 (SmallStructDelegate2 delegate) { SmallStruct2 ss, res; ss.i = 2; ss.j = 3; res = delegate (ss); if (! ((res.i == -2) && (res.j == -3))) return 1; return 0; } typedef struct { gint16 i; gint8 j; } SmallStruct3; typedef SmallStruct3 (STDCALL *SmallStructDelegate3) (SmallStruct3 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate3 (SmallStructDelegate3 delegate) { SmallStruct3 ss, res; ss.i = 1; ss.j = 2; res = delegate (ss); if (! ((res.i == -1) && (res.j == -2))) return 1; return 0; } typedef struct { gint16 i; } SmallStruct4; typedef SmallStruct4 (STDCALL *SmallStructDelegate4) (SmallStruct4 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate4 (SmallStructDelegate4 delegate) { SmallStruct4 ss, res; ss.i = 1; res = delegate (ss); if (! (res.i == -1)) return 1; return 0; } typedef struct { gint64 i; } SmallStruct5; typedef SmallStruct5 (STDCALL *SmallStructDelegate5) (SmallStruct5 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate5 (SmallStructDelegate5 delegate) { SmallStruct5 ss, res; ss.i = 5; res = delegate (ss); if (! (res.i == -5)) return 1; return 0; } typedef struct { int i, j; } SmallStruct6; typedef SmallStruct6 (STDCALL *SmallStructDelegate6) (SmallStruct6 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate6 (SmallStructDelegate6 delegate) { SmallStruct6 ss, res; ss.i = 1; ss.j = 2; res = delegate (ss); if (! ((res.i == -1) && (res.j == -2))) return 1; return 0; } typedef struct { int i; gint16 j; } SmallStruct7; typedef SmallStruct7 (STDCALL *SmallStructDelegate7) (SmallStruct7 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate7 (SmallStructDelegate7 delegate) { SmallStruct7 ss, res; ss.i = 1; ss.j = 2; res = delegate (ss); if (! 
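/* the delegate negates both fields */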
((res.i == -1) && (res.j == -2))) return 1; return 0; } typedef struct { float i; } SmallStruct8; typedef SmallStruct8 (STDCALL *SmallStructDelegate8) (SmallStruct8 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate8 (SmallStructDelegate8 delegate) { SmallStruct8 ss, res; ss.i = 1.0; res = delegate (ss); if (! ((res.i == -1.0))) return 1; return 0; } typedef struct { double i; } SmallStruct9; typedef SmallStruct9 (STDCALL *SmallStructDelegate9) (SmallStruct9 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate9 (SmallStructDelegate9 delegate) { SmallStruct9 ss, res; ss.i = 1.0; res = delegate (ss); if (! ((res.i == -1.0))) return 1; return 0; } typedef struct { float i, j; } SmallStruct10; typedef SmallStruct10 (STDCALL *SmallStructDelegate10) (SmallStruct10 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate10 (SmallStructDelegate10 delegate) { SmallStruct10 ss, res; ss.i = 1.0; ss.j = 2.0; res = delegate (ss); if (! ((res.i == -1.0) && (res.j == -2.0))) return 1; return 0; } typedef struct { float i; int j; } SmallStruct11; typedef SmallStruct11 (STDCALL *SmallStructDelegate11) (SmallStruct11 ss); LIBTEST_API int STDCALL mono_test_marshal_small_struct_delegate11 (SmallStructDelegate11 delegate) { SmallStruct11 ss, res; ss.i = 1.0; ss.j = 2; res = delegate (ss); if (! ((res.i == -1.0) && (res.j == -2))) return 1; return 0; } typedef int (STDCALL *ArrayDelegate) (int i, char *j, void *arr); LIBTEST_API int STDCALL mono_test_marshal_array_delegate (void *arr, int len, ArrayDelegate del) { return del (len, NULL, arr); } typedef int (STDCALL *ArrayDelegateLong) (gint64 i, char *j, void *arr); LIBTEST_API int STDCALL mono_test_marshal_array_delegate_long (void *arr, gint64 len, ArrayDelegateLong del) { return del (len, NULL, arr); } LIBTEST_API int STDCALL mono_test_marshal_out_array_delegate (int *arr, int len, ArrayDelegate del) { del (len, NULL, arr); if ((arr [0] != 1) || (arr [1] != 2)) return 1; else return 0; } typedef gunichar2* (STDCALL *UnicodeStringDelegate) (gunichar2 *message); LIBTEST_API int STDCALL mono_test_marshal_return_unicode_string_delegate (UnicodeStringDelegate del) { const char m[] = "abcdef"; gunichar2 *s2, *res; glong len; s2 = g_utf8_to_utf16 (m, -1, NULL, &len, NULL); res = del (s2); marshal_free (res); return 0; } LIBTEST_API int STDCALL mono_test_marshal_out_string_array_delegate (char **arr, int len, ArrayDelegate del) { del (len, NULL, arr); if (!strcmp (arr [0], "ABC") && !strcmp (arr [1], "DEF")) return 0; else return 1; } typedef int (*CdeclDelegate) (int i, int j); LIBTEST_API int STDCALL mono_test_marshal_cdecl_delegate (CdeclDelegate del) { int i; for (i = 0; i < 1000; ++i) del (1, 2); return 0; } typedef char** (STDCALL *ReturnStringArrayDelegate) (int i); LIBTEST_API int STDCALL mono_test_marshal_return_string_array_delegate (ReturnStringArrayDelegate d) { char **arr = d (2); int res; if (arr == NULL) return 3; if (strcmp (arr [0], "ABC") || strcmp (arr [1], "DEF")) res = 1; else res = 0; marshal_free (arr); return res; } typedef int (STDCALL *ByrefStringDelegate) (char **s); LIBTEST_API int STDCALL mono_test_marshal_byref_string_delegate (ByrefStringDelegate d) { char *s = (char*)"ABC"; int res; res = d (&s); if (res != 0) return res; if (!strcmp (s, "DEF")) res = 0; else res = 2; marshal_free (s); return res; } LIBTEST_API int STDCALL add_delegate (int i, int j) { return i + j; } LIBTEST_API gpointer STDCALL mono_test_marshal_return_fnptr (void) { return (gpointer)&add_delegate; } LIBTEST_API int 
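/* HandleRef / SafeHandle marshalling tests follow */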
STDCALL mono_xr (int code) { printf ("codigo %x\n", code); return code + 1234; } typedef struct { int handle; } HandleRef; LIBTEST_API HandleRef STDCALL mono_xr_as_handle (int code) { HandleRef ref; memset (&ref, 0, sizeof (ref)); return ref; } typedef struct { int a; void *handle1; void *handle2; int b; } HandleStructs; LIBTEST_API int STDCALL mono_safe_handle_struct_ref (HandleStructs *x) { printf ("Dingus Ref! \n"); printf ("Values: %d %d %p %p\n", x->a, x->b, x->handle1, x->handle2); if (x->a != 1234) return 1; if (x->b != 8743) return 2; if (x->handle1 != (void*) 0x7080feed) return 3; if (x->handle2 != (void*) 0x1234abcd) return 4; return 0xf00d; } LIBTEST_API int STDCALL mono_safe_handle_struct (HandleStructs x) { printf ("Dingus Standard! \n"); printf ("Values: %d %d %p %p\n", x.a, x.b, x.handle1, x.handle2); if (x.a != 1234) return 1; if (x.b != 8743) return 2; if (x.handle1 != (void*) 0x7080feed) return 3; if (x.handle2 != (void*) 0x1234abcd) return 4; return 0xf00f; } typedef struct { void *a; } TrivialHandle; LIBTEST_API int STDCALL mono_safe_handle_struct_simple (TrivialHandle x) { printf ("The value is %p\n", x.a); return ((int)(gsize)x.a) * 2; } LIBTEST_API int STDCALL mono_safe_handle_return (void) { return 0x1000f00d; } LIBTEST_API void STDCALL mono_safe_handle_ref (void **handle) { if (*handle != 0){ *handle = (void *) 0x800d; return; } *handle = (void *) 0xbad; } LIBTEST_API void* STDCALL mono_safe_handle_ref_nomod (void **handle) { return *handle; } LIBTEST_API double STDCALL mono_test_marshal_date_time (double d, double *d2) { *d2 = d; return d; } /* * COM INTEROP TESTS */ #ifndef WIN32 typedef struct { guint32 a; guint16 b; guint16 c; guint8 d[8]; } GUID; typedef const GUID *REFIID; typedef struct IDispatch IDispatch; typedef struct { int (STDCALL *QueryInterface)(IDispatch *iface, REFIID iid, gpointer *out); int (STDCALL *AddRef)(IDispatch *iface); int (STDCALL *Release)(IDispatch *iface); int (STDCALL *GetTypeInfoCount)(IDispatch *iface, unsigned int *count); int (STDCALL *GetTypeInfo)(IDispatch *iface, unsigned int index, unsigned int lcid, gpointer *out); int (STDCALL *GetIDsOfNames)(IDispatch *iface, REFIID iid, gpointer names, unsigned int count, unsigned int lcid, gpointer ids); int (STDCALL *Invoke)(IDispatch *iface, unsigned int dispid, REFIID iid, unsigned int lcid, unsigned short flags, gpointer params, gpointer result, gpointer excepinfo, gpointer err_arg); } IDispatchVtbl; struct IDispatch { const IDispatchVtbl *lpVtbl; }; typedef struct { guint16 vt; guint16 wReserved1; guint16 wReserved2; guint16 wReserved3; union { gint64 llVal; gint32 lVal; guint8 bVal; gint16 iVal; float fltVal; double dblVal; gint16 boolVal; gunichar2* bstrVal; gint8 cVal; guint16 uiVal; guint32 ulVal; guint64 ullVal; gpointer byref; struct { gpointer pvRecord; gpointer pRecInfo; }; }; } VARIANT; typedef enum { VARIANT_TRUE = -1, VARIANT_FALSE = 0 } VariantBool; typedef enum { VT_EMPTY = 0, VT_NULL = 1, VT_I2 = 2, VT_I4 = 3, VT_R4 = 4, VT_R8 = 5, VT_CY = 6, VT_DATE = 7, VT_BSTR = 8, VT_DISPATCH = 9, VT_ERROR = 10, VT_BOOL = 11, VT_VARIANT = 12, VT_UNKNOWN = 13, VT_DECIMAL = 14, VT_I1 = 16, VT_UI1 = 17, VT_UI2 = 18, VT_UI4 = 19, VT_I8 = 20, VT_UI8 = 21, VT_INT = 22, VT_UINT = 23, VT_VOID = 24, VT_HRESULT = 25, VT_PTR = 26, VT_SAFEARRAY = 27, VT_CARRAY = 28, VT_USERDEFINED = 29, VT_LPSTR = 30, VT_LPWSTR = 31, VT_RECORD = 36, VT_FILETIME = 64, VT_BLOB = 65, VT_STREAM = 66, VT_STORAGE = 67, VT_STREAMED_OBJECT = 68, VT_STORED_OBJECT = 69, VT_BLOB_OBJECT = 70, VT_CF = 71, VT_CLSID = 72, 
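	/* modifier bits OR-ed onto the base VT_* values */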
VT_VECTOR = 4096, VT_ARRAY = 8192, VT_BYREF = 16384 } VarEnum; void VariantInit(VARIANT* vt) { vt->vt = VT_EMPTY; } #define S_OK 0 #endif LIBTEST_API int STDCALL mono_test_marshal_bstr_in(gunichar2* bstr) { gint32 result = 0; gchar* bstr_utf8 = g_utf16_to_utf8 (bstr, -1, NULL, NULL, NULL); result = strcmp("mono_test_marshal_bstr_in", bstr_utf8); g_free(bstr_utf8); if (result == 0) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_bstr_out(gunichar2** bstr) { *bstr = marshal_bstr_alloc ("mono_test_marshal_bstr_out"); return 0; } LIBTEST_API int STDCALL mono_test_marshal_bstr_in_null(gunichar2* bstr) { if (!bstr) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_bstr_out_null(gunichar2** bstr) { *bstr = NULL; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_sbyte(VARIANT variant) { if (variant.vt == VT_I1 && variant.cVal == 100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_byte(VARIANT variant) { if (variant.vt == VT_UI1 && variant.bVal == 100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_short(VARIANT variant) { if (variant.vt == VT_I2 && variant.iVal == 314) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_ushort(VARIANT variant) { if (variant.vt == VT_UI2 && variant.uiVal == 314) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_int(VARIANT variant) { if (variant.vt == VT_I4 && variant.lVal == 314) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_uint(VARIANT variant) { if (variant.vt == VT_UI4 && variant.ulVal == 314) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_long(VARIANT variant) { if (variant.vt == VT_I8 && variant.llVal == 314) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_ulong(VARIANT variant) { if (variant.vt == VT_UI8 && variant.ullVal == 314) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_float(VARIANT variant) { if (variant.vt == VT_R4 && (variant.fltVal - 3.14)/3.14 < .001) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_double(VARIANT variant) { if (variant.vt == VT_R8 && (variant.dblVal - 3.14)/3.14 < .001) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_bstr(VARIANT variant) { gint32 result = 0; gchar* bstr_utf8 = g_utf16_to_utf8 (variant.bstrVal, -1, NULL, NULL, NULL); result = strcmp("PI", bstr_utf8); g_free(bstr_utf8); if (variant.vt == VT_BSTR && !result) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_bool_true (VARIANT variant) { if (variant.vt == VT_BOOL && variant.boolVal == VARIANT_TRUE) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_in_bool_false (VARIANT variant) { if (variant.vt == VT_BOOL && variant.boolVal == VARIANT_FALSE) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_sbyte(VARIANT* variant) { variant->vt = VT_I1; variant->cVal = 100; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_sbyte_byref(VARIANT* variant) { variant->vt = VT_I1|VT_BYREF; variant->byref = marshal_alloc(1); *((gint8*)variant->byref) = 100; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_byte(VARIANT* variant) { variant->vt = VT_UI1; variant->bVal = 100; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_byte_byref(VARIANT* variant) { variant->vt = VT_UI1|VT_BYREF; variant->byref = marshal_alloc(1); *((gint8*)variant->byref) = 100; return 0; 
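	/* VT_BYREF variants store a pointer to a separately allocated
	 * payload rather than the value itself */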
} LIBTEST_API int STDCALL mono_test_marshal_variant_out_short(VARIANT* variant) { variant->vt = VT_I2; variant->iVal = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_short_byref(VARIANT* variant) { variant->vt = VT_I2|VT_BYREF; variant->byref = marshal_alloc(2); *((gint16*)variant->byref) = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_ushort(VARIANT* variant) { variant->vt = VT_UI2; variant->uiVal = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_ushort_byref(VARIANT* variant) { variant->vt = VT_UI2|VT_BYREF; variant->byref = marshal_alloc(2); *((guint16*)variant->byref) = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_int(VARIANT* variant) { variant->vt = VT_I4; variant->lVal = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_int_byref(VARIANT* variant) { variant->vt = VT_I4|VT_BYREF; variant->byref = marshal_alloc(4); *((gint32*)variant->byref) = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_uint(VARIANT* variant) { variant->vt = VT_UI4; variant->ulVal = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_uint_byref(VARIANT* variant) { variant->vt = VT_UI4|VT_BYREF; variant->byref = marshal_alloc(4); *((guint32*)variant->byref) = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_long(VARIANT* variant) { variant->vt = VT_I8; variant->llVal = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_long_byref(VARIANT* variant) { variant->vt = VT_I8|VT_BYREF; variant->byref = marshal_alloc(8); *((gint64*)variant->byref) = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_ulong(VARIANT* variant) { variant->vt = VT_UI8; variant->ullVal = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_ulong_byref(VARIANT* variant) { variant->vt = VT_UI8|VT_BYREF; variant->byref = marshal_alloc(8); *((guint64*)variant->byref) = 314; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_float(VARIANT* variant) { variant->vt = VT_R4; variant->fltVal = 3.14; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_float_byref(VARIANT* variant) { variant->vt = VT_R4|VT_BYREF; variant->byref = marshal_alloc(4); *((float*)variant->byref) = 3.14; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_double(VARIANT* variant) { variant->vt = VT_R8; variant->dblVal = 3.14; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_double_byref(VARIANT* variant) { variant->vt = VT_R8|VT_BYREF; variant->byref = marshal_alloc(8); *((double*)variant->byref) = 3.14; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_bstr(VARIANT* variant) { variant->vt = VT_BSTR; variant->bstrVal = marshal_bstr_alloc("PI"); return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_bstr_byref(VARIANT* variant) { variant->vt = VT_BSTR|VT_BYREF; variant->byref = marshal_alloc(sizeof(gpointer)); *((gunichar**)variant->byref) = (gunichar*)marshal_bstr_alloc("PI"); return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_bool_true (VARIANT* variant) { variant->vt = VT_BOOL; variant->boolVal = VARIANT_TRUE; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_bool_true_byref (VARIANT* variant) { variant->vt = VT_BOOL|VT_BYREF; variant->byref = marshal_alloc(2); *((gint16*)variant->byref) = VARIANT_TRUE; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_bool_false (VARIANT* variant) { variant->vt = VT_BOOL; 
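	/* VARIANT_FALSE is 0; VARIANT_TRUE is -1, not 1 */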
variant->boolVal = VARIANT_FALSE; return 0; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_bool_false_byref (VARIANT* variant) { variant->vt = VT_BOOL|VT_BYREF; variant->byref = marshal_alloc(2); *((gint16*)variant->byref) = VARIANT_FALSE; return 0; } typedef int (STDCALL *VarFunc) (int vt, VARIANT variant); typedef int (STDCALL *VarRefFunc) (int vt, VARIANT* variant); LIBTEST_API int STDCALL mono_test_marshal_variant_in_sbyte_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_I1; vt.cVal = -100; return func (VT_I1, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_byte_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_UI1; vt.bVal = 100; return func (VT_UI1, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_short_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_I2; vt.iVal = -100; return func (VT_I2, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_ushort_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_UI2; vt.uiVal = 100; return func (VT_UI2, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_int_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_I4; vt.lVal = -100; return func (VT_I4, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_uint_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_UI4; vt.ulVal = 100; return func (VT_UI4, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_long_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_I8; vt.llVal = -100; return func (VT_I8, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_ulong_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_UI8; vt.ullVal = 100; return func (VT_UI8, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_float_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_R4; vt.fltVal = 3.14; return func (VT_R4, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_double_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_R8; vt.dblVal = 3.14; return func (VT_R8, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_bstr_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_BSTR; vt.bstrVal = marshal_bstr_alloc("PI"); return func (VT_BSTR, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_bool_true_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_BOOL; vt.boolVal = VARIANT_TRUE; return func (VT_BOOL, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_in_bool_false_unmanaged(VarFunc func) { VARIANT vt; vt.vt = VT_BOOL; vt.boolVal = VARIANT_FALSE; return func (VT_BOOL, vt); } LIBTEST_API int STDCALL mono_test_marshal_variant_out_sbyte_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_I1, &vt); if (vt.vt == VT_I1 && vt.cVal == -100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_byte_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_UI1, &vt); if (vt.vt == VT_UI1 && vt.bVal == 100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_short_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_I2, &vt); if (vt.vt == VT_I2 && vt.iVal == -100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_ushort_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_UI2, &vt); if (vt.vt == VT_UI2 && vt.uiVal == 100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_int_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_I4, &vt); if (vt.vt == VT_I4 && vt.lVal == -100) return 0; return 1; } LIBTEST_API int STDCALL mono_test_marshal_variant_out_uint_unmanaged(VarRefFunc 
func) { VARIANT vt; VariantInit (&vt); func (VT_UI4, &vt); if (vt.vt == VT_UI4 && vt.ulVal == 100) return 0; return 1; }
LIBTEST_API int STDCALL mono_test_marshal_variant_out_long_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_I8, &vt); if (vt.vt == VT_I8 && vt.llVal == -100) return 0; return 1; }
LIBTEST_API int STDCALL mono_test_marshal_variant_out_ulong_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_UI8, &vt); if (vt.vt == VT_UI8 && vt.ullVal == 100) return 0; return 1; }
LIBTEST_API int STDCALL mono_test_marshal_variant_out_float_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_R4, &vt); if (vt.vt == VT_R4 && fabs (vt.fltVal - 3.14f) < 1e-10) return 0; return 1; }
LIBTEST_API int STDCALL mono_test_marshal_variant_out_double_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_R8, &vt); if (vt.vt == VT_R8 && fabs (vt.dblVal - 3.14) < 1e-10) return 0; return 1; }
LIBTEST_API int STDCALL mono_test_marshal_variant_out_bstr_unmanaged(VarRefFunc func) { VARIANT vt; gchar* bstr_utf8; gint32 result = 0; VariantInit (&vt); func (VT_BSTR, &vt); bstr_utf8 = g_utf16_to_utf8 (vt.bstrVal, -1, NULL, NULL, NULL); result = strcmp("PI", bstr_utf8); g_free(bstr_utf8); if (vt.vt == VT_BSTR && !result) return 0; return 1; }
LIBTEST_API int STDCALL mono_test_marshal_variant_out_bool_true_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_BOOL, &vt); if (vt.vt == VT_BOOL && vt.boolVal == VARIANT_TRUE) return 0; return 1; }
LIBTEST_API int STDCALL mono_test_marshal_variant_out_bool_false_unmanaged(VarRefFunc func) { VARIANT vt; VariantInit (&vt); func (VT_BOOL, &vt); /* the managed delegate returns false here, so expect VARIANT_FALSE */ if (vt.vt == VT_BOOL && vt.boolVal == VARIANT_FALSE) return 0; return 1; }
typedef struct _StructWithVariant { VARIANT data; } StructWithVariant;
typedef int (STDCALL *CheckStructWithVariantFunc) (StructWithVariant sv);
LIBTEST_API int STDCALL mono_test_marshal_struct_with_variant_in_unmanaged(CheckStructWithVariantFunc func) { StructWithVariant sv; sv.data.vt = VT_I4; sv.data.lVal = -123; return func(sv); }
LIBTEST_API int STDCALL mono_test_marshal_struct_with_variant_out_unmanaged (StructWithVariant sv) { if (sv.data.vt != VT_I4) return 1; if (sv.data.lVal != -123) return 2; return 0; }
typedef struct _StructWithBstr { gunichar2* data; } StructWithBstr;
typedef int (STDCALL *CheckStructWithBstrFunc) (StructWithBstr sb);
LIBTEST_API int STDCALL mono_test_marshal_struct_with_bstr_in_unmanaged(CheckStructWithBstrFunc func) { StructWithBstr sb; sb.data = marshal_bstr_alloc("this is a test string"); return func(sb); }
LIBTEST_API int STDCALL mono_test_marshal_struct_with_bstr_out_unmanaged (StructWithBstr sb) { char *s = g_utf16_to_utf8 (sb.data, g_utf16_len (sb.data), NULL, NULL, NULL); gboolean same = !strcmp (s, "this is a test string"); g_free (s); if (!same) return 1; return 0; }
typedef struct MonoComObject MonoComObject;
typedef struct MonoDefItfObject MonoDefItfObject;
typedef struct { int (STDCALL *QueryInterface)(MonoDefItfObject* pUnk, gpointer riid, gpointer* ppv); int (STDCALL *AddRef)(MonoDefItfObject* pUnk); int (STDCALL *Release)(MonoDefItfObject* pUnk); int (STDCALL *Method)(MonoDefItfObject* pUnk, int *value); } MonoDefItf;
typedef struct { int (STDCALL *QueryInterface)(MonoComObject* pUnk, gpointer riid, gpointer* ppv); int (STDCALL *AddRef)(MonoComObject* pUnk); int (STDCALL *Release)(MonoComObject* pUnk); int (STDCALL *get_ITest)(MonoComObject* pUnk, MonoComObject* *ppUnk); int (STDCALL *SByteIn)(MonoComObject* pUnk, char a); int
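/* remaining ITest vtable slots; the order must match the managed interface
 * declaration exactly, since COM interop dispatches by vtable offset rather
 * than by name, so inserting or reordering members silently breaks every
 * later slot */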
(STDCALL *ByteIn)(MonoComObject* pUnk, unsigned char a); int (STDCALL *ShortIn)(MonoComObject* pUnk, short a); int (STDCALL *UShortIn)(MonoComObject* pUnk, unsigned short a); int (STDCALL *IntIn)(MonoComObject* pUnk, int a); int (STDCALL *UIntIn)(MonoComObject* pUnk, unsigned int a); int (STDCALL *LongIn)(MonoComObject* pUnk, gint64 a); int (STDCALL *ULongIn)(MonoComObject* pUnk, guint64 a); int (STDCALL *FloatIn)(MonoComObject* pUnk, float a); int (STDCALL *DoubleIn)(MonoComObject* pUnk, double a); int (STDCALL *ITestIn)(MonoComObject* pUnk, MonoComObject* pUnk2); int (STDCALL *ITestOut)(MonoComObject* pUnk, MonoComObject* *ppUnk); int (STDCALL *Return22NoICall)(MonoComObject* pUnk); int (STDCALL *IntOut)(MonoComObject* pUnk, int *a); int (STDCALL *ArrayIn)(MonoComObject* pUnk, void *array); int (STDCALL *ArrayIn2)(MonoComObject* pUnk, void *array); int (STDCALL *ArrayIn3)(MonoComObject* pUnk, void *array); int (STDCALL *ArrayOut)(MonoComObject* pUnk, guint32 *array, guint32 *result); int (STDCALL *GetDefInterface1)(MonoComObject* pUnk, MonoDefItfObject **iface); int (STDCALL *GetDefInterface2)(MonoComObject* pUnk, MonoDefItfObject **iface); } MonoIUnknown; struct MonoComObject { MonoIUnknown* vtbl; int m_ref; }; struct MonoDefItfObject { MonoDefItf* vtbl; }; static GUID IID_ITest = {0, 0, 0, {0,0,0,0,0,0,0,1}}; static GUID IID_IMonoUnknown = {0, 0, 0, {0xc0,0,0,0,0,0,0,0x46}}; static GUID IID_IMonoDispatch = {0x00020400, 0, 0, {0xc0,0,0,0,0,0,0,0x46}}; static GUID IID_INotImplemented = {0x12345678, 0, 0, {0x9a, 0xbc, 0xde, 0xf0, 0, 0, 0, 0}}; LIBTEST_API int STDCALL MonoQueryInterface(MonoComObject* pUnk, gpointer riid, gpointer* ppv) { *ppv = NULL; if (!memcmp(riid, &IID_IMonoUnknown, sizeof(GUID))) { *ppv = pUnk; return S_OK; } else if (!memcmp(riid, &IID_ITest, sizeof(GUID))) { *ppv = pUnk; return S_OK; } else if (!memcmp(riid, &IID_IMonoDispatch, sizeof(GUID))) { *ppv = pUnk; return S_OK; } return 0x80004002; //E_NOINTERFACE; } LIBTEST_API int STDCALL MonoAddRef(MonoComObject* pUnk) { return ++(pUnk->m_ref); } LIBTEST_API int STDCALL MonoRelease(MonoComObject* pUnk) { return --(pUnk->m_ref); } LIBTEST_API int STDCALL SByteIn(MonoComObject* pUnk, char a) { return S_OK; } LIBTEST_API int STDCALL ByteIn(MonoComObject* pUnk, unsigned char a) { return S_OK; } LIBTEST_API int STDCALL ShortIn(MonoComObject* pUnk, short a) { return S_OK; } LIBTEST_API int STDCALL UShortIn(MonoComObject* pUnk, unsigned short a) { return S_OK; } LIBTEST_API int STDCALL IntIn(MonoComObject* pUnk, int a) { return S_OK; } LIBTEST_API int STDCALL UIntIn(MonoComObject* pUnk, unsigned int a) { return S_OK; } LIBTEST_API int STDCALL LongIn(MonoComObject* pUnk, gint64 a) { return S_OK; } LIBTEST_API int STDCALL ULongIn(MonoComObject* pUnk, guint64 a) { return S_OK; } LIBTEST_API int STDCALL FloatIn(MonoComObject* pUnk, float a) { return S_OK; } LIBTEST_API int STDCALL DoubleIn(MonoComObject* pUnk, double a) { return S_OK; } LIBTEST_API int STDCALL ITestIn(MonoComObject* pUnk, MonoComObject *pUnk2) { return S_OK; } LIBTEST_API int STDCALL ITestOut(MonoComObject* pUnk, MonoComObject* *ppUnk) { return S_OK; } LIBTEST_API int STDCALL Return22NoICall(MonoComObject* pUnk) { return 22; } LIBTEST_API int STDCALL IntOut(MonoComObject* pUnk, int *a) { return S_OK; } LIBTEST_API int STDCALL ArrayIn(MonoComObject* pUnk, void *array) { return S_OK; } LIBTEST_API int STDCALL ArrayIn2(MonoComObject* pUnk, void *array) { return S_OK; } LIBTEST_API int STDCALL ArrayIn3(MonoComObject* pUnk, void *array) { return S_OK; } LIBTEST_API int 
STDCALL ArrayOut(MonoComObject* pUnk, guint32 *array, guint32 *result) { return S_OK; } LIBTEST_API int STDCALL GetDefInterface1(MonoComObject* pUnk, MonoDefItfObject **obj) { return S_OK; } LIBTEST_API int STDCALL GetDefInterface2(MonoComObject* pUnk, MonoDefItfObject **obj) { return S_OK; } static void create_com_object (MonoComObject** pOut); LIBTEST_API int STDCALL get_ITest(MonoComObject* pUnk, MonoComObject* *ppUnk) { create_com_object (ppUnk); return S_OK; } static void create_com_object (MonoComObject** pOut) { *pOut = marshal_new0 (MonoComObject, 1); (*pOut)->vtbl = marshal_new0 (MonoIUnknown, 1); (*pOut)->m_ref = 1; (*pOut)->vtbl->QueryInterface = MonoQueryInterface; (*pOut)->vtbl->AddRef = MonoAddRef; (*pOut)->vtbl->Release = MonoRelease; (*pOut)->vtbl->SByteIn = SByteIn; (*pOut)->vtbl->ByteIn = ByteIn; (*pOut)->vtbl->ShortIn = ShortIn; (*pOut)->vtbl->UShortIn = UShortIn; (*pOut)->vtbl->IntIn = IntIn; (*pOut)->vtbl->UIntIn = UIntIn; (*pOut)->vtbl->LongIn = LongIn; (*pOut)->vtbl->ULongIn = ULongIn; (*pOut)->vtbl->FloatIn = FloatIn; (*pOut)->vtbl->DoubleIn = DoubleIn; (*pOut)->vtbl->ITestIn = ITestIn; (*pOut)->vtbl->ITestOut = ITestOut; (*pOut)->vtbl->get_ITest = get_ITest; (*pOut)->vtbl->Return22NoICall = Return22NoICall; (*pOut)->vtbl->IntOut = IntOut; (*pOut)->vtbl->ArrayIn = ArrayIn; (*pOut)->vtbl->ArrayIn2 = ArrayIn2; (*pOut)->vtbl->ArrayIn3 = ArrayIn3; (*pOut)->vtbl->ArrayOut = ArrayOut; (*pOut)->vtbl->GetDefInterface1 = GetDefInterface1; (*pOut)->vtbl->GetDefInterface2 = GetDefInterface2; } static MonoComObject* same_object = NULL; LIBTEST_API int STDCALL mono_test_marshal_com_object_create(MonoComObject* *pUnk) { create_com_object (pUnk); if (!same_object) same_object = *pUnk; return 0; } LIBTEST_API int STDCALL mono_test_marshal_com_object_same(MonoComObject* *pUnk) { *pUnk = same_object; return 0; } LIBTEST_API int STDCALL mono_test_marshal_com_object_destroy(MonoComObject *pUnk) { int ref = --(pUnk->m_ref); g_free(pUnk->vtbl); g_free(pUnk); return ref; } LIBTEST_API int STDCALL mono_test_marshal_com_object_ref_count(MonoComObject *pUnk) { return pUnk->m_ref; } LIBTEST_API int STDCALL mono_test_marshal_ccw_itest (MonoComObject *pUnk) { int hr = 0; MonoComObject* pTest; if (!pUnk) return 1; hr = pUnk->vtbl->SByteIn (pUnk, -100); if (hr != 0) return 2; hr = pUnk->vtbl->ByteIn (pUnk, 100); if (hr != 0) return 3; hr = pUnk->vtbl->ShortIn (pUnk, -100); if (hr != 0) return 4; hr = pUnk->vtbl->UShortIn (pUnk, 100); if (hr != 0) return 5; hr = pUnk->vtbl->IntIn (pUnk, -100); if (hr != 0) return 6; hr = pUnk->vtbl->UIntIn (pUnk, 100); if (hr != 0) return 7; hr = pUnk->vtbl->LongIn (pUnk, -100); if (hr != 0) return 8; hr = pUnk->vtbl->ULongIn (pUnk, 100); if (hr != 0) return 9; hr = pUnk->vtbl->FloatIn (pUnk, 3.14f); if (hr != 0) return 10; hr = pUnk->vtbl->DoubleIn (pUnk, 3.14); if (hr != 0) return 11; hr = pUnk->vtbl->ITestIn (pUnk, pUnk); if (hr != 0) return 12; hr = pUnk->vtbl->ITestOut (pUnk, &pTest); if (hr != 0) return 13; return 0; } // Xamarin-47560 LIBTEST_API int STDCALL mono_test_marshal_array_ccw_itest (int count, MonoComObject ** ppUnk) { int hr = 0; if (!ppUnk) return 1; if (count < 1) return 2; if (!ppUnk[0]) return 3; hr = ppUnk[0]->vtbl->SByteIn (ppUnk[0], -100); if (hr != 0) return 4; return 0; } LIBTEST_API int STDCALL mono_test_marshal_retval_ccw_itest (MonoComObject *pUnk, int test_null) { int hr = 0, i = 0; if (!pUnk) return 1; hr = pUnk->vtbl->IntOut (pUnk, &i); if (hr != 0) return 2; if (i != 33) return 3; if (test_null) { hr = pUnk->vtbl->IntOut (pUnk, 
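		/* deliberately pass a NULL out-pointer to exercise the managed
		   implementation's argument check */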
NULL); if (hr != 0) return 4; } return 0; } LIBTEST_API int STDCALL mono_test_default_interface_ccw (MonoComObject *pUnk) { MonoDefItfObject *obj; int ret, value; ret = pUnk->vtbl->GetDefInterface1(pUnk, &obj); if (ret) return 1; value = 0; ret = obj->vtbl->Method(obj, &value); obj->vtbl->Release(obj); if (ret) return 2; if (value != 1) return 3; ret = pUnk->vtbl->GetDefInterface2(pUnk, &obj); if (ret) return 4; ret = obj->vtbl->Method(obj, &value); obj->vtbl->Release(obj); if (ret) return 5; if (value != 2) return 6; return 0; } /* * mono_method_get_unmanaged_thunk tests */ #if defined(__GNUC__) && ((defined(__i386__) && (defined(__linux__) || defined (__APPLE__)) || defined (__FreeBSD__) || defined(__OpenBSD__)) || (defined(__ppc__) && defined(__APPLE__))) #define ALIGN(size) __attribute__ ((__aligned__(size))) #else #define ALIGN(size) #endif /* thunks.cs:TestStruct */ typedef struct _TestStruct { int A; double B; } TestStruct; /* Searches for mono symbols in all loaded modules */ static gpointer lookup_mono_symbol (const char *symbol_name) { gpointer symbol = NULL; GModule *mod = g_module_open (NULL, G_MODULE_BIND_LAZY); g_assert (mod != NULL); const gboolean success = g_module_symbol (mod, symbol_name, &symbol); g_assertf (success, "%s", symbol_name); return success ? symbol : NULL; } LIBTEST_API gpointer STDCALL mono_test_marshal_lookup_symbol (const char *symbol_name) { #ifndef HOST_WIN32 return dlsym (RTLD_DEFAULT, symbol_name); #else // This isn't really proper, but it should work return lookup_mono_symbol (symbol_name); #endif } // FIXME use runtime headers #define MONO_BEGIN_EFRAME { void *__dummy; void *__region_cookie = mono_threads_enter_gc_unsafe_region ? mono_threads_enter_gc_unsafe_region (&__dummy) : NULL; #define MONO_END_EFRAME if (mono_threads_exit_gc_unsafe_region) mono_threads_exit_gc_unsafe_region (__region_cookie, &__dummy); } /** * test_method_thunk: * * @test_id: the test number * @test_method_handle: MonoMethod* of the C# test method * @create_object_method_handle: MonoMethod* of thunks.cs:Test.CreateObject */ LIBTEST_API int STDCALL test_method_thunk (int test_id, gpointer test_method_handle, gpointer create_object_method_handle) { int ret = 0; // FIXME use runtime headers gpointer (*mono_method_get_unmanaged_thunk)(gpointer) = (gpointer (*)(gpointer))lookup_mono_symbol ("mono_method_get_unmanaged_thunk"); // FIXME use runtime headers gpointer (*mono_string_new_wrapper)(const char *) = (gpointer (*)(const char *))lookup_mono_symbol ("mono_string_new_wrapper"); // FIXME use runtime headers char *(*mono_string_to_utf8)(gpointer) = (char *(*)(gpointer))lookup_mono_symbol ("mono_string_to_utf8"); // FIXME use runtime headers gpointer (*mono_object_unbox)(gpointer) = (gpointer (*)(gpointer))lookup_mono_symbol ("mono_object_unbox"); // FIXME use runtime headers gpointer (*mono_threads_enter_gc_unsafe_region) (gpointer) = (gpointer (*)(gpointer))lookup_mono_symbol ("mono_threads_enter_gc_unsafe_region"); // FIXME use runtime headers void (*mono_threads_exit_gc_unsafe_region) (gpointer, gpointer) = (void (*)(gpointer, gpointer))lookup_mono_symbol ("mono_threads_exit_gc_unsafe_region"); gpointer test_method, ex = NULL; gpointer (STDCALL *CreateObject)(gpointer*); MONO_BEGIN_EFRAME; if (!mono_method_get_unmanaged_thunk) { ret = 1; goto done; } test_method = mono_method_get_unmanaged_thunk (test_method_handle); if (!test_method) { ret = 2; goto done; } CreateObject = (gpointer (STDCALL *)(gpointer *))mono_method_get_unmanaged_thunk (create_object_method_handle); if 
(!CreateObject) { ret = 3; goto done; } switch (test_id) { case 0: { /* thunks.cs:Test.Test0 */ void (STDCALL *F)(gpointer *) = (void (STDCALL *)(gpointer *))test_method; F (&ex); break; } case 1: { /* thunks.cs:Test.Test1 */ int (STDCALL *F)(gpointer *) = (int (STDCALL *)(gpointer *))test_method; if (F (&ex) != 42) { ret = 4; goto done; } break; } case 2: { /* thunks.cs:Test.Test2 */ gpointer (STDCALL *F)(gpointer, gpointer*) = (gpointer (STDCALL *)(gpointer, gpointer *))test_method; gpointer str = mono_string_new_wrapper ("foo"); if (str != F (str, &ex)) { ret = 4; goto done; } break; } case 3: { /* thunks.cs:Test.Test3 */ gpointer (STDCALL *F)(gpointer, gpointer, gpointer*); gpointer obj; gpointer str; F = (gpointer (STDCALL *)(gpointer, gpointer, gpointer *))test_method; obj = CreateObject (&ex); str = mono_string_new_wrapper ("bar"); if (str != F (obj, str, &ex)) { ret = 4; goto done; } break; } case 4: { /* thunks.cs:Test.Test4 */ int (STDCALL *F)(gpointer, gpointer, int, gpointer*); gpointer obj; gpointer str; F = (int (STDCALL *)(gpointer, gpointer, int, gpointer *))test_method; obj = CreateObject (&ex); str = mono_string_new_wrapper ("bar"); if (42 != F (obj, str, 42, &ex)) { ret = 4; goto done; } break; } case 5: { /* thunks.cs:Test.Test5 */ int (STDCALL *F)(gpointer, gpointer, int, gpointer*); gpointer obj; gpointer str; F = (int (STDCALL *)(gpointer, gpointer, int, gpointer *))test_method; obj = CreateObject (&ex); str = mono_string_new_wrapper ("bar"); F (obj, str, 42, &ex); if (!ex) { ret = 4; goto done; } break; } case 6: { /* thunks.cs:Test.Test6 */ int (STDCALL *F)(gpointer, guint8, gint16, gint32, gint64, float, double, gpointer, gpointer*); gpointer obj; gpointer str = mono_string_new_wrapper ("Test6"); int res; F = (int (STDCALL *)(gpointer, guint8, gint16, gint32, gint64, float, double, gpointer, gpointer *))test_method; obj = CreateObject (&ex); res = F (obj, 254, 32700, -245378, 6789600, 3.1415, 3.1415, str, &ex); if (ex) { ret = 4; goto done; } if (!res) { ret = 5; goto done; } break; } case 7: { /* thunks.cs:Test.Test7 */ gint64 (STDCALL *F)(gpointer*) = (gint64 (STDCALL *)(gpointer *))test_method; if (F (&ex) != G_MAXINT64) { ret = 4; goto done; } break; } case 8: { /* thunks.cs:Test.Test8 */ void (STDCALL *F)(guint8*, gint16*, gint32*, gint64*, float*, double*, gpointer*, gpointer*); guint8 a1; gint16 a2; gint32 a3; gint64 a4; float a5; double a6; gpointer a7; F = (void (STDCALL *)(guint8 *, gint16 *, gint32 *, gint64 *, float *, double *, gpointer *, gpointer *))test_method; F (&a1, &a2, &a3, &a4, &a5, &a6, &a7, &ex); if (ex) { ret = 4; goto done; } if (!(a1 == 254 && a2 == 32700 && a3 == -245378 && a4 == 6789600 && (fabs (a5 - 3.1415) < 0.001) && (fabs (a6 - 3.1415) < 0.001) && strcmp (mono_string_to_utf8 (a7), "Test8") == 0)){ ret = 5; goto done; } break; } case 9: { /* thunks.cs:Test.Test9 */ void (STDCALL *F)(guint8*, gint16*, gint32*, gint64*, float*, double*, gpointer*, gpointer*); guint8 a1; gint16 a2; gint32 a3; gint64 a4; float a5; double a6; gpointer a7; F = (void (STDCALL *)(guint8 *, gint16 *, gint32 *, gint64 *, float *, double *, gpointer *, gpointer *))test_method; F (&a1, &a2, &a3, &a4, &a5, &a6, &a7, &ex); if (!ex) { ret = 4; goto done; } break; } case 10: { /* thunks.cs:Test.Test10 */ void (STDCALL *F)(gpointer*, gpointer*); gpointer obj1, obj2; obj1 = obj2 = CreateObject (&ex); if (ex) { ret = 4; goto done; } F = (void (STDCALL *)(gpointer *, gpointer *))test_method; F (&obj1, &ex); if (ex) { ret = 5; goto done; } if (obj1 == obj2) { ret = 6; 
goto done; } break; } case 100: { /* thunks.cs:TestStruct.Test0 */ int (STDCALL *F)(gpointer*, gpointer*); gpointer obj; TestStruct *a1; int res; obj = CreateObject (&ex); if (ex) { ret = 4; goto done; } if (!obj) { ret = 5; goto done; } a1 = (TestStruct *)mono_object_unbox (obj); if (!a1) { ret = 6; goto done; } a1->A = 42; a1->B = 3.1415; F = (int (STDCALL *)(gpointer *, gpointer *))test_method; res = F ((gpointer *)obj, &ex); if (ex) { ret = 7; goto done; } if (!res) { ret = 8; goto done; } /* check whether the call was really by value */ if (a1->A != 42 || a1->B != 3.1415) { ret = 9; goto done; } break; } case 101: { /* thunks.cs:TestStruct.Test1 */ void (STDCALL *F)(gpointer, gpointer*); TestStruct *a1; gpointer obj; obj = CreateObject (&ex); if (ex) { ret = 4; goto done; } if (!obj) { ret = 5; goto done; } a1 = (TestStruct *)mono_object_unbox (obj); if (!a1) { ret = 6; goto done; } F = (void (STDCALL *)(gpointer, gpointer *))test_method; F (obj, &ex); if (ex) { ret = 7; goto done; } if (a1->A != 42) { ret = 8; goto done; } if (!(fabs (a1->B - 3.1415) < 0.001)) { ret = 9; goto done; } break; } case 102: { /* thunks.cs:TestStruct.Test2 */ gpointer (STDCALL *F)(gpointer*); TestStruct *a1; gpointer obj; F = (gpointer (STDCALL *)(gpointer *))test_method; obj = F (&ex); if (ex) { ret = 4; goto done; } if (!obj) { ret = 5; goto done; } a1 = (TestStruct *)mono_object_unbox (obj); if (a1->A != 42) { ret = 5; goto done; } if (!(fabs (a1->B - 3.1415) < 0.001)) { ret = 6; goto done; } break; } case 103: { /* thunks.cs:TestStruct.Test3 */ void (STDCALL *F)(gpointer, gpointer*); TestStruct *a1; gpointer obj; obj = CreateObject (&ex); if (ex) { ret = 4; goto done; } if (!obj) { ret = 5; goto done; } a1 = (TestStruct *)mono_object_unbox (obj); if (!a1) { ret = 6; goto done; } a1->A = 42; a1->B = 3.1415; F = (void (STDCALL *)(gpointer, gpointer *))test_method; F (obj, &ex); if (ex) { ret = 4; goto done; } if (a1->A != 1) { ret = 5; goto done; } if (a1->B != 17) { ret = 6; goto done; } break; } default: ret = 9; } done: MONO_END_EFRAME; return ret; } typedef struct { char a; } winx64_struct1; LIBTEST_API int STDCALL mono_test_Winx64_struct1_in (winx64_struct1 var) { if (var.a != 123) return 1; return 0; } typedef struct { char a; char b; } winx64_struct2; LIBTEST_API int STDCALL mono_test_Winx64_struct2_in (winx64_struct2 var) { if (var.a != 4) return 1; if (var.b != 5) return 2; return 0; } typedef struct { char a; char b; short c; } winx64_struct3; LIBTEST_API int STDCALL mono_test_Winx64_struct3_in (winx64_struct3 var) { if (var.a != 4) return 1; if (var.b != 5) return 2; if (var.c != 0x1234) return 3; return 0; } typedef struct { char a; char b; short c; unsigned int d; } winx64_struct4; LIBTEST_API int STDCALL mono_test_Winx64_struct4_in (winx64_struct4 var) { if (var.a != 4) return 1; if (var.b != 5) return 2; if (var.c != 0x1234) return 3; if (var.d != 0x87654321) return 4; return 0; } typedef struct { char a; char b; char c; } winx64_struct5; LIBTEST_API int STDCALL mono_test_Winx64_struct5_in (winx64_struct5 var) { if (var.a != 4) return 1; if (var.b != 5) return 2; if (var.c != 6) return 3; return 0; } typedef struct { winx64_struct1 a; short b; char c; } winx64_struct6; LIBTEST_API int STDCALL mono_test_Winx64_struct6_in (winx64_struct6 var) { if (var.a.a != 4) return 1; if (var.b != 5) return 2; if (var.c != 6) return 3; return 0; } LIBTEST_API int STDCALL mono_test_Winx64_structs_in1 (winx64_struct1 var1, winx64_struct2 var2, winx64_struct3 var3, winx64_struct4 var4) { if (var1.a != 123) 
return 1; if (var2.a != 4) return 2; if (var2.b != 5) return 3; if (var3.a != 4) return 4; if (var3.b != 5) return 10; if (var3.c != 0x1234) return 5; if (var4.a != 4) return 6; if (var4.b != 5) return 7; if (var4.c != 0x1234) return 8; if (var4.d != 0x87654321) return 9; return 0; }
LIBTEST_API int STDCALL mono_test_Winx64_structs_in2 (winx64_struct1 var1, winx64_struct1 var2, winx64_struct1 var3, winx64_struct1 var4, winx64_struct1 var5) { if (var1.a != 1) return 1; if (var2.a != 2) return 2; if (var3.a != 3) return 3; if (var4.a != 4) return 4; if (var5.a != 5) return 5; return 0; }
LIBTEST_API int STDCALL mono_test_Winx64_structs_in3 (winx64_struct1 var1, winx64_struct5 var2, winx64_struct1 var3, winx64_struct5 var4, winx64_struct1 var5, winx64_struct5 var6) { if (var1.a != 1) return 1; if (var2.a != 2) return 2; if (var2.b != 3) return 3; if (var2.c != 4) return 4; if (var3.a != 5) return 5; if (var4.a != 6) return 6; if (var4.b != 7) return 7; if (var4.c != 8) return 8; if (var5.a != 9) return 9; if (var6.a != 10) return 10; if (var6.b != 11) return 11; if (var6.c != 12) return 12; return 0; }
LIBTEST_API winx64_struct1 STDCALL mono_test_Winx64_struct1_ret (void) { winx64_struct1 ret; ret.a = 123; return ret; }
LIBTEST_API winx64_struct2 STDCALL mono_test_Winx64_struct2_ret (void) { winx64_struct2 ret; ret.a = 4; ret.b = 5; return ret; }
LIBTEST_API winx64_struct3 STDCALL mono_test_Winx64_struct3_ret (void) { winx64_struct3 ret; ret.a = 4; ret.b = 5; ret.c = 0x1234; return ret; }
LIBTEST_API winx64_struct4 STDCALL mono_test_Winx64_struct4_ret (void) { winx64_struct4 ret; ret.a = 4; ret.b = 5; ret.c = 0x1234; ret.d = 0x87654321; return ret; }
LIBTEST_API winx64_struct5 STDCALL mono_test_Winx64_struct5_ret (void) { winx64_struct5 ret; ret.a = 4; ret.b = 5; ret.c = 6; return ret; }
LIBTEST_API winx64_struct1 STDCALL mono_test_Winx64_struct1_ret_5_args (char a, char b, char c, char d, char e) { winx64_struct1 ret; ret.a = a + b + c + d + e; return ret; }
LIBTEST_API winx64_struct5 STDCALL mono_test_Winx64_struct5_ret6_args (char a, char b, char c, char d, char e) { winx64_struct5 ret; ret.a = a + b; ret.b = c + d; ret.c = e; return ret; }
typedef struct { float a; float b; } winx64_floatStruct;
LIBTEST_API int STDCALL mono_test_Winx64_floatStruct (winx64_floatStruct a) { if (a.a > 5.6 || a.a < 5.4) return 1; if (a.b > 9.6 || a.b < 9.4) return 2; return 0; }
typedef struct { double a; } winx64_doubleStruct;
LIBTEST_API int STDCALL mono_test_Winx64_doubleStruct (winx64_doubleStruct a) { if (a.a > 5.6 || a.a < 5.4) return 1; return 0; }
typedef int (STDCALL *managed_struct1_delegate) (winx64_struct1 a);
LIBTEST_API int STDCALL mono_test_managed_Winx64_struct1_in(managed_struct1_delegate func) { winx64_struct1 val; val.a = 5; return func (val); }
typedef int (STDCALL *managed_struct5_delegate) (winx64_struct5 a);
LIBTEST_API int STDCALL mono_test_managed_Winx64_struct5_in(managed_struct5_delegate func) { winx64_struct5 val; val.a = 5; val.b = 0x10; val.c = (char)0x99; return func (val); }
typedef int (STDCALL *managed_struct1_struct5_delegate) (winx64_struct1 a, winx64_struct5 b, winx64_struct1 c, winx64_struct5 d, winx64_struct1 e, winx64_struct5 f);
LIBTEST_API int STDCALL mono_test_managed_Winx64_struct1_struct5_in(managed_struct1_struct5_delegate func) { winx64_struct1 a, c, e; winx64_struct5 b, d, f; a.a = 1; b.a = 2; b.b = 3; b.c = 4; c.a = 5; d.a = 6; d.b = 7; d.c = 8; e.a = 9; f.a = 10; f.b = 11; f.c = 12; return func (a, b, c, d, e, f); }
typedef winx64_struct1 (STDCALL
*managed_struct1_ret_delegate) (void); LIBTEST_API int STDCALL mono_test_Winx64_struct1_ret_managed (managed_struct1_ret_delegate func) { winx64_struct1 ret; ret = func (); if (ret.a != 0x45) return 1; return 0; } typedef winx64_struct5 (STDCALL *managed_struct5_ret_delegate) (void); LIBTEST_API int STDCALL mono_test_Winx64_struct5_ret_managed (managed_struct5_ret_delegate func) { winx64_struct5 ret; ret = func (); if (ret.a != 0x12) return 1; if (ret.b != 0x34) return 2; if (ret.c != 0x56) return 3; return 0; } LIBTEST_API int STDCALL mono_test_marshal_bool_in (int arg, unsigned int expected, unsigned int bDefaultMarsh, unsigned int bBoolCustMarsh, char bI1CustMarsh, unsigned char bU1CustMarsh, short bVBCustMarsh) { switch (arg) { case 1: if (bDefaultMarsh != expected) return 1; break; case 2: if (bBoolCustMarsh != expected) return 2; break; case 3: if (bI1CustMarsh != expected) return 3; break; case 4: if (bU1CustMarsh != expected) return 4; break; case 5: if (bVBCustMarsh != expected) return 5; break; default: return 999; } return 0; } LIBTEST_API int STDCALL mono_test_marshal_bool_out (int arg, unsigned int testVal, unsigned int* bDefaultMarsh, unsigned int* bBoolCustMarsh, char* bI1CustMarsh, unsigned char* bU1CustMarsh, unsigned short* bVBCustMarsh) { switch (arg) { case 1: if (!bDefaultMarsh) return 1; *bDefaultMarsh = testVal; break; case 2: if (!bBoolCustMarsh) return 2; *bBoolCustMarsh = testVal; break; case 3: if (!bI1CustMarsh) return 3; *bI1CustMarsh = (char)testVal; break; case 4: if (!bU1CustMarsh) return 4; *bU1CustMarsh = (unsigned char)testVal; break; case 5: if (!bVBCustMarsh) return 5; *bVBCustMarsh = (unsigned short)testVal; break; default: return 999; } return 0; } LIBTEST_API int STDCALL mono_test_marshal_bool_ref (int arg, unsigned int expected, unsigned int testVal, unsigned int* bDefaultMarsh, unsigned int* bBoolCustMarsh, char* bI1CustMarsh, unsigned char* bU1CustMarsh, unsigned short* bVBCustMarsh) { switch (arg) { case 1: if (!bDefaultMarsh) return 1; if (*bDefaultMarsh != expected) return 2; *bDefaultMarsh = testVal; break; case 2: if (!bBoolCustMarsh) return 3; if (*bBoolCustMarsh != expected) return 4; *bBoolCustMarsh = testVal; break; case 3: if (!bI1CustMarsh) return 5; if (*bI1CustMarsh != expected) return 6; *bI1CustMarsh = (char)testVal; break; case 4: if (!bU1CustMarsh) return 7; if (*bU1CustMarsh != expected) return 8; *bU1CustMarsh = (unsigned char)testVal; break; case 5: if (!bVBCustMarsh) return 9; if (*bVBCustMarsh != expected) return 10; *bVBCustMarsh = (unsigned short)testVal; break; default: return 999; } return 0; } typedef int (STDCALL *MarshalBoolInDelegate) (int arg, unsigned int expected, unsigned int bDefaultMarsh, unsigned int bBoolCustMarsh, char bI1CustMarsh, unsigned char bU1CustMarsh, unsigned short bVBCustMarsh); LIBTEST_API int STDCALL mono_test_managed_marshal_bool_in (int arg, unsigned int expected, unsigned int testVal, MarshalBoolInDelegate pfcn) { if (!pfcn) return 0x9900; switch (arg) { case 1: return pfcn (arg, expected, testVal, 0, 0, 0, 0); case 2: return pfcn (arg, expected, 0, testVal, 0, 0, 0); case 3: return pfcn (arg, expected, 0, 0, testVal, 0, 0); case 4: return pfcn (arg, expected, 0, 0, 0, testVal, 0); case 5: return pfcn (arg, expected, 0, 0, 0, 0, testVal); default: return 0x9800; } return 0; } typedef int (STDCALL *MarshalBoolOutDelegate) (int arg, unsigned int expected, unsigned int* bDefaultMarsh, unsigned int* bBoolCustMarsh, char* bI1CustMarsh, unsigned char* bU1CustMarsh, unsigned short* bVBCustMarsh); 
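/*
 * Reverse-pinvoke counterpart of mono_test_marshal_bool_out: invokes the
 * managed delegate with out-pointers for each supported boolean
 * representation (default 4-byte BOOL, custom Boolean, I1, U1 and the
 * 2-byte VB-style bool) and checks that the slot selected by 'arg' receives
 * the expected value. Nonzero returns encode the failing case in the high
 * byte and the delegate's own result in the low byte.
 */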
LIBTEST_API int STDCALL mono_test_managed_marshal_bool_out (int arg, unsigned int expected, unsigned int testVal, MarshalBoolOutDelegate pfcn) { int ret; unsigned int lDefaultMarsh, lBoolCustMarsh; char lI1CustMarsh = 0; unsigned char lU1CustMarsh = 0; unsigned short lVBCustMarsh = 0; lDefaultMarsh = lBoolCustMarsh = 0; if (!pfcn) return 0x9900; switch (arg) { case 1: { unsigned int ltVal = 0; ret = pfcn (arg, testVal, &ltVal, &lBoolCustMarsh, &lI1CustMarsh, &lU1CustMarsh, &lVBCustMarsh); if (ret) return 0x0100 + ret; if (expected != ltVal) return 0x0200; break; } case 2: { unsigned int ltVal = 0; ret = pfcn (arg, testVal, &lDefaultMarsh, &ltVal, &lI1CustMarsh, &lU1CustMarsh, &lVBCustMarsh); if (ret) return 0x0300 + ret; if (expected != ltVal) return 0x0400; break; } case 3: { char ltVal = 0; ret = pfcn (arg, testVal, &lDefaultMarsh, &lBoolCustMarsh, &ltVal, &lU1CustMarsh, &lVBCustMarsh); if (ret) return 0x0500 + ret; if (expected != ltVal) return 0x0600; break; } case 4: { unsigned char ltVal = 0; ret = pfcn (arg, testVal, &lDefaultMarsh, &lBoolCustMarsh, &lI1CustMarsh, &ltVal, &lVBCustMarsh); if (ret) return 0x0700 + ret; if (expected != ltVal) return 0x0800; break; } case 5: { unsigned short ltVal = 0; ret = pfcn (arg, testVal, &lDefaultMarsh, &lBoolCustMarsh, &lI1CustMarsh, &lU1CustMarsh, &ltVal); if (ret) return 0x0900 + ret; if (expected != ltVal) return 0x1000; break; } default: return 0x9800; } return 0; } typedef int (STDCALL *MarshalBoolRefDelegate) (int arg, unsigned int expected, unsigned int testVal, unsigned int* bDefaultMarsh, unsigned int* bBoolCustMarsh, char* bI1CustMarsh, unsigned char* bU1CustMarsh, unsigned short* bVBCustMarsh); LIBTEST_API int STDCALL mono_test_managed_marshal_bool_ref (int arg, unsigned int expected, unsigned int testVal, unsigned int outExpected, unsigned int outTestVal, MarshalBoolRefDelegate pfcn) { int ret; unsigned int lDefaultMarsh, lBoolCustMarsh; char lI1CustMarsh = 0; unsigned char lU1CustMarsh = 0; unsigned short lVBCustMarsh = 0; lDefaultMarsh = lBoolCustMarsh = 0; if (!pfcn) return 0x9900; switch (arg) { case 1: { unsigned int ltestVal = testVal; ret = pfcn (arg, expected, outTestVal, &ltestVal, &lBoolCustMarsh, &lI1CustMarsh, &lU1CustMarsh, &lVBCustMarsh); if (ret) return 0x0100 + ret; if (outExpected != ltestVal) return 0x0200; break; } case 2: { unsigned int ltestVal = testVal; ret = pfcn (arg, expected, outTestVal, &lDefaultMarsh, &ltestVal, &lI1CustMarsh, &lU1CustMarsh, &lVBCustMarsh); if (ret) return 0x0300 + ret; if (outExpected != ltestVal) return 0x0400; break; } case 3: { char ltestVal = testVal; ret = pfcn (arg, expected, outTestVal, &lDefaultMarsh, &lBoolCustMarsh, &ltestVal, &lU1CustMarsh, &lVBCustMarsh); if (ret) return 0x0500 + ret; if (outExpected != ltestVal) return 0x0600; break; } case 4: { unsigned char ltestVal = testVal; ret = pfcn (arg, expected, outTestVal, &lDefaultMarsh, &lBoolCustMarsh, &lI1CustMarsh, &ltestVal, &lVBCustMarsh); if (ret) return 0x0700 + ret; if (outExpected != ltestVal) return 0x0800; break; } case 5: { unsigned short ltestVal = testVal; ret = pfcn (arg, expected, outTestVal, &lDefaultMarsh, &lBoolCustMarsh, &lI1CustMarsh, &lU1CustMarsh, &ltestVal); if (ret) return 0x0900 + ret; if (outExpected != ltestVal) return 0x1000; break; } default: return 0x9800; } return 0; } #ifdef WIN32 LIBTEST_API int STDCALL mono_test_marshal_safearray_out_1dim_vt_bstr_empty (SAFEARRAY** safearray) { /* Create an empty one-dimensional array of variants */ SAFEARRAY *pSA; SAFEARRAYBOUND dimensions [1]; dimensions 
[0].lLbound = 0; dimensions [0].cElements = 0; pSA = SafeArrayCreate (VT_VARIANT, 1, dimensions); *safearray = pSA; return S_OK; } LIBTEST_API int STDCALL mono_test_marshal_safearray_out_1dim_vt_bstr (SAFEARRAY** safearray) { /* Create a one-dimensional array of 10 variants filled with "0" to "9" */ SAFEARRAY *pSA; SAFEARRAYBOUND dimensions [1]; long i; gchar buffer [20]; HRESULT hr = S_OK; long indices [1]; dimensions [0].lLbound = 0; dimensions [0].cElements = 10; pSA= SafeArrayCreate (VT_VARIANT, 1, dimensions); for (i= dimensions [0].lLbound; i< (dimensions [0].cElements + dimensions [0].lLbound); i++) { VARIANT vOut; VariantInit (&vOut); vOut.vt = VT_BSTR; _ltoa (i,buffer,10); vOut.bstrVal= marshal_bstr_alloc (buffer); indices [0] = i; if ((hr = SafeArrayPutElement (pSA, indices, &vOut)) != S_OK) { VariantClear (&vOut); SafeArrayDestroy (pSA); return hr; } VariantClear (&vOut); } *safearray = pSA; return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_out_2dim_vt_i4 (SAFEARRAY** safearray) { /* Create a two-dimensional array of 4x3 variants filled with 11, 12, 13, etc. */ SAFEARRAY *pSA; SAFEARRAYBOUND dimensions [2]; long i, j; HRESULT hr = S_OK; long indices [2]; dimensions [0].lLbound = 0; dimensions [0].cElements = 4; dimensions [1].lLbound = 0; dimensions [1].cElements = 3; pSA= SafeArrayCreate(VT_VARIANT, 2, dimensions); for (i= dimensions [0].lLbound; i< (dimensions [0].cElements + dimensions [0].lLbound); i++) { for (j= dimensions [1].lLbound; j< (dimensions [1].cElements + dimensions [1].lLbound); j++) { VARIANT vOut; VariantInit (&vOut); vOut.vt = VT_I4; vOut.lVal = (i+1)*10+(j+1); indices [0] = i; indices [1] = j; if ((hr = SafeArrayPutElement (pSA, indices, &vOut)) != S_OK) { VariantClear (&vOut); SafeArrayDestroy (pSA); return hr; } VariantClear (&vOut); // does a deep destroy of source VARIANT } } *safearray = pSA; return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_out_4dim_vt_i4 (SAFEARRAY** safearray) { /* Create a four-dimensional array of 10x3x6x7 variants filled with their indices */ /* Also use non zero lower bounds */ SAFEARRAY *pSA; SAFEARRAYBOUND dimensions [4]; long i; HRESULT hr = S_OK; VARIANT *pData; dimensions [0].lLbound = 15; dimensions [0].cElements = 10; dimensions [1].lLbound = 20; dimensions [1].cElements = 3; dimensions [2].lLbound = 5; dimensions [2].cElements = 6; dimensions [3].lLbound = 12; dimensions [3].cElements = 7; pSA= SafeArrayCreate (VT_VARIANT, 4, dimensions); SafeArrayAccessData (pSA, (void **)&pData); for (i= 0; i< 10*3*6*7; i++) { VariantInit(&pData [i]); pData [i].vt = VT_I4; pData [i].lVal = i; } SafeArrayUnaccessData (pSA); *safearray = pSA; return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_byval_1dim_empty (SAFEARRAY* safearray) { /* Check that array is one dimensional and empty */ UINT dim; long lbound, ubound; dim = SafeArrayGetDim (safearray); if (dim != 1) return 1; SafeArrayGetLBound (safearray, 1, &lbound); SafeArrayGetUBound (safearray, 1, &ubound); if ((lbound > 0) || (ubound > 0)) return 1; return 0; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_byval_1dim_vt_i4 (SAFEARRAY* safearray) { /* Check that array is one dimensional containing integers from 1 to 10 */ UINT dim; long lbound, ubound; VARIANT *pData; long i; int result=0; dim = SafeArrayGetDim (safearray); if (dim != 1) return 1; SafeArrayGetLBound (safearray, 1, &lbound); SafeArrayGetUBound (safearray, 1, &ubound); if ((lbound != 0) || (ubound != 9)) return 1; SafeArrayAccessData (safearray, (void **)&pData); for 
(i= lbound; i <= ubound; i++) { if ((VariantChangeType (&pData [i], &pData [i], VARIANT_NOUSEROVERRIDE, VT_I4) != S_OK) || (pData [i].lVal != i + 1)) result = 1; } SafeArrayUnaccessData (safearray); return result; }
LIBTEST_API int STDCALL mono_test_marshal_safearray_in_byval_1dim_vt_mixed (SAFEARRAY* safearray) { /* Check that array is one dimensional containing integers mixed with strings from 0 to 12 */ UINT dim; long lbound, ubound; VARIANT *pData; long i; long indices [1]; VARIANT element; int result=0; VariantInit (&element); dim = SafeArrayGetDim (safearray); if (dim != 1) return 1; SafeArrayGetLBound (safearray, 1, &lbound); SafeArrayGetUBound (safearray, 1, &ubound); if ((lbound != 0) || (ubound != 12)) return 1; SafeArrayAccessData (safearray, (void **)&pData); for (i= lbound; i <= ubound; i++) { if ((i%2 == 0) && (pData [i].vt != VT_I4)) result = 1; if ((i%2 == 1) && (pData [i].vt != VT_BSTR)) result = 1; if ((VariantChangeType (&pData [i], &pData [i], VARIANT_NOUSEROVERRIDE, VT_I4) != S_OK) || (pData [i].lVal != i)) result = 1; } SafeArrayUnaccessData (safearray); /* Change the first element of the array to verify that [in] parameters are not marshalled back to the managed side */ indices [0] = 0; element.vt = VT_I4; element.lVal = 333; SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); return result; }
LIBTEST_API int STDCALL mono_test_marshal_safearray_in_byval_2dim_vt_i4 (SAFEARRAY* safearray) { /* Check that the array is two dimensional (2x4) containing the VT_I4 values 10*(i+1)+(j+1) */ UINT dim; long lbound1, ubound1, lbound2, ubound2; long i, j, failed; long indices [2]; VARIANT element; VariantInit (&element); dim = SafeArrayGetDim (safearray); if (dim != 2) return 1; SafeArrayGetLBound (safearray, 1, &lbound1); SafeArrayGetUBound (safearray, 1, &ubound1); if ((lbound1 != 0) || (ubound1 != 1)) return 1; SafeArrayGetLBound (safearray, 2, &lbound2); SafeArrayGetUBound (safearray, 2, &ubound2); if ((lbound2 != 0) || (ubound2 != 3)) { return 1; } for (i= lbound1; i <= ubound1; i++) { indices [0] = i; for (j= lbound2; j <= ubound2; j++) { indices [1] = j; if (SafeArrayGetElement (safearray, indices, &element) != S_OK) return 1; failed = ((element.vt != VT_I4) || (element.lVal != 10*(i+1)+(j+1))); VariantClear (&element); if (failed) return 1; } } /* Change the first element of the array to verify that [in] parameters are not marshalled back to the managed side */ indices [0] = 0; indices [1] = 0; element.vt = VT_I4; element.lVal = 333; SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); return 0; }
LIBTEST_API int STDCALL mono_test_marshal_safearray_in_byval_3dim_vt_bstr (SAFEARRAY* safearray) { /* Check that the array is three dimensional (2x2x3) containing VT_BSTR strings that convert to 100*(i+1)+10*(j+1)+(k+1) */ UINT dim; long lbound1, ubound1, lbound2, ubound2, lbound3, ubound3; long i, j, k, failed; long indices [3]; VARIANT element; VariantInit (&element); dim = SafeArrayGetDim (safearray); if (dim != 3) return 1; SafeArrayGetLBound (safearray, 1, &lbound1); SafeArrayGetUBound (safearray, 1, &ubound1); if ((lbound1 != 0) || (ubound1 != 1)) return 1; SafeArrayGetLBound (safearray, 2, &lbound2); SafeArrayGetUBound (safearray, 2, &ubound2); if ((lbound2 != 0) || (ubound2 != 1)) return 1; SafeArrayGetLBound (safearray, 3, &lbound3); SafeArrayGetUBound (safearray, 3, &ubound3); if ((lbound3 != 0) || (ubound3 != 2)) return 1; for (i= lbound1; i <= ubound1; i++) { indices [0] = i; for (j= lbound2; j <= ubound2; j++) { indices [1] = j; for (k= lbound3; k
<= ubound3; k++) { indices [2] = k; if (SafeArrayGetElement (safearray, indices, &element) != S_OK) return 1; failed = ((element.vt != VT_BSTR) || (VariantChangeType (&element, &element, VARIANT_NOUSEROVERRIDE, VT_I4) != S_OK) || (element.lVal != 100*(i+1)+10*(j+1)+(k+1))); VariantClear (&element); if (failed) return 1; } } } /* Change the first element of the array to verify that [in] parameters are not marshalled back to the managed side */ indices [0] = 0; indices [1] = 0; indices [2] = 0; element.vt = VT_BSTR; element.bstrVal = SysAllocString(L"Should not be copied"); SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); return 0; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_byref_3dim_vt_bstr (SAFEARRAY** safearray) { return mono_test_marshal_safearray_in_byval_3dim_vt_bstr (*safearray); } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_out_byref_1dim_empty (SAFEARRAY** safearray) { /* Check that the input array is what is expected and change it so the caller can check */ /* correct marshalling back to managed code */ UINT dim; long lbound, ubound; SAFEARRAYBOUND dimensions [1]; long i; wchar_t buffer [20]; HRESULT hr = S_OK; long indices [1]; /* Check that in array is one dimensional and empty */ dim = SafeArrayGetDim (*safearray); if (dim != 1) { return 1; } SafeArrayGetLBound (*safearray, 1, &lbound); SafeArrayGetUBound (*safearray, 1, &ubound); if ((lbound > 0) || (ubound > 0)) { return 1; } /* Re-dimension the array and return a one-dimensional array of 8 variants filled with "0" to "7" */ dimensions [0].lLbound = 0; dimensions [0].cElements = 8; hr = SafeArrayRedim (*safearray, dimensions); if (hr != S_OK) return 1; for (i= dimensions [0].lLbound; i< (dimensions [0].lLbound + dimensions [0].cElements); i++) { VARIANT vOut; VariantInit (&vOut); vOut.vt = VT_BSTR; _ltow (i,buffer,10); vOut.bstrVal = SysAllocString (buffer); indices [0] = i; if ((hr = SafeArrayPutElement (*safearray, indices, &vOut)) != S_OK) { VariantClear (&vOut); SafeArrayDestroy (*safearray); return hr; } VariantClear (&vOut); } return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_out_byref_3dim_vt_bstr (SAFEARRAY** safearray) { /* Check that the input array is what is expected and change it so the caller can check */ /* correct marshalling back to managed code */ UINT dim; long lbound1, ubound1, lbound2, ubound2, lbound3, ubound3; SAFEARRAYBOUND dimensions [1]; long i, j, k, failed; wchar_t buffer [20]; HRESULT hr = S_OK; long indices [3]; VARIANT element; VariantInit (&element); /* Check that in array is three dimensional and contains the expected values */ dim = SafeArrayGetDim (*safearray); if (dim != 3) return 1; SafeArrayGetLBound (*safearray, 1, &lbound1); SafeArrayGetUBound (*safearray, 1, &ubound1); if ((lbound1 != 0) || (ubound1 != 1)) return 1; SafeArrayGetLBound (*safearray, 2, &lbound2); SafeArrayGetUBound (*safearray, 2, &ubound2); if ((lbound2 != 0) || (ubound2 != 1)) return 1; SafeArrayGetLBound (*safearray, 3, &lbound3); SafeArrayGetUBound (*safearray, 3, &ubound3); if ((lbound3 != 0) || (ubound3 != 2)) return 1; for (i= lbound1; i <= ubound1; i++) { indices [0] = i; for (j= lbound2; j <= ubound2; j++) { indices [1] = j; for (k= lbound3; k <= ubound3; k++) { indices [2] = k; if (SafeArrayGetElement (*safearray, indices, &element) != S_OK) return 1; failed = ((element.vt != VT_BSTR) || (VariantChangeType (&element, &element, VARIANT_NOUSEROVERRIDE, VT_I4) != S_OK) || (element.lVal != 100*(i+1)+10*(j+1)+(k+1))); VariantClear (&element); if 
(failed) return 1; } } } hr = SafeArrayDestroy (*safearray); if (hr != S_OK) return 1; /* Return a new one-dimensional array of 8 variants filled with "0" to "7" */ dimensions [0].lLbound = 0; dimensions [0].cElements = 8; *safearray = SafeArrayCreate (VT_VARIANT, 1, dimensions); for (i= dimensions [0].lLbound; i< (dimensions [0].lLbound + dimensions [0].cElements); i++) { VARIANT vOut; VariantInit (&vOut); vOut.vt = VT_BSTR; _ltow (i,buffer,10); vOut.bstrVal = SysAllocString (buffer); indices [0] = i; if ((hr = SafeArrayPutElement (*safearray, indices, &vOut)) != S_OK) { VariantClear (&vOut); SafeArrayDestroy (*safearray); return hr; } VariantClear (&vOut); } return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_out_byref_1dim_vt_i4 (SAFEARRAY** safearray) { /* Check that the input array is what is expected and change it so the caller can check */ /* correct marshalling back to managed code */ UINT dim; long lbound1, ubound1; long i, failed; HRESULT hr = S_OK; long indices [1]; VARIANT element; VariantInit (&element); /* Check that in array is one dimensional and contains the expected value */ dim = SafeArrayGetDim (*safearray); if (dim != 1) return 1; SafeArrayGetLBound (*safearray, 1, &lbound1); SafeArrayGetUBound (*safearray, 1, &ubound1); ubound1 = 1; if ((lbound1 != 0) || (ubound1 != 1)) return 1; ubound1 = 0; for (i= lbound1; i <= ubound1; i++) { indices [0] = i; if (SafeArrayGetElement (*safearray, indices, &element) != S_OK) return 1; failed = (element.vt != VT_I4) || (element.lVal != i+1); VariantClear (&element); if (failed) return 1; } /* Change one of the elements of the array to verify that [out] parameter is marshalled back to the managed side */ indices [0] = 0; element.vt = VT_I4; element.lVal = -1; SafeArrayPutElement (*safearray, indices, &element); VariantClear (&element); return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_out_byval_1dim_vt_i4 (SAFEARRAY* safearray) { /* Check that the input array is what is expected and change it so the caller can check */ /* correct marshalling back to managed code */ UINT dim; long lbound1, ubound1; SAFEARRAYBOUND dimensions [1]; long i, failed; HRESULT hr = S_OK; long indices [1]; VARIANT element; VariantInit (&element); /* Check that in array is one dimensional and contains the expected value */ dim = SafeArrayGetDim (safearray); if (dim != 1) return 1; SafeArrayGetLBound (safearray, 1, &lbound1); SafeArrayGetUBound (safearray, 1, &ubound1); if ((lbound1 != 0) || (ubound1 != 0)) return 1; for (i= lbound1; i <= ubound1; i++) { indices [0] = i; if (SafeArrayGetElement (safearray, indices, &element) != S_OK) return 1; failed = (element.vt != VT_I4) || (element.lVal != i+1); VariantClear (&element); if (failed) return 1; } /* Change the array to verify how [out] parameter is marshalled back to the managed side */ /* Redimension the array */ dimensions [0].lLbound = lbound1; dimensions [0].cElements = 2; hr = SafeArrayRedim(safearray, dimensions); indices [0] = 0; element.vt = VT_I4; element.lVal = 12345; SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); indices [0] = 1; element.vt = VT_I4; element.lVal = -12345; SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_out_byval_3dim_vt_bstr (SAFEARRAY* safearray) { /* Check that the input array is what is expected and change it so the caller can check */ /* correct marshalling back to managed code */ UINT dim; long lbound1, ubound1, lbound2, 
ubound2, lbound3, ubound3; long i, j, k, failed; HRESULT hr = S_OK; long indices [3]; VARIANT element; VariantInit (&element); /* Check that in array is three dimensional and contains the expected values */ dim = SafeArrayGetDim (safearray); if (dim != 3) return 1; SafeArrayGetLBound (safearray, 1, &lbound1); SafeArrayGetUBound (safearray, 1, &ubound1); if ((lbound1 != 0) || (ubound1 != 1)) return 1; SafeArrayGetLBound (safearray, 2, &lbound2); SafeArrayGetUBound (safearray, 2, &ubound2); if ((lbound2 != 0) || (ubound2 != 1)) return 1; SafeArrayGetLBound (safearray, 3, &lbound3); SafeArrayGetUBound (safearray, 3, &ubound3); if ((lbound3 != 0) || (ubound3 != 2)) return 1; for (i= lbound1; i <= ubound1; i++) { indices [0] = i; for (j= lbound2; j <= ubound2; j++) { indices [1] = j; for (k= lbound3; k <= ubound3; k++) { indices [2] = k; if (SafeArrayGetElement (safearray, indices, &element) != S_OK) return 1; failed = ((element.vt != VT_BSTR) || (VariantChangeType (&element, &element, VARIANT_NOUSEROVERRIDE, VT_I4) != S_OK) || (element.lVal != 100*(i+1)+10*(j+1)+(k+1))); VariantClear (&element); if (failed) return 1; } } } /* Change the elements of the array to verify that [out] parameter is marshalled back to the managed side */ indices [0] = 1; indices [1] = 1; indices [2] = 2; element.vt = VT_I4; element.lVal = 333; SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); indices [0] = 1; indices [1] = 1; indices [2] = 1; element.vt = VT_I4; element.lVal = 111; SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); indices [0] = 0; indices [1] = 1; indices [2] = 0; element.vt = VT_BSTR; element.bstrVal = marshal_bstr_alloc("ABCDEFG"); SafeArrayPutElement (safearray, indices, &element); VariantClear (&element); return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_mixed( SAFEARRAY *safearray1, SAFEARRAY **safearray2, SAFEARRAY *safearray3, SAFEARRAY **safearray4 ) { HRESULT hr = S_OK; /* Initialize out parameters */ *safearray2 = NULL; /* array1: Check that in array is one dimensional and contains the expected value */ hr = mono_test_marshal_safearray_in_out_byval_1dim_vt_i4 (safearray1); /* array2: Fill in with some values to check on the managed side */ if (hr == S_OK) hr = mono_test_marshal_safearray_out_1dim_vt_bstr (safearray2); /* array3: Check that in array is one dimensional and contains the expected value */ if (hr == S_OK) hr = mono_test_marshal_safearray_in_byval_1dim_vt_mixed(safearray3); /* array4: Check input values and fill in with some values to check on the managed side */ if (hr == S_OK) hr = mono_test_marshal_safearray_in_out_byref_3dim_vt_bstr(safearray4); return hr; } LIBTEST_API int STDCALL mono_test_marshal_safearray_in_ccw(MonoComObject *pUnk) { SAFEARRAY *array; VARIANT var; long index; int ret; array = SafeArrayCreateVector(VT_VARIANT, 0, 2); var.vt = VT_BSTR; var.bstrVal = marshal_bstr_alloc("Test"); index = 0; SafeArrayPutElement(array, &index, &var); var.vt = VT_I4; var.intVal = 2345; index = 1; SafeArrayPutElement(array, &index, &var); ret = pUnk->vtbl->ArrayIn (pUnk, (void *)array); if (!ret) ret = pUnk->vtbl->ArrayIn2 (pUnk, (void *)array); if (!ret) ret = pUnk->vtbl->ArrayIn3 (pUnk, (void *)array); SafeArrayDestroy(array); return ret; } LIBTEST_API int STDCALL mono_test_marshal_lparray_out_ccw(MonoComObject *pUnk) { guint32 array, result; int ret; ret = pUnk->vtbl->ArrayOut (pUnk, &array, &result); if (ret) return ret; if (array != 55) return 1; if (result != 1) return 2; ret = pUnk->vtbl->ArrayOut (pUnk, 
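	/* second call hands the managed CCW a NULL array pointer; it must
	   tolerate the null and report back result == 0 */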
NULL, &result); if (ret) return ret; if (result != 0) return 3; return 0; } #endif static int call_managed_res; static void call_managed (gpointer arg) { SimpleDelegate del = (SimpleDelegate)arg; call_managed_res = del (42); } LIBTEST_API int STDCALL mono_test_marshal_thread_attach (SimpleDelegate del) { #ifdef WIN32 return 43; #else int res; pthread_t t; res = pthread_create (&t, NULL, (gpointer (*)(gpointer))call_managed, (gpointer)del); g_assert (res == 0); pthread_join (t, NULL); return call_managed_res; #endif } typedef struct { char arr [4 * 1024]; } LargeStruct; typedef int (STDCALL *LargeStructDelegate) (LargeStruct *s); static void call_managed_large_vt (gpointer arg) { LargeStructDelegate del = (LargeStructDelegate)arg; LargeStruct s; call_managed_res = del (&s); } LIBTEST_API int STDCALL mono_test_marshal_thread_attach_large_vt (SimpleDelegate del) { #ifdef WIN32 return 43; #else int res; pthread_t t; res = pthread_create (&t, NULL, (gpointer (*)(gpointer))call_managed_large_vt, (gpointer)del); g_assert (res == 0); pthread_join (t, NULL); return call_managed_res; #endif } typedef int (STDCALL *Callback) (void); static Callback callback; LIBTEST_API void STDCALL mono_test_marshal_set_callback (Callback cb) { callback = cb; } LIBTEST_API int STDCALL mono_test_marshal_call_callback (void) { return callback (); } LIBTEST_API int STDCALL mono_test_marshal_lpstr (char *str) { return strcmp ("ABC", str); } LIBTEST_API int STDCALL mono_test_marshal_lpwstr (gunichar2 *str) { char *s; int res; s = g_utf16_to_utf8 (str, -1, NULL, NULL, NULL); res = strcmp ("ABC", s); g_free (s); return res; } LIBTEST_API char* STDCALL mono_test_marshal_return_lpstr (void) { char *res = (char *)marshal_alloc (4); strcpy (res, "XYZ"); return res; } LIBTEST_API gunichar2* STDCALL mono_test_marshal_return_lpwstr (void) { gunichar2 *res = (gunichar2 *)marshal_alloc (8); gunichar2* tmp = g_utf8_to_utf16 ("XYZ", -1, NULL, NULL, NULL); memcpy (res, tmp, 8); g_free (tmp); return res; } typedef #if defined (HOST_WIN32) && defined (HOST_X86) && defined (__GNUC__) // Workaround gcc ABI bug. It returns the struct in ST0 instead of edx:eax. // Mono and Visual C++ agree. union #else struct #endif { double d; } SingleDoubleStruct; LIBTEST_API SingleDoubleStruct STDCALL mono_test_marshal_return_single_double_struct (void) { SingleDoubleStruct res = {3.0}; return res; } LIBTEST_API int STDCALL mono_test_has_thiscall_globals (void) { // Visual C++ does not accept __thiscall on global functions, only // member function and function pointers. Gcc accepts it also on global functions. 
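// Consequently this reports 1 only for 32-bit Windows builds not using MSVC;
// under Visual C++ the global _mono_test_native_thiscall* definitions below
// drop the __thiscall attribute and only the function-pointer form is tested.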
#if defined (HOST_X86) && defined (HOST_WIN32) && !defined (_MSC_VER) return 1; #else return 0; #endif } LIBTEST_API int STDCALL mono_test_has_thiscall_pointers (void) { #if defined (HOST_X86) && defined (HOST_WIN32) return 1; #else return 0; #endif } LIBTEST_API int #ifndef _MSC_VER __thiscall #endif _mono_test_native_thiscall1 (int arg) { return arg; } LIBTEST_API int #ifndef _MSC_VER __thiscall #endif _mono_test_native_thiscall2 (int arg, int arg2) { return arg + (arg2^1); } LIBTEST_API int #ifndef _MSC_VER __thiscall #endif _mono_test_native_thiscall3 (int arg, int arg2, int arg3) { return arg + (arg2^1) + (arg3^2); } typedef int ( #ifndef _MSC_VER __thiscall #endif *ThiscallFunction)(int arg, int arg2); LIBTEST_API ThiscallFunction STDCALL mono_test_get_native_thiscall2 (void) { return _mono_test_native_thiscall2; } LIBTEST_API int STDCALL _mono_test_managed_thiscall1 (int (__thiscall*fn)(int), int arg) { return fn(arg); } LIBTEST_API int STDCALL _mono_test_managed_thiscall2 (int (__thiscall*fn)(int,int), int arg, int arg2) { return fn(arg, arg2); } LIBTEST_API int STDCALL _mono_test_managed_thiscall3 (int (__thiscall*fn)(int,int,int), int arg, int arg2, int arg3) { return fn(arg, arg2, arg3); } typedef struct { char f1; } sbyte1; LIBTEST_API sbyte1 STDCALL mono_return_sbyte1 (sbyte1 s1, int addend) { if (s1.f1 != 1) { fprintf(stderr, "mono_return_sbyte1 s1.f1: got %d but expected %d\n", s1.f1, 1); } s1.f1+=addend; return s1; } typedef struct { char f1,f2; } sbyte2; LIBTEST_API sbyte2 STDCALL mono_return_sbyte2 (sbyte2 s2, int addend) { if (s2.f1 != 1) { fprintf(stderr, "mono_return_sbyte2 s2.f1: got %d but expected %d\n", s2.f1, 1); } if (s2.f2 != 2) { fprintf(stderr, "mono_return_sbyte2 s2.f2: got %d but expected %d\n", s2.f2, 2); } s2.f1+=addend; s2.f2+=addend; return s2; } typedef struct { char f1,f2,f3; } sbyte3; LIBTEST_API sbyte3 STDCALL mono_return_sbyte3 (sbyte3 s3, int addend) { if (s3.f1 != 1) { fprintf(stderr, "mono_return_sbyte3 s3.f1: got %d but expected %d\n", s3.f1, 1); } if (s3.f2 != 2) { fprintf(stderr, "mono_return_sbyte3 s3.f2: got %d but expected %d\n", s3.f2, 2); } if (s3.f3 != 3) { fprintf(stderr, "mono_return_sbyte3 s3.f3: got %d but expected %d\n", s3.f3, 3); } s3.f1+=addend; s3.f2+=addend; s3.f3+=addend; return s3; } typedef struct { char f1,f2,f3,f4; } sbyte4; LIBTEST_API sbyte4 STDCALL mono_return_sbyte4 (sbyte4 s4, int addend) { if (s4.f1 != 1) { fprintf(stderr, "mono_return_sbyte4 s4.f1: got %d but expected %d\n", s4.f1, 1); } if (s4.f2 != 2) { fprintf(stderr, "mono_return_sbyte4 s4.f2: got %d but expected %d\n", s4.f2, 2); } if (s4.f3 != 3) { fprintf(stderr, "mono_return_sbyte4 s4.f3: got %d but expected %d\n", s4.f3, 3); } if (s4.f4 != 4) { fprintf(stderr, "mono_return_sbyte4 s4.f4: got %d but expected %d\n", s4.f4, 4); } s4.f1+=addend; s4.f2+=addend; s4.f3+=addend; s4.f4+=addend; return s4; } typedef struct { char f1,f2,f3,f4,f5; } sbyte5; LIBTEST_API sbyte5 STDCALL mono_return_sbyte5 (sbyte5 s5, int addend) { if (s5.f1 != 1) { fprintf(stderr, "mono_return_sbyte5 s5.f1: got %d but expected %d\n", s5.f1, 1); } if (s5.f2 != 2) { fprintf(stderr, "mono_return_sbyte5 s5.f2: got %d but expected %d\n", s5.f2, 2); } if (s5.f3 != 3) { fprintf(stderr, "mono_return_sbyte5 s5.f3: got %d but expected %d\n", s5.f3, 3); } if (s5.f4 != 4) { fprintf(stderr, "mono_return_sbyte5 s5.f4: got %d but expected %d\n", s5.f4, 4); } if (s5.f5 != 5) { fprintf(stderr, "mono_return_sbyte5 s5.f5: got %d but expected %d\n", s5.f5, 5); } s5.f1+=addend; s5.f2+=addend; s5.f3+=addend; 
s5.f4+=addend; s5.f5+=addend; return s5; } typedef struct { char f1,f2,f3,f4,f5,f6; } sbyte6; LIBTEST_API sbyte6 STDCALL mono_return_sbyte6 (sbyte6 s6, int addend) { if (s6.f1 != 1) { fprintf(stderr, "mono_return_sbyte6 s6.f1: got %d but expected %d\n", s6.f1, 1); } if (s6.f2 != 2) { fprintf(stderr, "mono_return_sbyte6 s6.f2: got %d but expected %d\n", s6.f2, 2); } if (s6.f3 != 3) { fprintf(stderr, "mono_return_sbyte6 s6.f3: got %d but expected %d\n", s6.f3, 3); } if (s6.f4 != 4) { fprintf(stderr, "mono_return_sbyte6 s6.f4: got %d but expected %d\n", s6.f4, 4); } if (s6.f5 != 5) { fprintf(stderr, "mono_return_sbyte6 s6.f5: got %d but expected %d\n", s6.f5, 5); } if (s6.f6 != 6) { fprintf(stderr, "mono_return_sbyte6 s6.f6: got %d but expected %d\n", s6.f6, 6); } s6.f1+=addend; s6.f2+=addend; s6.f3+=addend; s6.f4+=addend; s6.f5+=addend; s6.f6+=addend; return s6; } typedef struct { char f1,f2,f3,f4,f5,f6,f7; } sbyte7; LIBTEST_API sbyte7 STDCALL mono_return_sbyte7 (sbyte7 s7, int addend) { if (s7.f1 != 1) { fprintf(stderr, "mono_return_sbyte7 s7.f1: got %d but expected %d\n", s7.f1, 1); } if (s7.f2 != 2) { fprintf(stderr, "mono_return_sbyte7 s7.f2: got %d but expected %d\n", s7.f2, 2); } if (s7.f3 != 3) { fprintf(stderr, "mono_return_sbyte7 s7.f3: got %d but expected %d\n", s7.f3, 3); } if (s7.f4 != 4) { fprintf(stderr, "mono_return_sbyte7 s7.f4: got %d but expected %d\n", s7.f4, 4); } if (s7.f5 != 5) { fprintf(stderr, "mono_return_sbyte7 s7.f5: got %d but expected %d\n", s7.f5, 5); } if (s7.f6 != 6) { fprintf(stderr, "mono_return_sbyte7 s7.f6: got %d but expected %d\n", s7.f6, 6); } if (s7.f7 != 7) { fprintf(stderr, "mono_return_sbyte7 s7.f7: got %d but expected %d\n", s7.f7, 7); } s7.f1+=addend; s7.f2+=addend; s7.f3+=addend; s7.f4+=addend; s7.f5+=addend; s7.f6+=addend; s7.f7+=addend; return s7; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8; } sbyte8; LIBTEST_API sbyte8 STDCALL mono_return_sbyte8 (sbyte8 s8, int addend) { if (s8.f1 != 1) { fprintf(stderr, "mono_return_sbyte8 s8.f1: got %d but expected %d\n", s8.f1, 1); } if (s8.f2 != 2) { fprintf(stderr, "mono_return_sbyte8 s8.f2: got %d but expected %d\n", s8.f2, 2); } if (s8.f3 != 3) { fprintf(stderr, "mono_return_sbyte8 s8.f3: got %d but expected %d\n", s8.f3, 3); } if (s8.f4 != 4) { fprintf(stderr, "mono_return_sbyte8 s8.f4: got %d but expected %d\n", s8.f4, 4); } if (s8.f5 != 5) { fprintf(stderr, "mono_return_sbyte8 s8.f5: got %d but expected %d\n", s8.f5, 5); } if (s8.f6 != 6) { fprintf(stderr, "mono_return_sbyte8 s8.f6: got %d but expected %d\n", s8.f6, 6); } if (s8.f7 != 7) { fprintf(stderr, "mono_return_sbyte8 s8.f7: got %d but expected %d\n", s8.f7, 7); } if (s8.f8 != 8) { fprintf(stderr, "mono_return_sbyte8 s8.f8: got %d but expected %d\n", s8.f8, 8); } s8.f1+=addend; s8.f2+=addend; s8.f3+=addend; s8.f4+=addend; s8.f5+=addend; s8.f6+=addend; s8.f7+=addend; s8.f8+=addend; return s8; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9; } sbyte9; LIBTEST_API sbyte9 STDCALL mono_return_sbyte9 (sbyte9 s9, int addend) { if (s9.f1 != 1) { fprintf(stderr, "mono_return_sbyte9 s9.f1: got %d but expected %d\n", s9.f1, 1); } if (s9.f2 != 2) { fprintf(stderr, "mono_return_sbyte9 s9.f2: got %d but expected %d\n", s9.f2, 2); } if (s9.f3 != 3) { fprintf(stderr, "mono_return_sbyte9 s9.f3: got %d but expected %d\n", s9.f3, 3); } if (s9.f4 != 4) { fprintf(stderr, "mono_return_sbyte9 s9.f4: got %d but expected %d\n", s9.f4, 4); } if (s9.f5 != 5) { fprintf(stderr, "mono_return_sbyte9 s9.f5: got %d but expected %d\n", s9.f5, 5); } if (s9.f6 != 6) { 
fprintf(stderr, "mono_return_sbyte9 s9.f6: got %d but expected %d\n", s9.f6, 6); } if (s9.f7 != 7) { fprintf(stderr, "mono_return_sbyte9 s9.f7: got %d but expected %d\n", s9.f7, 7); } if (s9.f8 != 8) { fprintf(stderr, "mono_return_sbyte9 s9.f8: got %d but expected %d\n", s9.f8, 8); } if (s9.f9 != 9) { fprintf(stderr, "mono_return_sbyte9 s9.f9: got %d but expected %d\n", s9.f9, 9); } s9.f1+=addend; s9.f2+=addend; s9.f3+=addend; s9.f4+=addend; s9.f5+=addend; s9.f6+=addend; s9.f7+=addend; s9.f8+=addend; s9.f9+=addend; return s9; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10; } sbyte10; LIBTEST_API sbyte10 STDCALL mono_return_sbyte10 (sbyte10 s10, int addend) { if (s10.f1 != 1) { fprintf(stderr, "mono_return_sbyte10 s10.f1: got %d but expected %d\n", s10.f1, 1); } if (s10.f2 != 2) { fprintf(stderr, "mono_return_sbyte10 s10.f2: got %d but expected %d\n", s10.f2, 2); } if (s10.f3 != 3) { fprintf(stderr, "mono_return_sbyte10 s10.f3: got %d but expected %d\n", s10.f3, 3); } if (s10.f4 != 4) { fprintf(stderr, "mono_return_sbyte10 s10.f4: got %d but expected %d\n", s10.f4, 4); } if (s10.f5 != 5) { fprintf(stderr, "mono_return_sbyte10 s10.f5: got %d but expected %d\n", s10.f5, 5); } if (s10.f6 != 6) { fprintf(stderr, "mono_return_sbyte10 s10.f6: got %d but expected %d\n", s10.f6, 6); } if (s10.f7 != 7) { fprintf(stderr, "mono_return_sbyte10 s10.f7: got %d but expected %d\n", s10.f7, 7); } if (s10.f8 != 8) { fprintf(stderr, "mono_return_sbyte10 s10.f8: got %d but expected %d\n", s10.f8, 8); } if (s10.f9 != 9) { fprintf(stderr, "mono_return_sbyte10 s10.f9: got %d but expected %d\n", s10.f9, 9); } if (s10.f10 != 10) { fprintf(stderr, "mono_return_sbyte10 s10.f10: got %d but expected %d\n", s10.f10, 10); } s10.f1+=addend; s10.f2+=addend; s10.f3+=addend; s10.f4+=addend; s10.f5+=addend; s10.f6+=addend; s10.f7+=addend; s10.f8+=addend; s10.f9+=addend; s10.f10+=addend; return s10; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11; } sbyte11; LIBTEST_API sbyte11 STDCALL mono_return_sbyte11 (sbyte11 s11, int addend) { if (s11.f1 != 1) { fprintf(stderr, "mono_return_sbyte11 s11.f1: got %d but expected %d\n", s11.f1, 1); } if (s11.f2 != 2) { fprintf(stderr, "mono_return_sbyte11 s11.f2: got %d but expected %d\n", s11.f2, 2); } if (s11.f3 != 3) { fprintf(stderr, "mono_return_sbyte11 s11.f3: got %d but expected %d\n", s11.f3, 3); } if (s11.f4 != 4) { fprintf(stderr, "mono_return_sbyte11 s11.f4: got %d but expected %d\n", s11.f4, 4); } if (s11.f5 != 5) { fprintf(stderr, "mono_return_sbyte11 s11.f5: got %d but expected %d\n", s11.f5, 5); } if (s11.f6 != 6) { fprintf(stderr, "mono_return_sbyte11 s11.f6: got %d but expected %d\n", s11.f6, 6); } if (s11.f7 != 7) { fprintf(stderr, "mono_return_sbyte11 s11.f7: got %d but expected %d\n", s11.f7, 7); } if (s11.f8 != 8) { fprintf(stderr, "mono_return_sbyte11 s11.f8: got %d but expected %d\n", s11.f8, 8); } if (s11.f9 != 9) { fprintf(stderr, "mono_return_sbyte11 s11.f9: got %d but expected %d\n", s11.f9, 9); } if (s11.f10 != 10) { fprintf(stderr, "mono_return_sbyte11 s11.f10: got %d but expected %d\n", s11.f10, 10); } if (s11.f11 != 11) { fprintf(stderr, "mono_return_sbyte11 s11.f11: got %d but expected %d\n", s11.f11, 11); } s11.f1+=addend; s11.f2+=addend; s11.f3+=addend; s11.f4+=addend; s11.f5+=addend; s11.f6+=addend; s11.f7+=addend; s11.f8+=addend; s11.f9+=addend; s11.f10+=addend; s11.f11+=addend; return s11; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12; } sbyte12; LIBTEST_API sbyte12 STDCALL mono_return_sbyte12 (sbyte12 s12, int addend) 
{ if (s12.f1 != 1) { fprintf(stderr, "mono_return_sbyte12 s12.f1: got %d but expected %d\n", s12.f1, 1); } if (s12.f2 != 2) { fprintf(stderr, "mono_return_sbyte12 s12.f2: got %d but expected %d\n", s12.f2, 2); } if (s12.f3 != 3) { fprintf(stderr, "mono_return_sbyte12 s12.f3: got %d but expected %d\n", s12.f3, 3); } if (s12.f4 != 4) { fprintf(stderr, "mono_return_sbyte12 s12.f4: got %d but expected %d\n", s12.f4, 4); } if (s12.f5 != 5) { fprintf(stderr, "mono_return_sbyte12 s12.f5: got %d but expected %d\n", s12.f5, 5); } if (s12.f6 != 6) { fprintf(stderr, "mono_return_sbyte12 s12.f6: got %d but expected %d\n", s12.f6, 6); } if (s12.f7 != 7) { fprintf(stderr, "mono_return_sbyte12 s12.f7: got %d but expected %d\n", s12.f7, 7); } if (s12.f8 != 8) { fprintf(stderr, "mono_return_sbyte12 s12.f8: got %d but expected %d\n", s12.f8, 8); } if (s12.f9 != 9) { fprintf(stderr, "mono_return_sbyte12 s12.f9: got %d but expected %d\n", s12.f9, 9); } if (s12.f10 != 10) { fprintf(stderr, "mono_return_sbyte12 s12.f10: got %d but expected %d\n", s12.f10, 10); } if (s12.f11 != 11) { fprintf(stderr, "mono_return_sbyte12 s12.f11: got %d but expected %d\n", s12.f11, 11); } if (s12.f12 != 12) { fprintf(stderr, "mono_return_sbyte12 s12.f12: got %d but expected %d\n", s12.f12, 12); } s12.f1+=addend; s12.f2+=addend; s12.f3+=addend; s12.f4+=addend; s12.f5+=addend; s12.f6+=addend; s12.f7+=addend; s12.f8+=addend; s12.f9+=addend; s12.f10+=addend; s12.f11+=addend; s12.f12+=addend; return s12; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13; } sbyte13; LIBTEST_API sbyte13 STDCALL mono_return_sbyte13 (sbyte13 s13, int addend) { if (s13.f1 != 1) { fprintf(stderr, "mono_return_sbyte13 s13.f1: got %d but expected %d\n", s13.f1, 1); } if (s13.f2 != 2) { fprintf(stderr, "mono_return_sbyte13 s13.f2: got %d but expected %d\n", s13.f2, 2); } if (s13.f3 != 3) { fprintf(stderr, "mono_return_sbyte13 s13.f3: got %d but expected %d\n", s13.f3, 3); } if (s13.f4 != 4) { fprintf(stderr, "mono_return_sbyte13 s13.f4: got %d but expected %d\n", s13.f4, 4); } if (s13.f5 != 5) { fprintf(stderr, "mono_return_sbyte13 s13.f5: got %d but expected %d\n", s13.f5, 5); } if (s13.f6 != 6) { fprintf(stderr, "mono_return_sbyte13 s13.f6: got %d but expected %d\n", s13.f6, 6); } if (s13.f7 != 7) { fprintf(stderr, "mono_return_sbyte13 s13.f7: got %d but expected %d\n", s13.f7, 7); } if (s13.f8 != 8) { fprintf(stderr, "mono_return_sbyte13 s13.f8: got %d but expected %d\n", s13.f8, 8); } if (s13.f9 != 9) { fprintf(stderr, "mono_return_sbyte13 s13.f9: got %d but expected %d\n", s13.f9, 9); } if (s13.f10 != 10) { fprintf(stderr, "mono_return_sbyte13 s13.f10: got %d but expected %d\n", s13.f10, 10); } if (s13.f11 != 11) { fprintf(stderr, "mono_return_sbyte13 s13.f11: got %d but expected %d\n", s13.f11, 11); } if (s13.f12 != 12) { fprintf(stderr, "mono_return_sbyte13 s13.f12: got %d but expected %d\n", s13.f12, 12); } if (s13.f13 != 13) { fprintf(stderr, "mono_return_sbyte13 s13.f13: got %d but expected %d\n", s13.f13, 13); } s13.f1+=addend; s13.f2+=addend; s13.f3+=addend; s13.f4+=addend; s13.f5+=addend; s13.f6+=addend; s13.f7+=addend; s13.f8+=addend; s13.f9+=addend; s13.f10+=addend; s13.f11+=addend; s13.f12+=addend; s13.f13+=addend; return s13; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14; } sbyte14; LIBTEST_API sbyte14 STDCALL mono_return_sbyte14 (sbyte14 s14, int addend) { if (s14.f1 != 1) { fprintf(stderr, "mono_return_sbyte14 s14.f1: got %d but expected %d\n", s14.f1, 1); } if (s14.f2 != 2) { fprintf(stderr, 
"mono_return_sbyte14 s14.f2: got %d but expected %d\n", s14.f2, 2); } if (s14.f3 != 3) { fprintf(stderr, "mono_return_sbyte14 s14.f3: got %d but expected %d\n", s14.f3, 3); } if (s14.f4 != 4) { fprintf(stderr, "mono_return_sbyte14 s14.f4: got %d but expected %d\n", s14.f4, 4); } if (s14.f5 != 5) { fprintf(stderr, "mono_return_sbyte14 s14.f5: got %d but expected %d\n", s14.f5, 5); } if (s14.f6 != 6) { fprintf(stderr, "mono_return_sbyte14 s14.f6: got %d but expected %d\n", s14.f6, 6); } if (s14.f7 != 7) { fprintf(stderr, "mono_return_sbyte14 s14.f7: got %d but expected %d\n", s14.f7, 7); } if (s14.f8 != 8) { fprintf(stderr, "mono_return_sbyte14 s14.f8: got %d but expected %d\n", s14.f8, 8); } if (s14.f9 != 9) { fprintf(stderr, "mono_return_sbyte14 s14.f9: got %d but expected %d\n", s14.f9, 9); } if (s14.f10 != 10) { fprintf(stderr, "mono_return_sbyte14 s14.f10: got %d but expected %d\n", s14.f10, 10); } if (s14.f11 != 11) { fprintf(stderr, "mono_return_sbyte14 s14.f11: got %d but expected %d\n", s14.f11, 11); } if (s14.f12 != 12) { fprintf(stderr, "mono_return_sbyte14 s14.f12: got %d but expected %d\n", s14.f12, 12); } if (s14.f13 != 13) { fprintf(stderr, "mono_return_sbyte14 s14.f13: got %d but expected %d\n", s14.f13, 13); } if (s14.f14 != 14) { fprintf(stderr, "mono_return_sbyte14 s14.f14: got %d but expected %d\n", s14.f14, 14); } s14.f1+=addend; s14.f2+=addend; s14.f3+=addend; s14.f4+=addend; s14.f5+=addend; s14.f6+=addend; s14.f7+=addend; s14.f8+=addend; s14.f9+=addend; s14.f10+=addend; s14.f11+=addend; s14.f12+=addend; s14.f13+=addend; s14.f14+=addend; return s14; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15; } sbyte15; LIBTEST_API sbyte15 STDCALL mono_return_sbyte15 (sbyte15 s15, int addend) { if (s15.f1 != 1) { fprintf(stderr, "mono_return_sbyte15 s15.f1: got %d but expected %d\n", s15.f1, 1); } if (s15.f2 != 2) { fprintf(stderr, "mono_return_sbyte15 s15.f2: got %d but expected %d\n", s15.f2, 2); } if (s15.f3 != 3) { fprintf(stderr, "mono_return_sbyte15 s15.f3: got %d but expected %d\n", s15.f3, 3); } if (s15.f4 != 4) { fprintf(stderr, "mono_return_sbyte15 s15.f4: got %d but expected %d\n", s15.f4, 4); } if (s15.f5 != 5) { fprintf(stderr, "mono_return_sbyte15 s15.f5: got %d but expected %d\n", s15.f5, 5); } if (s15.f6 != 6) { fprintf(stderr, "mono_return_sbyte15 s15.f6: got %d but expected %d\n", s15.f6, 6); } if (s15.f7 != 7) { fprintf(stderr, "mono_return_sbyte15 s15.f7: got %d but expected %d\n", s15.f7, 7); } if (s15.f8 != 8) { fprintf(stderr, "mono_return_sbyte15 s15.f8: got %d but expected %d\n", s15.f8, 8); } if (s15.f9 != 9) { fprintf(stderr, "mono_return_sbyte15 s15.f9: got %d but expected %d\n", s15.f9, 9); } if (s15.f10 != 10) { fprintf(stderr, "mono_return_sbyte15 s15.f10: got %d but expected %d\n", s15.f10, 10); } if (s15.f11 != 11) { fprintf(stderr, "mono_return_sbyte15 s15.f11: got %d but expected %d\n", s15.f11, 11); } if (s15.f12 != 12) { fprintf(stderr, "mono_return_sbyte15 s15.f12: got %d but expected %d\n", s15.f12, 12); } if (s15.f13 != 13) { fprintf(stderr, "mono_return_sbyte15 s15.f13: got %d but expected %d\n", s15.f13, 13); } if (s15.f14 != 14) { fprintf(stderr, "mono_return_sbyte15 s15.f14: got %d but expected %d\n", s15.f14, 14); } if (s15.f15 != 15) { fprintf(stderr, "mono_return_sbyte15 s15.f15: got %d but expected %d\n", s15.f15, 15); } s15.f1+=addend; s15.f2+=addend; s15.f3+=addend; s15.f4+=addend; s15.f5+=addend; s15.f6+=addend; s15.f7+=addend; s15.f8+=addend; s15.f9+=addend; s15.f10+=addend; s15.f11+=addend; 
s15.f12+=addend; s15.f13+=addend; s15.f14+=addend; s15.f15+=addend; return s15; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16; } sbyte16; LIBTEST_API sbyte16 STDCALL mono_return_sbyte16 (sbyte16 s16, int addend) { if (s16.f1 != 1) { fprintf(stderr, "mono_return_sbyte16 s16.f1: got %d but expected %d\n", s16.f1, 1); } if (s16.f2 != 2) { fprintf(stderr, "mono_return_sbyte16 s16.f2: got %d but expected %d\n", s16.f2, 2); } if (s16.f3 != 3) { fprintf(stderr, "mono_return_sbyte16 s16.f3: got %d but expected %d\n", s16.f3, 3); } if (s16.f4 != 4) { fprintf(stderr, "mono_return_sbyte16 s16.f4: got %d but expected %d\n", s16.f4, 4); } if (s16.f5 != 5) { fprintf(stderr, "mono_return_sbyte16 s16.f5: got %d but expected %d\n", s16.f5, 5); } if (s16.f6 != 6) { fprintf(stderr, "mono_return_sbyte16 s16.f6: got %d but expected %d\n", s16.f6, 6); } if (s16.f7 != 7) { fprintf(stderr, "mono_return_sbyte16 s16.f7: got %d but expected %d\n", s16.f7, 7); } if (s16.f8 != 8) { fprintf(stderr, "mono_return_sbyte16 s16.f8: got %d but expected %d\n", s16.f8, 8); } if (s16.f9 != 9) { fprintf(stderr, "mono_return_sbyte16 s16.f9: got %d but expected %d\n", s16.f9, 9); } if (s16.f10 != 10) { fprintf(stderr, "mono_return_sbyte16 s16.f10: got %d but expected %d\n", s16.f10, 10); } if (s16.f11 != 11) { fprintf(stderr, "mono_return_sbyte16 s16.f11: got %d but expected %d\n", s16.f11, 11); } if (s16.f12 != 12) { fprintf(stderr, "mono_return_sbyte16 s16.f12: got %d but expected %d\n", s16.f12, 12); } if (s16.f13 != 13) { fprintf(stderr, "mono_return_sbyte16 s16.f13: got %d but expected %d\n", s16.f13, 13); } if (s16.f14 != 14) { fprintf(stderr, "mono_return_sbyte16 s16.f14: got %d but expected %d\n", s16.f14, 14); } if (s16.f15 != 15) { fprintf(stderr, "mono_return_sbyte16 s16.f15: got %d but expected %d\n", s16.f15, 15); } if (s16.f16 != 16) { fprintf(stderr, "mono_return_sbyte16 s16.f16: got %d but expected %d\n", s16.f16, 16); } s16.f1+=addend; s16.f2+=addend; s16.f3+=addend; s16.f4+=addend; s16.f5+=addend; s16.f6+=addend; s16.f7+=addend; s16.f8+=addend; s16.f9+=addend; s16.f10+=addend; s16.f11+=addend; s16.f12+=addend; s16.f13+=addend; s16.f14+=addend; s16.f15+=addend; s16.f16+=addend; return s16; } typedef struct { char f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17; } sbyte17; LIBTEST_API sbyte17 STDCALL mono_return_sbyte17 (sbyte17 s17, int addend) { if (s17.f1 != 1) { fprintf(stderr, "mono_return_sbyte17 s17.f1: got %d but expected %d\n", s17.f1, 1); } if (s17.f2 != 2) { fprintf(stderr, "mono_return_sbyte17 s17.f2: got %d but expected %d\n", s17.f2, 2); } if (s17.f3 != 3) { fprintf(stderr, "mono_return_sbyte17 s17.f3: got %d but expected %d\n", s17.f3, 3); } if (s17.f4 != 4) { fprintf(stderr, "mono_return_sbyte17 s17.f4: got %d but expected %d\n", s17.f4, 4); } if (s17.f5 != 5) { fprintf(stderr, "mono_return_sbyte17 s17.f5: got %d but expected %d\n", s17.f5, 5); } if (s17.f6 != 6) { fprintf(stderr, "mono_return_sbyte17 s17.f6: got %d but expected %d\n", s17.f6, 6); } if (s17.f7 != 7) { fprintf(stderr, "mono_return_sbyte17 s17.f7: got %d but expected %d\n", s17.f7, 7); } if (s17.f8 != 8) { fprintf(stderr, "mono_return_sbyte17 s17.f8: got %d but expected %d\n", s17.f8, 8); } if (s17.f9 != 9) { fprintf(stderr, "mono_return_sbyte17 s17.f9: got %d but expected %d\n", s17.f9, 9); } if (s17.f10 != 10) { fprintf(stderr, "mono_return_sbyte17 s17.f10: got %d but expected %d\n", s17.f10, 10); } if (s17.f11 != 11) { fprintf(stderr, "mono_return_sbyte17 s17.f11: got %d but expected 
%d\n", s17.f11, 11); } if (s17.f12 != 12) { fprintf(stderr, "mono_return_sbyte17 s17.f12: got %d but expected %d\n", s17.f12, 12); } if (s17.f13 != 13) { fprintf(stderr, "mono_return_sbyte17 s17.f13: got %d but expected %d\n", s17.f13, 13); } if (s17.f14 != 14) { fprintf(stderr, "mono_return_sbyte17 s17.f14: got %d but expected %d\n", s17.f14, 14); } if (s17.f15 != 15) { fprintf(stderr, "mono_return_sbyte17 s17.f15: got %d but expected %d\n", s17.f15, 15); } if (s17.f16 != 16) { fprintf(stderr, "mono_return_sbyte17 s17.f16: got %d but expected %d\n", s17.f16, 16); } if (s17.f17 != 17) { fprintf(stderr, "mono_return_sbyte17 s17.f17: got %d but expected %d\n", s17.f17, 17); } s17.f1+=addend; s17.f2+=addend; s17.f3+=addend; s17.f4+=addend; s17.f5+=addend; s17.f6+=addend; s17.f7+=addend; s17.f8+=addend; s17.f9+=addend; s17.f10+=addend; s17.f11+=addend; s17.f12+=addend; s17.f13+=addend; s17.f14+=addend; s17.f15+=addend; s17.f16+=addend; s17.f17+=addend; return s17; } typedef struct { struct { char f1; } nested1; char f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15; struct { char f16; } nested2; } sbyte16_nested; LIBTEST_API sbyte16_nested STDCALL mono_return_sbyte16_nested (sbyte16_nested sn16, int addend) { if (sn16.nested1.f1 != 1) { fprintf(stderr, "mono_return_sbyte16_nested sn16.nested1.f1: got %d but expected %d\n", sn16.nested1.f1, 1); } if (sn16.f2 != 2) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f2: got %d but expected %d\n", sn16.f2, 2); } if (sn16.f3 != 3) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f3: got %d but expected %d\n", sn16.f3, 3); } if (sn16.f4 != 4) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f4: got %d but expected %d\n", sn16.f4, 4); } if (sn16.f5 != 5) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f5: got %d but expected %d\n", sn16.f5, 5); } if (sn16.f6 != 6) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f6: got %d but expected %d\n", sn16.f6, 6); } if (sn16.f7 != 7) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f7: got %d but expected %d\n", sn16.f7, 7); } if (sn16.f8 != 8) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f8: got %d but expected %d\n", sn16.f8, 8); } if (sn16.f9 != 9) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f9: got %d but expected %d\n", sn16.f9, 9); } if (sn16.f10 != 10) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f10: got %d but expected %d\n", sn16.f10, 10); } if (sn16.f11 != 11) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f11: got %d but expected %d\n", sn16.f11, 11); } if (sn16.f12 != 12) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f12: got %d but expected %d\n", sn16.f12, 12); } if (sn16.f13 != 13) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f13: got %d but expected %d\n", sn16.f13, 13); } if (sn16.f14 != 14) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f14: got %d but expected %d\n", sn16.f14, 14); } if (sn16.f15 != 15) { fprintf(stderr, "mono_return_sbyte16_nested sn16.f15: got %d but expected %d\n", sn16.f15, 15); } if (sn16.nested2.f16 != 16) { fprintf(stderr, "mono_return_sbyte16_nested sn16.nested2.f16: got %d but expected %d\n", sn16.nested2.f16, 16); } sn16.nested1.f1+=addend; sn16.f2+=addend; sn16.f3+=addend; sn16.f4+=addend; sn16.f5+=addend; sn16.f6+=addend; sn16.f7+=addend; sn16.f8+=addend; sn16.f9+=addend; sn16.f10+=addend; sn16.f11+=addend; sn16.f12+=addend; sn16.f13+=addend; sn16.f14+=addend; sn16.f15+=addend; sn16.nested2.f16+=addend; return sn16; } typedef struct { short f1; } short1; LIBTEST_API short1 STDCALL mono_return_short1 (short1 s1, int 
addend) { if (s1.f1 != 1) { fprintf(stderr, "mono_return_short1 s1.f1: got %d but expected %d\n", s1.f1, 1); } s1.f1+=addend; return s1; } typedef struct { short f1,f2; } short2; LIBTEST_API short2 STDCALL mono_return_short2 (short2 s2, int addend) { if (s2.f1 != 1) { fprintf(stderr, "mono_return_short2 s2.f1: got %d but expected %d\n", s2.f1, 1); } if (s2.f2 != 2) { fprintf(stderr, "mono_return_short2 s2.f2: got %d but expected %d\n", s2.f2, 2); } s2.f1+=addend; s2.f2+=addend; return s2; } typedef struct { short f1,f2,f3; } short3; LIBTEST_API short3 STDCALL mono_return_short3 (short3 s3, int addend) { if (s3.f1 != 1) { fprintf(stderr, "mono_return_short3 s3.f1: got %d but expected %d\n", s3.f1, 1); } if (s3.f2 != 2) { fprintf(stderr, "mono_return_short3 s3.f2: got %d but expected %d\n", s3.f2, 2); } if (s3.f3 != 3) { fprintf(stderr, "mono_return_short3 s3.f3: got %d but expected %d\n", s3.f3, 3); } s3.f1+=addend; s3.f2+=addend; s3.f3+=addend; return s3; } typedef struct { short f1,f2,f3,f4; } short4; LIBTEST_API short4 STDCALL mono_return_short4 (short4 s4, int addend) { if (s4.f1 != 1) { fprintf(stderr, "mono_return_short4 s4.f1: got %d but expected %d\n", s4.f1, 1); } if (s4.f2 != 2) { fprintf(stderr, "mono_return_short4 s4.f2: got %d but expected %d\n", s4.f2, 2); } if (s4.f3 != 3) { fprintf(stderr, "mono_return_short4 s4.f3: got %d but expected %d\n", s4.f3, 3); } if (s4.f4 != 4) { fprintf(stderr, "mono_return_short4 s4.f4: got %d but expected %d\n", s4.f4, 4); } s4.f1+=addend; s4.f2+=addend; s4.f3+=addend; s4.f4+=addend; return s4; } typedef struct { short f1,f2,f3,f4,f5; } short5; LIBTEST_API short5 STDCALL mono_return_short5 (short5 s5, int addend) { if (s5.f1 != 1) { fprintf(stderr, "mono_return_short5 s5.f1: got %d but expected %d\n", s5.f1, 1); } if (s5.f2 != 2) { fprintf(stderr, "mono_return_short5 s5.f2: got %d but expected %d\n", s5.f2, 2); } if (s5.f3 != 3) { fprintf(stderr, "mono_return_short5 s5.f3: got %d but expected %d\n", s5.f3, 3); } if (s5.f4 != 4) { fprintf(stderr, "mono_return_short5 s5.f4: got %d but expected %d\n", s5.f4, 4); } if (s5.f5 != 5) { fprintf(stderr, "mono_return_short5 s5.f5: got %d but expected %d\n", s5.f5, 5); } s5.f1+=addend; s5.f2+=addend; s5.f3+=addend; s5.f4+=addend; s5.f5+=addend; return s5; } typedef struct { short f1,f2,f3,f4,f5,f6; } short6; LIBTEST_API short6 STDCALL mono_return_short6 (short6 s6, int addend) { if (s6.f1 != 1) { fprintf(stderr, "mono_return_short6 s6.f1: got %d but expected %d\n", s6.f1, 1); } if (s6.f2 != 2) { fprintf(stderr, "mono_return_short6 s6.f2: got %d but expected %d\n", s6.f2, 2); } if (s6.f3 != 3) { fprintf(stderr, "mono_return_short6 s6.f3: got %d but expected %d\n", s6.f3, 3); } if (s6.f4 != 4) { fprintf(stderr, "mono_return_short6 s6.f4: got %d but expected %d\n", s6.f4, 4); } if (s6.f5 != 5) { fprintf(stderr, "mono_return_short6 s6.f5: got %d but expected %d\n", s6.f5, 5); } if (s6.f6 != 6) { fprintf(stderr, "mono_return_short6 s6.f6: got %d but expected %d\n", s6.f6, 6); } s6.f1+=addend; s6.f2+=addend; s6.f3+=addend; s6.f4+=addend; s6.f5+=addend; s6.f6+=addend; return s6; } typedef struct { short f1,f2,f3,f4,f5,f6,f7; } short7; LIBTEST_API short7 STDCALL mono_return_short7 (short7 s7, int addend) { if (s7.f1 != 1) { fprintf(stderr, "mono_return_short7 s7.f1: got %d but expected %d\n", s7.f1, 1); } if (s7.f2 != 2) { fprintf(stderr, "mono_return_short7 s7.f2: got %d but expected %d\n", s7.f2, 2); } if (s7.f3 != 3) { fprintf(stderr, "mono_return_short7 s7.f3: got %d but expected %d\n", s7.f3, 3); } if (s7.f4 
!= 4) { fprintf(stderr, "mono_return_short7 s7.f4: got %d but expected %d\n", s7.f4, 4); } if (s7.f5 != 5) { fprintf(stderr, "mono_return_short7 s7.f5: got %d but expected %d\n", s7.f5, 5); } if (s7.f6 != 6) { fprintf(stderr, "mono_return_short7 s7.f6: got %d but expected %d\n", s7.f6, 6); } if (s7.f7 != 7) { fprintf(stderr, "mono_return_short7 s7.f7: got %d but expected %d\n", s7.f7, 7); } s7.f1+=addend; s7.f2+=addend; s7.f3+=addend; s7.f4+=addend; s7.f5+=addend; s7.f6+=addend; s7.f7+=addend; return s7; } typedef struct { short f1,f2,f3,f4,f5,f6,f7,f8; } short8; LIBTEST_API short8 STDCALL mono_return_short8 (short8 s8, int addend) { if (s8.f1 != 1) { fprintf(stderr, "mono_return_short8 s8.f1: got %d but expected %d\n", s8.f1, 1); } if (s8.f2 != 2) { fprintf(stderr, "mono_return_short8 s8.f2: got %d but expected %d\n", s8.f2, 2); } if (s8.f3 != 3) { fprintf(stderr, "mono_return_short8 s8.f3: got %d but expected %d\n", s8.f3, 3); } if (s8.f4 != 4) { fprintf(stderr, "mono_return_short8 s8.f4: got %d but expected %d\n", s8.f4, 4); } if (s8.f5 != 5) { fprintf(stderr, "mono_return_short8 s8.f5: got %d but expected %d\n", s8.f5, 5); } if (s8.f6 != 6) { fprintf(stderr, "mono_return_short8 s8.f6: got %d but expected %d\n", s8.f6, 6); } if (s8.f7 != 7) { fprintf(stderr, "mono_return_short8 s8.f7: got %d but expected %d\n", s8.f7, 7); } if (s8.f8 != 8) { fprintf(stderr, "mono_return_short8 s8.f8: got %d but expected %d\n", s8.f8, 8); } s8.f1+=addend; s8.f2+=addend; s8.f3+=addend; s8.f4+=addend; s8.f5+=addend; s8.f6+=addend; s8.f7+=addend; s8.f8+=addend; return s8; } typedef struct { short f1,f2,f3,f4,f5,f6,f7,f8,f9; } short9; LIBTEST_API short9 STDCALL mono_return_short9 (short9 s9, int addend) { if (s9.f1 != 1) { fprintf(stderr, "mono_return_short9 s9.f1: got %d but expected %d\n", s9.f1, 1); } if (s9.f2 != 2) { fprintf(stderr, "mono_return_short9 s9.f2: got %d but expected %d\n", s9.f2, 2); } if (s9.f3 != 3) { fprintf(stderr, "mono_return_short9 s9.f3: got %d but expected %d\n", s9.f3, 3); } if (s9.f4 != 4) { fprintf(stderr, "mono_return_short9 s9.f4: got %d but expected %d\n", s9.f4, 4); } if (s9.f5 != 5) { fprintf(stderr, "mono_return_short9 s9.f5: got %d but expected %d\n", s9.f5, 5); } if (s9.f6 != 6) { fprintf(stderr, "mono_return_short9 s9.f6: got %d but expected %d\n", s9.f6, 6); } if (s9.f7 != 7) { fprintf(stderr, "mono_return_short9 s9.f7: got %d but expected %d\n", s9.f7, 7); } if (s9.f8 != 8) { fprintf(stderr, "mono_return_short9 s9.f8: got %d but expected %d\n", s9.f8, 8); } if (s9.f9 != 9) { fprintf(stderr, "mono_return_short9 s9.f9: got %d but expected %d\n", s9.f9, 9); } s9.f1+=addend; s9.f2+=addend; s9.f3+=addend; s9.f4+=addend; s9.f5+=addend; s9.f6+=addend; s9.f7+=addend; s9.f8+=addend; s9.f9+=addend; return s9; } typedef struct { struct { short f1; } nested1; short f2,f3,f4,f5,f6,f7; struct { short f8; } nested2; } short8_nested; LIBTEST_API short8_nested STDCALL mono_return_short8_nested (short8_nested sn8, int addend) { if (sn8.nested1.f1 != 1) { fprintf(stderr, "mono_return_short8_nested sn8.nested1.f1: got %d but expected %d\n", sn8.nested1.f1, 1); } if (sn8.f2 != 2) { fprintf(stderr, "mono_return_short8_nested sn8.f2: got %d but expected %d\n", sn8.f2, 2); } if (sn8.f3 != 3) { fprintf(stderr, "mono_return_short8_nested sn8.f3: got %d but expected %d\n", sn8.f3, 3); } if (sn8.f4 != 4) { fprintf(stderr, "mono_return_short8_nested sn8.f4: got %d but expected %d\n", sn8.f4, 4); } if (sn8.f5 != 5) { fprintf(stderr, "mono_return_short8_nested sn8.f5: got %d but expected %d\n", 
sn8.f5, 5); } if (sn8.f6 != 6) { fprintf(stderr, "mono_return_short8_nested sn8.f6: got %d but expected %d\n", sn8.f6, 6); } if (sn8.f7 != 7) { fprintf(stderr, "mono_return_short8_nested sn8.f7: got %d but expected %d\n", sn8.f7, 7); } if (sn8.nested2.f8 != 8) { fprintf(stderr, "mono_return_short8_nested sn8.nested2.f8: got %d but expected %d\n", sn8.nested2.f8, 8); } sn8.nested1.f1+=addend; sn8.f2+=addend; sn8.f3+=addend; sn8.f4+=addend; sn8.f5+=addend; sn8.f6+=addend; sn8.f7+=addend; sn8.nested2.f8+=addend; return sn8; } typedef struct { int f1; } int1; LIBTEST_API int1 STDCALL mono_return_int1 (int1 s1, int addend) { if (s1.f1 != 1) { fprintf(stderr, "mono_return_int1 s1.f1: got %d but expected %d\n", s1.f1, 1); } s1.f1+=addend; return s1; } typedef struct { int f1,f2; } int2; LIBTEST_API int2 STDCALL mono_return_int2 (int2 s2, int addend) { if (s2.f1 != 1) { fprintf(stderr, "mono_return_int2 s2.f1: got %d but expected %d\n", s2.f1, 1); } if (s2.f2 != 2) { fprintf(stderr, "mono_return_int2 s2.f2: got %d but expected %d\n", s2.f2, 2); } s2.f1+=addend; s2.f2+=addend; return s2; } typedef struct { int f1,f2,f3; } int3; LIBTEST_API int3 STDCALL mono_return_int3 (int3 s3, int addend) { if (s3.f1 != 1) { fprintf(stderr, "mono_return_int3 s3.f1: got %d but expected %d\n", s3.f1, 1); } if (s3.f2 != 2) { fprintf(stderr, "mono_return_int3 s3.f2: got %d but expected %d\n", s3.f2, 2); } if (s3.f3 != 3) { fprintf(stderr, "mono_return_int3 s3.f3: got %d but expected %d\n", s3.f3, 3); } s3.f1+=addend; s3.f2+=addend; s3.f3+=addend; return s3; } typedef struct { int f1,f2,f3,f4; } int4; LIBTEST_API int4 STDCALL mono_return_int4 (int4 s4, int addend) { if (s4.f1 != 1) { fprintf(stderr, "mono_return_int4 s4.f1: got %d but expected %d\n", s4.f1, 1); } if (s4.f2 != 2) { fprintf(stderr, "mono_return_int4 s4.f2: got %d but expected %d\n", s4.f2, 2); } if (s4.f3 != 3) { fprintf(stderr, "mono_return_int4 s4.f3: got %d but expected %d\n", s4.f3, 3); } if (s4.f4 != 4) { fprintf(stderr, "mono_return_int4 s4.f4: got %d but expected %d\n", s4.f4, 4); } s4.f1+=addend; s4.f2+=addend; s4.f3+=addend; s4.f4+=addend; return s4; } typedef struct { int f1,f2,f3,f4,f5; } int5; LIBTEST_API int5 STDCALL mono_return_int5 (int5 s5, int addend) { if (s5.f1 != 1) { fprintf(stderr, "mono_return_int5 s5.f1: got %d but expected %d\n", s5.f1, 1); } if (s5.f2 != 2) { fprintf(stderr, "mono_return_int5 s5.f2: got %d but expected %d\n", s5.f2, 2); } if (s5.f3 != 3) { fprintf(stderr, "mono_return_int5 s5.f3: got %d but expected %d\n", s5.f3, 3); } if (s5.f4 != 4) { fprintf(stderr, "mono_return_int5 s5.f4: got %d but expected %d\n", s5.f4, 4); } if (s5.f5 != 5) { fprintf(stderr, "mono_return_int5 s5.f5: got %d but expected %d\n", s5.f5, 5); } s5.f1+=addend; s5.f2+=addend; s5.f3+=addend; s5.f4+=addend; s5.f5+=addend; return s5; } typedef struct { struct { int f1; } nested1; int f2,f3; struct { int f4; } nested2; } int4_nested; LIBTEST_API int4_nested STDCALL mono_return_int4_nested (int4_nested sn4, int addend) { if (sn4.nested1.f1 != 1) { fprintf(stderr, "mono_return_int4_nested sn4.nested1.f1: got %d but expected %d\n", sn4.nested1.f1, 1); } if (sn4.f2 != 2) { fprintf(stderr, "mono_return_int4_nested sn4.f2: got %d but expected %d\n", sn4.f2, 2); } if (sn4.f3 != 3) { fprintf(stderr, "mono_return_int4_nested sn4.f3: got %d but expected %d\n", sn4.f3, 3); } if (sn4.nested2.f4 != 4) { fprintf(stderr, "mono_return_int4_nested sn4.nested2.f4: got %d but expected %d\n", sn4.nested2.f4, 4); } sn4.nested1.f1+=addend; sn4.f2+=addend; 
sn4.f3+=addend; sn4.nested2.f4+=addend; return sn4; } typedef struct { float f1; } float1; LIBTEST_API float1 STDCALL mono_return_float1 (float1 s1, int addend) { if (s1.f1 != 1) { fprintf(stderr, "mono_return_float1 s1.f1: got %f but expected %d\n", s1.f1, 1); } s1.f1+=addend; return s1; } typedef struct { float f1,f2; } float2; LIBTEST_API float2 STDCALL mono_return_float2 (float2 s2, int addend) { if (s2.f1 != 1) { fprintf(stderr, "mono_return_float2 s2.f1: got %f but expected %d\n", s2.f1, 1); } if (s2.f2 != 2) { fprintf(stderr, "mono_return_float2 s2.f2: got %f but expected %d\n", s2.f2, 2); } s2.f1+=addend; s2.f2+=addend; return s2; } typedef struct { float f1,f2,f3; } float3; LIBTEST_API float3 STDCALL mono_return_float3 (float3 s3, int addend) { if (s3.f1 != 1) { fprintf(stderr, "mono_return_float3 s3.f1: got %f but expected %d\n", s3.f1, 1); } if (s3.f2 != 2) { fprintf(stderr, "mono_return_float3 s3.f2: got %f but expected %d\n", s3.f2, 2); } if (s3.f3 != 3) { fprintf(stderr, "mono_return_float3 s3.f3: got %f but expected %d\n", s3.f3, 3); } s3.f1+=addend; s3.f2+=addend; s3.f3+=addend; return s3; } typedef struct { float f1,f2,f3,f4; } float4; LIBTEST_API float4 STDCALL mono_return_float4 (float4 s4, int addend) { if (s4.f1 != 1) { fprintf(stderr, "mono_return_float4 s4.f1: got %f but expected %d\n", s4.f1, 1); } if (s4.f2 != 2) { fprintf(stderr, "mono_return_float4 s4.f2: got %f but expected %d\n", s4.f2, 2); } if (s4.f3 != 3) { fprintf(stderr, "mono_return_float4 s4.f3: got %f but expected %d\n", s4.f3, 3); } if (s4.f4 != 4) { fprintf(stderr, "mono_return_float4 s4.f4: got %f but expected %d\n", s4.f4, 4); } s4.f1+=addend; s4.f2+=addend; s4.f3+=addend; s4.f4+=addend; return s4; } typedef struct { float f1,f2,f3,f4,f5; } float5; LIBTEST_API float5 STDCALL mono_return_float5 (float5 s5, int addend) { if (s5.f1 != 1) { fprintf(stderr, "mono_return_float5 s5.f1: got %f but expected %d\n", s5.f1, 1); } if (s5.f2 != 2) { fprintf(stderr, "mono_return_float5 s5.f2: got %f but expected %d\n", s5.f2, 2); } if (s5.f3 != 3) { fprintf(stderr, "mono_return_float5 s5.f3: got %f but expected %d\n", s5.f3, 3); } if (s5.f4 != 4) { fprintf(stderr, "mono_return_float5 s5.f4: got %f but expected %d\n", s5.f4, 4); } if (s5.f5 != 5) { fprintf(stderr, "mono_return_float5 s5.f5: got %f but expected %d\n", s5.f5, 5); } s5.f1+=addend; s5.f2+=addend; s5.f3+=addend; s5.f4+=addend; s5.f5+=addend; return s5; } typedef struct { float f1,f2,f3,f4,f5,f6; } float6; LIBTEST_API float6 STDCALL mono_return_float6 (float6 s6, int addend) { if (s6.f1 != 1) { fprintf(stderr, "mono_return_float6 s6.f1: got %f but expected %d\n", s6.f1, 1); } if (s6.f2 != 2) { fprintf(stderr, "mono_return_float6 s6.f2: got %f but expected %d\n", s6.f2, 2); } if (s6.f3 != 3) { fprintf(stderr, "mono_return_float6 s6.f3: got %f but expected %d\n", s6.f3, 3); } if (s6.f4 != 4) { fprintf(stderr, "mono_return_float6 s6.f4: got %f but expected %d\n", s6.f4, 4); } if (s6.f5 != 5) { fprintf(stderr, "mono_return_float6 s6.f5: got %f but expected %d\n", s6.f5, 5); } if (s6.f6 != 6) { fprintf(stderr, "mono_return_float6 s6.f6: got %f but expected %d\n", s6.f6, 6); } s6.f1+=addend; s6.f2+=addend; s6.f3+=addend; s6.f4+=addend; s6.f5+=addend; s6.f6+=addend; return s6; } typedef struct { float f1,f2,f3,f4,f5,f6,f7; } float7; LIBTEST_API float7 STDCALL mono_return_float7 (float7 s7, int addend) { if (s7.f1 != 1) { fprintf(stderr, "mono_return_float7 s7.f1: got %f but expected %d\n", s7.f1, 1); } if (s7.f2 != 2) { fprintf(stderr, "mono_return_float7 
s7.f2: got %f but expected %d\n", s7.f2, 2); } if (s7.f3 != 3) { fprintf(stderr, "mono_return_float7 s7.f3: got %f but expected %d\n", s7.f3, 3); } if (s7.f4 != 4) { fprintf(stderr, "mono_return_float7 s7.f4: got %f but expected %d\n", s7.f4, 4); } if (s7.f5 != 5) { fprintf(stderr, "mono_return_float7 s7.f5: got %f but expected %d\n", s7.f5, 5); } if (s7.f6 != 6) { fprintf(stderr, "mono_return_float7 s7.f6: got %f but expected %d\n", s7.f6, 6); } if (s7.f7 != 7) { fprintf(stderr, "mono_return_float7 s7.f7: got %f but expected %d\n", s7.f7, 7); } s7.f1+=addend; s7.f2+=addend; s7.f3+=addend; s7.f4+=addend; s7.f5+=addend; s7.f6+=addend; s7.f7+=addend; return s7; } typedef struct { float f1,f2,f3,f4,f5,f6,f7,f8; } float8; LIBTEST_API float8 STDCALL mono_return_float8 (float8 s8, int addend) { if (s8.f1 != 1) { fprintf(stderr, "mono_return_float8 s8.f1: got %f but expected %d\n", s8.f1, 1); } if (s8.f2 != 2) { fprintf(stderr, "mono_return_float8 s8.f2: got %f but expected %d\n", s8.f2, 2); } if (s8.f3 != 3) { fprintf(stderr, "mono_return_float8 s8.f3: got %f but expected %d\n", s8.f3, 3); } if (s8.f4 != 4) { fprintf(stderr, "mono_return_float8 s8.f4: got %f but expected %d\n", s8.f4, 4); } if (s8.f5 != 5) { fprintf(stderr, "mono_return_float8 s8.f5: got %f but expected %d\n", s8.f5, 5); } if (s8.f6 != 6) { fprintf(stderr, "mono_return_float8 s8.f6: got %f but expected %d\n", s8.f6, 6); } if (s8.f7 != 7) { fprintf(stderr, "mono_return_float8 s8.f7: got %f but expected %d\n", s8.f7, 7); } if (s8.f8 != 8) { fprintf(stderr, "mono_return_float8 s8.f8: got %f but expected %d\n", s8.f8, 8); } s8.f1+=addend; s8.f2+=addend; s8.f3+=addend; s8.f4+=addend; s8.f5+=addend; s8.f6+=addend; s8.f7+=addend; s8.f8+=addend; return s8; } typedef struct { float f1,f2,f3,f4,f5,f6,f7,f8,f9; } float9; LIBTEST_API float9 STDCALL mono_return_float9 (float9 s9, int addend) { if (s9.f1 != 1) { fprintf(stderr, "mono_return_float9 s9.f1: got %f but expected %d\n", s9.f1, 1); } if (s9.f2 != 2) { fprintf(stderr, "mono_return_float9 s9.f2: got %f but expected %d\n", s9.f2, 2); } if (s9.f3 != 3) { fprintf(stderr, "mono_return_float9 s9.f3: got %f but expected %d\n", s9.f3, 3); } if (s9.f4 != 4) { fprintf(stderr, "mono_return_float9 s9.f4: got %f but expected %d\n", s9.f4, 4); } if (s9.f5 != 5) { fprintf(stderr, "mono_return_float9 s9.f5: got %f but expected %d\n", s9.f5, 5); } if (s9.f6 != 6) { fprintf(stderr, "mono_return_float9 s9.f6: got %f but expected %d\n", s9.f6, 6); } if (s9.f7 != 7) { fprintf(stderr, "mono_return_float9 s9.f7: got %f but expected %d\n", s9.f7, 7); } if (s9.f8 != 8) { fprintf(stderr, "mono_return_float9 s9.f8: got %f but expected %d\n", s9.f8, 8); } if (s9.f9 != 9) { fprintf(stderr, "mono_return_float9 s9.f9: got %f but expected %d\n", s9.f9, 9); } s9.f1+=addend; s9.f2+=addend; s9.f3+=addend; s9.f4+=addend; s9.f5+=addend; s9.f6+=addend; s9.f7+=addend; s9.f8+=addend; s9.f9+=addend; return s9; } typedef struct { struct { float f1; } nested1; float f2,f3; struct { float f4; } nested2; } float4_nested; LIBTEST_API float4_nested STDCALL mono_return_float4_nested (float4_nested sn4, int addend) { if (sn4.nested1.f1 != 1) { fprintf(stderr, "mono_return_float4_nested sn4.nested1.f1: got %f but expected %d\n", sn4.nested1.f1, 1); } if (sn4.f2 != 2) { fprintf(stderr, "mono_return_float4_nested sn4.f2: got %f but expected %d\n", sn4.f2, 2); } if (sn4.f3 != 3) { fprintf(stderr, "mono_return_float4_nested sn4.f3: got %f but expected %d\n", sn4.f3, 3); } if (sn4.nested2.f4 != 4) { fprintf(stderr, 
"mono_return_float4_nested sn4.nested2.f4: got %f but expected %d\n", sn4.nested2.f4, 4); } sn4.nested1.f1+=addend; sn4.f2+=addend; sn4.f3+=addend; sn4.nested2.f4+=addend; return sn4; } typedef struct { double f1; } double1; LIBTEST_API double1 STDCALL mono_return_double1 (double1 s1, int addend) { if (s1.f1 != 1) { fprintf(stderr, "mono_return_double1 s1.f1: got %f but expected %d\n", s1.f1, 1); } s1.f1+=addend; return s1; } typedef struct { double f1,f2; } double2; LIBTEST_API double2 STDCALL mono_return_double2 (double2 s2, int addend) { if (s2.f1 != 1) { fprintf(stderr, "mono_return_double2 s2.f1: got %f but expected %d\n", s2.f1, 1); } if (s2.f2 != 2) { fprintf(stderr, "mono_return_double2 s2.f2: got %f but expected %d\n", s2.f2, 2); } s2.f1+=addend; s2.f2+=addend; return s2; } typedef struct { double f1,f2,f3; } double3; LIBTEST_API double3 STDCALL mono_return_double3 (double3 s3, int addend) { if (s3.f1 != 1) { fprintf(stderr, "mono_return_double3 s3.f1: got %f but expected %d\n", s3.f1, 1); } if (s3.f2 != 2) { fprintf(stderr, "mono_return_double3 s3.f2: got %f but expected %d\n", s3.f2, 2); } if (s3.f3 != 3) { fprintf(stderr, "mono_return_double3 s3.f3: got %f but expected %d\n", s3.f3, 3); } s3.f1+=addend; s3.f2+=addend; s3.f3+=addend; return s3; } typedef struct { double f1,f2,f3,f4; } double4; LIBTEST_API double4 STDCALL mono_return_double4 (double4 s4, int addend) { if (s4.f1 != 1) { fprintf(stderr, "mono_return_double4 s4.f1: got %f but expected %d\n", s4.f1, 1); } if (s4.f2 != 2) { fprintf(stderr, "mono_return_double4 s4.f2: got %f but expected %d\n", s4.f2, 2); } if (s4.f3 != 3) { fprintf(stderr, "mono_return_double4 s4.f3: got %f but expected %d\n", s4.f3, 3); } if (s4.f4 != 4) { fprintf(stderr, "mono_return_double4 s4.f4: got %f but expected %d\n", s4.f4, 4); } s4.f1+=addend; s4.f2+=addend; s4.f3+=addend; s4.f4+=addend; return s4; } typedef struct { double f1,f2,f3,f4,f5; } double5; LIBTEST_API double5 STDCALL mono_return_double5 (double5 s5, int addend) { if (s5.f1 != 1) { fprintf(stderr, "mono_return_double5 s5.f1: got %f but expected %d\n", s5.f1, 1); } if (s5.f2 != 2) { fprintf(stderr, "mono_return_double5 s5.f2: got %f but expected %d\n", s5.f2, 2); } if (s5.f3 != 3) { fprintf(stderr, "mono_return_double5 s5.f3: got %f but expected %d\n", s5.f3, 3); } if (s5.f4 != 4) { fprintf(stderr, "mono_return_double5 s5.f4: got %f but expected %d\n", s5.f4, 4); } if (s5.f5 != 5) { fprintf(stderr, "mono_return_double5 s5.f5: got %f but expected %d\n", s5.f5, 5); } s5.f1+=addend; s5.f2+=addend; s5.f3+=addend; s5.f4+=addend; s5.f5+=addend; return s5; } typedef struct { double f1,f2,f3,f4,f5,f6; } double6; LIBTEST_API double6 STDCALL mono_return_double6 (double6 s6, int addend) { if (s6.f1 != 1) { fprintf(stderr, "mono_return_double6 s6.f1: got %f but expected %d\n", s6.f1, 1); } if (s6.f2 != 2) { fprintf(stderr, "mono_return_double6 s6.f2: got %f but expected %d\n", s6.f2, 2); } if (s6.f3 != 3) { fprintf(stderr, "mono_return_double6 s6.f3: got %f but expected %d\n", s6.f3, 3); } if (s6.f4 != 4) { fprintf(stderr, "mono_return_double6 s6.f4: got %f but expected %d\n", s6.f4, 4); } if (s6.f5 != 5) { fprintf(stderr, "mono_return_double6 s6.f5: got %f but expected %d\n", s6.f5, 5); } if (s6.f6 != 6) { fprintf(stderr, "mono_return_double6 s6.f6: got %f but expected %d\n", s6.f6, 6); } s6.f1+=addend; s6.f2+=addend; s6.f3+=addend; s6.f4+=addend; s6.f5+=addend; s6.f6+=addend; return s6; } typedef struct { double f1,f2,f3,f4,f5,f6,f7; } double7; LIBTEST_API double7 STDCALL mono_return_double7 
(double7 s7, int addend) { if (s7.f1 != 1) { fprintf(stderr, "mono_return_double7 s7.f1: got %f but expected %d\n", s7.f1, 1); } if (s7.f2 != 2) { fprintf(stderr, "mono_return_double7 s7.f2: got %f but expected %d\n", s7.f2, 2); } if (s7.f3 != 3) { fprintf(stderr, "mono_return_double7 s7.f3: got %f but expected %d\n", s7.f3, 3); } if (s7.f4 != 4) { fprintf(stderr, "mono_return_double7 s7.f4: got %f but expected %d\n", s7.f4, 4); } if (s7.f5 != 5) { fprintf(stderr, "mono_return_double7 s7.f5: got %f but expected %d\n", s7.f5, 5); } if (s7.f6 != 6) { fprintf(stderr, "mono_return_double7 s7.f6: got %f but expected %d\n", s7.f6, 6); } if (s7.f7 != 7) { fprintf(stderr, "mono_return_double7 s7.f7: got %f but expected %d\n", s7.f7, 7); } s7.f1+=addend; s7.f2+=addend; s7.f3+=addend; s7.f4+=addend; s7.f5+=addend; s7.f6+=addend; s7.f7+=addend; return s7; } typedef struct { double f1,f2,f3,f4,f5,f6,f7,f8; } double8; LIBTEST_API double8 STDCALL mono_return_double8 (double8 s8, int addend) { if (s8.f1 != 1) { fprintf(stderr, "mono_return_double8 s8.f1: got %f but expected %d\n", s8.f1, 1); } if (s8.f2 != 2) { fprintf(stderr, "mono_return_double8 s8.f2: got %f but expected %d\n", s8.f2, 2); } if (s8.f3 != 3) { fprintf(stderr, "mono_return_double8 s8.f3: got %f but expected %d\n", s8.f3, 3); } if (s8.f4 != 4) { fprintf(stderr, "mono_return_double8 s8.f4: got %f but expected %d\n", s8.f4, 4); } if (s8.f5 != 5) { fprintf(stderr, "mono_return_double8 s8.f5: got %f but expected %d\n", s8.f5, 5); } if (s8.f6 != 6) { fprintf(stderr, "mono_return_double8 s8.f6: got %f but expected %d\n", s8.f6, 6); } if (s8.f7 != 7) { fprintf(stderr, "mono_return_double8 s8.f7: got %f but expected %d\n", s8.f7, 7); } if (s8.f8 != 8) { fprintf(stderr, "mono_return_double8 s8.f8: got %f but expected %d\n", s8.f8, 8); } s8.f1+=addend; s8.f2+=addend; s8.f3+=addend; s8.f4+=addend; s8.f5+=addend; s8.f6+=addend; s8.f7+=addend; s8.f8+=addend; return s8; } typedef struct { double f1,f2,f3,f4,f5,f6,f7,f8,f9; } double9; LIBTEST_API double9 STDCALL mono_return_double9 (double9 s9, int addend) { if (s9.f1 != 1) { fprintf(stderr, "mono_return_double9 s9.f1: got %f but expected %d\n", s9.f1, 1); } if (s9.f2 != 2) { fprintf(stderr, "mono_return_double9 s9.f2: got %f but expected %d\n", s9.f2, 2); } if (s9.f3 != 3) { fprintf(stderr, "mono_return_double9 s9.f3: got %f but expected %d\n", s9.f3, 3); } if (s9.f4 != 4) { fprintf(stderr, "mono_return_double9 s9.f4: got %f but expected %d\n", s9.f4, 4); } if (s9.f5 != 5) { fprintf(stderr, "mono_return_double9 s9.f5: got %f but expected %d\n", s9.f5, 5); } if (s9.f6 != 6) { fprintf(stderr, "mono_return_double9 s9.f6: got %f but expected %d\n", s9.f6, 6); } if (s9.f7 != 7) { fprintf(stderr, "mono_return_double9 s9.f7: got %f but expected %d\n", s9.f7, 7); } if (s9.f8 != 8) { fprintf(stderr, "mono_return_double9 s9.f8: got %f but expected %d\n", s9.f8, 8); } if (s9.f9 != 9) { fprintf(stderr, "mono_return_double9 s9.f9: got %f but expected %d\n", s9.f9, 9); } s9.f1+=addend; s9.f2+=addend; s9.f3+=addend; s9.f4+=addend; s9.f5+=addend; s9.f6+=addend; s9.f7+=addend; s9.f8+=addend; s9.f9+=addend; return s9; } typedef struct { struct { double f1; } nested1; struct { double f2; } nested2; } double2_nested; LIBTEST_API double2_nested STDCALL mono_return_double2_nested (double2_nested sn2, int addend) { if (sn2.nested1.f1 != 1) { fprintf(stderr, "mono_return_double2_nested sn2.nested1.f1: got %f but expected %d\n", sn2.nested1.f1, 1); } if (sn2.nested2.f2 != 2) { fprintf(stderr, "mono_return_double2_nested 
sn2.nested2.f2: got %f but expected %d\n", sn2.nested2.f2, 2);
	}
	sn2.nested1.f1+=addend;
	sn2.nested2.f2+=addend;
	return sn2;
}

typedef struct {
	double f1[4];
} double_array4;

LIBTEST_API double_array4 STDCALL
mono_return_double_array4 (double_array4 sa4, int addend)
{
	if (sa4.f1[0] != 1) {
		fprintf(stderr, "mono_return_double_array4 sa4.f1[0]: got %f but expected %d\n", sa4.f1[0], 1);
	}
	if (sa4.f1[1] != 2) {
		fprintf(stderr, "mono_return_double_array4 sa4.f1[1]: got %f but expected %d\n", sa4.f1[1], 2);
	}
	if (sa4.f1[2] != 3) {
		fprintf(stderr, "mono_return_double_array4 sa4.f1[2]: got %f but expected %d\n", sa4.f1[2], 3);
	}
	if (sa4.f1[3] != 4) {
		fprintf(stderr, "mono_return_double_array4 sa4.f1[3]: got %f but expected %d\n", sa4.f1[3], 4);
	}
	sa4.f1[0]+=addend;
	sa4.f1[1]+=addend;
	sa4.f1[2]+=addend;
	sa4.f1[3]+=addend;
	return sa4;
}

typedef struct {
	int array [3];
} FixedArrayStruct;

LIBTEST_API int STDCALL
mono_test_marshal_fixed_array (FixedArrayStruct s)
{
	return s.array [0] + s.array [1] + s.array [2];
}

typedef struct {
	char array [16];
	char c;
} FixedBufferChar;

LIBTEST_API int STDCALL
mono_test_marshal_fixed_buffer_char (FixedBufferChar *s)
{
	if (!(s->array [0] == 'A' && s->array [1] == 'B' && s->array [2] == 'C' && s->c == 'D'))
		return 1;
	s->array [0] = 'E';
	s->array [1] = 'F';
	s->c = 'G';
	return 0;
}

typedef struct {
	short array [16];
	short c;
} FixedBufferUnicode;

LIBTEST_API int STDCALL
mono_test_marshal_fixed_buffer_unicode (FixedBufferUnicode *s)
{
	if (!(s->array [0] == 'A' && s->array [1] == 'B' && s->array [2] == 'C' && s->c == 'D'))
		return 1;
	s->array [0] = 'E';
	s->array [1] = 'F';
	s->c = 'G';
	return 0;
}

const int NSTRINGS = 6;
// test strings
const char *utf8Strings[] = {
	"Managed",
	"Sîne klâwen durh die wolken sint geslagen",
	"काचं शक्नोम्यत्तुम् । नोपहिनस्ति माम्",
	"我能吞下玻璃而不伤身体",
	"ღმერთსი შემვედრე,შემვედრე, ნუთუ კვლა დამხსნას შემვედრე,სოფლისა შემვედრე, შემვედრე,შემვედრე,შემვედრე,შრომასა, ცეცხლს, წყალსა და მიწასა, ჰაერთა თანა მრომასა; მომცნეს ფრთენი და აღვფრინდე, მივჰხვდე მას ჩემსა ნდომასა, დღისით და ღამით ვჰხედვიდე მზისა ელვათა კრთომაასაშემვედრე,შემვედრე,",
	"Τη γλώσσα μου έδωσαν ελληνική",
	"\0"
};

LIBTEST_API char *
build_return_string(const char* pReturn)
{
	char *ret = 0;
	if (pReturn == 0 || *pReturn == 0)
		return ret;

	size_t strLength = strlen(pReturn);
	ret = (char *)(marshal_alloc (sizeof(char)* (strLength + 1)));
	memcpy(ret, pReturn, strLength);
	ret [strLength] = '\0';
	return ret;
}

LIBTEST_API char *
StringParameterInOut(/*[In,Out]*/ char *s, int index)
{
	// return a copy
	return build_return_string(s);
}

LIBTEST_API void
StringParameterRefOut(/*out*/ char **s, int index)
{
	char *pszTextutf8 = (char*)utf8Strings[index];
	size_t strLength = strlen(pszTextutf8);
	*s = (char *)(marshal_alloc (sizeof(char)* (strLength + 1)));
	memcpy(*s, pszTextutf8, strLength);
	(*s)[strLength] = '\0';
}

LIBTEST_API void
StringParameterRef(/*ref*/ char **s, int index)
{
	char *pszTextutf8 = (char*)utf8Strings[index];
	size_t strLength = strlen(pszTextutf8);
	// do byte-by-byte validation of the input string
	size_t szLen = strlen(*s);
	for (size_t i = 0; i < szLen; i++) {
		if ((*s)[i] != pszTextutf8[i]) {
			printf("[in] managed string does not match native string\n");
			abort ();
		}
	}

	if (*s) {
		marshal_free (*s);
	}

	// overwrite the original
	*s = (char *)(marshal_alloc (sizeof(char)* (strLength + 1)));
	memcpy(*s, pszTextutf8, strLength);
	(*s)[strLength] = '\0';
}

LIBTEST_API void
StringBuilderParameterInOut(/*[In,Out] StringBuilder*/ char *s, int index)
{
	// if the string is empty
	if (s == 0 || *s == 0)
		return;

	char *pszTextutf8 = (char*)utf8Strings[index];
	// do byte-by-byte validation of the input string
	size_t szLen = strlen(s);
	for (size_t i = 0; i < szLen; i++) {
		if (s[i] != pszTextutf8[i]) {
			printf("[in] managed string does not match native string\n");
			abort ();
		}
	}

	// modify the string in place
	size_t outLen = strlen(pszTextutf8);
	for (size_t i = 0; i < outLen; i++) {
		s[i] = pszTextutf8[i];
	}
	s[outLen] = '\0';
}

// out string builder
LIBTEST_API void
StringBuilderParameterOut(/*[Out] StringBuilder*/ char *s, int index)
{
	char *pszTextutf8 = (char*)utf8Strings[index];
	printf ("SBPO: Receiving %s\n", s);
	// modify the string in place
	size_t outLen = strlen(pszTextutf8);
	for (size_t i = 0; i < outLen; i++) {
		s[i] = pszTextutf8[i];
	}
	s[outLen] = '\0';
}

LIBTEST_API char *
StringParameterOut(/*[Out]*/ char *s, int index)
{
	// return a copy
	return build_return_string(s);
}

// Utf8 field
typedef struct FieldWithUtf8
{
	char *pFirst;
	int index;
} FieldWithUtf8;

// utf8 struct field
LIBTEST_API void
TestStructWithUtf8Field(struct FieldWithUtf8 fieldStruct)
{
	char *pszManagedutf8 = fieldStruct.pFirst;
	int stringIndex = fieldStruct.index;
	char *pszNative = 0;
	size_t outLen = 0;

	if (pszManagedutf8 == 0 || *pszManagedutf8 == 0)
		return;

	pszNative = (char*)utf8Strings[stringIndex];
	outLen = strlen(pszNative);
	// do byte-by-byte comparison
	for (size_t i = 0; i < outLen; i++) {
		if (pszNative[i] != pszManagedutf8[i]) {
			printf("Native and managed strings do not match.\n");
			abort ();
		}
	}
}

typedef void (* Callback2)(char *text, int index);

LIBTEST_API void
Utf8DelegateAsParameter(Callback2 managedCallback)
{
	for (int i = 0; i < NSTRINGS; ++i) {
		char *pszNative = 0;
		pszNative = (char*)utf8Strings[i];
		managedCallback(pszNative, i);
	}
}

LIBTEST_API char*
StringBuilderParameterReturn(int index)
{
	char *pszTextutf8 = (char*)utf8Strings[index];
	size_t strLength = strlen(pszTextutf8);
	char * ret = (char *)(marshal_alloc (sizeof(char)* (strLength + 1)));
	memcpy(ret, pszTextutf8, strLength);
	ret[strLength] = '\0';
	return ret;
}

LIBTEST_API int STDCALL
mono_test_marshal_pointer_array (int *arr[])
{
	int i;

	for (i = 0; i < 10; ++i) {
		if (*arr [i] != -1)
			return 1;
	}
	return 0;
}

#ifndef WIN32

typedef void (*NativeToManagedExceptionRethrowFunc) (void);

void *
mono_test_native_to_managed_exception_rethrow_thread (void *arg)
{
	NativeToManagedExceptionRethrowFunc func = (NativeToManagedExceptionRethrowFunc) arg;
	func ();
	return NULL;
}

LIBTEST_API void STDCALL
mono_test_native_to_managed_exception_rethrow (NativeToManagedExceptionRethrowFunc func)
{
	pthread_t t;
	pthread_create (&t, NULL, mono_test_native_to_managed_exception_rethrow_thread, (gpointer)func);
	pthread_join (t, NULL);
}
#endif

typedef void (*VoidVoidCallback) (void);
typedef void (*MonoFtnPtrEHCallback) (guint32 gchandle);

typedef void *MonoDomain;
typedef void *MonoAssembly;
typedef void *MonoImage;
typedef void *MonoClass;
typedef void *MonoMethod;
typedef void *MonoThread;

typedef long long MonoObject;
typedef MonoObject MonoException;
typedef int32_t mono_bool;

static int sym_inited = 0;
static void (*sym_mono_install_ftnptr_eh_callback) (MonoFtnPtrEHCallback);
static MonoObject* (*sym_mono_gchandle_get_target) (guint32 gchandle);
static guint32 (*sym_mono_gchandle_new) (MonoObject *, mono_bool pinned);
static void (*sym_mono_gchandle_free) (guint32 gchandle);
static void (*sym_mono_raise_exception) (MonoException *ex);
static void (*sym_mono_domain_unload) (gpointer);
static void (*sym_mono_threads_exit_gc_safe_region_unbalanced) (gpointer, gpointer *);
static void
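// Deliberately declared but never assigned (stays NULL):
// mono_test_MerpCrashNullFp below calls through this pointer to provoke a
// crash for the MERP crash-reporting tests.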
(*null_function_ptr) (void); static MonoDomain *(*sym_mono_get_root_domain) (void); static MonoDomain *(*sym_mono_domain_get)(void); static mono_bool (*sym_mono_domain_set)(MonoDomain *, mono_bool /*force */); static MonoAssembly *(*sym_mono_domain_assembly_open) (MonoDomain *, const char*); static MonoImage *(*sym_mono_assembly_get_image) (MonoAssembly *); static MonoClass *(*sym_mono_class_from_name)(MonoImage *, const char *, const char *); static MonoMethod *(*sym_mono_class_get_method_from_name)(MonoClass *, const char *, int /* arg_count */); static MonoThread *(*sym_mono_thread_attach)(MonoDomain *); static void (*sym_mono_thread_detach)(MonoThread *); static MonoObject *(*sym_mono_runtime_invoke) (MonoMethod *, void*, void**, MonoObject**); // SYM_LOOKUP(mono_runtime_invoke) // expands to // sym_mono_runtime_invoke = g_cast (lookup_mono_symbol ("mono_runtime_invoke")); // // (the g_cast is necessary for C++ builds) #define SYM_LOOKUP(name) do { \ sym_##name = g_cast (lookup_mono_symbol (#name)); \ } while (0) static void mono_test_init_symbols (void) { if (sym_inited) return; SYM_LOOKUP (mono_install_ftnptr_eh_callback); SYM_LOOKUP (mono_gchandle_get_target); SYM_LOOKUP (mono_gchandle_new); SYM_LOOKUP (mono_gchandle_free); SYM_LOOKUP (mono_raise_exception); SYM_LOOKUP (mono_domain_unload); SYM_LOOKUP (mono_threads_exit_gc_safe_region_unbalanced); SYM_LOOKUP (mono_get_root_domain); SYM_LOOKUP (mono_domain_get); SYM_LOOKUP (mono_domain_set); SYM_LOOKUP (mono_domain_assembly_open); SYM_LOOKUP (mono_assembly_get_image); SYM_LOOKUP (mono_class_from_name); SYM_LOOKUP (mono_class_get_method_from_name); SYM_LOOKUP (mono_thread_attach); SYM_LOOKUP (mono_thread_detach); SYM_LOOKUP (mono_runtime_invoke); sym_inited = 1; } #ifndef TARGET_WASM static jmp_buf test_jmp_buf; static guint32 test_gchandle; static void mono_test_longjmp_callback (guint32 gchandle) { test_gchandle = gchandle; longjmp (test_jmp_buf, 1); } LIBTEST_API void STDCALL mono_test_setjmp_and_call (VoidVoidCallback managedCallback, intptr_t *out_handle) { mono_test_init_symbols (); if (setjmp (test_jmp_buf) == 0) { *out_handle = 0; sym_mono_install_ftnptr_eh_callback (mono_test_longjmp_callback); managedCallback (); *out_handle = 0; /* Do not expect to return here */ } else { sym_mono_install_ftnptr_eh_callback (NULL); *out_handle = test_gchandle; } } #endif LIBTEST_API void STDCALL mono_test_marshal_bstr (void *ptr) { } static void (*mono_test_capture_throw_callback) (guint32 gchandle, guint32 *exception_out); static void mono_test_ftnptr_eh_callback (guint32 gchandle) { guint32 exception_handle = 0; g_assert (gchandle != 0); MonoObject *exc = sym_mono_gchandle_get_target (gchandle); sym_mono_gchandle_free (gchandle); guint32 handle = sym_mono_gchandle_new (exc, FALSE); mono_test_capture_throw_callback (handle, &exception_handle); sym_mono_gchandle_free (handle); g_assert (exception_handle != 0); exc = sym_mono_gchandle_get_target (exception_handle); sym_mono_gchandle_free (exception_handle); sym_mono_raise_exception (exc); g_error ("mono_raise_exception should not return"); } LIBTEST_API void STDCALL mono_test_setup_ftnptr_eh_callback (VoidVoidCallback managed_entry, void (*capture_throw_callback) (guint32, guint32 *)) { mono_test_init_symbols (); mono_test_capture_throw_callback = capture_throw_callback; sym_mono_install_ftnptr_eh_callback (mono_test_ftnptr_eh_callback); managed_entry (); } LIBTEST_API void STDCALL mono_test_cleanup_ftptr_eh_callback (void) { mono_test_init_symbols (); sym_mono_install_ftnptr_eh_callback 
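	// Passing NULL here uninstalls the callback registered by
	// mono_test_setup_ftnptr_eh_callback above.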
(NULL); } LIBTEST_API int STDCALL mono_test_cominterop_ccw_queryinterface (MonoComObject *pUnk) { void *pp; int hr = pUnk->vtbl->QueryInterface (pUnk, &IID_INotImplemented, &pp); // Return true if we can't get INotImplemented return pUnk == NULL && hr == S_OK; } typedef struct ccw_qi_shared_data { MonoComObject *pUnk; int i; } ccw_qi_shared_data; static void* ccw_qi_foreign_thread (void *arg) { ccw_qi_shared_data *shared = (ccw_qi_shared_data *)arg; void *pp; MonoComObject *pUnk = shared->pUnk; int hr = pUnk->vtbl->QueryInterface (pUnk, &IID_ITest, &pp); shared->i = (hr == S_OK) ? 0 : 43; return NULL; } LIBTEST_API int STDCALL mono_test_cominterop_ccw_queryinterface_foreign_thread (MonoComObject *pUnk) { #ifdef WIN32 return 0; #else pthread_t t; ccw_qi_shared_data *shared = (ccw_qi_shared_data *)malloc (sizeof (ccw_qi_shared_data)); if (!shared) abort (); shared->pUnk = pUnk; shared->i = 1; int res = pthread_create (&t, NULL, ccw_qi_foreign_thread, (void*)shared); g_assert (res == 0); pthread_join (t, NULL); int result = shared->i; free (shared); return result; #endif } static void* ccw_itest_foreign_thread (void *arg) { ccw_qi_shared_data *shared = (ccw_qi_shared_data *)arg; MonoComObject *pUnk = shared->pUnk; int hr = pUnk->vtbl->SByteIn (pUnk, -100); shared->i = (hr == S_OK) ? 0 : 12; return NULL; } LIBTEST_API int STDCALL mono_test_cominterop_ccw_itest_foreign_thread (MonoComObject *pUnk) { #ifdef WIN32 return 0; #else pthread_t t; ccw_qi_shared_data *shared = (ccw_qi_shared_data *)malloc (sizeof (ccw_qi_shared_data)); if (!shared) abort (); shared->pUnk = pUnk; shared->i = 1; int res = pthread_create (&t, NULL, ccw_itest_foreign_thread, (void*)shared); g_assert (res == 0); pthread_join (t, NULL); int result = shared->i; free (shared); return result; #endif } LIBTEST_API void STDCALL mono_test_MerpCrashSnprintf (void) { fprintf (stderr, "Before overwrite\n"); char buff [1] = { '\0' }; char overflow [1] = { 'a' }; // Not null-terminated g_snprintf (buff, sizeof(buff) * 10, "THISSHOULDOVERRUNTERRIBLY%s", overflow); g_snprintf ((char *) GINT_TO_POINTER(-1), sizeof(buff) * 10, "THISSHOULDOVERRUNTERRIBLY%s", overflow); } LIBTEST_API void STDCALL mono_test_MerpCrashDladdr (void) { #ifndef HOST_WIN32 dlopen (GINT_TO_POINTER(-1), -1); #endif } LIBTEST_API void STDCALL mono_test_MerpCrashMalloc (void) { gpointer x = g_malloc (sizeof(gpointer)); g_free (x); // Double free g_free (x); } LIBTEST_API void STDCALL mono_test_MerpCrashNullFp (void) { null_function_ptr (); } LIBTEST_API void STDCALL mono_test_MerpCrashDomainUnload (void) { mono_test_init_symbols (); sym_mono_domain_unload (GINT_TO_POINTER (-1)); } LIBTEST_API void STDCALL mono_test_MerpCrashUnbalancedGCSafe (void) { mono_test_init_symbols (); gpointer foo = GINT_TO_POINTER (-1); gpointer bar = GINT_TO_POINTER (-2); sym_mono_threads_exit_gc_safe_region_unbalanced (foo, &bar); } LIBTEST_API void STDCALL mono_test_MerpCrashUnhandledExceptionHook (void) { g_assert_not_reached (); } LIBTEST_API void STDCALL mono_test_MerpCrashSignalTerm (void) { raise (SIGTERM); } // for the rest of the signal tests, we use SIGTERM as a fallback LIBTEST_API void STDCALL mono_test_MerpCrashSignalAbrt (void) { #if defined (SIGABRT) raise (SIGABRT); #else raise (SIGTERM); #endif } LIBTEST_API void STDCALL mono_test_MerpCrashSignalFpe (void) { #if defined (SIGFPE) raise (SIGFPE); #else raise (SIGTERM); #endif } LIBTEST_API void STDCALL mono_test_MerpCrashSignalBus (void) { #if defined (SIGBUS) raise (SIGBUS); #else raise (SIGTERM); #endif } LIBTEST_API void 
LIBTEST_API void STDCALL
mono_test_MerpCrashSnprintf (void)
{
	fprintf (stderr, "Before overwrite\n");

	char buff [1] = { '\0' };
	char overflow [1] = { 'a' }; // Not null-terminated
	g_snprintf (buff, sizeof(buff) * 10, "THISSHOULDOVERRUNTERRIBLY%s", overflow);
	g_snprintf ((char *) GINT_TO_POINTER(-1), sizeof(buff) * 10, "THISSHOULDOVERRUNTERRIBLY%s", overflow);
}

LIBTEST_API void STDCALL
mono_test_MerpCrashDladdr (void)
{
#ifndef HOST_WIN32
	dlopen (GINT_TO_POINTER(-1), -1);
#endif
}

LIBTEST_API void STDCALL
mono_test_MerpCrashMalloc (void)
{
	gpointer x = g_malloc (sizeof(gpointer));
	g_free (x);

	// Double free
	g_free (x);
}

LIBTEST_API void STDCALL
mono_test_MerpCrashNullFp (void)
{
	null_function_ptr ();
}

LIBTEST_API void STDCALL
mono_test_MerpCrashDomainUnload (void)
{
	mono_test_init_symbols ();
	sym_mono_domain_unload (GINT_TO_POINTER (-1));
}

LIBTEST_API void STDCALL
mono_test_MerpCrashUnbalancedGCSafe (void)
{
	mono_test_init_symbols ();
	gpointer foo = GINT_TO_POINTER (-1);
	gpointer bar = GINT_TO_POINTER (-2);
	sym_mono_threads_exit_gc_safe_region_unbalanced (foo, &bar);
}

LIBTEST_API void STDCALL
mono_test_MerpCrashUnhandledExceptionHook (void)
{
	g_assert_not_reached ();
}

LIBTEST_API void STDCALL
mono_test_MerpCrashSignalTerm (void)
{
	raise (SIGTERM);
}

// for the rest of the signal tests, we use SIGTERM as a fallback

LIBTEST_API void STDCALL
mono_test_MerpCrashSignalAbrt (void)
{
#if defined (SIGABRT)
	raise (SIGABRT);
#else
	raise (SIGTERM);
#endif
}

LIBTEST_API void STDCALL
mono_test_MerpCrashSignalFpe (void)
{
#if defined (SIGFPE)
	raise (SIGFPE);
#else
	raise (SIGTERM);
#endif
}

LIBTEST_API void STDCALL
mono_test_MerpCrashSignalBus (void)
{
#if defined (SIGBUS)
	raise (SIGBUS);
#else
	raise (SIGTERM);
#endif
}

LIBTEST_API void STDCALL
mono_test_MerpCrashSignalSegv (void)
{
#if defined (SIGSEGV)
	raise (SIGSEGV);
#else
	raise (SIGTERM);
#endif
}

LIBTEST_API void STDCALL
mono_test_MerpCrashSignalIll (void)
{
#if defined (SIGILL)
	raise (SIGILL);
#else
	raise (SIGTERM);
#endif
}

typedef struct _TestAutoDual _TestAutoDual;

typedef struct
{
	int (STDCALL *QueryInterface)(_TestAutoDual *iface, REFIID iid, gpointer *out);
	int (STDCALL *AddRef)(_TestAutoDual *iface);
	int (STDCALL *Release)(_TestAutoDual *iface);
	int (STDCALL *GetTypeInfoCount)(_TestAutoDual *iface, unsigned int *count);
	int (STDCALL *GetTypeInfo)(_TestAutoDual *iface, unsigned int index, unsigned int lcid, gpointer *out);
	int (STDCALL *GetIDsOfNames)(_TestAutoDual *iface, REFIID iid, gpointer names, unsigned int count, unsigned int lcid, gpointer ids);
	int (STDCALL *Invoke)(_TestAutoDual *iface, unsigned int dispid, REFIID iid, unsigned int lcid, unsigned short flags, gpointer params, gpointer result, gpointer excepinfo, gpointer err_arg);
	int (STDCALL *ToString)(_TestAutoDual *iface, gpointer string);
	int (STDCALL *Equals)(_TestAutoDual *iface, VARIANT other, short *retval);
	int (STDCALL *GetHashCode)(_TestAutoDual *iface, int *retval);
	int (STDCALL *GetType)(_TestAutoDual *iface, gpointer retval);
	int (STDCALL *parent_method_virtual)(_TestAutoDual *iface, int *retval);
	int (STDCALL *get_parent_property)(_TestAutoDual *iface, int *retval);
	int (STDCALL *parent_method_override)(_TestAutoDual *iface, int *retval);
	int (STDCALL *parent_iface_method)(_TestAutoDual *iface, int *retval);
	int (STDCALL *parent_method)(_TestAutoDual *iface, int *retval);
	int (STDCALL *child_method_virtual)(_TestAutoDual *iface, int *retval);
	int (STDCALL *iface1_method)(_TestAutoDual *iface, int *retval);
	int (STDCALL *iface1_parent_method)(_TestAutoDual *iface, int *retval);
	int (STDCALL *iface2_method)(_TestAutoDual *iface, int *retval);
	int (STDCALL *child_method)(_TestAutoDual *iface, int *retval);
} _TestAutoDualVtbl;

struct _TestAutoDual
{
	const _TestAutoDualVtbl *lpVtbl;
};

LIBTEST_API int STDCALL
mono_test_ccw_class_type_auto_dual (_TestAutoDual *iface)
{
	int hr, retval;

	hr = iface->lpVtbl->parent_method_virtual(iface, &retval);
	if (hr != 0)
		return 1;
	if (retval != 101)
		return 2;

	hr = iface->lpVtbl->get_parent_property(iface, &retval);
	if (hr != 0)
		return 3;
	if (retval != 102)
		return 4;

	hr = iface->lpVtbl->parent_method_override(iface, &retval);
	if (hr != 0)
		return 5;
	if (retval != 203)
		return 6;

	hr = iface->lpVtbl->parent_method(iface, &retval);
	if (hr != 0)
		return 7;
	if (retval != 104)
		return 8;

	hr = iface->lpVtbl->child_method_virtual(iface, &retval);
	if (hr != 0)
		return 11;
	if (retval != 106)
		return 12;

	hr = iface->lpVtbl->iface1_method(iface, &retval);
	if (hr != 0)
		return 13;
	if (retval != 107)
		return 14;

	hr = iface->lpVtbl->iface1_parent_method(iface, &retval);
	if (hr != 0)
		return 15;
	if (retval != 108)
		return 16;

	hr = iface->lpVtbl->iface2_method(iface, &retval);
	if (hr != 0)
		return 17;
	if (retval != 109)
		return 18;

	hr = iface->lpVtbl->child_method(iface, &retval);
	if (hr != 0)
		return 19;
	if (retval != 110)
		return 20;

	hr = iface->lpVtbl->parent_iface_method(iface, &retval);
	if (hr != 0)
		return 23;
	if (retval != 112)
		return 24;

	return 0;
}
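/*
 * Note on the hand-written vtbl structs used by these CCW tests: the first
 * three slots mirror IUnknown, the next four mirror IDispatch, and the
 * exposed managed members follow in vtable order, so every call goes through
 * the table explicitly. Sketch only, not one of the real checks:
 *
 *   int retval;
 *   int hr = iface->lpVtbl->child_method (iface, &retval);
 */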
static const GUID IID_IBanana = {0x12345678, 0, 0, {0, 0, 0, 0, 0, 0, 0, 2}};

typedef struct IBanana IBanana;

typedef struct
{
	int (STDCALL *QueryInterface)(IBanana *iface, REFIID iid, gpointer *out);
	int (STDCALL *AddRef)(IBanana *iface);
	int (STDCALL *Release)(IBanana *iface);
	int (STDCALL *GetTypeInfoCount)(IBanana *iface, unsigned int *count);
	int (STDCALL *GetTypeInfo)(IBanana *iface, unsigned int index, unsigned int lcid, gpointer *out);
	int (STDCALL *GetIDsOfNames)(IBanana *iface, REFIID iid, gpointer names, unsigned int count, unsigned int lcid, gpointer ids);
	int (STDCALL *Invoke)(IBanana *iface, unsigned int dispid, REFIID iid, unsigned int lcid, unsigned short flags, gpointer params, gpointer result, gpointer excepinfo, gpointer err_arg);
	int (STDCALL *iface1_method)(IBanana *iface, int *retval);
} IBananaVtbl;

struct IBanana
{
	const IBananaVtbl *lpVtbl;
};

LIBTEST_API int STDCALL
mono_test_ccw_class_type_none (IBanana *iface)
{
	int hr, retval;

	hr = iface->lpVtbl->iface1_method(iface, &retval);
	if (hr != 0)
		return 1;
	if (retval != 3)
		return 2;
	return 0;
}

LIBTEST_API int STDCALL
mono_test_ccw_class_type_auto_dispatch (IDispatch *disp)
{
	IBanana *banana;
	int hr, retval;

#ifdef __cplusplus
	hr = disp->QueryInterface (IID_IBanana, (void **)&banana);
#else
	hr = disp->lpVtbl->QueryInterface (disp, &IID_IBanana, (void **)&banana);
#endif
	if (hr != 0)
		return 1;

	hr = banana->lpVtbl->iface1_method(banana, &retval);
	if (hr != 0)
		return 2;
	if (retval != 3)
		return 3;

	banana->lpVtbl->Release(banana);

	return 0;
}

static guint8 static_arr[] = { 1, 2, 3, 4 };

LIBTEST_API guint8*
mono_test_marshal_return_array (void)
{
	return static_arr;
}

struct invoke_names {
	char *assm_name;
	char *name_space;
	char *name;
	char *meth_name;
};

static struct invoke_names *
make_invoke_names (const char *assm_name, const char *name_space, const char *name, const char *meth_name)
{
	struct invoke_names *names = (struct invoke_names*) malloc (sizeof (struct invoke_names));
	names->assm_name = strdup (assm_name);
	names->name_space = strdup (name_space);
	names->name = strdup (name);
	names->meth_name = strdup (meth_name);
	return names;
}

static void
destroy_invoke_names (struct invoke_names *n)
{
	free (n->assm_name);
	free (n->name_space);
	free (n->name);
	free (n->meth_name);
	free (n);
}

static void
test_invoke_by_name (struct invoke_names *names)
{
	mono_test_init_symbols ();

	MonoDomain *domain = sym_mono_domain_get ();
	MonoThread *thread = NULL;
	if (!domain) {
		thread = sym_mono_thread_attach (sym_mono_get_root_domain ());
	}
	domain = sym_mono_domain_get ();
	g_assert (domain);

	MonoAssembly *assm = sym_mono_domain_assembly_open (domain, names->assm_name);
	g_assert (assm);
	MonoImage *image = sym_mono_assembly_get_image (assm);
	MonoClass *klass = sym_mono_class_from_name (image, names->name_space, names->name);
	g_assert (klass);
	/* meth_name should be a static method that takes no arguments */
	MonoMethod *method = sym_mono_class_get_method_from_name (klass, names->meth_name, -1);
	g_assert (method);

	MonoObject *args[] = {NULL, };
	sym_mono_runtime_invoke (method, NULL, (void**)args, NULL);

	if (thread)
		sym_mono_thread_detach (thread);
}
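/*
 * test_invoke_by_name expects the managed side to expose a static,
 * parameterless method. Hypothetical usage (the assembly, namespace, type
 * and method names below are placeholders, not taken from the real tests):
 *
 *   struct invoke_names *names =
 *       make_invoke_names ("tests.dll", "MyNamespace", "MyClass", "MyMethod");
 *   test_invoke_by_name (names);
 *   destroy_invoke_names (names);
 */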
#ifndef HOST_WIN32
static void*
invoke_foreign_thread (void* user_data)
{
	struct invoke_names *names = (struct invoke_names*)user_data;
	/*
	 * Run a couple of times to check that attach/detach multiple
	 * times from the same thread leaves it in a reasonable coop
	 * thread state.
	 */
	for (int i = 0; i < 5; ++i) {
		test_invoke_by_name (names);
		sleep (2);
	}
	destroy_invoke_names (names);
	return NULL;
}

static void*
invoke_foreign_delegate (void *user_data)
{
	VoidVoidCallback del = (VoidVoidCallback)user_data;
	for (int i = 0; i < 5; ++i) {
		del ();
		sleep (2);
	}
	return NULL;
}
#endif

LIBTEST_API mono_bool STDCALL
mono_test_attach_invoke_foreign_thread (const char *assm_name, const char *name_space, const char *name, const char *meth_name, VoidVoidCallback del)
{
#ifndef HOST_WIN32
	if (!del) {
		struct invoke_names *names = make_invoke_names (assm_name, name_space, name, meth_name);
		pthread_t t;
		int res = pthread_create (&t, NULL, invoke_foreign_thread, (void*)names);
		g_assert (res == 0);
		pthread_join (t, NULL);
		return 0;
	} else {
		pthread_t t;
		int res = pthread_create (&t, NULL, invoke_foreign_delegate, del);
		g_assert (res == 0);
		pthread_join (t, NULL);
		return 0;
	}
#else
	// TODO: Win32 version of this test
	return 1;
#endif
}

#ifndef HOST_WIN32
struct names_and_mutex {
	/* if del is NULL, use names, otherwise just call del */
	VoidVoidCallback del;
	struct invoke_names *names;
	/* mutex to coordinate test and foreign thread */
	pthread_mutex_t coord_mutex;
	pthread_cond_t coord_cond;
	/* mutex to block the foreign thread */
	pthread_mutex_t deadlock_mutex;
};

static void*
invoke_block_foreign_thread (void *user_data)
{
	// This thread calls into the runtime and then blocks. It should not
	// prevent the runtime from shutting down.
	struct names_and_mutex *nm = (struct names_and_mutex *)user_data;

	if (!nm->del) {
		test_invoke_by_name (nm->names);
	} else {
		nm->del ();
	}

	pthread_mutex_lock (&nm->coord_mutex);
	/* signal the test thread that we called the runtime */
	pthread_cond_signal (&nm->coord_cond);
	pthread_mutex_unlock (&nm->coord_mutex);

	pthread_mutex_lock (&nm->deadlock_mutex); // blocks forever
	g_assert_not_reached ();
}
#endif
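/*
 * Handshake used by the blocking-thread test below (sketch of the intended
 * sequencing, derived from the code above): the foreign thread signals
 * coord_cond once it has called into the runtime, then parks forever on
 * deadlock_mutex, which the test thread locked up front.
 *
 *   test thread                          foreign thread
 *   lock deadlock_mutex
 *   create thread ------------------->   call into the runtime
 *   wait on coord_cond  <-------------   signal coord_cond
 *   detach thread, return                lock deadlock_mutex (blocks forever)
 */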
LIBTEST_API mono_bool STDCALL
mono_test_attach_invoke_block_foreign_thread (const char *assm_name, const char *name_space, const char *name, const char *meth_name, VoidVoidCallback del)
{
#ifndef HOST_WIN32
	/* cast needed so the file also compiles as C++ */
	struct names_and_mutex *nm = (struct names_and_mutex *)malloc (sizeof (struct names_and_mutex));
	nm->del = del;
	if (!del) {
		struct invoke_names *names = make_invoke_names (assm_name, name_space, name, meth_name);
		nm->names = names;
	} else {
		nm->names = NULL;
	}
	pthread_mutex_init (&nm->coord_mutex, NULL);
	pthread_cond_init (&nm->coord_cond, NULL);
	pthread_mutex_init (&nm->deadlock_mutex, NULL);
	pthread_mutex_lock (&nm->deadlock_mutex); // lock the mutex and never unlock it.
	pthread_t t;
	int res = pthread_create (&t, NULL, invoke_block_foreign_thread, (void*)nm);
	g_assert (res == 0);
	/* wait for the foreign thread to finish calling the runtime before
	 * detaching it and returning */
	pthread_mutex_lock (&nm->coord_mutex);
	pthread_cond_wait (&nm->coord_cond, &nm->coord_mutex);
	pthread_mutex_unlock (&nm->coord_mutex);
	pthread_detach (t);
	return 0;
#else
	// TODO: Win32 version of this test
	return 1;
#endif
}

static const GUID IID_IDrupe = {0x9f001e6b, 0xa244, 0x3911, {0x88,0xdb, 0xbb,0x2b,0x6d,0x58,0x43,0xaa}};

#ifndef HOST_WIN32
typedef struct IUnknown IUnknown;

typedef struct
{
	int (STDCALL *QueryInterface)(IUnknown *iface, REFIID iid, gpointer *out);
	int (STDCALL *AddRef)(IUnknown *iface);
	int (STDCALL *Release)(IUnknown *iface);
} IUnknownVtbl;

struct IUnknown
{
	const IUnknownVtbl *lpVtbl;
};
#endif

LIBTEST_API int STDCALL
mono_test_ccw_query_interface (IUnknown *iface)
{
	IUnknown *drupe;
	int hr;
#ifdef __cplusplus
	hr = iface->QueryInterface (IID_IDrupe, (void **)&drupe);
#else
	hr = iface->lpVtbl->QueryInterface (iface, &IID_IDrupe, (void **)&drupe);
#endif
	if (hr != 0)
		return 1;
#ifdef __cplusplus
	drupe->Release();
#else
	drupe->lpVtbl->Release(drupe);
#endif
	return 0;
}

#ifdef __cplusplus
} // extern C
#endif
-1
dotnet/runtime
66,268
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…
…nvert them to TO_I4/TO_I8 in the front end.
vargaz
2022-03-06T20:28:39Z
2022-03-08T15:18:15Z
f396c3496a905451bcb4649c44c6d2e627690d05
3959a4a9beeb292816008309e12b6d7150c05235
[mono][jit] Remove OP_FCONV_TO_I/OP_RCONV_TO_I from the back ends, co…. …nvert them to TO_I4/TO_I8 in the front end.
./src/mono/sample/wasm/browser-webpack/README.md
## Sample for packaging dotnet.js via WebPack

```
dotnet build /p:TargetOS=Browser /p:TargetArchitecture=wasm /p:Configuration=Debug /t:RunSample
```
## Sample for packaging dotnet.js via WebPack

```
dotnet build /p:TargetOS=Browser /p:TargetArchitecture=wasm /p:Configuration=Debug /t:RunSample
```
-1